1 /*
2 * core.c - DesignWare HS OTG Controller common routines
3 *
4 * Copyright (C) 2004-2013 Synopsys, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The names of the above-listed copyright holders may not be used
16 * to endorse or promote products derived from this software without
17 * specific prior written permission.
18 *
19 * ALTERNATIVELY, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") as published by the Free Software
21 * Foundation; either version 2 of the License, or (at your option) any
22 * later version.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 /*
38 * The Core code provides basic services for accessing and managing the
39 * DWC_otg hardware. These services are used by both the Host Controller
40 * Driver and the Peripheral Controller Driver.
41 */
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/moduleparam.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/delay.h>
49 #include <linux/io.h>
50 #include <linux/slab.h>
51 #include <linux/usb.h>
52
53 #include <linux/usb/hcd.h>
54 #include <linux/usb/ch11.h>
55
56 #include "core.h"
57 #include "hcd.h"
58
59 /**
60 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
61 * used in both device and host modes
62 *
63 * @hsotg: Programming view of the DWC_otg controller
64 */
65 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
66 {
67 u32 intmsk;
68
69 /* Clear any pending OTG Interrupts */
70 writel(0xffffffff, hsotg->regs + GOTGINT);
71
72 /* Clear any pending interrupts */
73 writel(0xffffffff, hsotg->regs + GINTSTS);
74
75 /* Enable the interrupts in the GINTMSK */
76 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
77
78 if (hsotg->core_params->dma_enable <= 0)
79 intmsk |= GINTSTS_RXFLVL;
80
81 intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
82 GINTSTS_SESSREQINT;
83
84 writel(intmsk, hsotg->regs + GINTMSK);
85 }
86
87 /*
88 * Initializes the FSLSPClkSel field of the HCFG register depending on the
89 * PHY type
90 */
91 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
92 {
93 u32 hcfg, val;
94
95 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
96 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
97 hsotg->core_params->ulpi_fs_ls > 0) ||
98 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
99 /* Full speed PHY */
100 val = HCFG_FSLSPCLKSEL_48_MHZ;
101 } else {
102 /* High speed PHY running at full speed or high speed */
103 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
104 }
105
106 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
107 hcfg = readl(hsotg->regs + HCFG);
108 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
109 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
110 writel(hcfg, hsotg->regs + HCFG);
111 }
112
113 /*
114 * Do a soft reset of the core. Be careful with this because it
115 * resets all the internal state machines of the core.
116 */
117 static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
118 {
119 u32 greset;
120 int count = 0;
121 u32 gusbcfg;
122
123 dev_vdbg(hsotg->dev, "%s()\n", __func__);
124
125 /* Wait for AHB master IDLE state */
126 do {
127 usleep_range(20000, 40000);
128 greset = readl(hsotg->regs + GRSTCTL);
129 if (++count > 50) {
130 dev_warn(hsotg->dev,
131 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
132 __func__, greset);
133 return -EBUSY;
134 }
135 } while (!(greset & GRSTCTL_AHBIDLE));
136
137 /* Core Soft Reset */
138 count = 0;
139 greset |= GRSTCTL_CSFTRST;
140 writel(greset, hsotg->regs + GRSTCTL);
141 do {
142 usleep_range(20000, 40000);
143 greset = readl(hsotg->regs + GRSTCTL);
144 if (++count > 50) {
145 dev_warn(hsotg->dev,
146 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
147 __func__, greset);
148 return -EBUSY;
149 }
150 } while (greset & GRSTCTL_CSFTRST);
151
152 if (hsotg->dr_mode == USB_DR_MODE_HOST) {
153 gusbcfg = readl(hsotg->regs + GUSBCFG);
154 gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
155 gusbcfg |= GUSBCFG_FORCEHOSTMODE;
156 writel(gusbcfg, hsotg->regs + GUSBCFG);
157 } else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
158 gusbcfg = readl(hsotg->regs + GUSBCFG);
159 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
160 gusbcfg |= GUSBCFG_FORCEDEVMODE;
161 writel(gusbcfg, hsotg->regs + GUSBCFG);
162 } else if (hsotg->dr_mode == USB_DR_MODE_OTG) {
163 gusbcfg = readl(hsotg->regs + GUSBCFG);
164 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
165 gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
166 writel(gusbcfg, hsotg->regs + GUSBCFG);
167 }
168
169 /*
170 * NOTE: This long sleep is _very_ important, otherwise the core will
171 * not stay in host mode after a connector ID change!
172 */
173 usleep_range(150000, 200000);
174
175 return 0;
176 }
177
178 static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
179 {
180 u32 usbcfg, i2cctl;
181 int retval = 0;
182
183 /*
184 * core_init() is now called on every switch so only call the
185 * following for the first time through
186 */
187 if (select_phy) {
188 dev_dbg(hsotg->dev, "FS PHY selected\n");
189 usbcfg = readl(hsotg->regs + GUSBCFG);
190 usbcfg |= GUSBCFG_PHYSEL;
191 writel(usbcfg, hsotg->regs + GUSBCFG);
192
193 /* Reset after a PHY select */
194 retval = dwc2_core_reset(hsotg);
195 if (retval) {
196 dev_err(hsotg->dev, "%s() Reset failed, aborting",
197 __func__);
198 return retval;
199 }
200 }
201
202 /*
203 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
204 * do this on HNP Dev/Host mode switches (done in dev_init and
205 * host_init).
206 */
207 if (dwc2_is_host_mode(hsotg))
208 dwc2_init_fs_ls_pclk_sel(hsotg);
209
210 if (hsotg->core_params->i2c_enable > 0) {
211 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
212
213 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
214 usbcfg = readl(hsotg->regs + GUSBCFG);
215 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
216 writel(usbcfg, hsotg->regs + GUSBCFG);
217
218 /* Program GI2CCTL.I2CEn */
219 i2cctl = readl(hsotg->regs + GI2CCTL);
220 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
221 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
222 i2cctl &= ~GI2CCTL_I2CEN;
223 writel(i2cctl, hsotg->regs + GI2CCTL);
224 i2cctl |= GI2CCTL_I2CEN;
225 writel(i2cctl, hsotg->regs + GI2CCTL);
226 }
227
228 return retval;
229 }
230
231 static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
232 {
233 u32 usbcfg;
234 int retval = 0;
235
236 if (!select_phy)
237 return 0;
238
239 usbcfg = readl(hsotg->regs + GUSBCFG);
240
241 /*
242 * HS PHY parameters. These parameters are preserved during soft reset
243 * so only program the first time. Do a soft reset immediately after
244 * setting phyif.
245 */
246 switch (hsotg->core_params->phy_type) {
247 case DWC2_PHY_TYPE_PARAM_ULPI:
248 /* ULPI interface */
249 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
250 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
251 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
252 if (hsotg->core_params->phy_ulpi_ddr > 0)
253 usbcfg |= GUSBCFG_DDRSEL;
254 break;
255 case DWC2_PHY_TYPE_PARAM_UTMI:
256 /* UTMI+ interface */
257 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
258 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
259 if (hsotg->core_params->phy_utmi_width == 16)
260 usbcfg |= GUSBCFG_PHYIF16;
261 break;
262 default:
263 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
264 break;
265 }
266
267 writel(usbcfg, hsotg->regs + GUSBCFG);
268
269 /* Reset after setting the PHY parameters */
270 retval = dwc2_core_reset(hsotg);
271 if (retval) {
272 dev_err(hsotg->dev, "%s() Reset failed, aborting",
273 __func__);
274 return retval;
275 }
276
277 return retval;
278 }
279
280 static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
281 {
282 u32 usbcfg;
283 int retval = 0;
284
285 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
286 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
287 /* If FS mode with FS PHY */
288 retval = dwc2_fs_phy_init(hsotg, select_phy);
289 if (retval)
290 return retval;
291 } else {
292 /* High speed PHY */
293 retval = dwc2_hs_phy_init(hsotg, select_phy);
294 if (retval)
295 return retval;
296 }
297
298 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
299 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
300 hsotg->core_params->ulpi_fs_ls > 0) {
301 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
302 usbcfg = readl(hsotg->regs + GUSBCFG);
303 usbcfg |= GUSBCFG_ULPI_FS_LS;
304 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
305 writel(usbcfg, hsotg->regs + GUSBCFG);
306 } else {
307 usbcfg = readl(hsotg->regs + GUSBCFG);
308 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
309 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
310 writel(usbcfg, hsotg->regs + GUSBCFG);
311 }
312
313 return retval;
314 }
315
316 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
317 {
318 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
319
320 switch (hsotg->hw_params.arch) {
321 case GHWCFG2_EXT_DMA_ARCH:
322 dev_err(hsotg->dev, "External DMA Mode not supported\n");
323 return -EINVAL;
324
325 case GHWCFG2_INT_DMA_ARCH:
326 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
327 if (hsotg->core_params->ahbcfg != -1) {
328 ahbcfg &= GAHBCFG_CTRL_MASK;
329 ahbcfg |= hsotg->core_params->ahbcfg &
330 ~GAHBCFG_CTRL_MASK;
331 }
332 break;
333
334 case GHWCFG2_SLAVE_ONLY_ARCH:
335 default:
336 dev_dbg(hsotg->dev, "Slave Only Mode\n");
337 break;
338 }
339
340 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
341 hsotg->core_params->dma_enable,
342 hsotg->core_params->dma_desc_enable);
343
344 if (hsotg->core_params->dma_enable > 0) {
345 if (hsotg->core_params->dma_desc_enable > 0)
346 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
347 else
348 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
349 } else {
350 dev_dbg(hsotg->dev, "Using Slave mode\n");
351 hsotg->core_params->dma_desc_enable = 0;
352 }
353
354 if (hsotg->core_params->dma_enable > 0)
355 ahbcfg |= GAHBCFG_DMA_EN;
356
357 writel(ahbcfg, hsotg->regs + GAHBCFG);
358
359 return 0;
360 }
361
362 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
363 {
364 u32 usbcfg;
365
366 usbcfg = readl(hsotg->regs + GUSBCFG);
367 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
368
369 switch (hsotg->hw_params.op_mode) {
370 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
371 if (hsotg->core_params->otg_cap ==
372 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
373 usbcfg |= GUSBCFG_HNPCAP;
374 if (hsotg->core_params->otg_cap !=
375 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
376 usbcfg |= GUSBCFG_SRPCAP;
377 break;
378
379 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
380 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
381 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
382 if (hsotg->core_params->otg_cap !=
383 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
384 usbcfg |= GUSBCFG_SRPCAP;
385 break;
386
387 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
388 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
389 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
390 default:
391 break;
392 }
393
394 writel(usbcfg, hsotg->regs + GUSBCFG);
395 }
396
397 /**
398 * dwc2_core_init() - Initializes the DWC_otg controller registers and
399 * prepares the core for device mode or host mode operation
400 *
401 * @hsotg: Programming view of the DWC_otg controller
402 * @select_phy: If true then also set the Phy type
403 * @irq: If >= 0, the irq to register
404 */
405 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
406 {
407 u32 usbcfg, otgctl;
408 int retval;
409
410 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
411
412 usbcfg = readl(hsotg->regs + GUSBCFG);
413
414 /* Set ULPI External VBUS bit if needed */
415 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
416 if (hsotg->core_params->phy_ulpi_ext_vbus ==
417 DWC2_PHY_ULPI_EXTERNAL_VBUS)
418 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
419
420 /* Set external TS Dline pulsing bit if needed */
421 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
422 if (hsotg->core_params->ts_dline > 0)
423 usbcfg |= GUSBCFG_TERMSELDLPULSE;
424
425 writel(usbcfg, hsotg->regs + GUSBCFG);
426
427 /* Reset the Controller */
428 retval = dwc2_core_reset(hsotg);
429 if (retval) {
430 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
431 __func__);
432 return retval;
433 }
434
435 /*
436 * This needs to happen in FS mode before any other programming occurs
437 */
438 retval = dwc2_phy_init(hsotg, select_phy);
439 if (retval)
440 return retval;
441
442 /* Program the GAHBCFG Register */
443 retval = dwc2_gahbcfg_init(hsotg);
444 if (retval)
445 return retval;
446
447 /* Program the GUSBCFG register */
448 dwc2_gusbcfg_init(hsotg);
449
450 /* Program the GOTGCTL register */
451 otgctl = readl(hsotg->regs + GOTGCTL);
452 otgctl &= ~GOTGCTL_OTGVER;
453 if (hsotg->core_params->otg_ver > 0)
454 otgctl |= GOTGCTL_OTGVER;
455 writel(otgctl, hsotg->regs + GOTGCTL);
456 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
457
458 /* Clear the SRP success bit for FS-I2c */
459 hsotg->srp_success = 0;
460
461 if (irq >= 0) {
462 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
463 irq);
464 retval = devm_request_irq(hsotg->dev, irq,
465 dwc2_handle_common_intr, IRQF_SHARED,
466 dev_name(hsotg->dev), hsotg);
467 if (retval)
468 return retval;
469 }
470
471 /* Enable common interrupts */
472 dwc2_enable_common_interrupts(hsotg);
473
474 /*
475 * Do device or host initialization based on mode during PCD and
476 * HCD initialization
477 */
478 if (dwc2_is_host_mode(hsotg)) {
479 dev_dbg(hsotg->dev, "Host Mode\n");
480 hsotg->op_state = OTG_STATE_A_HOST;
481 } else {
482 dev_dbg(hsotg->dev, "Device Mode\n");
483 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
484 }
485
486 return 0;
487 }
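/*
 * Illustrative bring-up sketch (example only, not called from this file):
 * a bus glue driver's probe path would typically initialize the core and
 * then, when operating as a host, finish the host-specific setup. The
 * function name dwc2_example_bringup and the local variable names are
 * assumptions made for this example.
 */
#if 0	/* example sketch only */
static int dwc2_example_bringup(struct dwc2_hsotg *hsotg, int irq)
{
	int retval;

	/* Reset the core, select the PHY and register the common IRQ */
	retval = dwc2_core_init(hsotg, true, irq);
	if (retval)
		return retval;

	/* Host mode: restart the PHY clock, set up FIFOs, power the port */
	if (dwc2_is_host_mode(hsotg))
		dwc2_core_host_init(hsotg);

	return 0;
}
#endif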
488
489 /**
490 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
491 *
492 * @hsotg: Programming view of DWC_otg controller
493 */
494 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
495 {
496 u32 intmsk;
497
498 dev_dbg(hsotg->dev, "%s()\n", __func__);
499
500 /* Disable all interrupts */
501 writel(0, hsotg->regs + GINTMSK);
502 writel(0, hsotg->regs + HAINTMSK);
503
504 /* Enable the common interrupts */
505 dwc2_enable_common_interrupts(hsotg);
506
507 /* Enable host mode interrupts without disturbing common interrupts */
508 intmsk = readl(hsotg->regs + GINTMSK);
509 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
510 writel(intmsk, hsotg->regs + GINTMSK);
511 }
512
513 /**
514 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
515 *
516 * @hsotg: Programming view of DWC_otg controller
517 */
518 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
519 {
520 u32 intmsk = readl(hsotg->regs + GINTMSK);
521
522 /* Disable host mode interrupts without disturbing common interrupts */
523 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
524 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
525 writel(intmsk, hsotg->regs + GINTMSK);
526 }
527
528 /*
529 * dwc2_calculate_dynamic_fifo() - Calculates the default FIFO sizes
530 * for systems that have a total FIFO depth smaller than the default
531 * RX + TX FIFO size.
532 *
533 * @hsotg: Programming view of DWC_otg controller
534 */
535 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
536 {
537 struct dwc2_core_params *params = hsotg->core_params;
538 struct dwc2_hw_params *hw = &hsotg->hw_params;
539 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
540
541 total_fifo_size = hw->total_fifo_size;
542 rxfsiz = params->host_rx_fifo_size;
543 nptxfsiz = params->host_nperio_tx_fifo_size;
544 ptxfsiz = params->host_perio_tx_fifo_size;
545
546 /*
547 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
548 * allocation with support for high bandwidth endpoints. Synopsys
549 * defines MPS (Max Packet Size) as 1024 for a periodic EP and 512 for a
550 * non-periodic EP.
551 */
552 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
553 /*
554 * For Buffer DMA mode/Scatter Gather DMA mode
555 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
556 * with n = number of host channels.
557 * 2 * ((1024/4) + 2) = 516
558 */
559 rxfsiz = 516 + hw->host_channels;
560
561 /*
562 * min non-periodic tx fifo depth
563 * 2 * (largest non-periodic USB packet used / 4)
564 * 2 * (512/4) = 256
565 */
566 nptxfsiz = 256;
567
568 /*
569 * min periodic tx fifo depth
570 * (largest packet size*MC)/4
571 * (1024 * 3)/4 = 768
572 */
573 ptxfsiz = 768;
574
575 params->host_rx_fifo_size = rxfsiz;
576 params->host_nperio_tx_fifo_size = nptxfsiz;
577 params->host_perio_tx_fifo_size = ptxfsiz;
578 }
579
580 /*
581 * If the summation of RX, NPTX and PTX fifo sizes is still
582 * bigger than the total_fifo_size, then we have a problem.
583 *
584 * We won't be able to allocate as many endpoints. Right now,
585 * we're just printing an error message, but ideally this FIFO
586 * allocation algorithm would be improved in the future.
587 *
588 * FIXME improve this FIFO allocation algorithm.
589 */
590 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
591 dev_err(hsotg->dev, "invalid fifo sizes\n");
592 }
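/*
 * Worked example (illustrative numbers only): on a core with 8 host
 * channels and a total FIFO depth of 2048 32-bit words, the fallback
 * sizing above gives rxfsiz = 516 + 8 = 524, nptxfsiz = 256 and
 * ptxfsiz = 768, for a total of 1548 words, which fits within 2048.
 */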
593
594 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
595 {
596 struct dwc2_core_params *params = hsotg->core_params;
597 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
598
599 if (!params->enable_dynamic_fifo)
600 return;
601
602 dwc2_calculate_dynamic_fifo(hsotg);
603
604 /* Rx FIFO */
605 grxfsiz = readl(hsotg->regs + GRXFSIZ);
606 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
607 grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
608 grxfsiz |= params->host_rx_fifo_size <<
609 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
610 writel(grxfsiz, hsotg->regs + GRXFSIZ);
611 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));
612
613 /* Non-periodic Tx FIFO */
614 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
615 readl(hsotg->regs + GNPTXFSIZ));
616 nptxfsiz = params->host_nperio_tx_fifo_size <<
617 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
618 nptxfsiz |= params->host_rx_fifo_size <<
619 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
620 writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
621 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
622 readl(hsotg->regs + GNPTXFSIZ));
623
624 /* Periodic Tx FIFO */
625 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
626 readl(hsotg->regs + HPTXFSIZ));
627 hptxfsiz = params->host_perio_tx_fifo_size <<
628 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
629 hptxfsiz |= (params->host_rx_fifo_size +
630 params->host_nperio_tx_fifo_size) <<
631 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
632 writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
633 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
634 readl(hsotg->regs + HPTXFSIZ));
635
636 if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
637 hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
638 /*
639 * Global DFIFOCFG calculation for Host mode -
640 * include RxFIFO, NPTXFIFO and HPTXFIFO
641 */
642 dfifocfg = readl(hsotg->regs + GDFIFOCFG);
643 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
644 dfifocfg |= (params->host_rx_fifo_size +
645 params->host_nperio_tx_fifo_size +
646 params->host_perio_tx_fifo_size) <<
647 GDFIFOCFG_EPINFOBASE_SHIFT &
648 GDFIFOCFG_EPINFOBASE_MASK;
649 writel(dfifocfg, hsotg->regs + GDFIFOCFG);
650 }
651 }
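/*
 * Worked example (illustrative, continuing the 524/256/768 word sizing
 * above): the Rx FIFO occupies words 0..523, the non-periodic Tx FIFO
 * starts at word 524, the periodic Tx FIFO starts at word 524 + 256 = 780,
 * and GDFIFOCFG.EPInfoBase is programmed to 524 + 256 + 768 = 1548.
 */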
652
653 /**
654 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
655 * Host mode
656 *
657 * @hsotg: Programming view of DWC_otg controller
658 *
659 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
660 * request queues. Host channels are reset to ensure that they are ready for
661 * performing transfers.
662 */
663 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
664 {
665 u32 hcfg, hfir, otgctl;
666
667 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
668
669 /* Restart the Phy Clock */
670 writel(0, hsotg->regs + PCGCTL);
671
672 /* Initialize Host Configuration Register */
673 dwc2_init_fs_ls_pclk_sel(hsotg);
674 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
675 hcfg = readl(hsotg->regs + HCFG);
676 hcfg |= HCFG_FSLSSUPP;
677 writel(hcfg, hsotg->regs + HCFG);
678 }
679
680 /*
681 * This bit allows dynamic reloading of the HFIR register during
682 * runtime. This bit needs to be programmed during initial configuration
683 * and its value must not be changed during runtime.
684 */
685 if (hsotg->core_params->reload_ctl > 0) {
686 hfir = readl(hsotg->regs + HFIR);
687 hfir |= HFIR_RLDCTRL;
688 writel(hfir, hsotg->regs + HFIR);
689 }
690
691 if (hsotg->core_params->dma_desc_enable > 0) {
692 u32 op_mode = hsotg->hw_params.op_mode;
693 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
694 !hsotg->hw_params.dma_desc_enable ||
695 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
696 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
697 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
698 dev_err(hsotg->dev,
699 "Hardware does not support descriptor DMA mode -\n");
700 dev_err(hsotg->dev,
701 "falling back to buffer DMA mode.\n");
702 hsotg->core_params->dma_desc_enable = 0;
703 } else {
704 hcfg = readl(hsotg->regs + HCFG);
705 hcfg |= HCFG_DESCDMA;
706 writel(hcfg, hsotg->regs + HCFG);
707 }
708 }
709
710 /* Configure data FIFO sizes */
711 dwc2_config_fifos(hsotg);
712
713 /* TODO - check this */
714 /* Clear Host Set HNP Enable in the OTG Control Register */
715 otgctl = readl(hsotg->regs + GOTGCTL);
716 otgctl &= ~GOTGCTL_HSTSETHNPEN;
717 writel(otgctl, hsotg->regs + GOTGCTL);
718
719 /* Make sure the FIFOs are flushed */
720 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
721 dwc2_flush_rx_fifo(hsotg);
722
723 /* Clear Host Set HNP Enable in the OTG Control Register */
724 otgctl = readl(hsotg->regs + GOTGCTL);
725 otgctl &= ~GOTGCTL_HSTSETHNPEN;
726 writel(otgctl, hsotg->regs + GOTGCTL);
727
728 if (hsotg->core_params->dma_desc_enable <= 0) {
729 int num_channels, i;
730 u32 hcchar;
731
732 /* Flush out any leftover queued requests */
733 num_channels = hsotg->core_params->host_channels;
734 for (i = 0; i < num_channels; i++) {
735 hcchar = readl(hsotg->regs + HCCHAR(i));
736 hcchar &= ~HCCHAR_CHENA;
737 hcchar |= HCCHAR_CHDIS;
738 hcchar &= ~HCCHAR_EPDIR;
739 writel(hcchar, hsotg->regs + HCCHAR(i));
740 }
741
742 /* Halt all channels to put them into a known state */
743 for (i = 0; i < num_channels; i++) {
744 int count = 0;
745
746 hcchar = readl(hsotg->regs + HCCHAR(i));
747 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
748 hcchar &= ~HCCHAR_EPDIR;
749 writel(hcchar, hsotg->regs + HCCHAR(i));
750 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
751 __func__, i);
752 do {
753 hcchar = readl(hsotg->regs + HCCHAR(i));
754 if (++count > 1000) {
755 dev_err(hsotg->dev,
756 "Unable to clear enable on channel %d\n",
757 i);
758 break;
759 }
760 udelay(1);
761 } while (hcchar & HCCHAR_CHENA);
762 }
763 }
764
765 /* Turn on the vbus power */
766 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
767 if (hsotg->op_state == OTG_STATE_A_HOST) {
768 u32 hprt0 = dwc2_read_hprt0(hsotg);
769
770 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
771 !!(hprt0 & HPRT0_PWR));
772 if (!(hprt0 & HPRT0_PWR)) {
773 hprt0 |= HPRT0_PWR;
774 writel(hprt0, hsotg->regs + HPRT0);
775 }
776 }
777
778 dwc2_enable_host_interrupts(hsotg);
779 }
780
781 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
782 struct dwc2_host_chan *chan)
783 {
784 u32 hcintmsk = HCINTMSK_CHHLTD;
785
786 switch (chan->ep_type) {
787 case USB_ENDPOINT_XFER_CONTROL:
788 case USB_ENDPOINT_XFER_BULK:
789 dev_vdbg(hsotg->dev, "control/bulk\n");
790 hcintmsk |= HCINTMSK_XFERCOMPL;
791 hcintmsk |= HCINTMSK_STALL;
792 hcintmsk |= HCINTMSK_XACTERR;
793 hcintmsk |= HCINTMSK_DATATGLERR;
794 if (chan->ep_is_in) {
795 hcintmsk |= HCINTMSK_BBLERR;
796 } else {
797 hcintmsk |= HCINTMSK_NAK;
798 hcintmsk |= HCINTMSK_NYET;
799 if (chan->do_ping)
800 hcintmsk |= HCINTMSK_ACK;
801 }
802
803 if (chan->do_split) {
804 hcintmsk |= HCINTMSK_NAK;
805 if (chan->complete_split)
806 hcintmsk |= HCINTMSK_NYET;
807 else
808 hcintmsk |= HCINTMSK_ACK;
809 }
810
811 if (chan->error_state)
812 hcintmsk |= HCINTMSK_ACK;
813 break;
814
815 case USB_ENDPOINT_XFER_INT:
816 if (dbg_perio())
817 dev_vdbg(hsotg->dev, "intr\n");
818 hcintmsk |= HCINTMSK_XFERCOMPL;
819 hcintmsk |= HCINTMSK_NAK;
820 hcintmsk |= HCINTMSK_STALL;
821 hcintmsk |= HCINTMSK_XACTERR;
822 hcintmsk |= HCINTMSK_DATATGLERR;
823 hcintmsk |= HCINTMSK_FRMOVRUN;
824
825 if (chan->ep_is_in)
826 hcintmsk |= HCINTMSK_BBLERR;
827 if (chan->error_state)
828 hcintmsk |= HCINTMSK_ACK;
829 if (chan->do_split) {
830 if (chan->complete_split)
831 hcintmsk |= HCINTMSK_NYET;
832 else
833 hcintmsk |= HCINTMSK_ACK;
834 }
835 break;
836
837 case USB_ENDPOINT_XFER_ISOC:
838 if (dbg_perio())
839 dev_vdbg(hsotg->dev, "isoc\n");
840 hcintmsk |= HCINTMSK_XFERCOMPL;
841 hcintmsk |= HCINTMSK_FRMOVRUN;
842 hcintmsk |= HCINTMSK_ACK;
843
844 if (chan->ep_is_in) {
845 hcintmsk |= HCINTMSK_XACTERR;
846 hcintmsk |= HCINTMSK_BBLERR;
847 }
848 break;
849 default:
850 dev_err(hsotg->dev, "## Unknown EP type ##\n");
851 break;
852 }
853
854 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
855 if (dbg_hc(chan))
856 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
857 }
858
859 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
860 struct dwc2_host_chan *chan)
861 {
862 u32 hcintmsk = HCINTMSK_CHHLTD;
863
864 /*
865 * For Descriptor DMA mode core halts the channel on AHB error.
866 * Interrupt is not required.
867 */
868 if (hsotg->core_params->dma_desc_enable <= 0) {
869 if (dbg_hc(chan))
870 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
871 hcintmsk |= HCINTMSK_AHBERR;
872 } else {
873 if (dbg_hc(chan))
874 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
875 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
876 hcintmsk |= HCINTMSK_XFERCOMPL;
877 }
878
879 if (chan->error_state && !chan->do_split &&
880 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
881 if (dbg_hc(chan))
882 dev_vdbg(hsotg->dev, "setting ACK\n");
883 hcintmsk |= HCINTMSK_ACK;
884 if (chan->ep_is_in) {
885 hcintmsk |= HCINTMSK_DATATGLERR;
886 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
887 hcintmsk |= HCINTMSK_NAK;
888 }
889 }
890
891 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
892 if (dbg_hc(chan))
893 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
894 }
895
896 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
897 struct dwc2_host_chan *chan)
898 {
899 u32 intmsk;
900
901 if (hsotg->core_params->dma_enable > 0) {
902 if (dbg_hc(chan))
903 dev_vdbg(hsotg->dev, "DMA enabled\n");
904 dwc2_hc_enable_dma_ints(hsotg, chan);
905 } else {
906 if (dbg_hc(chan))
907 dev_vdbg(hsotg->dev, "DMA disabled\n");
908 dwc2_hc_enable_slave_ints(hsotg, chan);
909 }
910
911 /* Enable the top level host channel interrupt */
912 intmsk = readl(hsotg->regs + HAINTMSK);
913 intmsk |= 1 << chan->hc_num;
914 writel(intmsk, hsotg->regs + HAINTMSK);
915 if (dbg_hc(chan))
916 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
917
918 /* Make sure host channel interrupts are enabled */
919 intmsk = readl(hsotg->regs + GINTMSK);
920 intmsk |= GINTSTS_HCHINT;
921 writel(intmsk, hsotg->regs + GINTMSK);
922 if (dbg_hc(chan))
923 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
924 }
925
926 /**
927 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
928 * a specific endpoint
929 *
930 * @hsotg: Programming view of DWC_otg controller
931 * @chan: Information needed to initialize the host channel
932 *
933 * The HCCHARn register is set up with the characteristics specified in chan.
934 * Host channel interrupts that may need to be serviced while this transfer is
935 * in progress are enabled.
936 */
937 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
938 {
939 u8 hc_num = chan->hc_num;
940 u32 hcintmsk;
941 u32 hcchar;
942 u32 hcsplt = 0;
943
944 if (dbg_hc(chan))
945 dev_vdbg(hsotg->dev, "%s()\n", __func__);
946
947 /* Clear old interrupt conditions for this host channel */
948 hcintmsk = 0xffffffff;
949 hcintmsk &= ~HCINTMSK_RESERVED14_31;
950 writel(hcintmsk, hsotg->regs + HCINT(hc_num));
951
952 /* Enable channel interrupts required for this transfer */
953 dwc2_hc_enable_ints(hsotg, chan);
954
955 /*
956 * Program the HCCHARn register with the endpoint characteristics for
957 * the current transfer
958 */
959 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
960 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
961 if (chan->ep_is_in)
962 hcchar |= HCCHAR_EPDIR;
963 if (chan->speed == USB_SPEED_LOW)
964 hcchar |= HCCHAR_LSPDDEV;
965 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
966 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
967 writel(hcchar, hsotg->regs + HCCHAR(hc_num));
968 if (dbg_hc(chan)) {
969 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
970 hc_num, hcchar);
971
972 dev_vdbg(hsotg->dev, "%s: Channel %d\n",
973 __func__, hc_num);
974 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
975 chan->dev_addr);
976 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
977 chan->ep_num);
978 dev_vdbg(hsotg->dev, " Is In: %d\n",
979 chan->ep_is_in);
980 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
981 chan->speed == USB_SPEED_LOW);
982 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
983 chan->ep_type);
984 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
985 chan->max_packet);
986 }
987
988 /* Program the HCSPLT register for SPLITs */
989 if (chan->do_split) {
990 if (dbg_hc(chan))
991 dev_vdbg(hsotg->dev,
992 "Programming HC %d with split --> %s\n",
993 hc_num,
994 chan->complete_split ? "CSPLIT" : "SSPLIT");
995 if (chan->complete_split)
996 hcsplt |= HCSPLT_COMPSPLT;
997 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
998 HCSPLT_XACTPOS_MASK;
999 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
1000 HCSPLT_HUBADDR_MASK;
1001 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
1002 HCSPLT_PRTADDR_MASK;
1003 if (dbg_hc(chan)) {
1004 dev_vdbg(hsotg->dev, " comp split %d\n",
1005 chan->complete_split);
1006 dev_vdbg(hsotg->dev, " xact pos %d\n",
1007 chan->xact_pos);
1008 dev_vdbg(hsotg->dev, " hub addr %d\n",
1009 chan->hub_addr);
1010 dev_vdbg(hsotg->dev, " hub port %d\n",
1011 chan->hub_port);
1012 dev_vdbg(hsotg->dev, " is_in %d\n",
1013 chan->ep_is_in);
1014 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
1015 chan->max_packet);
1016 dev_vdbg(hsotg->dev, " xferlen %d\n",
1017 chan->xfer_len);
1018 }
1019 }
1020
1021 writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
1022 }
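/*
 * Illustrative lifetime sketch (example only, not called from this file):
 * how the HCD queuing code is expected to drive a channel. The function
 * name dwc2_example_run_channel is an assumption made for this example.
 */
#if 0	/* example sketch only */
static void dwc2_example_run_channel(struct dwc2_hsotg *hsotg,
				     struct dwc2_host_chan *chan)
{
	/* Program HCCHARn/HCSPLTn and unmask the channel interrupts */
	dwc2_hc_init(hsotg, chan);

	/* Program HCTSIZn/HCDMAn and enable the channel */
	dwc2_hc_start_transfer(hsotg, chan);

	/*
	 * ...later, once the Channel Halted interrupt has been handled,
	 * release the channel...
	 */
	dwc2_hc_cleanup(hsotg, chan);
}
#endif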
1023
1024 /**
1025 * dwc2_hc_halt() - Attempts to halt a host channel
1026 *
1027 * @hsotg: Controller register interface
1028 * @chan: Host channel to halt
1029 * @halt_status: Reason for halting the channel
1030 *
1031 * This function should only be called in Slave mode or to abort a transfer in
1032 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1033 * controller halts the channel when the transfer is complete or a condition
1034 * occurs that requires application intervention.
1035 *
1036 * In slave mode, checks for a free request queue entry, then sets the Channel
1037 * Enable and Channel Disable bits of the Host Channel Characteristics
1038 * register of the specified channel to initiate the halt. If there is no free
1039 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1040 * register to flush requests for this channel. In the latter case, sets a
1041 * flag to indicate that the host channel needs to be halted when a request
1042 * queue slot is open.
1043 *
1044 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1045 * HCCHARn register. The controller ensures there is space in the request
1046 * queue before submitting the halt request.
1047 *
1048 * Some time may elapse before the core flushes any posted requests for this
1049 * host channel and halts. The Channel Halted interrupt handler completes the
1050 * deactivation of the host channel.
1051 */
1052 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
1053 enum dwc2_halt_status halt_status)
1054 {
1055 u32 nptxsts, hptxsts, hcchar;
1056
1057 if (dbg_hc(chan))
1058 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1059 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
1060 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
1061
1062 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1063 halt_status == DWC2_HC_XFER_AHB_ERR) {
1064 /*
1065 * Disable all channel interrupts except Ch Halted. The QTD
1066 * and QH state associated with this transfer has been cleared
1067 * (in the case of URB_DEQUEUE), so the channel needs to be
1068 * shut down carefully to prevent crashes.
1069 */
1070 u32 hcintmsk = HCINTMSK_CHHLTD;
1071
1072 dev_vdbg(hsotg->dev, "dequeue/error\n");
1073 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1074
1075 /*
1076 * Make sure no other interrupts besides halt are currently
1077 * pending. Handling another interrupt could cause a crash due
1078 * to the QTD and QH state.
1079 */
1080 writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1081
1082 /*
1083 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1084 * even if the channel was already halted for some other
1085 * reason
1086 */
1087 chan->halt_status = halt_status;
1088
1089 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1090 if (!(hcchar & HCCHAR_CHENA)) {
1091 /*
1092 * The channel is either already halted or it hasn't
1093 * started yet. In DMA mode, the transfer may halt if
1094 * it finishes normally or a condition occurs that
1095 * requires driver intervention. Don't want to halt
1096 * the channel again. In either Slave or DMA mode,
1097 * it's possible that the transfer has been assigned
1098 * to a channel, but not started yet when an URB is
1099 * dequeued. Don't want to halt a channel that hasn't
1100 * started yet.
1101 */
1102 return;
1103 }
1104 }
1105 if (chan->halt_pending) {
1106 /*
1107 * A halt has already been issued for this channel. This might
1108 * happen when a transfer is aborted by a higher level in
1109 * the stack.
1110 */
1111 dev_vdbg(hsotg->dev,
1112 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1113 __func__, chan->hc_num);
1114 return;
1115 }
1116
1117 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1118
1119 /* No need to set the bit in DDMA for disabling the channel */
1120 /* TODO check it everywhere channel is disabled */
1121 if (hsotg->core_params->dma_desc_enable <= 0) {
1122 if (dbg_hc(chan))
1123 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1124 hcchar |= HCCHAR_CHENA;
1125 } else {
1126 if (dbg_hc(chan))
1127 dev_dbg(hsotg->dev, "desc DMA enabled\n");
1128 }
1129 hcchar |= HCCHAR_CHDIS;
1130
1131 if (hsotg->core_params->dma_enable <= 0) {
1132 if (dbg_hc(chan))
1133 dev_vdbg(hsotg->dev, "DMA not enabled\n");
1134 hcchar |= HCCHAR_CHENA;
1135
1136 /* Check for space in the request queue to issue the halt */
1137 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1138 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1139 dev_vdbg(hsotg->dev, "control/bulk\n");
1140 nptxsts = readl(hsotg->regs + GNPTXSTS);
1141 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1142 dev_vdbg(hsotg->dev, "Disabling channel\n");
1143 hcchar &= ~HCCHAR_CHENA;
1144 }
1145 } else {
1146 if (dbg_perio())
1147 dev_vdbg(hsotg->dev, "isoc/intr\n");
1148 hptxsts = readl(hsotg->regs + HPTXSTS);
1149 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1150 hsotg->queuing_high_bandwidth) {
1151 if (dbg_perio())
1152 dev_vdbg(hsotg->dev, "Disabling channel\n");
1153 hcchar &= ~HCCHAR_CHENA;
1154 }
1155 }
1156 } else {
1157 if (dbg_hc(chan))
1158 dev_vdbg(hsotg->dev, "DMA enabled\n");
1159 }
1160
1161 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1162 chan->halt_status = halt_status;
1163
1164 if (hcchar & HCCHAR_CHENA) {
1165 if (dbg_hc(chan))
1166 dev_vdbg(hsotg->dev, "Channel enabled\n");
1167 chan->halt_pending = 1;
1168 chan->halt_on_queue = 0;
1169 } else {
1170 if (dbg_hc(chan))
1171 dev_vdbg(hsotg->dev, "Channel disabled\n");
1172 chan->halt_on_queue = 1;
1173 }
1174
1175 if (dbg_hc(chan)) {
1176 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1177 chan->hc_num);
1178 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1179 hcchar);
1180 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1181 chan->halt_pending);
1182 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1183 chan->halt_on_queue);
1184 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1185 chan->halt_status);
1186 }
1187 }
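/*
 * Illustrative caller sketch (example only): aborting an in-progress
 * transfer, e.g. when an URB is dequeued by the HCD. The function name
 * dwc2_example_abort_channel is an assumption made for this example.
 */
#if 0	/* example sketch only */
static void dwc2_example_abort_channel(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan)
{
	/* Request the halt; the Channel Halted interrupt finishes it */
	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_URB_DEQUEUE);
}
#endif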
1188
1189 /**
1190 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1191 *
1192 * @hsotg: Programming view of DWC_otg controller
1193 * @chan: Identifies the host channel to clean up
1194 *
1195 * This function is normally called after a transfer is done and the host
1196 * channel is being released
1197 */
1198 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1199 {
1200 u32 hcintmsk;
1201
1202 chan->xfer_started = 0;
1203
1204 /*
1205 * Clear channel interrupt enables and any unhandled channel interrupt
1206 * conditions
1207 */
1208 writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1209 hcintmsk = 0xffffffff;
1210 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1211 writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1212 }
1213
1214 /**
1215 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1216 * which frame a periodic transfer should occur
1217 *
1218 * @hsotg: Programming view of DWC_otg controller
1219 * @chan: Identifies the host channel to set up and its properties
1220 * @hcchar: Current value of the HCCHAR register for the specified host channel
1221 *
1222 * This function has no effect on non-periodic transfers
1223 */
1224 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1225 struct dwc2_host_chan *chan, u32 *hcchar)
1226 {
1227 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1228 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1229 /* 1 if _next_ frame is odd, 0 if it's even */
1230 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1231 *hcchar |= HCCHAR_ODDFRM;
1232 }
1233 }
1234
1235 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1236 {
1237 /* Set up the initial PID for the transfer */
1238 if (chan->speed == USB_SPEED_HIGH) {
1239 if (chan->ep_is_in) {
1240 if (chan->multi_count == 1)
1241 chan->data_pid_start = DWC2_HC_PID_DATA0;
1242 else if (chan->multi_count == 2)
1243 chan->data_pid_start = DWC2_HC_PID_DATA1;
1244 else
1245 chan->data_pid_start = DWC2_HC_PID_DATA2;
1246 } else {
1247 if (chan->multi_count == 1)
1248 chan->data_pid_start = DWC2_HC_PID_DATA0;
1249 else
1250 chan->data_pid_start = DWC2_HC_PID_MDATA;
1251 }
1252 } else {
1253 chan->data_pid_start = DWC2_HC_PID_DATA0;
1254 }
1255 }
1256
1257 /**
1258 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1259 * the Host Channel
1260 *
1261 * @hsotg: Programming view of DWC_otg controller
1262 * @chan: Information needed to initialize the host channel
1263 *
1264 * This function should only be called in Slave mode. For a channel associated
1265 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1266 * associated with a periodic EP, the periodic Tx FIFO is written.
1267 *
1268 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1269 * the number of bytes written to the Tx FIFO.
1270 */
1271 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1272 struct dwc2_host_chan *chan)
1273 {
1274 u32 i;
1275 u32 remaining_count;
1276 u32 byte_count;
1277 u32 dword_count;
1278 u32 __iomem *data_fifo;
1279 u32 *data_buf = (u32 *)chan->xfer_buf;
1280
1281 if (dbg_hc(chan))
1282 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1283
1284 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1285
1286 remaining_count = chan->xfer_len - chan->xfer_count;
1287 if (remaining_count > chan->max_packet)
1288 byte_count = chan->max_packet;
1289 else
1290 byte_count = remaining_count;
1291
1292 dword_count = (byte_count + 3) / 4;
1293
1294 if (((unsigned long)data_buf & 0x3) == 0) {
1295 /* xfer_buf is DWORD aligned */
1296 for (i = 0; i < dword_count; i++, data_buf++)
1297 writel(*data_buf, data_fifo);
1298 } else {
1299 /* xfer_buf is not DWORD aligned */
1300 for (i = 0; i < dword_count; i++, data_buf++) {
1301 u32 data = data_buf[0] | data_buf[1] << 8 |
1302 data_buf[2] << 16 | data_buf[3] << 24;
1303 writel(data, data_fifo);
1304 }
1305 }
1306
1307 chan->xfer_count += byte_count;
1308 chan->xfer_buf += byte_count;
1309 }
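/*
 * Worked example (illustrative): a 188-byte start-split OUT payload is
 * pushed into the FIFO as (188 + 3) / 4 = 47 32-bit words.
 */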
1310
1311 /**
1312 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1313 * channel and starts the transfer
1314 *
1315 * @hsotg: Programming view of DWC_otg controller
1316 * @chan: Information needed to initialize the host channel. The xfer_len value
1317 * may be reduced to accommodate the max widths of the XferSize and
1318 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1319 * changed to reflect the final xfer_len value.
1320 *
1321 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1322 * the caller must ensure that there is sufficient space in the request queue
1323 * and Tx Data FIFO.
1324 *
1325 * For an OUT transfer in Slave mode, it loads a data packet into the
1326 * appropriate FIFO. If necessary, additional data packets are loaded in the
1327 * Host ISR.
1328 *
1329 * For an IN transfer in Slave mode, a data packet is requested. The data
1330 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1331 * additional data packets are requested in the Host ISR.
1332 *
1333 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1334 * register along with a packet count of 1 and the channel is enabled. This
1335 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1336 * simply set to 0 since no data transfer occurs in this case.
1337 *
1338 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1339 * all the information required to perform the subsequent data transfer. In
1340 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1341 * controller performs the entire PING protocol, then starts the data
1342 * transfer.
1343 */
1344 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1345 struct dwc2_host_chan *chan)
1346 {
1347 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1348 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1349 u32 hcchar;
1350 u32 hctsiz = 0;
1351 u16 num_packets;
1352
1353 if (dbg_hc(chan))
1354 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1355
1356 if (chan->do_ping) {
1357 if (hsotg->core_params->dma_enable <= 0) {
1358 if (dbg_hc(chan))
1359 dev_vdbg(hsotg->dev, "ping, no DMA\n");
1360 dwc2_hc_do_ping(hsotg, chan);
1361 chan->xfer_started = 1;
1362 return;
1363 } else {
1364 if (dbg_hc(chan))
1365 dev_vdbg(hsotg->dev, "ping, DMA\n");
1366 hctsiz |= TSIZ_DOPNG;
1367 }
1368 }
1369
1370 if (chan->do_split) {
1371 if (dbg_hc(chan))
1372 dev_vdbg(hsotg->dev, "split\n");
1373 num_packets = 1;
1374
1375 if (chan->complete_split && !chan->ep_is_in)
1376 /*
1377 * For CSPLIT OUT Transfer, set the size to 0 so the
1378 * core doesn't expect any data written to the FIFO
1379 */
1380 chan->xfer_len = 0;
1381 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1382 chan->xfer_len = chan->max_packet;
1383 else if (!chan->ep_is_in && chan->xfer_len > 188)
1384 chan->xfer_len = 188;
1385
1386 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1387 TSIZ_XFERSIZE_MASK;
1388 } else {
1389 if (dbg_hc(chan))
1390 dev_vdbg(hsotg->dev, "no split\n");
1391 /*
1392 * Ensure that the transfer length and packet count will fit
1393 * in the widths allocated for them in the HCTSIZn register
1394 */
1395 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1396 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1397 /*
1398 * Make sure the transfer size is no larger than one
1399 * (micro)frame's worth of data. (A check was done
1400 * when the periodic transfer was accepted to ensure
1401 * that a (micro)frame's worth of data can be
1402 * programmed into a channel.)
1403 */
1404 u32 max_periodic_len =
1405 chan->multi_count * chan->max_packet;
1406
1407 if (chan->xfer_len > max_periodic_len)
1408 chan->xfer_len = max_periodic_len;
1409 } else if (chan->xfer_len > max_hc_xfer_size) {
1410 /*
1411 * Make sure that xfer_len is a multiple of max packet
1412 * size
1413 */
1414 chan->xfer_len =
1415 max_hc_xfer_size - chan->max_packet + 1;
1416 }
1417
1418 if (chan->xfer_len > 0) {
1419 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1420 chan->max_packet;
1421 if (num_packets > max_hc_pkt_count) {
1422 num_packets = max_hc_pkt_count;
1423 chan->xfer_len = num_packets * chan->max_packet;
1424 }
1425 } else {
1426 /* Need 1 packet for transfer length of 0 */
1427 num_packets = 1;
1428 }
1429
1430 if (chan->ep_is_in)
1431 /*
1432 * Always program an integral # of max packets for IN
1433 * transfers
1434 */
1435 chan->xfer_len = num_packets * chan->max_packet;
1436
1437 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1438 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1439 /*
1440 * Make sure that the multi_count field matches the
1441 * actual transfer length
1442 */
1443 chan->multi_count = num_packets;
1444
1445 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1446 dwc2_set_pid_isoc(chan);
1447
1448 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1449 TSIZ_XFERSIZE_MASK;
1450 }
1451
1452 chan->start_pkt_count = num_packets;
1453 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1454 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1455 TSIZ_SC_MC_PID_MASK;
1456 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1457 if (dbg_hc(chan)) {
1458 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1459 hctsiz, chan->hc_num);
1460
1461 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1462 chan->hc_num);
1463 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
1464 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1465 TSIZ_XFERSIZE_SHIFT);
1466 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
1467 (hctsiz & TSIZ_PKTCNT_MASK) >>
1468 TSIZ_PKTCNT_SHIFT);
1469 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1470 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1471 TSIZ_SC_MC_PID_SHIFT);
1472 }
1473
1474 if (hsotg->core_params->dma_enable > 0) {
1475 dma_addr_t dma_addr;
1476
1477 if (chan->align_buf) {
1478 if (dbg_hc(chan))
1479 dev_vdbg(hsotg->dev, "align_buf\n");
1480 dma_addr = chan->align_buf;
1481 } else {
1482 dma_addr = chan->xfer_dma;
1483 }
1484 writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
1485 if (dbg_hc(chan))
1486 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1487 (unsigned long)dma_addr, chan->hc_num);
1488 }
1489
1490 /* Start the split */
1491 if (chan->do_split) {
1492 u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
1493
1494 hcsplt |= HCSPLT_SPLTENA;
1495 writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
1496 }
1497
1498 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1499 hcchar &= ~HCCHAR_MULTICNT_MASK;
1500 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1501 HCCHAR_MULTICNT_MASK;
1502 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1503
1504 if (hcchar & HCCHAR_CHDIS)
1505 dev_warn(hsotg->dev,
1506 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1507 __func__, chan->hc_num, hcchar);
1508
1509 /* Set host channel enable after all other setup is complete */
1510 hcchar |= HCCHAR_CHENA;
1511 hcchar &= ~HCCHAR_CHDIS;
1512
1513 if (dbg_hc(chan))
1514 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1515 (hcchar & HCCHAR_MULTICNT_MASK) >>
1516 HCCHAR_MULTICNT_SHIFT);
1517
1518 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1519 if (dbg_hc(chan))
1520 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1521 chan->hc_num);
1522
1523 chan->xfer_started = 1;
1524 chan->requests++;
1525
1526 if (hsotg->core_params->dma_enable <= 0 &&
1527 !chan->ep_is_in && chan->xfer_len > 0)
1528 /* Load OUT packet into the appropriate Tx FIFO */
1529 dwc2_hc_write_packet(hsotg, chan);
1530 }
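/*
 * Worked example (illustrative): for a bulk IN transfer of 3000 bytes with
 * a 512-byte max packet, num_packets = DIV_ROUND_UP(3000, 512) = 6 and the
 * programmed xfer_len is rounded up to 6 * 512 = 3072 bytes, since IN
 * transfers always use an integral number of max-size packets.
 */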
1531
1532 /**
1533 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1534 * host channel and starts the transfer in Descriptor DMA mode
1535 *
1536 * @hsotg: Programming view of DWC_otg controller
1537 * @chan: Information needed to initialize the host channel
1538 *
1539 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1540 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1541 * with micro-frame bitmap.
1542 *
1543 * Initializes HCDMA register with descriptor list address and CTD value then
1544 * starts the transfer via enabling the channel.
1545 */
1546 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1547 struct dwc2_host_chan *chan)
1548 {
1549 u32 hcchar;
1550 u32 hc_dma;
1551 u32 hctsiz = 0;
1552
1553 if (chan->do_ping)
1554 hctsiz |= TSIZ_DOPNG;
1555
1556 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1557 dwc2_set_pid_isoc(chan);
1558
1559 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1560 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1561 TSIZ_SC_MC_PID_MASK;
1562
1563 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1564 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1565
1566 /* Non-zero only for high-speed interrupt endpoints */
1567 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1568
1569 if (dbg_hc(chan)) {
1570 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1571 chan->hc_num);
1572 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1573 chan->data_pid_start);
1574 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1575 }
1576
1577 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1578
1579 hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1580
1581 /* Always start from first descriptor */
1582 hc_dma &= ~HCDMA_CTD_MASK;
1583 writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
1584 if (dbg_hc(chan))
1585 dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1586 hc_dma, chan->hc_num);
1587
1588 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1589 hcchar &= ~HCCHAR_MULTICNT_MASK;
1590 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1591 HCCHAR_MULTICNT_MASK;
1592
1593 if (hcchar & HCCHAR_CHDIS)
1594 dev_warn(hsotg->dev,
1595 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1596 __func__, chan->hc_num, hcchar);
1597
1598 /* Set host channel enable after all other setup is complete */
1599 hcchar |= HCCHAR_CHENA;
1600 hcchar &= ~HCCHAR_CHDIS;
1601
1602 if (dbg_hc(chan))
1603 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1604 (hcchar & HCCHAR_MULTICNT_MASK) >>
1605 HCCHAR_MULTICNT_SHIFT);
1606
1607 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1608 if (dbg_hc(chan))
1609 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1610 chan->hc_num);
1611
1612 chan->xfer_started = 1;
1613 chan->requests++;
1614 }
1615
1616 /**
1617 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1618 * a previous call to dwc2_hc_start_transfer()
1619 *
1620 * @hsotg: Programming view of DWC_otg controller
1621 * @chan: Information needed to initialize the host channel
1622 *
1623 * The caller must ensure there is sufficient space in the request queue and Tx
1624 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1625 * the controller acts autonomously to complete transfers programmed to a host
1626 * channel.
1627 *
1628 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1629 * if there is any data remaining to be queued. For an IN transfer, another
1630 * data packet is always requested. For the SETUP phase of a control transfer,
1631 * this function does nothing.
1632 *
1633 * Return: 1 if a new request is queued, 0 if no more requests are required
1634 * for this transfer
1635 */
1636 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1637 struct dwc2_host_chan *chan)
1638 {
1639 if (dbg_hc(chan))
1640 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1641 chan->hc_num);
1642
1643 if (chan->do_split)
1644 /* SPLITs always queue just once per channel */
1645 return 0;
1646
1647 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1648 /* SETUPs are queued only once since they can't be NAK'd */
1649 return 0;
1650
1651 if (chan->ep_is_in) {
1652 /*
1653 * Always queue another request for other IN transfers. If
1654 * back-to-back INs are issued and NAKs are received for both,
1655 * the driver may still be processing the first NAK when the
1656 * second NAK is received. When the interrupt handler clears
1657 * the NAK interrupt for the first NAK, the second NAK will
1658 * not be seen. So we can't depend on the NAK interrupt
1659 * handler to requeue a NAK'd request. Instead, IN requests
1660 * are issued each time this function is called. When the
1661 * transfer completes, the extra requests for the channel will
1662 * be flushed.
1663 */
1664 u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1665
1666 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1667 hcchar |= HCCHAR_CHENA;
1668 hcchar &= ~HCCHAR_CHDIS;
1669 if (dbg_hc(chan))
1670 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
1671 hcchar);
1672 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1673 chan->requests++;
1674 return 1;
1675 }
1676
1677 /* OUT transfers */
1678
1679 if (chan->xfer_count < chan->xfer_len) {
1680 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1681 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1682 u32 hcchar = readl(hsotg->regs +
1683 HCCHAR(chan->hc_num));
1684
1685 dwc2_hc_set_even_odd_frame(hsotg, chan,
1686 &hcchar);
1687 }
1688
1689 /* Load OUT packet into the appropriate Tx FIFO */
1690 dwc2_hc_write_packet(hsotg, chan);
1691 chan->requests++;
1692 return 1;
1693 }
1694
1695 return 0;
1696 }
1697
1698 /**
1699 * dwc2_hc_do_ping() - Starts a PING transfer
1700 *
1701 * @hsotg: Programming view of DWC_otg controller
1702 * @chan: Information needed to initialize the host channel
1703 *
1704 * This function should only be called in Slave mode. The Do Ping bit is set in
1705 * the HCTSIZ register, then the channel is enabled.
1706 */
1707 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1708 {
1709 u32 hcchar;
1710 u32 hctsiz;
1711
1712 if (dbg_hc(chan))
1713 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1714 chan->hc_num);
1715
1716
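/* Program a single PING token: set the Do Ping bit with a packet count of 1 */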
1717 hctsiz = TSIZ_DOPNG;
1718 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1719 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1720
1721 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1722 hcchar |= HCCHAR_CHENA;
1723 hcchar &= ~HCCHAR_CHDIS;
1724 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1725 }
1726
1727 /**
1728 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
1729 * the HFIR register according to PHY type and speed
1730 *
1731 * @hsotg: Programming view of DWC_otg controller
1732 *
1733 * NOTE: The caller can modify the value of the HFIR register only after the
1734 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1735 * has been set
1736 */
1737 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
1738 {
1739 u32 usbcfg;
1740 u32 hprt0;
1741 int clock = 60; /* default value */
1742
1743 usbcfg = readl(hsotg->regs + GUSBCFG);
1744 hprt0 = readl(hsotg->regs + HPRT0);
1745
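/*
 * Derive the PHY clock frequency in MHz from the PHY interface
 * configuration in GUSBCFG and the full-speed PHY type; it is used
 * below to express the frame interval in PHY clock ticks.
 */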
1746 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
1747 !(usbcfg & GUSBCFG_PHYIF16))
1748 clock = 60;
1749 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
1750 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
1751 clock = 48;
1752 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1753 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1754 clock = 30;
1755 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1756 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
1757 clock = 60;
1758 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1759 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1760 clock = 48;
1761 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
1762 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
1763 clock = 48;
1764 if ((usbcfg & GUSBCFG_PHYSEL) &&
1765 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
1766 clock = 48;
1767
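/*
 * HFIR is programmed in PHY clock units: a 125 us microframe at high
 * speed, a 1 ms frame at full/low speed. For example, a 48 MHz
 * dedicated FS PHY yields 48 * 1000 = 48000 PHY clocks per frame.
 */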
1768 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
1769 /* High speed case */
1770 return 125 * clock;
1771 else
1772 /* FS/LS case */
1773 return 1000 * clock;
1774 }
1775
1776 /**
1777 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1778 * buffer
1779 *
1780 * @hsotg: Programming view of DWC_otg controller
1781 * @dest: Destination buffer for the packet
1782 * @bytes: Number of bytes to copy to the destination
1783 */
1784 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1785 {
1786 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
1787 u32 *data_buf = (u32 *)dest;
1788 int word_count = (bytes + 3) / 4;
1789 int i;
1790
1791 /*
1792 * Todo: Account for the case where dest is not dword aligned. This
1793 * requires reading data from the FIFO into a u32 temp buffer, then
1794 * moving it into the data buffer.
1795 */
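/*
 * Note: whole 32-bit words are popped from the FIFO, so @dest must have
 * room for @bytes rounded up to the next multiple of 4.
 */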
1796
1797 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1798
1799 for (i = 0; i < word_count; i++, data_buf++)
1800 *data_buf = readl(fifo);
1801 }
1802
1803 /**
1804 * dwc2_dump_host_registers() - Prints the host registers
1805 *
1806 * @hsotg: Programming view of DWC_otg controller
1807 *
1808 * NOTE: This function will be removed once the peripheral controller code
1809 * is integrated and the driver is stable
1810 */
1811 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
1812 {
1813 #ifdef DEBUG
1814 u32 __iomem *addr;
1815 int i;
1816
1817 dev_dbg(hsotg->dev, "Host Global Registers\n");
1818 addr = hsotg->regs + HCFG;
1819 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
1820 (unsigned long)addr, readl(addr));
1821 addr = hsotg->regs + HFIR;
1822 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
1823 (unsigned long)addr, readl(addr));
1824 addr = hsotg->regs + HFNUM;
1825 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
1826 (unsigned long)addr, readl(addr));
1827 addr = hsotg->regs + HPTXSTS;
1828 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
1829 (unsigned long)addr, readl(addr));
1830 addr = hsotg->regs + HAINT;
1831 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
1832 (unsigned long)addr, readl(addr));
1833 addr = hsotg->regs + HAINTMSK;
1834 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
1835 (unsigned long)addr, readl(addr));
1836 if (hsotg->core_params->dma_desc_enable > 0) {
1837 addr = hsotg->regs + HFLBADDR;
1838 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
1839 (unsigned long)addr, readl(addr));
1840 }
1841
1842 addr = hsotg->regs + HPRT0;
1843 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
1844 (unsigned long)addr, readl(addr));
1845
1846 for (i = 0; i < hsotg->core_params->host_channels; i++) {
1847 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
1848 addr = hsotg->regs + HCCHAR(i);
1849 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
1850 (unsigned long)addr, readl(addr));
1851 addr = hsotg->regs + HCSPLT(i);
1852 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
1853 (unsigned long)addr, readl(addr));
1854 addr = hsotg->regs + HCINT(i);
1855 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
1856 (unsigned long)addr, readl(addr));
1857 addr = hsotg->regs + HCINTMSK(i);
1858 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
1859 (unsigned long)addr, readl(addr));
1860 addr = hsotg->regs + HCTSIZ(i);
1861 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
1862 (unsigned long)addr, readl(addr));
1863 addr = hsotg->regs + HCDMA(i);
1864 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
1865 (unsigned long)addr, readl(addr));
1866 if (hsotg->core_params->dma_desc_enable > 0) {
1867 addr = hsotg->regs + HCDMAB(i);
1868 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
1869 (unsigned long)addr, readl(addr));
1870 }
1871 }
1872 #endif
1873 }
1874
1875 /**
1876 * dwc2_dump_global_registers() - Prints the core global registers
1877 *
1878 * @hsotg: Programming view of DWC_otg controller
1879 *
1880 * NOTE: This function will be removed once the peripheral controller code
1881 * is integrated and the driver is stable
1882 */
1883 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
1884 {
1885 #ifdef DEBUG
1886 u32 __iomem *addr;
1887
1888 dev_dbg(hsotg->dev, "Core Global Registers\n");
1889 addr = hsotg->regs + GOTGCTL;
1890 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
1891 (unsigned long)addr, readl(addr));
1892 addr = hsotg->regs + GOTGINT;
1893 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
1894 (unsigned long)addr, readl(addr));
1895 addr = hsotg->regs + GAHBCFG;
1896 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
1897 (unsigned long)addr, readl(addr));
1898 addr = hsotg->regs + GUSBCFG;
1899 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
1900 (unsigned long)addr, readl(addr));
1901 addr = hsotg->regs + GRSTCTL;
1902 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
1903 (unsigned long)addr, readl(addr));
1904 addr = hsotg->regs + GINTSTS;
1905 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
1906 (unsigned long)addr, readl(addr));
1907 addr = hsotg->regs + GINTMSK;
1908 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
1909 (unsigned long)addr, readl(addr));
1910 addr = hsotg->regs + GRXSTSR;
1911 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
1912 (unsigned long)addr, readl(addr));
1913 addr = hsotg->regs + GRXFSIZ;
1914 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
1915 (unsigned long)addr, readl(addr));
1916 addr = hsotg->regs + GNPTXFSIZ;
1917 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
1918 (unsigned long)addr, readl(addr));
1919 addr = hsotg->regs + GNPTXSTS;
1920 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
1921 (unsigned long)addr, readl(addr));
1922 addr = hsotg->regs + GI2CCTL;
1923 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
1924 (unsigned long)addr, readl(addr));
1925 addr = hsotg->regs + GPVNDCTL;
1926 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
1927 (unsigned long)addr, readl(addr));
1928 addr = hsotg->regs + GGPIO;
1929 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
1930 (unsigned long)addr, readl(addr));
1931 addr = hsotg->regs + GUID;
1932 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
1933 (unsigned long)addr, readl(addr));
1934 addr = hsotg->regs + GSNPSID;
1935 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
1936 (unsigned long)addr, readl(addr));
1937 addr = hsotg->regs + GHWCFG1;
1938 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
1939 (unsigned long)addr, readl(addr));
1940 addr = hsotg->regs + GHWCFG2;
1941 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
1942 (unsigned long)addr, readl(addr));
1943 addr = hsotg->regs + GHWCFG3;
1944 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
1945 (unsigned long)addr, readl(addr));
1946 addr = hsotg->regs + GHWCFG4;
1947 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
1948 (unsigned long)addr, readl(addr));
1949 addr = hsotg->regs + GLPMCFG;
1950 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
1951 (unsigned long)addr, readl(addr));
1952 addr = hsotg->regs + GPWRDN;
1953 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
1954 (unsigned long)addr, readl(addr));
1955 addr = hsotg->regs + GDFIFOCFG;
1956 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
1957 (unsigned long)addr, readl(addr));
1958 addr = hsotg->regs + HPTXFSIZ;
1959 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
1960 (unsigned long)addr, readl(addr));
1961
1962 addr = hsotg->regs + PCGCTL;
1963 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
1964 (unsigned long)addr, readl(addr));
1965 #endif
1966 }
1967
1968 /**
1969 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1970 *
1971 * @hsotg: Programming view of DWC_otg controller
1972 * @num: Tx FIFO to flush
1973 */
1974 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1975 {
1976 u32 greset;
1977 int count = 0;
1978
1979 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1980
1981 greset = GRSTCTL_TXFFLSH;
1982 greset |= (num << GRSTCTL_TXFNUM_SHIFT) & GRSTCTL_TXFNUM_MASK;
1983 writel(greset, hsotg->regs + GRSTCTL);
1984
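/* Poll until the core clears TxFFlsh; give up after roughly 10 ms */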
1985 do {
1986 greset = readl(hsotg->regs + GRSTCTL);
1987 if (++count > 10000) {
1988 dev_warn(hsotg->dev,
1989 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
1990 __func__, greset,
1991 readl(hsotg->regs + GNPTXSTS));
1992 break;
1993 }
1994 udelay(1);
1995 } while (greset & GRSTCTL_TXFFLSH);
1996
1997 /* Wait for at least 3 PHY Clocks */
1998 udelay(1);
1999 }
2000
2001 /**
2002 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
2003 *
2004 * @hsotg: Programming view of DWC_otg controller
2005 */
2006 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
2007 {
2008 u32 greset;
2009 int count = 0;
2010
2011 dev_vdbg(hsotg->dev, "%s()\n", __func__);
2012
2013 greset = GRSTCTL_RXFFLSH;
2014 writel(greset, hsotg->regs + GRSTCTL);
2015
2016 do {
2017 greset = readl(hsotg->regs + GRSTCTL);
2018 if (++count > 10000) {
2019 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
2020 __func__, greset);
2021 break;
2022 }
2023 udelay(1);
2024 } while (greset & GRSTCTL_RXFFLSH);
2025
2026 /* Wait for at least 3 PHY Clocks */
2027 udelay(1);
2028 }
2029
2030 #define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
2031
2032 /* Parameter access functions */
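/*
 * Each setter below validates @val, in most cases against the capabilities
 * read into hsotg->hw_params. An out-of-range or unsupported value is
 * reported and replaced with a default derived from the hardware; a
 * negative value selects that default silently, which is how
 * auto-detection is requested.
 */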
2033 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
2034 {
2035 int valid = 1;
2036
2037 switch (val) {
2038 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
2039 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
2040 valid = 0;
2041 break;
2042 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
2043 switch (hsotg->hw_params.op_mode) {
2044 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2045 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2046 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2047 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2048 break;
2049 default:
2050 valid = 0;
2051 break;
2052 }
2053 break;
2054 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2055 /* always valid */
2056 break;
2057 default:
2058 valid = 0;
2059 break;
2060 }
2061
2062 if (!valid) {
2063 if (val >= 0)
2064 dev_err(hsotg->dev,
2065 "%d invalid for otg_cap parameter. Check HW configuration.\n",
2066 val);
2067 switch (hsotg->hw_params.op_mode) {
2068 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2069 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2070 break;
2071 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2072 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2073 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2074 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2075 break;
2076 default:
2077 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2078 break;
2079 }
2080 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2081 }
2082
2083 hsotg->core_params->otg_cap = val;
2084 }
2085
2086 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2087 {
2088 int valid = 1;
2089
2090 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2091 valid = 0;
2092 if (val < 0)
2093 valid = 0;
2094
2095 if (!valid) {
2096 if (val >= 0)
2097 dev_err(hsotg->dev,
2098 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2099 val);
2100 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2101 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2102 }
2103
2104 hsotg->core_params->dma_enable = val;
2105 }
2106
2107 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2108 {
2109 int valid = 1;
2110
2111 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2112 !hsotg->hw_params.dma_desc_enable))
2113 valid = 0;
2114 if (val < 0)
2115 valid = 0;
2116
2117 if (!valid) {
2118 if (val >= 0)
2119 dev_err(hsotg->dev,
2120 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2121 val);
2122 val = (hsotg->core_params->dma_enable > 0 &&
2123 hsotg->hw_params.dma_desc_enable);
2124 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2125 }
2126
2127 hsotg->core_params->dma_desc_enable = val;
2128 }
2129
2130 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2131 int val)
2132 {
2133 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2134 if (val >= 0) {
2135 dev_err(hsotg->dev,
2136 "Wrong value for host_support_fs_low_power\n");
2137 dev_err(hsotg->dev,
2138 "host_support_fs_low_power must be 0 or 1\n");
2139 }
2140 val = 0;
2141 dev_dbg(hsotg->dev,
2142 "Setting host_support_fs_low_power to %d\n", val);
2143 }
2144
2145 hsotg->core_params->host_support_fs_ls_low_power = val;
2146 }
2147
2148 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2149 {
2150 int valid = 1;
2151
2152 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2153 valid = 0;
2154 if (val < 0)
2155 valid = 0;
2156
2157 if (!valid) {
2158 if (val >= 0)
2159 dev_err(hsotg->dev,
2160 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2161 val);
2162 val = hsotg->hw_params.enable_dynamic_fifo;
2163 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2164 }
2165
2166 hsotg->core_params->enable_dynamic_fifo = val;
2167 }
2168
2169 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2170 {
2171 int valid = 1;
2172
2173 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2174 valid = 0;
2175
2176 if (!valid) {
2177 if (val >= 0)
2178 dev_err(hsotg->dev,
2179 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2180 val);
2181 val = hsotg->hw_params.host_rx_fifo_size;
2182 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2183 }
2184
2185 hsotg->core_params->host_rx_fifo_size = val;
2186 }
2187
2188 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2189 {
2190 int valid = 1;
2191
2192 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2193 valid = 0;
2194
2195 if (!valid) {
2196 if (val >= 0)
2197 dev_err(hsotg->dev,
2198 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2199 val);
2200 val = hsotg->hw_params.host_nperio_tx_fifo_size;
2201 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2202 val);
2203 }
2204
2205 hsotg->core_params->host_nperio_tx_fifo_size = val;
2206 }
2207
2208 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2209 {
2210 int valid = 1;
2211
2212 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2213 valid = 0;
2214
2215 if (!valid) {
2216 if (val >= 0)
2217 dev_err(hsotg->dev,
2218 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2219 val);
2220 val = hsotg->hw_params.host_perio_tx_fifo_size;
2221 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2222 val);
2223 }
2224
2225 hsotg->core_params->host_perio_tx_fifo_size = val;
2226 }
2227
2228 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2229 {
2230 int valid = 1;
2231
2232 if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2233 valid = 0;
2234
2235 if (!valid) {
2236 if (val >= 0)
2237 dev_err(hsotg->dev,
2238 "%d invalid for max_transfer_size. Check HW configuration.\n",
2239 val);
2240 val = hsotg->hw_params.max_transfer_size;
2241 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2242 }
2243
2244 hsotg->core_params->max_transfer_size = val;
2245 }
2246
2247 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2248 {
2249 int valid = 1;
2250
2251 if (val < 15 || val > hsotg->hw_params.max_packet_count)
2252 valid = 0;
2253
2254 if (!valid) {
2255 if (val >= 0)
2256 dev_err(hsotg->dev,
2257 "%d invalid for max_packet_count. Check HW configuration.\n",
2258 val);
2259 val = hsotg->hw_params.max_packet_count;
2260 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2261 }
2262
2263 hsotg->core_params->max_packet_count = val;
2264 }
2265
2266 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2267 {
2268 int valid = 1;
2269
2270 if (val < 1 || val > hsotg->hw_params.host_channels)
2271 valid = 0;
2272
2273 if (!valid) {
2274 if (val >= 0)
2275 dev_err(hsotg->dev,
2276 "%d invalid for host_channels. Check HW configuration.\n",
2277 val);
2278 val = hsotg->hw_params.host_channels;
2279 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2280 }
2281
2282 hsotg->core_params->host_channels = val;
2283 }
2284
2285 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2286 {
2287 int valid = 0;
2288 u32 hs_phy_type, fs_phy_type;
2289
2290 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2291 DWC2_PHY_TYPE_PARAM_ULPI)) {
2292 if (val >= 0) {
2293 dev_err(hsotg->dev, "Wrong value for phy_type\n");
2294 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2295 }
2296
2297 valid = 0;
2298 }
2299
2300 hs_phy_type = hsotg->hw_params.hs_phy_type;
2301 fs_phy_type = hsotg->hw_params.fs_phy_type;
2302 if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2303 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2304 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2305 valid = 1;
2306 else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2307 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2308 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2309 valid = 1;
2310 else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2311 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2312 valid = 1;
2313
2314 if (!valid) {
2315 if (val >= 0)
2316 dev_err(hsotg->dev,
2317 "%d invalid for phy_type. Check HW configuration.\n",
2318 val);
2319 val = DWC2_PHY_TYPE_PARAM_FS;
2320 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2321 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2322 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2323 val = DWC2_PHY_TYPE_PARAM_UTMI;
2324 else
2325 val = DWC2_PHY_TYPE_PARAM_ULPI;
2326 }
2327 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2328 }
2329
2330 hsotg->core_params->phy_type = val;
2331 }
2332
2333 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2334 {
2335 return hsotg->core_params->phy_type;
2336 }
2337
2338 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2339 {
2340 int valid = 1;
2341
2342 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2343 if (val >= 0) {
2344 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2345 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
2346 }
2347 valid = 0;
2348 }
2349
2350 if (val == DWC2_SPEED_PARAM_HIGH &&
2351 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2352 valid = 0;
2353
2354 if (!valid) {
2355 if (val >= 0)
2356 dev_err(hsotg->dev,
2357 "%d invalid for speed parameter. Check HW configuration.\n",
2358 val);
2359 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2360 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2361 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2362 }
2363
2364 hsotg->core_params->speed = val;
2365 }
2366
2367 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2368 {
2369 int valid = 1;
2370
2371 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2372 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2373 if (val >= 0) {
2374 dev_err(hsotg->dev,
2375 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2376 dev_err(hsotg->dev,
2377 "host_ls_low_power_phy_clk must be 0 or 1\n");
2378 }
2379 valid = 0;
2380 }
2381
2382 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2383 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2384 valid = 0;
2385
2386 if (!valid) {
2387 if (val >= 0)
2388 dev_err(hsotg->dev,
2389 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2390 val);
2391 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2392 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2393 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2394 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2395 val);
2396 }
2397
2398 hsotg->core_params->host_ls_low_power_phy_clk = val;
2399 }
2400
2401 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2402 {
2403 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2404 if (val >= 0) {
2405 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2406 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
2407 }
2408 val = 0;
2409 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
2410 }
2411
2412 hsotg->core_params->phy_ulpi_ddr = val;
2413 }
2414
2415 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2416 {
2417 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2418 if (val >= 0) {
2419 dev_err(hsotg->dev,
2420 "Wrong value for phy_ulpi_ext_vbus\n");
2421 dev_err(hsotg->dev,
2422 "phy_ulpi_ext_vbus must be 0 or 1\n");
2423 }
2424 val = 0;
2425 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2426 }
2427
2428 hsotg->core_params->phy_ulpi_ext_vbus = val;
2429 }
2430
2431 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2432 {
2433 int valid = 0;
2434
2435 switch (hsotg->hw_params.utmi_phy_data_width) {
2436 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2437 valid = (val == 8);
2438 break;
2439 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2440 valid = (val == 16);
2441 break;
2442 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2443 valid = (val == 8 || val == 16);
2444 break;
2445 }
2446
2447 if (!valid) {
2448 if (val >= 0) {
2449 dev_err(hsotg->dev,
2450 "%d invalid for phy_utmi_width. Check HW configuration.\n",
2451 val);
2452 }
2453 val = (hsotg->hw_params.utmi_phy_data_width ==
2454 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
2455 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2456 }
2457
2458 hsotg->core_params->phy_utmi_width = val;
2459 }
2460
2461 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2462 {
2463 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2464 if (val >= 0) {
2465 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2466 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2467 }
2468 val = 0;
2469 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2470 }
2471
2472 hsotg->core_params->ulpi_fs_ls = val;
2473 }
2474
2475 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2476 {
2477 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2478 if (val >= 0) {
2479 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2480 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2481 }
2482 val = 0;
2483 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2484 }
2485
2486 hsotg->core_params->ts_dline = val;
2487 }
2488
2489 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2490 {
2491 int valid = 1;
2492
2493 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2494 if (val >= 0) {
2495 dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2496 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2497 }
2498
2499 valid = 0;
2500 }
2501
2502 if (val == 1 && !(hsotg->hw_params.i2c_enable))
2503 valid = 0;
2504
2505 if (!valid) {
2506 if (val >= 0)
2507 dev_err(hsotg->dev,
2508 "%d invalid for i2c_enable. Check HW configuration.\n",
2509 val);
2510 val = hsotg->hw_params.i2c_enable;
2511 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2512 }
2513
2514 hsotg->core_params->i2c_enable = val;
2515 }
2516
2517 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2518 {
2519 int valid = 1;
2520
2521 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2522 if (val >= 0) {
2523 dev_err(hsotg->dev,
2524 "Wrong value for en_multiple_tx_fifo,\n");
2525 dev_err(hsotg->dev,
2526 "en_multiple_tx_fifo must be 0 or 1\n");
2527 }
2528 valid = 0;
2529 }
2530
2531 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
2532 valid = 0;
2533
2534 if (!valid) {
2535 if (val >= 0)
2536 dev_err(hsotg->dev,
2537 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2538 val);
2539 val = hsotg->hw_params.en_multiple_tx_fifo;
2540 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2541 }
2542
2543 hsotg->core_params->en_multiple_tx_fifo = val;
2544 }
2545
2546 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2547 {
2548 int valid = 1;
2549
2550 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2551 if (val >= 0) {
2552 dev_err(hsotg->dev,
2553 "'%d' invalid for parameter reload_ctl\n", val);
2554 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2555 }
2556 valid = 0;
2557 }
2558
2559 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
2560 valid = 0;
2561
2562 if (!valid) {
2563 if (val >= 0)
2564 dev_err(hsotg->dev,
2565 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2566 val);
2567 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
2568 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2569 }
2570
2571 hsotg->core_params->reload_ctl = val;
2572 }
2573
2574 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
2575 {
2576 if (val != -1)
2577 hsotg->core_params->ahbcfg = val;
2578 else
2579 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2580 GAHBCFG_HBSTLEN_SHIFT;
2581 }
2582
2583 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2584 {
2585 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2586 if (val >= 0) {
2587 dev_err(hsotg->dev,
2588 "'%d' invalid for parameter otg_ver\n", val);
2589 dev_err(hsotg->dev,
2590 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2591 }
2592 val = 0;
2593 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2594 }
2595
2596 hsotg->core_params->otg_ver = val;
2597 }
2598
2599 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
2600 {
2601 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2602 if (val >= 0) {
2603 dev_err(hsotg->dev,
2604 "'%d' invalid for parameter uframe_sched\n",
2605 val);
2606 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
2607 }
2608 val = 1;
2609 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
2610 }
2611
2612 hsotg->core_params->uframe_sched = val;
2613 }
2614
2615 /*
2616 * This function is called during module initialization to pass module parameters
2617 * for the DWC_otg core.
2618 */
2619 void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
2620 const struct dwc2_core_params *params)
2621 {
2622 dev_dbg(hsotg->dev, "%s()\n", __func__);
2623
2624 dwc2_set_param_otg_cap(hsotg, params->otg_cap);
2625 dwc2_set_param_dma_enable(hsotg, params->dma_enable);
2626 dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
2627 dwc2_set_param_host_support_fs_ls_low_power(hsotg,
2628 params->host_support_fs_ls_low_power);
2629 dwc2_set_param_enable_dynamic_fifo(hsotg,
2630 params->enable_dynamic_fifo);
2631 dwc2_set_param_host_rx_fifo_size(hsotg,
2632 params->host_rx_fifo_size);
2633 dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
2634 params->host_nperio_tx_fifo_size);
2635 dwc2_set_param_host_perio_tx_fifo_size(hsotg,
2636 params->host_perio_tx_fifo_size);
2637 dwc2_set_param_max_transfer_size(hsotg,
2638 params->max_transfer_size);
2639 dwc2_set_param_max_packet_count(hsotg,
2640 params->max_packet_count);
2641 dwc2_set_param_host_channels(hsotg, params->host_channels);
2642 dwc2_set_param_phy_type(hsotg, params->phy_type);
2643 dwc2_set_param_speed(hsotg, params->speed);
2644 dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
2645 params->host_ls_low_power_phy_clk);
2646 dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
2647 dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
2648 params->phy_ulpi_ext_vbus);
2649 dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
2650 dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
2651 dwc2_set_param_ts_dline(hsotg, params->ts_dline);
2652 dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
2653 dwc2_set_param_en_multiple_tx_fifo(hsotg,
2654 params->en_multiple_tx_fifo);
2655 dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
2656 dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
2657 dwc2_set_param_otg_ver(hsotg, params->otg_ver);
2658 dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
2659 }
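/*
 * Illustrative sketch only (not part of this driver): a bus glue driver
 * would typically fill a struct dwc2_core_params and hand it to the core
 * through dwc2_set_parameters(), using -1 for any field that should fall
 * back to the hardware-derived default chosen by the setters above. The
 * variable name below is made up for illustration; the field names are the
 * ones consumed by dwc2_set_parameters():
 *
 *	static const struct dwc2_core_params example_params = {
 *		.otg_cap		= -1,	// -1 => auto-detect
 *		.dma_enable		= -1,
 *		.dma_desc_enable	= -1,
 *		.speed			= -1,
 *		.phy_type		= -1,
 *		// every remaining field would be listed here as well,
 *		// set to -1 for auto-detection
 *	};
 *
 *	dwc2_set_parameters(hsotg, &example_params);
 */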
2660
2661 /**
2662 * dwc2_get_hwparams() - During device initialization, read various
2663 * hardware configuration registers and interpret the contents.
2664 */
2665 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
2666 {
2667 struct dwc2_hw_params *hw = &hsotg->hw_params;
2668 unsigned width;
2669 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
2670 u32 hptxfsiz, grxfsiz, gnptxfsiz;
2671 u32 gusbcfg;
2672
2673 /*
2674 * Attempt to ensure this device is really a DWC_otg Controller.
2675 * Read and verify the GSNPSID register contents. The value should be
2676 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
2677 * as in "OTG version 2.xx" or "OTG version 3.xx".
2678 */
2679 hw->snpsid = readl(hsotg->regs + GSNPSID);
2680 if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
2681 (hw->snpsid & 0xfffff000) != 0x4f543000) {
2682 dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
2683 hw->snpsid);
2684 return -ENODEV;
2685 }
2686
2687 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
2688 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
2689 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
2690
2691 hwcfg1 = readl(hsotg->regs + GHWCFG1);
2692 hwcfg2 = readl(hsotg->regs + GHWCFG2);
2693 hwcfg3 = readl(hsotg->regs + GHWCFG3);
2694 hwcfg4 = readl(hsotg->regs + GHWCFG4);
2695 grxfsiz = readl(hsotg->regs + GRXFSIZ);
2696
2697 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
2698 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
2699 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
2700 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
2701 dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
2702
2703 /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */
2704 gusbcfg = readl(hsotg->regs + GUSBCFG);
2705 gusbcfg |= GUSBCFG_FORCEHOSTMODE;
2706 writel(gusbcfg, hsotg->regs + GUSBCFG);
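/*
 * Give the forced host mode time to take effect before sampling the
 * host FIFO size registers
 */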
2707 usleep_range(100000, 150000);
2708
2709 gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
2710 hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
2711 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
2712 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
2713 gusbcfg = readl(hsotg->regs + GUSBCFG);
2714 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
2715 writel(gusbcfg, hsotg->regs + GUSBCFG);
2716 usleep_range(100000, 150000);
2717
2718 /* hwcfg2 */
2719 hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
2720 GHWCFG2_OP_MODE_SHIFT;
2721 hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2722 GHWCFG2_ARCHITECTURE_SHIFT;
2723 hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2724 hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
2725 GHWCFG2_NUM_HOST_CHAN_SHIFT);
2726 hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
2727 GHWCFG2_HS_PHY_TYPE_SHIFT;
2728 hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
2729 GHWCFG2_FS_PHY_TYPE_SHIFT;
2730 hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
2731 GHWCFG2_NUM_DEV_EP_SHIFT;
2732 hw->nperio_tx_q_depth =
2733 (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
2734 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
2735 hw->host_perio_tx_q_depth =
2736 (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
2737 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
2738 hw->dev_token_q_depth =
2739 (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
2740 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
2741
2742 /* hwcfg3 */
2743 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
2744 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2745 hw->max_transfer_size = (1 << (width + 11)) - 1;
2746 /*
2747 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
2748 * coherent buffers with this size, and if it's too large we can
2749 * exhaust the coherent DMA pool.
2750 */
2751 if (hw->max_transfer_size > 65535)
2752 hw->max_transfer_size = 65535;
2753 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
2754 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2755 hw->max_packet_count = (1 << (width + 4)) - 1;
2756 hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
2757 hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
2758 GHWCFG3_DFIFO_DEPTH_SHIFT;
2759
2760 /* hwcfg4 */
2761 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
2762 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
2763 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
2764 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
2765 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
2766 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
2767 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
2768
2769 /* fifo sizes */
2770 hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
2771 GRXFSIZ_DEPTH_SHIFT;
2772 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2773 FIFOSIZE_DEPTH_SHIFT;
2774 hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2775 FIFOSIZE_DEPTH_SHIFT;
2776
2777 dev_dbg(hsotg->dev, "Detected values from hardware:\n");
2778 dev_dbg(hsotg->dev, " op_mode=%d\n",
2779 hw->op_mode);
2780 dev_dbg(hsotg->dev, " arch=%d\n",
2781 hw->arch);
2782 dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
2783 hw->dma_desc_enable);
2784 dev_dbg(hsotg->dev, " power_optimized=%d\n",
2785 hw->power_optimized);
2786 dev_dbg(hsotg->dev, " i2c_enable=%d\n",
2787 hw->i2c_enable);
2788 dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
2789 hw->hs_phy_type);
2790 dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
2791 hw->fs_phy_type);
2792 dev_dbg(hsotg->dev, " utmi_phy_data_wdith=%d\n",
2793 hw->utmi_phy_data_width);
2794 dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
2795 hw->num_dev_ep);
2796 dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
2797 hw->num_dev_perio_in_ep);
2798 dev_dbg(hsotg->dev, " host_channels=%d\n",
2799 hw->host_channels);
2800 dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
2801 hw->max_transfer_size);
2802 dev_dbg(hsotg->dev, " max_packet_count=%d\n",
2803 hw->max_packet_count);
2804 dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
2805 hw->nperio_tx_q_depth);
2806 dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
2807 hw->host_perio_tx_q_depth);
2808 dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
2809 hw->dev_token_q_depth);
2810 dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
2811 hw->enable_dynamic_fifo);
2812 dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
2813 hw->en_multiple_tx_fifo);
2814 dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
2815 hw->total_fifo_size);
2816 dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
2817 hw->host_rx_fifo_size);
2818 dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
2819 hw->host_nperio_tx_fifo_size);
2820 dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
2821 hw->host_perio_tx_fifo_size);
2822 dev_dbg(hsotg->dev, "\n");
2823
2824 return 0;
2825 }
2826
2827 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2828 {
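/* Report the OTG supplement revision as BCD: 2.00 when otg_ver is 1, otherwise 1.03 */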
2829 return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
2830 }
2831
2832 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
2833 {
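/*
 * Reading all ones from a core register typically means the controller
 * is powered off or its bus clock is gated, so treat it as not alive.
 */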
2834 if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
2835 return false;
2836 else
2837 return true;
2838 }
2839
2840 /**
2841 * dwc2_enable_global_interrupts() - Enables the controller's Global
2842 * Interrupt in the AHB Config register
2843 *
2844 * @hsotg: Programming view of DWC_otg controller
2845 */
2846 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2847 {
2848 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2849
2850 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2851 writel(ahbcfg, hsotg->regs + GAHBCFG);
2852 }
2853
2854 /**
2855 * dwc2_disable_global_interrupts() - Disables the controller's Global
2856 * Interrupt in the AHB Config register
2857 *
2858 * @hsotg: Programming view of DWC_otg controller
2859 */
2860 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2861 {
2862 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2863
2864 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2865 writel(ahbcfg, hsotg->regs + GAHBCFG);
2866 }
2867
2868 MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
2869 MODULE_AUTHOR("Synopsys, Inc.");
2870 MODULE_LICENSE("Dual BSD/GPL");
2871