1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * core.c - DesignWare USB3 DRD Controller Core file
4  *
5  * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
6  *
7  * Authors: Felipe Balbi <balbi@ti.com>,
8  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9  */
10 
11 #include <linux/clk.h>
12 #include <linux/version.h>
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/interrupt.h>
20 #include <linux/ioport.h>
21 #include <linux/io.h>
22 #include <linux/list.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/of.h>
26 #include <linux/of_graph.h>
27 #include <linux/acpi.h>
28 #include <linux/pinctrl/consumer.h>
29 #include <linux/reset.h>
30 #include <linux/bitfield.h>
31 
32 #include <linux/usb/ch9.h>
33 #include <linux/usb/gadget.h>
34 #include <linux/usb/of.h>
35 #include <linux/usb/otg.h>
36 
37 #include "core.h"
38 #include "gadget.h"
39 #include "io.h"
40 
41 #include "debug.h"
42 
43 #define DWC3_DEFAULT_AUTOSUSPEND_DELAY	5000 /* ms */
44 
45 /**
46  * dwc3_get_dr_mode - Validates and sets dr_mode
47  * @dwc: pointer to our context structure
48  */
49 static int dwc3_get_dr_mode(struct dwc3 *dwc)
50 {
51 	enum usb_dr_mode mode;
52 	struct device *dev = dwc->dev;
53 	unsigned int hw_mode;
54 
55 	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
56 		dwc->dr_mode = USB_DR_MODE_OTG;
57 
58 	mode = dwc->dr_mode;
59 	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
60 
61 	switch (hw_mode) {
62 	case DWC3_GHWPARAMS0_MODE_GADGET:
63 		if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
64 			dev_err(dev,
65 				"Controller does not support host mode.\n");
66 			return -EINVAL;
67 		}
68 		mode = USB_DR_MODE_PERIPHERAL;
69 		break;
70 	case DWC3_GHWPARAMS0_MODE_HOST:
71 		if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
72 			dev_err(dev,
73 				"Controller does not support device mode.\n");
74 			return -EINVAL;
75 		}
76 		mode = USB_DR_MODE_HOST;
77 		break;
78 	default:
79 		if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
80 			mode = USB_DR_MODE_HOST;
81 		else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
82 			mode = USB_DR_MODE_PERIPHERAL;
83 
84 		/*
85 		 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
86 		 * mode. If the controller supports DRD but the dr_mode is not
87 		 * specified or set to OTG, then set the mode to peripheral.
88 		 */
89 		if (mode == USB_DR_MODE_OTG && !dwc->edev &&
90 		    (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
91 		     !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
92 		    !DWC3_VER_IS_PRIOR(DWC3, 330A))
93 			mode = USB_DR_MODE_PERIPHERAL;
94 	}
95 
96 	if (mode != dwc->dr_mode) {
97 		dev_warn(dev,
98 			 "Configuration mismatch. dr_mode forced to %s\n",
99 			 mode == USB_DR_MODE_HOST ? "host" : "gadget");
100 
101 		dwc->dr_mode = mode;
102 	}
103 
104 	return 0;
105 }
106 
107 void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
108 {
109 	u32 reg;
110 
111 	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
112 	if (enable && !dwc->dis_u3_susphy_quirk)
113 		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
114 	else
115 		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
116 
117 	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
118 
119 	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
120 	if (enable && !dwc->dis_u2_susphy_quirk)
121 		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
122 	else
123 		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
124 
125 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
126 }
127 
128 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
129 {
130 	u32 reg;
131 
132 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
133 	reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
134 	reg |= DWC3_GCTL_PRTCAPDIR(mode);
135 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
136 
137 	dwc->current_dr_role = mode;
138 }
139 
140 static void __dwc3_set_mode(struct work_struct *work)
141 {
142 	struct dwc3 *dwc = work_to_dwc(work);
143 	unsigned long flags;
144 	int ret;
145 	u32 reg;
146 	u32 desired_dr_role;
147 
148 	mutex_lock(&dwc->mutex);
149 	spin_lock_irqsave(&dwc->lock, flags);
150 	desired_dr_role = dwc->desired_dr_role;
151 	spin_unlock_irqrestore(&dwc->lock, flags);
152 
153 	pm_runtime_get_sync(dwc->dev);
154 
155 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
156 		dwc3_otg_update(dwc, 0);
157 
158 	if (!desired_dr_role)
159 		goto out;
160 
161 	if (desired_dr_role == dwc->current_dr_role)
162 		goto out;
163 
164 	if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
165 		goto out;
166 
167 	switch (dwc->current_dr_role) {
168 	case DWC3_GCTL_PRTCAP_HOST:
169 		dwc3_host_exit(dwc);
170 		break;
171 	case DWC3_GCTL_PRTCAP_DEVICE:
172 		dwc3_gadget_exit(dwc);
173 		dwc3_event_buffers_cleanup(dwc);
174 		break;
175 	case DWC3_GCTL_PRTCAP_OTG:
176 		dwc3_otg_exit(dwc);
177 		spin_lock_irqsave(&dwc->lock, flags);
178 		dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
179 		spin_unlock_irqrestore(&dwc->lock, flags);
180 		dwc3_otg_update(dwc, 1);
181 		break;
182 	default:
183 		break;
184 	}
185 
186 	/*
187 	 * When current_dr_role is not set, there's no role switching.
188 	 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
189 	 */
190 	if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
191 			DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
192 			desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
193 		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
194 		reg |= DWC3_GCTL_CORESOFTRESET;
195 		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
196 
197 		/*
198 		 * Wait for the internal clocks to synchronize. DWC_usb31 and
199 		 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
200 		 * keep it consistent across different IPs, let's wait up to
201 		 * 100ms before clearing GCTL.CORESOFTRESET.
202 		 */
203 		msleep(100);
204 
205 		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
206 		reg &= ~DWC3_GCTL_CORESOFTRESET;
207 		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
208 	}
209 
210 	spin_lock_irqsave(&dwc->lock, flags);
211 
212 	dwc3_set_prtcap(dwc, desired_dr_role);
213 
214 	spin_unlock_irqrestore(&dwc->lock, flags);
215 
216 	switch (desired_dr_role) {
217 	case DWC3_GCTL_PRTCAP_HOST:
218 		ret = dwc3_host_init(dwc);
219 		if (ret) {
220 			dev_err(dwc->dev, "failed to initialize host\n");
221 		} else {
222 			if (dwc->usb2_phy)
223 				otg_set_vbus(dwc->usb2_phy->otg, true);
224 			phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
225 			phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
226 			if (dwc->dis_split_quirk) {
227 				reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
228 				reg |= DWC3_GUCTL3_SPLITDISABLE;
229 				dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
230 			}
231 		}
232 		break;
233 	case DWC3_GCTL_PRTCAP_DEVICE:
234 		dwc3_core_soft_reset(dwc);
235 
236 		dwc3_event_buffers_setup(dwc);
237 
238 		if (dwc->usb2_phy)
239 			otg_set_vbus(dwc->usb2_phy->otg, false);
240 		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
241 		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
242 
243 		ret = dwc3_gadget_init(dwc);
244 		if (ret)
245 			dev_err(dwc->dev, "failed to initialize peripheral\n");
246 		break;
247 	case DWC3_GCTL_PRTCAP_OTG:
248 		dwc3_otg_init(dwc);
249 		dwc3_otg_update(dwc, 0);
250 		break;
251 	default:
252 		break;
253 	}
254 
255 out:
256 	pm_runtime_mark_last_busy(dwc->dev);
257 	pm_runtime_put_autosuspend(dwc->dev);
258 	mutex_unlock(&dwc->mutex);
259 }
260 
261 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
262 {
263 	unsigned long flags;
264 
265 	if (dwc->dr_mode != USB_DR_MODE_OTG)
266 		return;
267 
268 	spin_lock_irqsave(&dwc->lock, flags);
269 	dwc->desired_dr_role = mode;
270 	spin_unlock_irqrestore(&dwc->lock, flags);
271 
272 	queue_work(system_freezable_wq, &dwc->drd_work);
273 }
274 
275 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
276 {
277 	struct dwc3		*dwc = dep->dwc;
278 	u32			reg;
279 
280 	dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
281 			DWC3_GDBGFIFOSPACE_NUM(dep->number) |
282 			DWC3_GDBGFIFOSPACE_TYPE(type));
283 
284 	reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);
285 
286 	return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
287 }
288 
289 /**
290  * dwc3_core_soft_reset - Issues core soft reset and PHY reset
291  * @dwc: pointer to our context structure
292  */
293 int dwc3_core_soft_reset(struct dwc3 *dwc)
294 {
295 	u32		reg;
296 	int		retries = 1000;
297 
298 	/*
299 	 * We're resetting only the device side because, if we're in host mode,
300 	 * the xHCI driver will reset the host block. If dwc3 was configured for
301 	 * host-only mode, then we can return early.
302 	 */
303 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
304 		return 0;
305 
306 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
307 	reg |= DWC3_DCTL_CSFTRST;
308 	reg &= ~DWC3_DCTL_RUN_STOP;
309 	dwc3_gadget_dctl_write_safe(dwc, reg);
310 
311 	/*
312 	 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFTRST bit
313 	 * is cleared only after all the clocks are synchronized. This can
314 	 * take a little more than 50ms. Set the polling rate at 20ms
315 	 * for 10 times instead.
316 	 */
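	/*
	 * Worked out, this gives a worst case of roughly 10 * 20 ms = 200 ms
	 * of sleeping on those cores, versus about 1000 * 1 us = 1 ms of
	 * busy-waiting on older ones (plus register read overhead).
	 */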
317 	if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
318 		retries = 10;
319 
320 	do {
321 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
322 		if (!(reg & DWC3_DCTL_CSFTRST))
323 			goto done;
324 
325 		if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
326 			msleep(20);
327 		else
328 			udelay(1);
329 	} while (--retries);
330 
331 	dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
332 	return -ETIMEDOUT;
333 
334 done:
335 	/*
336 	 * For DWC_usb31 controller 1.80a and prior, once the DCTL.CSFTRST bit
337 	 * is cleared, we must wait at least 50ms before accessing the PHY
338 	 * domain (synchronization delay).
339 	 */
340 	if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
341 		msleep(50);
342 
343 	return 0;
344 }
345 
346 /**
347  * dwc3_frame_length_adjustment - Adjusts frame length if required
348  * @dwc: Pointer to our controller context structure
349  */
350 static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
351 {
352 	u32 reg;
353 	u32 dft;
354 
355 	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
356 		return;
357 
358 	if (dwc->fladj == 0)
359 		return;
360 
361 	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
362 	dft = reg & DWC3_GFLADJ_30MHZ_MASK;
363 	if (dft != dwc->fladj) {
364 		reg &= ~DWC3_GFLADJ_30MHZ_MASK;
365 		reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
366 		dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
367 	}
368 }
369 
370 /**
371  * dwc3_ref_clk_period - Reference clock period configuration
372  *		Default reference clock period depends on hardware
373  *		configuration. For systems with a reference clock that differs
374  *		from the default, this will set the clock period in the DWC3_GUCTL
375  *		register.
376  * @dwc: Pointer to our controller context structure
377  */
378 static void dwc3_ref_clk_period(struct dwc3 *dwc)
379 {
380 	unsigned long period;
381 	unsigned long fladj;
382 	unsigned long decr;
383 	unsigned long rate;
384 	u32 reg;
385 
386 	if (dwc->ref_clk) {
387 		rate = clk_get_rate(dwc->ref_clk);
388 		if (!rate)
389 			return;
390 		period = NSEC_PER_SEC / rate;
391 	} else if (dwc->ref_clk_per) {
392 		period = dwc->ref_clk_per;
393 		rate = NSEC_PER_SEC / period;
394 	} else {
395 		return;
396 	}
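	/*
	 * Illustrative example (values assumed, not from this driver): a board
	 * with a 19.2 MHz reference clock can either expose it as the "ref"
	 * clock (rate = 19200000, period = 52 ns) or, when no clk is modeled,
	 * set the "snps,ref-clock-period-ns" property to <52>; both paths end
	 * up programming the same GUCTL.REFCLKPER value below.
	 */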
397 
398 	reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
399 	reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
400 	reg |=  FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
401 	dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
402 
403 	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
404 		return;
405 
406 	/*
407 	 * The calculation below is
408 	 *
409 	 * 125000 * (NSEC_PER_SEC / (rate * period) - 1)
410 	 *
411 	 * but rearranged for fixed-point arithmetic. The division must be
412 	 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
413 	 * neither does rate * period).
414 	 *
415 	 * Note that rate * period ~= NSEC_PER_SEC, minus the number of
416 	 * nanoseconds of error caused by the truncation which happened during
417 	 * the division when calculating rate or period (whichever one was
418 	 * derived from the other). We first calculate the relative error, then
419 	 * scale it to units of 8 ppm.
420 	 */
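	/*
	 * Worked example (illustrative): with a 24 MHz ref_clk, period is
	 * truncated to 41 ns, so rate * period = 984000000. Then
	 * fladj = 125000 * 1000000000 / 984000000 - 125000 = 2032,
	 * i.e. the ~1.6% truncation error expressed in 8 ppm units.
	 */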
421 	fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
422 	fladj -= 125000;
423 
424 	/*
425 	 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
426 	 */
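	/*
	 * E.g. (illustrative) rate = 24 MHz gives decr = 20, which programs
	 * 240MHZDECR = 10 and 240MHZDECR_PLS1 = 0 below.
	 */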
427 	decr = 480000000 / rate;
428 
429 	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
430 	reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
431 	    &  ~DWC3_GFLADJ_240MHZDECR
432 	    &  ~DWC3_GFLADJ_240MHZDECR_PLS1;
433 	reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
434 	    |  FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
435 	    |  FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);
436 
437 	if (dwc->gfladj_refclk_lpm_sel)
438 		reg |=  DWC3_GFLADJ_REFCLK_LPM_SEL;
439 
440 	dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
441 }
442 
443 /**
444  * dwc3_free_one_event_buffer - Frees one event buffer
445  * @dwc: Pointer to our controller context structure
446  * @evt: Pointer to event buffer to be freed
447  */
448 static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
449 		struct dwc3_event_buffer *evt)
450 {
451 	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
452 }
453 
454 /**
455  * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
456  * @dwc: Pointer to our controller context structure
457  * @length: size of the event buffer
458  *
459  * Returns a pointer to the allocated event buffer structure on success
460  * otherwise ERR_PTR(errno).
461  */
462 static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
463 		unsigned int length)
464 {
465 	struct dwc3_event_buffer	*evt;
466 
467 	evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
468 	if (!evt)
469 		return ERR_PTR(-ENOMEM);
470 
471 	evt->dwc	= dwc;
472 	evt->length	= length;
473 	evt->cache	= devm_kzalloc(dwc->dev, length, GFP_KERNEL);
474 	if (!evt->cache)
475 		return ERR_PTR(-ENOMEM);
476 
477 	evt->buf	= dma_alloc_coherent(dwc->sysdev, length,
478 			&evt->dma, GFP_KERNEL);
479 	if (!evt->buf)
480 		return ERR_PTR(-ENOMEM);
481 
482 	return evt;
483 }
484 
485 /**
486  * dwc3_free_event_buffers - frees all allocated event buffers
487  * @dwc: Pointer to our controller context structure
488  */
489 static void dwc3_free_event_buffers(struct dwc3 *dwc)
490 {
491 	struct dwc3_event_buffer	*evt;
492 
493 	evt = dwc->ev_buf;
494 	if (evt)
495 		dwc3_free_one_event_buffer(dwc, evt);
496 }
497 
498 /**
499  * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
500  * @dwc: pointer to our controller context structure
501  * @length: size of event buffer
502  *
503  * Returns 0 on success otherwise negative errno. In the error case, dwc
504  * may contain some buffers allocated but not all which were requested.
505  */
506 static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
507 {
508 	struct dwc3_event_buffer *evt;
509 	unsigned int hw_mode;
510 
511 	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
512 	if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
513 		dwc->ev_buf = NULL;
514 		return 0;
515 	}
516 
517 	evt = dwc3_alloc_one_event_buffer(dwc, length);
518 	if (IS_ERR(evt)) {
519 		dev_err(dwc->dev, "can't allocate event buffer\n");
520 		return PTR_ERR(evt);
521 	}
522 	dwc->ev_buf = evt;
523 
524 	return 0;
525 }
526 
527 /**
528  * dwc3_event_buffers_setup - setup our allocated event buffers
529  * @dwc: pointer to our controller context structure
530  *
531  * Returns 0 on success otherwise negative errno.
532  */
533 int dwc3_event_buffers_setup(struct dwc3 *dwc)
534 {
535 	struct dwc3_event_buffer	*evt;
536 
537 	if (!dwc->ev_buf)
538 		return 0;
539 
540 	evt = dwc->ev_buf;
541 	evt->lpos = 0;
542 	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
543 			lower_32_bits(evt->dma));
544 	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
545 			upper_32_bits(evt->dma));
546 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
547 			DWC3_GEVNTSIZ_SIZE(evt->length));
548 	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
549 
550 	return 0;
551 }
552 
553 void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
554 {
555 	struct dwc3_event_buffer	*evt;
556 
557 	if (!dwc->ev_buf)
558 		return;
559 
560 	evt = dwc->ev_buf;
561 
562 	evt->lpos = 0;
563 
564 	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
565 	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
566 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
567 			| DWC3_GEVNTSIZ_SIZE(0));
568 	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
569 }
570 
571 static void dwc3_core_num_eps(struct dwc3 *dwc)
572 {
573 	struct dwc3_hwparams	*parms = &dwc->hwparams;
574 
575 	dwc->num_eps = DWC3_NUM_EPS(parms);
576 }
577 
578 static void dwc3_cache_hwparams(struct dwc3 *dwc)
579 {
580 	struct dwc3_hwparams	*parms = &dwc->hwparams;
581 
582 	parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
583 	parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
584 	parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
585 	parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
586 	parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
587 	parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
588 	parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
589 	parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
590 	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
591 
592 	if (DWC3_IP_IS(DWC32))
593 		parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
594 }
595 
596 static int dwc3_core_ulpi_init(struct dwc3 *dwc)
597 {
598 	int intf;
599 	int ret = 0;
600 
601 	intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
602 
603 	if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
604 	    (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
605 	     dwc->hsphy_interface &&
606 	     !strncmp(dwc->hsphy_interface, "ulpi", 4)))
607 		ret = dwc3_ulpi_init(dwc);
608 
609 	return ret;
610 }
611 
612 /**
613  * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
614  * @dwc: Pointer to our controller context structure
615  *
616  * Returns 0 on success. The USB PHY interfaces are configured but not
617  * initialized. The PHY interfaces and the PHYs get initialized together with
618  * the core in dwc3_core_init.
619  */
620 static int dwc3_phy_setup(struct dwc3 *dwc)
621 {
622 	u32 reg;
623 
624 	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
625 
626 	/*
627 	 * Make sure UX_EXIT_PX is cleared as that causes issues with some
628 	 * PHYs. Also, this bit is not supposed to be used in normal operation.
629 	 */
630 	reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
631 
632 	/*
633 	 * Above DWC_usb3.0 1.94a, it is recommended to set
634 	 * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
635 	 * So default value will be '0' when the core is reset. Application
636 	 * needs to set it to '1' after the core initialization is completed.
637 	 *
638 	 * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
639 	 * cleared after power-on reset, and it can be set after core
640 	 * initialization.
641 	 */
642 	reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
643 
644 	if (dwc->u2ss_inp3_quirk)
645 		reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
646 
647 	if (dwc->dis_rxdet_inp3_quirk)
648 		reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;
649 
650 	if (dwc->req_p1p2p3_quirk)
651 		reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;
652 
653 	if (dwc->del_p1p2p3_quirk)
654 		reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;
655 
656 	if (dwc->del_phy_power_chg_quirk)
657 		reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;
658 
659 	if (dwc->lfps_filter_quirk)
660 		reg |= DWC3_GUSB3PIPECTL_LFPSFILT;
661 
662 	if (dwc->rx_detect_poll_quirk)
663 		reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;
664 
665 	if (dwc->tx_de_emphasis_quirk)
666 		reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
667 
668 	if (dwc->dis_del_phy_power_chg_quirk)
669 		reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
670 
671 	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
672 
673 	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
674 
675 	/* Select the HS PHY interface */
676 	switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
677 	case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
678 		if (dwc->hsphy_interface &&
679 				!strncmp(dwc->hsphy_interface, "utmi", 4)) {
680 			reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
681 			break;
682 		} else if (dwc->hsphy_interface &&
683 				!strncmp(dwc->hsphy_interface, "ulpi", 4)) {
684 			reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
685 			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
686 		} else {
687 			/* Relying on default value. */
688 			if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
689 				break;
690 		}
691 		fallthrough;
692 	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
693 	default:
694 		break;
695 	}
696 
697 	switch (dwc->hsphy_mode) {
698 	case USBPHY_INTERFACE_MODE_UTMI:
699 		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
700 		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
701 		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
702 		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
703 		break;
704 	case USBPHY_INTERFACE_MODE_UTMIW:
705 		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
706 		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
707 		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
708 		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
709 		break;
710 	default:
711 		break;
712 	}
713 
714 	/*
715 	 * Above DWC_usb3.0 1.94a, it is recommended to set
716 	 * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
717 	 * So default value will be '0' when the core is reset. Application
718 	 * needs to set it to '1' after the core initialization is completed.
719 	 *
720 	 * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
721 	 * after power-on reset, and it can be set after core initialization.
722 	 */
723 	reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
724 
725 	if (dwc->dis_enblslpm_quirk)
726 		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
727 	else
728 		reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
729 
730 	if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
731 		reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;
732 
733 	/*
734 	 * Some ULPI USB PHYs do not support an internal VBUS supply; driving
735 	 * the CPEN pin then requires setting the ULPI DRVVBUSEXTERNAL bit in
736 	 * the OTG_CTRL register. The controller sets the USB2 PHY
737 	 * ULPIEXTVBUSDRV bit [17] of the GUSB2PHYCFG register to drive VBUS
738 	 * with an external supply.
739 	 */
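	/*
	 * For example, a board whose ULPI PHY needs an external VBUS supply
	 * would set the "snps,ulpi-ext-vbus-drv" device property, which
	 * dwc3_get_properties() turns into the flag checked below.
	 */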
740 	if (dwc->ulpi_ext_vbus_drv)
741 		reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;
742 
743 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
744 
745 	return 0;
746 }
747 
748 static int dwc3_phy_init(struct dwc3 *dwc)
749 {
750 	int ret;
751 
752 	usb_phy_init(dwc->usb2_phy);
753 	usb_phy_init(dwc->usb3_phy);
754 
755 	ret = phy_init(dwc->usb2_generic_phy);
756 	if (ret < 0)
757 		goto err_shutdown_usb3_phy;
758 
759 	ret = phy_init(dwc->usb3_generic_phy);
760 	if (ret < 0)
761 		goto err_exit_usb2_phy;
762 
763 	return 0;
764 
765 err_exit_usb2_phy:
766 	phy_exit(dwc->usb2_generic_phy);
767 err_shutdown_usb3_phy:
768 	usb_phy_shutdown(dwc->usb3_phy);
769 	usb_phy_shutdown(dwc->usb2_phy);
770 
771 	return ret;
772 }
773 
774 static void dwc3_phy_exit(struct dwc3 *dwc)
775 {
776 	phy_exit(dwc->usb3_generic_phy);
777 	phy_exit(dwc->usb2_generic_phy);
778 
779 	usb_phy_shutdown(dwc->usb3_phy);
780 	usb_phy_shutdown(dwc->usb2_phy);
781 }
782 
783 static int dwc3_phy_power_on(struct dwc3 *dwc)
784 {
785 	int ret;
786 
787 	usb_phy_set_suspend(dwc->usb2_phy, 0);
788 	usb_phy_set_suspend(dwc->usb3_phy, 0);
789 
790 	ret = phy_power_on(dwc->usb2_generic_phy);
791 	if (ret < 0)
792 		goto err_suspend_usb3_phy;
793 
794 	ret = phy_power_on(dwc->usb3_generic_phy);
795 	if (ret < 0)
796 		goto err_power_off_usb2_phy;
797 
798 	return 0;
799 
800 err_power_off_usb2_phy:
801 	phy_power_off(dwc->usb2_generic_phy);
802 err_suspend_usb3_phy:
803 	usb_phy_set_suspend(dwc->usb3_phy, 1);
804 	usb_phy_set_suspend(dwc->usb2_phy, 1);
805 
806 	return ret;
807 }
808 
809 static void dwc3_phy_power_off(struct dwc3 *dwc)
810 {
811 	phy_power_off(dwc->usb3_generic_phy);
812 	phy_power_off(dwc->usb2_generic_phy);
813 
814 	usb_phy_set_suspend(dwc->usb3_phy, 1);
815 	usb_phy_set_suspend(dwc->usb2_phy, 1);
816 }
817 
818 static int dwc3_clk_enable(struct dwc3 *dwc)
819 {
820 	int ret;
821 
822 	ret = clk_prepare_enable(dwc->bus_clk);
823 	if (ret)
824 		return ret;
825 
826 	ret = clk_prepare_enable(dwc->ref_clk);
827 	if (ret)
828 		goto disable_bus_clk;
829 
830 	ret = clk_prepare_enable(dwc->susp_clk);
831 	if (ret)
832 		goto disable_ref_clk;
833 
834 	return 0;
835 
836 disable_ref_clk:
837 	clk_disable_unprepare(dwc->ref_clk);
838 disable_bus_clk:
839 	clk_disable_unprepare(dwc->bus_clk);
840 	return ret;
841 }
842 
843 static void dwc3_clk_disable(struct dwc3 *dwc)
844 {
845 	clk_disable_unprepare(dwc->susp_clk);
846 	clk_disable_unprepare(dwc->ref_clk);
847 	clk_disable_unprepare(dwc->bus_clk);
848 }
849 
850 static void dwc3_core_exit(struct dwc3 *dwc)
851 {
852 	dwc3_event_buffers_cleanup(dwc);
853 	dwc3_phy_power_off(dwc);
854 	dwc3_phy_exit(dwc);
855 	dwc3_clk_disable(dwc);
856 	reset_control_assert(dwc->reset);
857 }
858 
859 static bool dwc3_core_is_valid(struct dwc3 *dwc)
860 {
861 	u32 reg;
862 
863 	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
864 	dwc->ip = DWC3_GSNPS_ID(reg);
865 
866 	/* This should read as U3 followed by revision number */
867 	if (DWC3_IP_IS(DWC3)) {
868 		dwc->revision = reg;
869 	} else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
870 		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
871 		dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
872 	} else {
873 		return false;
874 	}
875 
876 	return true;
877 }
878 
879 static void dwc3_core_setup_global_control(struct dwc3 *dwc)
880 {
881 	unsigned int power_opt;
882 	unsigned int hw_mode;
883 	u32 reg;
884 
885 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
886 	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
887 	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
888 	power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
889 
890 	switch (power_opt) {
891 	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
892 		/*
893 		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
894 		 * issue which would cause xHCI compliance tests to fail.
895 		 *
896 		 * Because of that we cannot enable clock gating on such
897 		 * configurations.
898 		 *
899 		 * Refers to:
900 		 *
901 		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
902 		 * SOF/ITP Mode Used
903 		 */
904 		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
905 				dwc->dr_mode == USB_DR_MODE_OTG) &&
906 				DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
907 			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
908 		else
909 			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
910 		break;
911 	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
912 		/*
913 		 * REVISIT Enabling this bit so that host-mode hibernation
914 		 * will work. Device-mode hibernation is not yet implemented.
915 		 */
916 		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
917 		break;
918 	default:
919 		/* nothing */
920 		break;
921 	}
922 
923 	/*
924 	 * This is a workaround for STAR#4846132, which only affects
925 	 * DWC_usb31 version 2.00a operating in host mode.
926 	 *
927 	 * There is a problem in DWC_usb31 version 2.00a operating
928 	 * in host mode that causes a CSR read timeout when a CSR read
929 	 * coincides with RAM clock gating entry. Disable clock gating
930 	 * to avoid this, sacrificing some power consumption during
931 	 * normal operation.
932 	 */
933 	if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
934 	    hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
935 		reg |= DWC3_GCTL_DSBLCLKGTNG;
936 
937 	/* check if current dwc3 is on simulation board */
938 	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
939 		dev_info(dwc->dev, "Running with FPGA optimizations\n");
940 		dwc->is_fpga = true;
941 	}
942 
943 	WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
944 			"disable_scramble cannot be used on non-FPGA builds\n");
945 
946 	if (dwc->disable_scramble_quirk && dwc->is_fpga)
947 		reg |= DWC3_GCTL_DISSCRAMBLE;
948 	else
949 		reg &= ~DWC3_GCTL_DISSCRAMBLE;
950 
951 	if (dwc->u2exit_lfps_quirk)
952 		reg |= DWC3_GCTL_U2EXIT_LFPS;
953 
954 	/*
955 	 * WORKAROUND: DWC3 revisions <1.90a have a bug
956 	 * where the device can fail to connect at SuperSpeed
957 	 * and fall back to high-speed mode, which causes
958 	 * the device to enter a Connect/Disconnect loop
959 	 */
960 	if (DWC3_VER_IS_PRIOR(DWC3, 190A))
961 		reg |= DWC3_GCTL_U2RSTECN;
962 
963 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
964 }
965 
966 static int dwc3_core_get_phy(struct dwc3 *dwc);
967 static int dwc3_core_ulpi_init(struct dwc3 *dwc);
968 
969 /* set global incr burst type configuration registers */
970 static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
971 {
972 	struct device *dev = dwc->dev;
973 	/* incrx_mode : for INCR burst type. */
974 	bool incrx_mode;
975 	/* incrx_size : for size of INCRX burst. */
976 	u32 incrx_size;
977 	u32 *vals;
978 	u32 cfg;
979 	int ntype;
980 	int ret;
981 	int i;
982 
983 	cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
984 
985 	/*
986 	 * Handle property "snps,incr-burst-type-adjustment".
987 	 * Get the number of values in this property:
988 	 * result <= 0 means this property is not supported.
989 	 * result == 1 means INCRx burst mode is supported.
990 	 * result > 1 means undefined length burst mode is supported.
991 	 */
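	/*
	 * Illustrative DT examples (check the binding document for the
	 * authoritative format):
	 *   snps,incr-burst-type-adjustment = <16>;       -> fixed INCR16 bursts
	 *   snps,incr-burst-type-adjustment = <1 4 8 16>; -> undefined length
	 *                                                    bursts, up to INCR16
	 */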
992 	ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
993 	if (ntype <= 0)
994 		return;
995 
996 	vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
997 	if (!vals)
998 		return;
999 
1000 	/* Get INCR burst type, and parse it */
1001 	ret = device_property_read_u32_array(dev,
1002 			"snps,incr-burst-type-adjustment", vals, ntype);
1003 	if (ret) {
1004 		kfree(vals);
1005 		dev_err(dev, "Error to get property\n");
1006 		return;
1007 	}
1008 
1009 	incrx_size = *vals;
1010 
1011 	if (ntype > 1) {
1012 		/* INCRX (undefined length) burst mode */
1013 		incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
1014 		for (i = 1; i < ntype; i++) {
1015 			if (vals[i] > incrx_size)
1016 				incrx_size = vals[i];
1017 		}
1018 	} else {
1019 		/* INCRX burst mode */
1020 		incrx_mode = INCRX_BURST_MODE;
1021 	}
1022 
1023 	kfree(vals);
1024 
1025 	/* Enable Undefined Length INCR Burst and Enable INCRx Burst */
1026 	cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
1027 	if (incrx_mode)
1028 		cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
1029 	switch (incrx_size) {
1030 	case 256:
1031 		cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
1032 		break;
1033 	case 128:
1034 		cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
1035 		break;
1036 	case 64:
1037 		cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
1038 		break;
1039 	case 32:
1040 		cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
1041 		break;
1042 	case 16:
1043 		cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
1044 		break;
1045 	case 8:
1046 		cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
1047 		break;
1048 	case 4:
1049 		cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
1050 		break;
1051 	case 1:
1052 		break;
1053 	default:
1054 		dev_err(dev, "Invalid property\n");
1055 		break;
1056 	}
1057 
1058 	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
1059 }
1060 
1061 static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
1062 {
1063 	u32 scale;
1064 	u32 reg;
1065 
1066 	if (!dwc->susp_clk)
1067 		return;
1068 
1069 	/*
1070 	 * The power down scale field specifies how many suspend_clk
1071 	 * periods fit into a 16KHz clock period. When performing
1072 	 * the division, round up the remainder.
1073 	 *
1074 	 * The power down scale value is calculated using the fastest
1075 	 * frequency of the suspend_clk. If it isn't fixed (but within
1076 	 * the accuracy requirement), the driver may not know the max
1077 	 * rate of the suspend_clk, so only update the power down scale
1078 	 * if the default is less than the calculated value from
1079 	 * clk_get_rate() or if the default is questionably high
1080 	 * (3x or more) to be within the requirement.
1081 	 */
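	/*
	 * Example (assumed rate): a 32.768 kHz suspend_clk gives
	 * scale = DIV_ROUND_UP(32768, 16000) = 3, so PWRDNSCALE is only
	 * rewritten below if the current field is below 3 or above 9 (3x).
	 */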
1082 	scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
1083 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1084 	if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
1085 	    (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
1086 		reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
1087 		reg |= DWC3_GCTL_PWRDNSCALE(scale);
1088 		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1089 	}
1090 }
1091 
1092 static void dwc3_config_threshold(struct dwc3 *dwc)
1093 {
1094 	u32 reg;
1095 	u8 rx_thr_num;
1096 	u8 rx_maxburst;
1097 	u8 tx_thr_num;
1098 	u8 tx_maxburst;
1099 
1100 	/*
1101 	 * Both the number of packets and the max burst settings must be
1102 	 * configured to enable the RX and/or TX thresholds.
1103 	 */
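	/*
	 * For example (illustrative), a periodic RX threshold of 2 packets
	 * with a max burst of 4 would come from the "snps,rx-thr-num-pkt-prd"
	 * and "snps,rx-max-burst-prd" device properties parsed in
	 * dwc3_get_properties().
	 */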
1104 	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
1105 		rx_thr_num = dwc->rx_thr_num_pkt_prd;
1106 		rx_maxburst = dwc->rx_max_burst_prd;
1107 		tx_thr_num = dwc->tx_thr_num_pkt_prd;
1108 		tx_maxburst = dwc->tx_max_burst_prd;
1109 
1110 		if (rx_thr_num && rx_maxburst) {
1111 			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1112 			reg |= DWC31_RXTHRNUMPKTSEL_PRD;
1113 
1114 			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
1115 			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
1116 
1117 			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
1118 			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
1119 
1120 			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1121 		}
1122 
1123 		if (tx_thr_num && tx_maxburst) {
1124 			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1125 			reg |= DWC31_TXTHRNUMPKTSEL_PRD;
1126 
1127 			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
1128 			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
1129 
1130 			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
1131 			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
1132 
1133 			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1134 		}
1135 	}
1136 
1137 	rx_thr_num = dwc->rx_thr_num_pkt;
1138 	rx_maxburst = dwc->rx_max_burst;
1139 	tx_thr_num = dwc->tx_thr_num_pkt;
1140 	tx_maxburst = dwc->tx_max_burst;
1141 
1142 	if (DWC3_IP_IS(DWC3)) {
1143 		if (rx_thr_num && rx_maxburst) {
1144 			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1145 			reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
1146 
1147 			reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
1148 			reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
1149 
1150 			reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
1151 			reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
1152 
1153 			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1154 		}
1155 
1156 		if (tx_thr_num && tx_maxburst) {
1157 			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1158 			reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
1159 
1160 			reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
1161 			reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
1162 
1163 			reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
1164 			reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
1165 
1166 			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1167 		}
1168 	} else {
1169 		if (rx_thr_num && rx_maxburst) {
1170 			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1171 			reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
1172 
1173 			reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
1174 			reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
1175 
1176 			reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
1177 			reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
1178 
1179 			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1180 		}
1181 
1182 		if (tx_thr_num && tx_maxburst) {
1183 			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
1184 			reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
1185 
1186 			reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
1187 			reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
1188 
1189 			reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
1190 			reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
1191 
1192 			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
1193 		}
1194 	}
1195 }
1196 
1197 /**
1198  * dwc3_core_init - Low-level initialization of DWC3 Core
1199  * @dwc: Pointer to our controller context structure
1200  *
1201  * Returns 0 on success otherwise negative errno.
1202  */
1203 static int dwc3_core_init(struct dwc3 *dwc)
1204 {
1205 	unsigned int		hw_mode;
1206 	u32			reg;
1207 	int			ret;
1208 
1209 	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
1210 
1211 	/*
1212 	 * Write Linux Version Code to our GUID register so it's easy to figure
1213 	 * out in which kernel version a bug was found.
1214 	 */
1215 	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
1216 
1217 	ret = dwc3_phy_setup(dwc);
1218 	if (ret)
1219 		return ret;
1220 
1221 	if (!dwc->ulpi_ready) {
1222 		ret = dwc3_core_ulpi_init(dwc);
1223 		if (ret) {
1224 			if (ret == -ETIMEDOUT) {
1225 				dwc3_core_soft_reset(dwc);
1226 				ret = -EPROBE_DEFER;
1227 			}
1228 			return ret;
1229 		}
1230 		dwc->ulpi_ready = true;
1231 	}
1232 
1233 	if (!dwc->phys_ready) {
1234 		ret = dwc3_core_get_phy(dwc);
1235 		if (ret)
1236 			goto err_exit_ulpi;
1237 		dwc->phys_ready = true;
1238 	}
1239 
1240 	ret = dwc3_phy_init(dwc);
1241 	if (ret)
1242 		goto err_exit_ulpi;
1243 
1244 	ret = dwc3_core_soft_reset(dwc);
1245 	if (ret)
1246 		goto err_exit_phy;
1247 
1248 	dwc3_core_setup_global_control(dwc);
1249 	dwc3_core_num_eps(dwc);
1250 
1251 	/* Set power down scale of suspend_clk */
1252 	dwc3_set_power_down_clk_scale(dwc);
1253 
1254 	/* Adjust Frame Length */
1255 	dwc3_frame_length_adjustment(dwc);
1256 
1257 	/* Adjust Reference Clock Period */
1258 	dwc3_ref_clk_period(dwc);
1259 
1260 	dwc3_set_incr_burst_type(dwc);
1261 
1262 	ret = dwc3_phy_power_on(dwc);
1263 	if (ret)
1264 		goto err_exit_phy;
1265 
1266 	ret = dwc3_event_buffers_setup(dwc);
1267 	if (ret) {
1268 		dev_err(dwc->dev, "failed to setup event buffers\n");
1269 		goto err_power_off_phy;
1270 	}
1271 
1272 	/*
1273 	 * ENDXFER polling is available on version 3.10a and later of
1274 	 * the DWC_usb3 controller. It is NOT available in the
1275 	 * DWC_usb31 controller.
1276 	 */
1277 	if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
1278 		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
1279 		reg |= DWC3_GUCTL2_RST_ACTBITLATER;
1280 		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
1281 	}
1282 
1283 	/*
1284 	 * When configured in HOST mode, after issuing a U3/L2 exit the
1285 	 * controller fails to send the proper CRC checksum in the CRC5
1286 	 * field. Because of this behaviour a Transaction Error is generated,
1287 	 * resulting in reset and re-enumeration of the attached USB device.
1288 	 * termsel, xcvrsel and opmode all become 0 at the end of resume.
1289 	 * Setting bit 10 of GUCTL1 corrects this problem. This option exists
1290 	 * to support certain legacy ULPI PHYs.
1291 	 */
1292 	if (dwc->resume_hs_terminations) {
1293 		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1294 		reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
1295 		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1296 	}
1297 
1298 	if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
1299 		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
1300 
1301 		/*
1302 		 * Enable hardware control of sending remote wakeup
1303 		 * in HS when the device is in the L1 state.
1304 		 */
1305 		if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
1306 			reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
1307 
1308 		/*
1309 		 * Decouple USB 2.0 L1 & L2 events which will allow for
1310 		 * gadget driver to only receive U3/L2 suspend & wakeup
1311 		 * events and prevent the more frequent L1 LPM transitions
1312 		 * from interrupting the driver.
1313 		 */
1314 		if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
1315 			reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;
1316 
1317 		if (dwc->dis_tx_ipgap_linecheck_quirk)
1318 			reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
1319 
1320 		if (dwc->parkmode_disable_ss_quirk)
1321 			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
1322 
1323 		if (dwc->parkmode_disable_hs_quirk)
1324 			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;
1325 
1326 		if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY) &&
1327 		    (dwc->maximum_speed == USB_SPEED_HIGH ||
1328 		     dwc->maximum_speed == USB_SPEED_FULL))
1329 			reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
1330 
1331 		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
1332 	}
1333 
1334 	dwc3_config_threshold(dwc);
1335 
1336 	/*
1337 	 * Modify this for all supported Super Speed ports when
1338 	 * multiport support is added.
1339 	 */
1340 	if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
1341 	    (DWC3_IP_IS(DWC31)) &&
1342 	    dwc->maximum_speed == USB_SPEED_SUPER) {
1343 		reg = dwc3_readl(dwc->regs, DWC3_LLUCTL);
1344 		reg |= DWC3_LLUCTL_FORCE_GEN1;
1345 		dwc3_writel(dwc->regs, DWC3_LLUCTL, reg);
1346 	}
1347 
1348 	return 0;
1349 
1350 err_power_off_phy:
1351 	dwc3_phy_power_off(dwc);
1352 err_exit_phy:
1353 	dwc3_phy_exit(dwc);
1354 err_exit_ulpi:
1355 	dwc3_ulpi_exit(dwc);
1356 
1357 	return ret;
1358 }
1359 
1360 static int dwc3_core_get_phy(struct dwc3 *dwc)
1361 {
1362 	struct device		*dev = dwc->dev;
1363 	struct device_node	*node = dev->of_node;
1364 	int ret;
1365 
1366 	if (node) {
1367 		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
1368 		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
1369 	} else {
1370 		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
1371 		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
1372 	}
1373 
1374 	if (IS_ERR(dwc->usb2_phy)) {
1375 		ret = PTR_ERR(dwc->usb2_phy);
1376 		if (ret == -ENXIO || ret == -ENODEV)
1377 			dwc->usb2_phy = NULL;
1378 		else
1379 			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
1380 	}
1381 
1382 	if (IS_ERR(dwc->usb3_phy)) {
1383 		ret = PTR_ERR(dwc->usb3_phy);
1384 		if (ret == -ENXIO || ret == -ENODEV)
1385 			dwc->usb3_phy = NULL;
1386 		else
1387 			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
1388 	}
1389 
1390 	dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
1391 	if (IS_ERR(dwc->usb2_generic_phy)) {
1392 		ret = PTR_ERR(dwc->usb2_generic_phy);
1393 		if (ret == -ENOSYS || ret == -ENODEV)
1394 			dwc->usb2_generic_phy = NULL;
1395 		else
1396 			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
1397 	}
1398 
1399 	dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
1400 	if (IS_ERR(dwc->usb3_generic_phy)) {
1401 		ret = PTR_ERR(dwc->usb3_generic_phy);
1402 		if (ret == -ENOSYS || ret == -ENODEV)
1403 			dwc->usb3_generic_phy = NULL;
1404 		else
1405 			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
1406 	}
1407 
1408 	return 0;
1409 }
1410 
1411 static int dwc3_core_init_mode(struct dwc3 *dwc)
1412 {
1413 	struct device *dev = dwc->dev;
1414 	int ret;
1415 
1416 	switch (dwc->dr_mode) {
1417 	case USB_DR_MODE_PERIPHERAL:
1418 		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
1419 
1420 		if (dwc->usb2_phy)
1421 			otg_set_vbus(dwc->usb2_phy->otg, false);
1422 		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
1423 		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
1424 
1425 		ret = dwc3_gadget_init(dwc);
1426 		if (ret)
1427 			return dev_err_probe(dev, ret, "failed to initialize gadget\n");
1428 		break;
1429 	case USB_DR_MODE_HOST:
1430 		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
1431 
1432 		if (dwc->usb2_phy)
1433 			otg_set_vbus(dwc->usb2_phy->otg, true);
1434 		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
1435 		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
1436 
1437 		ret = dwc3_host_init(dwc);
1438 		if (ret)
1439 			return dev_err_probe(dev, ret, "failed to initialize host\n");
1440 		break;
1441 	case USB_DR_MODE_OTG:
1442 		INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
1443 		ret = dwc3_drd_init(dwc);
1444 		if (ret)
1445 			return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
1446 		break;
1447 	default:
1448 		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
1449 		return -EINVAL;
1450 	}
1451 
1452 	return 0;
1453 }
1454 
1455 static void dwc3_core_exit_mode(struct dwc3 *dwc)
1456 {
1457 	switch (dwc->dr_mode) {
1458 	case USB_DR_MODE_PERIPHERAL:
1459 		dwc3_gadget_exit(dwc);
1460 		break;
1461 	case USB_DR_MODE_HOST:
1462 		dwc3_host_exit(dwc);
1463 		break;
1464 	case USB_DR_MODE_OTG:
1465 		dwc3_drd_exit(dwc);
1466 		break;
1467 	default:
1468 		/* do nothing */
1469 		break;
1470 	}
1471 
1472 	/* de-assert DRVVBUS for HOST and OTG mode */
1473 	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
1474 }
1475 
1476 static void dwc3_get_properties(struct dwc3 *dwc)
1477 {
1478 	struct device		*dev = dwc->dev;
1479 	u8			lpm_nyet_threshold;
1480 	u8			tx_de_emphasis;
1481 	u8			hird_threshold;
1482 	u8			rx_thr_num_pkt = 0;
1483 	u8			rx_max_burst = 0;
1484 	u8			tx_thr_num_pkt = 0;
1485 	u8			tx_max_burst = 0;
1486 	u8			rx_thr_num_pkt_prd = 0;
1487 	u8			rx_max_burst_prd = 0;
1488 	u8			tx_thr_num_pkt_prd = 0;
1489 	u8			tx_max_burst_prd = 0;
1490 	u8			tx_fifo_resize_max_num;
1491 	u16			num_hc_interrupters;
1492 	const char		*usb_psy_name;
1493 	int			ret;
1494 
1495 	/* default to highest possible threshold */
1496 	lpm_nyet_threshold = 0xf;
1497 
1498 	/* default to -3.5dB de-emphasis */
1499 	tx_de_emphasis = 1;
1500 
1501 	/*
1502 	 * default to assert utmi_sleep_n and use maximum allowed HIRD
1503 	 * threshold value of 0b1100
1504 	 */
1505 	hird_threshold = 12;
1506 
1507 	/*
1508 	 * default to a TXFIFO size large enough to fit 6 max packets.  This
1509 	 * allows for systems with larger bus latencies to have some headroom
1510 	 * for endpoints that have a large bMaxBurst value.
1511 	 */
1512 	tx_fifo_resize_max_num = 6;
1513 
1514 	/* default to a single XHCI interrupter */
1515 	num_hc_interrupters = 1;
1516 
1517 	dwc->maximum_speed = usb_get_maximum_speed(dev);
1518 	dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
1519 	dwc->dr_mode = usb_get_dr_mode(dev);
1520 	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
1521 
1522 	dwc->sysdev_is_parent = device_property_read_bool(dev,
1523 				"linux,sysdev_is_parent");
1524 	if (dwc->sysdev_is_parent)
1525 		dwc->sysdev = dwc->dev->parent;
1526 	else
1527 		dwc->sysdev = dwc->dev;
1528 
1529 	dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
1530 
1531 	ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
1532 	if (ret >= 0) {
1533 		dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
1534 		if (!dwc->usb_psy)
1535 			dev_err(dev, "couldn't get usb power supply\n");
1536 	}
1537 
1538 	dwc->has_lpm_erratum = device_property_read_bool(dev,
1539 				"snps,has-lpm-erratum");
1540 	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
1541 				&lpm_nyet_threshold);
1542 	dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
1543 				"snps,is-utmi-l1-suspend");
1544 	device_property_read_u8(dev, "snps,hird-threshold",
1545 				&hird_threshold);
1546 	dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
1547 				"snps,dis-start-transfer-quirk");
1548 	dwc->usb3_lpm_capable = device_property_read_bool(dev,
1549 				"snps,usb3_lpm_capable");
1550 	dwc->usb2_lpm_disable = device_property_read_bool(dev,
1551 				"snps,usb2-lpm-disable");
1552 	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
1553 				"snps,usb2-gadget-lpm-disable");
1554 	device_property_read_u8(dev, "snps,rx-thr-num-pkt",
1555 				&rx_thr_num_pkt);
1556 	device_property_read_u8(dev, "snps,rx-max-burst",
1557 				&rx_max_burst);
1558 	device_property_read_u8(dev, "snps,tx-thr-num-pkt",
1559 				&tx_thr_num_pkt);
1560 	device_property_read_u8(dev, "snps,tx-max-burst",
1561 				&tx_max_burst);
1562 	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
1563 				&rx_thr_num_pkt_prd);
1564 	device_property_read_u8(dev, "snps,rx-max-burst-prd",
1565 				&rx_max_burst_prd);
1566 	device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
1567 				&tx_thr_num_pkt_prd);
1568 	device_property_read_u8(dev, "snps,tx-max-burst-prd",
1569 				&tx_max_burst_prd);
1570 	device_property_read_u16(dev, "num-hc-interrupters",
1571 				&num_hc_interrupters);
1572 	/* DWC3 core allowed to have a max of 8 interrupters */
1573 	if (num_hc_interrupters > 8)
1574 		num_hc_interrupters = 8;
1575 
1576 	dwc->do_fifo_resize = device_property_read_bool(dev,
1577 							"tx-fifo-resize");
1578 	if (dwc->do_fifo_resize)
1579 		device_property_read_u8(dev, "tx-fifo-max-num",
1580 					&tx_fifo_resize_max_num);
1581 
1582 	dwc->disable_scramble_quirk = device_property_read_bool(dev,
1583 				"snps,disable_scramble_quirk");
1584 	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
1585 				"snps,u2exit_lfps_quirk");
1586 	dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
1587 				"snps,u2ss_inp3_quirk");
1588 	dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
1589 				"snps,req_p1p2p3_quirk");
1590 	dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
1591 				"snps,del_p1p2p3_quirk");
1592 	dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
1593 				"snps,del_phy_power_chg_quirk");
1594 	dwc->lfps_filter_quirk = device_property_read_bool(dev,
1595 				"snps,lfps_filter_quirk");
1596 	dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
1597 				"snps,rx_detect_poll_quirk");
1598 	dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
1599 				"snps,dis_u3_susphy_quirk");
1600 	dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
1601 				"snps,dis_u2_susphy_quirk");
1602 	dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
1603 				"snps,dis_enblslpm_quirk");
1604 	dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
1605 				"snps,dis-u1-entry-quirk");
1606 	dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
1607 				"snps,dis-u2-entry-quirk");
1608 	dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
1609 				"snps,dis_rxdet_inp3_quirk");
1610 	dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
1611 				"snps,dis-u2-freeclk-exists-quirk");
1612 	dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
1613 				"snps,dis-del-phy-power-chg-quirk");
1614 	dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
1615 				"snps,dis-tx-ipgap-linecheck-quirk");
1616 	dwc->resume_hs_terminations = device_property_read_bool(dev,
1617 				"snps,resume-hs-terminations");
1618 	dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev,
1619 				"snps,ulpi-ext-vbus-drv");
1620 	dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
1621 				"snps,parkmode-disable-ss-quirk");
1622 	dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev,
1623 				"snps,parkmode-disable-hs-quirk");
1624 	dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
1625 				"snps,gfladj-refclk-lpm-sel-quirk");
1626 
1627 	dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
1628 				"snps,tx_de_emphasis_quirk");
1629 	device_property_read_u8(dev, "snps,tx_de_emphasis",
1630 				&tx_de_emphasis);
1631 	device_property_read_string(dev, "snps,hsphy_interface",
1632 				    &dwc->hsphy_interface);
1633 	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
1634 				 &dwc->fladj);
1635 	device_property_read_u32(dev, "snps,ref-clock-period-ns",
1636 				 &dwc->ref_clk_per);
1637 
1638 	dwc->dis_metastability_quirk = device_property_read_bool(dev,
1639 				"snps,dis_metastability_quirk");
1640 
1641 	dwc->dis_split_quirk = device_property_read_bool(dev,
1642 				"snps,dis-split-quirk");
1643 
1644 	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
1645 	dwc->tx_de_emphasis = tx_de_emphasis;
1646 
1647 	dwc->hird_threshold = hird_threshold;
1648 
1649 	dwc->rx_thr_num_pkt = rx_thr_num_pkt;
1650 	dwc->rx_max_burst = rx_max_burst;
1651 
1652 	dwc->tx_thr_num_pkt = tx_thr_num_pkt;
1653 	dwc->tx_max_burst = tx_max_burst;
1654 
1655 	dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
1656 	dwc->rx_max_burst_prd = rx_max_burst_prd;
1657 
1658 	dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
1659 	dwc->tx_max_burst_prd = tx_max_burst_prd;
1660 
1661 	dwc->imod_interval = 0;
1662 
1663 	dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
1664 
1665 	dwc->num_hc_interrupters = num_hc_interrupters;
1666 }
1667 
1668 /* check whether the core supports IMOD */
1669 bool dwc3_has_imod(struct dwc3 *dwc)
1670 {
1671 	return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
1672 		DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
1673 		DWC3_IP_IS(DWC32);
1674 }
1675 
1676 static void dwc3_check_params(struct dwc3 *dwc)
1677 {
1678 	struct device *dev = dwc->dev;
1679 	unsigned int hwparam_gen =
1680 		DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
1681 
1682 	/* Check for proper value of imod_interval */
1683 	if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
1684 		dev_warn(dwc->dev, "Interrupt moderation not supported\n");
1685 		dwc->imod_interval = 0;
1686 	}
1687 
1688 	/*
1689 	 * Workaround for STAR 9000961433 which affects only version
1690 	 * 3.00a of the DWC_usb3 core. This prevents the controller
1691 	 * interrupt from being masked while handling events. IMOD
1692 	 * allows us to work around this issue. Enable it for the
1693 	 * affected version.
1694 	 */
1695 	if (!dwc->imod_interval &&
1696 	    DWC3_VER_IS(DWC3, 300A))
1697 		dwc->imod_interval = 1;
1698 
1699 	/* Check the maximum_speed parameter */
1700 	switch (dwc->maximum_speed) {
1701 	case USB_SPEED_FULL:
1702 	case USB_SPEED_HIGH:
1703 		break;
1704 	case USB_SPEED_SUPER:
1705 		if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
1706 			dev_warn(dev, "UDC doesn't support Gen 1\n");
1707 		break;
1708 	case USB_SPEED_SUPER_PLUS:
1709 		if ((DWC3_IP_IS(DWC32) &&
1710 		     hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
1711 		    (!DWC3_IP_IS(DWC32) &&
1712 		     hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
1713 			dev_warn(dev, "UDC doesn't support SSP\n");
1714 		break;
1715 	default:
1716 		dev_err(dev, "invalid maximum_speed parameter %d\n",
1717 			dwc->maximum_speed);
1718 		fallthrough;
1719 	case USB_SPEED_UNKNOWN:
1720 		switch (hwparam_gen) {
1721 		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1722 			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1723 			break;
1724 		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
1725 			if (DWC3_IP_IS(DWC32))
1726 				dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
1727 			else
1728 				dwc->maximum_speed = USB_SPEED_SUPER;
1729 			break;
1730 		case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
1731 			dwc->maximum_speed = USB_SPEED_HIGH;
1732 			break;
1733 		default:
1734 			dwc->maximum_speed = USB_SPEED_SUPER;
1735 			break;
1736 		}
1737 		break;
1738 	}
1739 
1740 	/*
1741 	 * Currently the controller does not have visibility into the HW
1742 	 * parameter to determine the maximum number of lanes the HW supports.
1743 	 * If the number of lanes is not specified in the device property, then
1744 	 * set the default to support dual-lane for DWC_usb32 and single-lane
1745 	 * for DWC_usb31 for super-speed-plus.
1746 	 */
1747 	if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
1748 		switch (dwc->max_ssp_rate) {
1749 		case USB_SSP_GEN_2x1:
1750 			if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
1751 				dev_warn(dev, "UDC only supports Gen 1\n");
1752 			break;
1753 		case USB_SSP_GEN_1x2:
1754 		case USB_SSP_GEN_2x2:
1755 			if (DWC3_IP_IS(DWC31))
1756 				dev_warn(dev, "UDC only supports single lane\n");
1757 			break;
1758 		case USB_SSP_GEN_UNKNOWN:
1759 		default:
1760 			switch (hwparam_gen) {
1761 			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
1762 				if (DWC3_IP_IS(DWC32))
1763 					dwc->max_ssp_rate = USB_SSP_GEN_2x2;
1764 				else
1765 					dwc->max_ssp_rate = USB_SSP_GEN_2x1;
1766 				break;
1767 			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
1768 				if (DWC3_IP_IS(DWC32))
1769 					dwc->max_ssp_rate = USB_SSP_GEN_1x2;
1770 				break;
1771 			}
1772 			break;
1773 		}
1774 	}
1775 }
1776 
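/**
 * dwc3_get_extcon - look up the extcon device used for role detection
 * @dwc: pointer to our context structure
 *
 * Tries, in order: the "extcon" phandle, the glue-provided
 * "linux,extcon-name" property (ACPI platforms), and finally the connector
 * node reachable through the USB PHY's OF graph. Returns NULL when role
 * switching is handled via "usb-role-switch" instead.
 */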
1777 static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
1778 {
1779 	struct device *dev = dwc->dev;
1780 	struct device_node *np_phy;
1781 	struct extcon_dev *edev = NULL;
1782 	const char *name;
1783 
1784 	if (device_property_read_bool(dev, "extcon"))
1785 		return extcon_get_edev_by_phandle(dev, 0);
1786 
1787 	/*
1788 	 * Device tree platforms should get extcon via phandle.
1789 	 * On ACPI platforms, we get the name from a device property.
1790 	 * This device property is for kernel internal use only and
1791 	 * is expected to be set by the glue code.
1792 	 */
1793 	if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
1794 		return extcon_get_extcon_dev(name);
1795 
1796 	/*
1797 	 * Check explicitly if "usb-role-switch" is used since
1798 	 * extcon_find_edev_by_node() cannot be used to check the absence of
1799 	 * an extcon device. In the absence of a device it will always return
1800 	 * EPROBE_DEFER.
1801 	 */
1802 	if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
1803 	    device_property_read_bool(dev, "usb-role-switch"))
1804 		return NULL;
1805 
1806 	/*
1807 	 * Try to get an extcon device from the USB PHY controller's "port"
1808 	 * node. Check if it has the "port" node first, to avoid printing the
1809 	 * error message from underlying code, as it's a valid case: extcon
1810 	 * device (and "port" node) may be missing in case of "usb-role-switch"
1811 	 * or OTG mode.
1812 	 */
1813 	np_phy = of_parse_phandle(dev->of_node, "phys", 0);
1814 	if (of_graph_is_present(np_phy)) {
1815 		struct device_node *np_conn;
1816 
1817 		np_conn = of_graph_get_remote_node(np_phy, -1, -1);
1818 		if (np_conn)
1819 			edev = extcon_find_edev_by_node(np_conn);
1820 		of_node_put(np_conn);
1821 	}
1822 	of_node_put(np_phy);
1823 
1824 	return edev;
1825 }
1826 
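/**
 * dwc3_get_clocks - acquire the optional bus, ref and suspend clocks
 * @dwc: pointer to our context structure
 *
 * All three clocks are optional; each lookup falls back to the legacy
 * "bus_clk", "ref_clk" and "suspend_clk" names used by older device trees.
 */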
1827 static int dwc3_get_clocks(struct dwc3 *dwc)
1828 {
1829 	struct device *dev = dwc->dev;
1830 
1831 	if (!dev->of_node)
1832 		return 0;
1833 
1834 	/*
1835 	 * Clocks are optional, but new DT platforms should support all clocks
1836 	 * as required by the DT-binding.
1837 	 * Some devices have different clock names in legacy device trees,
1838 	 * check for them to retain backwards compatibility.
1839 	 */
1840 	dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
1841 	if (IS_ERR(dwc->bus_clk)) {
1842 		return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
1843 				"could not get bus clock\n");
1844 	}
1845 
1846 	if (dwc->bus_clk == NULL) {
1847 		dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
1848 		if (IS_ERR(dwc->bus_clk)) {
1849 			return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
1850 					"could not get bus clock\n");
1851 		}
1852 	}
1853 
1854 	dwc->ref_clk = devm_clk_get_optional(dev, "ref");
1855 	if (IS_ERR(dwc->ref_clk)) {
1856 		return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
1857 				"could not get ref clock\n");
1858 	}
1859 
1860 	if (dwc->ref_clk == NULL) {
1861 		dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
1862 		if (IS_ERR(dwc->ref_clk)) {
1863 			return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
1864 					"could not get ref clock\n");
1865 		}
1866 	}
1867 
1868 	dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
1869 	if (IS_ERR(dwc->susp_clk)) {
1870 		return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
1871 				"could not get suspend clock\n");
1872 	}
1873 
1874 	if (dwc->susp_clk == NULL) {
1875 		dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
1876 		if (IS_ERR(dwc->susp_clk)) {
1877 			return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
1878 					"could not get suspend clock\n");
1879 		}
1880 	}
1881 
1882 	return 0;
1883 }
1884 
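/**
 * dwc3_probe - bind the dwc3 core to its platform device
 * @pdev: the platform device handed to us by the glue layer or firmware
 *
 * Maps the global register space (leaving the xHCI window for xhci-plat),
 * parses properties, takes resets and clocks, validates the core, allocates
 * event buffers and finally initializes the core in the selected dr_mode
 * via dwc3_core_init_mode().
 */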
1885 static int dwc3_probe(struct platform_device *pdev)
1886 {
1887 	struct device		*dev = &pdev->dev;
1888 	struct resource		*res, dwc_res;
1889 	void __iomem		*regs;
1890 	struct dwc3		*dwc;
1891 	int			ret;
1892 
1893 	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
1894 	if (!dwc)
1895 		return -ENOMEM;
1896 
1897 	dwc->dev = dev;
1898 
1899 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1900 	if (!res) {
1901 		dev_err(dev, "missing memory resource\n");
1902 		return -ENODEV;
1903 	}
1904 
1905 	dwc->xhci_resources[0].start = res->start;
1906 	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
1907 					DWC3_XHCI_REGS_END;
1908 	dwc->xhci_resources[0].flags = res->flags;
1909 	dwc->xhci_resources[0].name = res->name;
1910 
1911 	/*
1912 	 * Request memory region but exclude xHCI regs,
1913 	 * since it will be requested by the xhci-plat driver.
1914 	 */
1915 	dwc_res = *res;
1916 	dwc_res.start += DWC3_GLOBALS_REGS_START;
1917 
1918 	if (dev->of_node) {
1919 		struct device_node *parent = of_get_parent(dev->of_node);
1920 
1921 		if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
1922 			dwc_res.start -= DWC3_GLOBALS_REGS_START;
1923 			dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
1924 		}
1925 
1926 		of_node_put(parent);
1927 	}
1928 
1929 	regs = devm_ioremap_resource(dev, &dwc_res);
1930 	if (IS_ERR(regs))
1931 		return PTR_ERR(regs);
1932 
1933 	dwc->regs	= regs;
1934 	dwc->regs_size	= resource_size(&dwc_res);
1935 
1936 	dwc3_get_properties(dwc);
1937 
1938 	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
1939 	if (IS_ERR(dwc->reset)) {
1940 		ret = PTR_ERR(dwc->reset);
1941 		goto err_put_psy;
1942 	}
1943 
1944 	ret = dwc3_get_clocks(dwc);
1945 	if (ret)
1946 		goto err_put_psy;
1947 
1948 	ret = reset_control_deassert(dwc->reset);
1949 	if (ret)
1950 		goto err_put_psy;
1951 
1952 	ret = dwc3_clk_enable(dwc);
1953 	if (ret)
1954 		goto err_assert_reset;
1955 
1956 	if (!dwc3_core_is_valid(dwc)) {
1957 		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
1958 		ret = -ENODEV;
1959 		goto err_disable_clks;
1960 	}
1961 
1962 	platform_set_drvdata(pdev, dwc);
1963 	dwc3_cache_hwparams(dwc);
1964 
1965 	if (!dwc->sysdev_is_parent &&
1966 	    DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
1967 		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
1968 		if (ret)
1969 			goto err_disable_clks;
1970 	}
1971 
1972 	spin_lock_init(&dwc->lock);
1973 	mutex_init(&dwc->mutex);
1974 
1975 	pm_runtime_get_noresume(dev);
1976 	pm_runtime_set_active(dev);
1977 	pm_runtime_use_autosuspend(dev);
1978 	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
1979 	pm_runtime_enable(dev);
1980 
1981 	pm_runtime_forbid(dev);
1982 
1983 	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
1984 	if (ret) {
1985 		dev_err(dwc->dev, "failed to allocate event buffers\n");
1986 		ret = -ENOMEM;
1987 		goto err_allow_rpm;
1988 	}
1989 
1990 	dwc->edev = dwc3_get_extcon(dwc);
1991 	if (IS_ERR(dwc->edev)) {
1992 		ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
1993 		goto err_free_event_buffers;
1994 	}
1995 
1996 	ret = dwc3_get_dr_mode(dwc);
1997 	if (ret)
1998 		goto err_free_event_buffers;
1999 
2000 	ret = dwc3_core_init(dwc);
2001 	if (ret) {
2002 		dev_err_probe(dev, ret, "failed to initialize core\n");
2003 		goto err_free_event_buffers;
2004 	}
2005 
2006 	dwc3_check_params(dwc);
2007 	dwc3_debugfs_init(dwc);
2008 
2009 	ret = dwc3_core_init_mode(dwc);
2010 	if (ret)
2011 		goto err_exit_debugfs;
2012 
2013 	pm_runtime_put(dev);
2014 
2015 	dma_set_max_seg_size(dev, UINT_MAX);
2016 
2017 	return 0;
2018 
2019 err_exit_debugfs:
2020 	dwc3_debugfs_exit(dwc);
2021 	dwc3_event_buffers_cleanup(dwc);
2022 	dwc3_phy_power_off(dwc);
2023 	dwc3_phy_exit(dwc);
2024 	dwc3_ulpi_exit(dwc);
2025 err_free_event_buffers:
2026 	dwc3_free_event_buffers(dwc);
2027 err_allow_rpm:
2028 	pm_runtime_allow(dev);
2029 	pm_runtime_disable(dev);
2030 	pm_runtime_dont_use_autosuspend(dev);
2031 	pm_runtime_set_suspended(dev);
2032 	pm_runtime_put_noidle(dev);
2033 err_disable_clks:
2034 	dwc3_clk_disable(dwc);
2035 err_assert_reset:
2036 	reset_control_assert(dwc->reset);
2037 err_put_psy:
2038 	if (dwc->usb_psy)
2039 		power_supply_put(dwc->usb_psy);
2040 
2041 	return ret;
2042 }
2043 
2044 static void dwc3_remove(struct platform_device *pdev)
2045 {
2046 	struct dwc3	*dwc = platform_get_drvdata(pdev);
2047 
2048 	pm_runtime_get_sync(&pdev->dev);
2049 
2050 	dwc3_core_exit_mode(dwc);
2051 	dwc3_debugfs_exit(dwc);
2052 
2053 	dwc3_core_exit(dwc);
2054 	dwc3_ulpi_exit(dwc);
2055 
2056 	pm_runtime_allow(&pdev->dev);
2057 	pm_runtime_disable(&pdev->dev);
2058 	pm_runtime_dont_use_autosuspend(&pdev->dev);
2059 	pm_runtime_put_noidle(&pdev->dev);
2060 	/*
2061 	 * HACK: Clear the driver data, which is currently accessed by parent
2062 	 * glue drivers, before allowing the parent to suspend.
2063 	 */
2064 	platform_set_drvdata(pdev, NULL);
2065 	pm_runtime_set_suspended(&pdev->dev);
2066 
2067 	dwc3_free_event_buffers(dwc);
2068 
2069 	if (dwc->usb_psy)
2070 		power_supply_put(dwc->usb_psy);
2071 }
2072 
2073 #ifdef CONFIG_PM
2074 static int dwc3_core_init_for_resume(struct dwc3 *dwc)
2075 {
2076 	int ret;
2077 
2078 	ret = reset_control_deassert(dwc->reset);
2079 	if (ret)
2080 		return ret;
2081 
2082 	ret = dwc3_clk_enable(dwc);
2083 	if (ret)
2084 		goto assert_reset;
2085 
2086 	ret = dwc3_core_init(dwc);
2087 	if (ret)
2088 		goto disable_clks;
2089 
2090 	return 0;
2091 
2092 disable_clks:
2093 	dwc3_clk_disable(dwc);
2094 assert_reset:
2095 	reset_control_assert(dwc->reset);
2096 
2097 	return ret;
2098 }
2099 
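/*
 * Suspend path shared by system and runtime PM. Device mode stops the
 * gadget and exits the core, host mode exits the core unless wakeup is
 * required (in which case only the PHY runtime PM references are dropped),
 * and OTG mode is torn down on system suspend only.
 */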
2100 static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
2101 {
2102 	u32 reg;
2103 
2104 	switch (dwc->current_dr_role) {
2105 	case DWC3_GCTL_PRTCAP_DEVICE:
2106 		if (pm_runtime_suspended(dwc->dev))
2107 			break;
2108 		dwc3_gadget_suspend(dwc);
2109 		synchronize_irq(dwc->irq_gadget);
2110 		dwc3_core_exit(dwc);
2111 		break;
2112 	case DWC3_GCTL_PRTCAP_HOST:
2113 		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2114 			dwc3_core_exit(dwc);
2115 			break;
2116 		}
2117 
2118 		/* Let the controller suspend the HSPHY before the PHY driver suspends */
2119 		if (dwc->dis_u2_susphy_quirk ||
2120 		    dwc->dis_enblslpm_quirk) {
2121 			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2122 			reg |=  DWC3_GUSB2PHYCFG_ENBLSLPM |
2123 				DWC3_GUSB2PHYCFG_SUSPHY;
2124 			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2125 
2126 			/* Give some time for USB2 PHY to suspend */
2127 			usleep_range(5000, 6000);
2128 		}
2129 
2130 		phy_pm_runtime_put_sync(dwc->usb2_generic_phy);
2131 		phy_pm_runtime_put_sync(dwc->usb3_generic_phy);
2132 		break;
2133 	case DWC3_GCTL_PRTCAP_OTG:
2134 		/* do nothing during runtime_suspend */
2135 		if (PMSG_IS_AUTO(msg))
2136 			break;
2137 
2138 		if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2139 			dwc3_gadget_suspend(dwc);
2140 			synchronize_irq(dwc->irq_gadget);
2141 		}
2142 
2143 		dwc3_otg_exit(dwc);
2144 		dwc3_core_exit(dwc);
2145 		break;
2146 	default:
2147 		/* do nothing */
2148 		break;
2149 	}
2150 
2151 	return 0;
2152 }
2153 
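/*
 * Resume counterpart of dwc3_suspend_common(): re-initializes the core where
 * needed and restores the role that was active before suspend, undoing the
 * GUSB2PHYCFG changes made on the host wakeup path.
 */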
2154 static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
2155 {
2156 	int		ret;
2157 	u32		reg;
2158 
2159 	switch (dwc->current_dr_role) {
2160 	case DWC3_GCTL_PRTCAP_DEVICE:
2161 		ret = dwc3_core_init_for_resume(dwc);
2162 		if (ret)
2163 			return ret;
2164 
2165 		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
2166 		dwc3_gadget_resume(dwc);
2167 		break;
2168 	case DWC3_GCTL_PRTCAP_HOST:
2169 		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
2170 			ret = dwc3_core_init_for_resume(dwc);
2171 			if (ret)
2172 				return ret;
2173 			dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
2174 			break;
2175 		}
2176 		/* Restore GUSB2PHYCFG bits that were modified in suspend */
2177 		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2178 		if (dwc->dis_u2_susphy_quirk)
2179 			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
2180 
2181 		if (dwc->dis_enblslpm_quirk)
2182 			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
2183 
2184 		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2185 
2186 		phy_pm_runtime_get_sync(dwc->usb2_generic_phy);
2187 		phy_pm_runtime_get_sync(dwc->usb3_generic_phy);
2188 		break;
2189 	case DWC3_GCTL_PRTCAP_OTG:
2190 		/* nothing to do on runtime_resume */
2191 		if (PMSG_IS_AUTO(msg))
2192 			break;
2193 
2194 		ret = dwc3_core_init_for_resume(dwc);
2195 		if (ret)
2196 			return ret;
2197 
2198 		dwc3_set_prtcap(dwc, dwc->current_dr_role);
2199 
2200 		dwc3_otg_init(dwc);
2201 		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
2202 			dwc3_otg_host_init(dwc);
2203 		} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
2204 			dwc3_gadget_resume(dwc);
2205 		}
2206 
2207 		break;
2208 	default:
2209 		/* do nothing */
2210 		break;
2211 	}
2212 
2213 	return 0;
2214 }
2215 
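/*
 * Runtime suspend is refused while a gadget connection is active; all other
 * roles may runtime suspend at any time.
 */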
2216 static int dwc3_runtime_checks(struct dwc3 *dwc)
2217 {
2218 	switch (dwc->current_dr_role) {
2219 	case DWC3_GCTL_PRTCAP_DEVICE:
2220 		if (dwc->connected)
2221 			return -EBUSY;
2222 		break;
2223 	case DWC3_GCTL_PRTCAP_HOST:
2224 	default:
2225 		/* do nothing */
2226 		break;
2227 	}
2228 
2229 	return 0;
2230 }
2231 
2232 static int dwc3_runtime_suspend(struct device *dev)
2233 {
2234 	struct dwc3     *dwc = dev_get_drvdata(dev);
2235 	int		ret;
2236 
2237 	if (dwc3_runtime_checks(dwc))
2238 		return -EBUSY;
2239 
2240 	ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
2241 	if (ret)
2242 		return ret;
2243 
2244 	return 0;
2245 }
2246 
2247 static int dwc3_runtime_resume(struct device *dev)
2248 {
2249 	struct dwc3     *dwc = dev_get_drvdata(dev);
2250 	int		ret;
2251 
2252 	ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
2253 	if (ret)
2254 		return ret;
2255 
2256 	switch (dwc->current_dr_role) {
2257 	case DWC3_GCTL_PRTCAP_DEVICE:
2258 		dwc3_gadget_process_pending_events(dwc);
2259 		break;
2260 	case DWC3_GCTL_PRTCAP_HOST:
2261 	default:
2262 		/* do nothing */
2263 		break;
2264 	}
2265 
2266 	pm_runtime_mark_last_busy(dev);
2267 
2268 	return 0;
2269 }
2270 
2271 static int dwc3_runtime_idle(struct device *dev)
2272 {
2273 	struct dwc3     *dwc = dev_get_drvdata(dev);
2274 
2275 	switch (dwc->current_dr_role) {
2276 	case DWC3_GCTL_PRTCAP_DEVICE:
2277 		if (dwc3_runtime_checks(dwc))
2278 			return -EBUSY;
2279 		break;
2280 	case DWC3_GCTL_PRTCAP_HOST:
2281 	default:
2282 		/* do nothing */
2283 		break;
2284 	}
2285 
2286 	pm_runtime_mark_last_busy(dev);
2287 	pm_runtime_autosuspend(dev);
2288 
2289 	return 0;
2290 }
2291 #endif /* CONFIG_PM */
2292 
2293 #ifdef CONFIG_PM_SLEEP
2294 static int dwc3_suspend(struct device *dev)
2295 {
2296 	struct dwc3	*dwc = dev_get_drvdata(dev);
2297 	int		ret;
2298 
2299 	ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
2300 	if (ret)
2301 		return ret;
2302 
2303 	pinctrl_pm_select_sleep_state(dev);
2304 
2305 	return 0;
2306 }
2307 
2308 static int dwc3_resume(struct device *dev)
2309 {
2310 	struct dwc3	*dwc = dev_get_drvdata(dev);
2311 	int		ret;
2312 
2313 	pinctrl_pm_select_default_state(dev);
2314 
2315 	pm_runtime_disable(dev);
2316 	pm_runtime_set_active(dev);
2317 
2318 	ret = dwc3_resume_common(dwc, PMSG_RESUME);
2319 	if (ret) {
2320 		pm_runtime_set_suspended(dev);
2321 		return ret;
2322 	}
2323 
2324 	pm_runtime_enable(dev);
2325 
2326 	return 0;
2327 }
2328 
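/*
 * On completion of a system resume in host mode, re-assert the GUCTL3 split
 * transaction disable quirk if the platform requests it.
 */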
2329 static void dwc3_complete(struct device *dev)
2330 {
2331 	struct dwc3	*dwc = dev_get_drvdata(dev);
2332 	u32		reg;
2333 
2334 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
2335 			dwc->dis_split_quirk) {
2336 		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
2337 		reg |= DWC3_GUCTL3_SPLITDISABLE;
2338 		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
2339 	}
2340 }
2341 #else
2342 #define dwc3_complete NULL
2343 #endif /* CONFIG_PM_SLEEP */
2344 
2345 static const struct dev_pm_ops dwc3_dev_pm_ops = {
2346 	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
2347 	.complete = dwc3_complete,
2348 	SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
2349 			dwc3_runtime_idle)
2350 };
2351 
2352 #ifdef CONFIG_OF
2353 static const struct of_device_id of_dwc3_match[] = {
2354 	{
2355 		.compatible = "snps,dwc3"
2356 	},
2357 	{
2358 		.compatible = "synopsys,dwc3"
2359 	},
2360 	{ },
2361 };
2362 MODULE_DEVICE_TABLE(of, of_dwc3_match);
2363 #endif
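/*
 * For reference, a minimal and purely illustrative device tree node that
 * would match the "snps,dwc3" compatible above. Register, interrupt and
 * clock specifiers are placeholders that depend on the SoC integration;
 * the clock-names correspond to the lookups in dwc3_get_clocks():
 *
 *	usb@fe800000 {
 *		compatible = "snps,dwc3";
 *		reg = <0xfe800000 0x100000>;
 *		interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clkc 0>, <&clkc 1>, <&clkc 2>;
 *		clock-names = "bus_early", "ref", "suspend";
 *		dr_mode = "otg";
 *	};
 */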
2364 
2365 #ifdef CONFIG_ACPI
2366 
2367 #define ACPI_ID_INTEL_BSW	"808622B7"
2368 
2369 static const struct acpi_device_id dwc3_acpi_match[] = {
2370 	{ ACPI_ID_INTEL_BSW, 0 },
2371 	{ },
2372 };
2373 MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
2374 #endif
2375 
2376 static struct platform_driver dwc3_driver = {
2377 	.probe		= dwc3_probe,
2378 	.remove_new	= dwc3_remove,
2379 	.driver		= {
2380 		.name	= "dwc3",
2381 		.of_match_table	= of_match_ptr(of_dwc3_match),
2382 		.acpi_match_table = ACPI_PTR(dwc3_acpi_match),
2383 		.pm	= &dwc3_dev_pm_ops,
2384 	},
2385 };
2386 
2387 module_platform_driver(dwc3_driver);
2388 
2389 /*
2390  * For type visibility (http://b/236036821)
2391  */
2392 const struct dwc3 *const ANDROID_GKI_struct_dwc3;
2393 EXPORT_SYMBOL_GPL(ANDROID_GKI_struct_dwc3);
2394 
2395 MODULE_ALIAS("platform:dwc3");
2396 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
2397 MODULE_LICENSE("GPL v2");
2398 MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
2399