1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Enhanced Host Controller Interface (EHCI) driver for USB.
4  *
5  * Maintainer: Alan Stern <stern@rowland.harvard.edu>
6  *
7  * Copyright (c) 2000-2004 by David Brownell
8  */
9 
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/dmapool.h>
13 #include <linux/kernel.h>
14 #include <linux/delay.h>
15 #include <linux/ioport.h>
16 #include <linux/sched.h>
17 #include <linux/vmalloc.h>
18 #include <linux/errno.h>
19 #include <linux/init.h>
20 #include <linux/hrtimer.h>
21 #include <linux/list.h>
22 #include <linux/interrupt.h>
23 #include <linux/usb.h>
24 #include <linux/usb/hcd.h>
25 #include <linux/usb/otg.h>
26 #include <linux/moduleparam.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/debugfs.h>
29 #include <linux/slab.h>
30 
31 #include <asm/byteorder.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <asm/unaligned.h>
35 
36 #if defined(CONFIG_PPC_PS3)
37 #include <asm/firmware.h>
38 #endif
39 
40 /*-------------------------------------------------------------------------*/
41 
42 /*
43  * EHCI hc_driver implementation ... experimental, incomplete.
44  * Based on the final 1.0 register interface specification.
45  *
46  * USB 2.0 shows up in upcoming www.pcmcia.org technology.
47  * First was PCMCIA, like ISA; then CardBus, which is PCI.
48  * Next comes "CardBay", using USB 2.0 signals.
49  *
50  * Contains additional contributions by Brad Hards, Rory Bolt, and others.
51  * Special thanks to Intel and VIA for providing host controllers to
52  * test this driver on, and Cypress (including In-System Design) for
53  * providing early devices for those host controllers to talk to!
54  */
55 
56 #define DRIVER_AUTHOR "David Brownell"
57 #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
58 
59 static const char	hcd_name [] = "ehci_hcd";
60 
61 
62 #undef EHCI_URB_TRACE
63 
64 /* magic numbers that can affect system performance */
65 #define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
66 #define	EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
67 #define	EHCI_TUNE_RL_TT		0
68 #define	EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
69 #define	EHCI_TUNE_MULT_TT	1
70 /*
71  * Some drivers think it's safe to schedule isochronous transfers more than
72  * 256 ms into the future (partly as a result of an old bug in the scheduling
73  * code).  In an attempt to avoid trouble, we will use a minimum scheduling
74  * length of 512 frames instead of 256.
75  */
76 #define	EHCI_TUNE_FLS		1	/* (medium) 512-frame schedule */
77 
78 /* Initial IRQ latency:  faster than hw default */
79 static int log2_irq_thresh = 0;		// 0 to 6
80 module_param (log2_irq_thresh, int, S_IRUGO);
81 MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
82 
83 /* initial park setting:  slower than hw default */
84 static unsigned park = 0;
85 module_param (park, uint, S_IRUGO);
86 MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
87 
88 /* for flakey hardware, ignore overcurrent indicators */
89 static bool ignore_oc;
90 module_param (ignore_oc, bool, S_IRUGO);
91 MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
92 
93 #define	INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
94 
95 /*-------------------------------------------------------------------------*/
96 
97 #include "ehci.h"
98 #include "pci-quirks.h"
99 
100 static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
101 		struct ehci_tt *tt);
102 
103 /*
104  * The MosChip MCS9990 controller updates its microframe counter
105  * a little before the frame counter, and occasionally we will read
106  * the invalid intermediate value.  Avoid problems by checking the
107  * microframe number (the low-order 3 bits); if they are 0 then
108  * re-read the register to get the correct value.
109  */
110 static unsigned ehci_moschip_read_frame_index(struct ehci_hcd *ehci)
111 {
112 	unsigned uf;
113 
114 	uf = ehci_readl(ehci, &ehci->regs->frame_index);
115 	if (unlikely((uf & 7) == 0))
116 		uf = ehci_readl(ehci, &ehci->regs->frame_index);
117 	return uf;
118 }
119 
120 static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
121 {
122 	if (ehci->frame_index_bug)
123 		return ehci_moschip_read_frame_index(ehci);
124 	return ehci_readl(ehci, &ehci->regs->frame_index);
125 }
126 
127 #include "ehci-dbg.c"
128 
129 /*-------------------------------------------------------------------------*/
130 
131 /*
132  * ehci_handshake - spin reading hc until handshake completes or fails
133  * @ptr: address of hc register to be read
134  * @mask: bits to look at in result of read
135  * @done: value of those bits when handshake succeeds
136  * @usec: timeout in microseconds
137  *
138  * Returns negative errno, or zero on success
139  *
140  * Success happens when the "mask" bits have the specified value (hardware
141  * handshake done).  There are two failure modes: the "usec" timeout has passed (major
142  * hardware flakeout), or the register reads as all-ones (hardware removed).
143  *
144  * That last failure should only happen in cases like physical cardbus eject
145  * before driver shutdown. But it also seems to be caused by bugs in cardbus
146  * bridge shutdown:  shutting down the bridge before the devices using it.
147  */
148 int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
149 		   u32 mask, u32 done, int usec)
150 {
151 	u32	result;
152 
153 	do {
154 		result = ehci_readl(ehci, ptr);
155 		if (result == ~(u32)0)		/* card removed */
156 			return -ENODEV;
157 		result &= mask;
158 		if (result == done)
159 			return 0;
160 		udelay (1);
161 		usec--;
162 	} while (usec > 0);
163 	return -ETIMEDOUT;
164 }
165 EXPORT_SYMBOL_GPL(ehci_handshake);
166 
167 /* check TDI/ARC silicon is in host mode */
168 static int tdi_in_host_mode (struct ehci_hcd *ehci)
169 {
170 	u32		tmp;
171 
172 	tmp = ehci_readl(ehci, &ehci->regs->usbmode);
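	/* the CM field (USBMODE bits 1:0) reads 3 when the controller is in host mode */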
173 	return (tmp & 3) == USBMODE_CM_HC;
174 }
175 
176 /*
177  * Force HC to halt state from unknown (EHCI spec section 2.3).
178  * Must be called with interrupts enabled and the lock not held.
179  */
180 static int ehci_halt (struct ehci_hcd *ehci)
181 {
182 	u32	temp;
183 
184 	spin_lock_irq(&ehci->lock);
185 
186 	/* disable any irqs left enabled by previous code */
187 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
188 
189 	if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) {
190 		spin_unlock_irq(&ehci->lock);
191 		return 0;
192 	}
193 
194 	/*
195 	 * This routine gets called during probe before ehci->command
196 	 * has been initialized, so we can't rely on its value.
197 	 */
198 	ehci->command &= ~CMD_RUN;
199 	temp = ehci_readl(ehci, &ehci->regs->command);
200 	temp &= ~(CMD_RUN | CMD_IAAD);
201 	ehci_writel(ehci, temp, &ehci->regs->command);
202 
203 	spin_unlock_irq(&ehci->lock);
204 	synchronize_irq(ehci_to_hcd(ehci)->irq);
205 
206 	return ehci_handshake(ehci, &ehci->regs->status,
207 			  STS_HALT, STS_HALT, 16 * 125);
208 }
209 
210 /* put TDI/ARC silicon into EHCI mode */
211 static void tdi_reset (struct ehci_hcd *ehci)
212 {
213 	u32		tmp;
214 
215 	tmp = ehci_readl(ehci, &ehci->regs->usbmode);
216 	tmp |= USBMODE_CM_HC;
217 	/* The default byte access to MMR space is LE after
218 	 * controller reset. Set the required endian mode
219 	 * for transfer buffers to match the host microprocessor
220 	 */
221 	if (ehci_big_endian_mmio(ehci))
222 		tmp |= USBMODE_BE;
223 	ehci_writel(ehci, tmp, &ehci->regs->usbmode);
224 }
225 
226 /*
227  * Reset a non-running (STS_HALT == 1) controller.
228  * Must be called with interrupts enabled and the lock not held.
229  */
230 int ehci_reset(struct ehci_hcd *ehci)
231 {
232 	int	retval;
233 	u32	command = ehci_readl(ehci, &ehci->regs->command);
234 
235 	/* If the EHCI debug controller is active, special care must be
236 	 * taken before and after a host controller reset */
237 	if (ehci->debug && !dbgp_reset_prep(ehci_to_hcd(ehci)))
238 		ehci->debug = NULL;
239 
240 	command |= CMD_RESET;
241 	dbg_cmd (ehci, "reset", command);
242 	ehci_writel(ehci, command, &ehci->regs->command);
243 	ehci->rh_state = EHCI_RH_HALTED;
244 	ehci->next_statechange = jiffies;
245 	retval = ehci_handshake(ehci, &ehci->regs->command,
246 			    CMD_RESET, 0, 250 * 1000);
247 
248 	if (ehci->has_hostpc) {
249 		ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
250 				&ehci->regs->usbmode_ex);
251 		ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning);
252 	}
253 	if (retval)
254 		return retval;
255 
256 	if (ehci_is_TDI(ehci))
257 		tdi_reset (ehci);
258 
259 	if (ehci->debug)
260 		dbgp_external_startup(ehci_to_hcd(ehci));
261 
262 	ehci->port_c_suspend = ehci->suspended_ports =
263 			ehci->resuming_ports = 0;
264 	return retval;
265 }
266 EXPORT_SYMBOL_GPL(ehci_reset);
267 
268 /*
269  * Idle the controller (turn off the schedules).
270  * Must be called with interrupts enabled and the lock not held.
271  */
272 static void ehci_quiesce (struct ehci_hcd *ehci)
273 {
274 	u32	temp;
275 
276 	if (ehci->rh_state != EHCI_RH_RUNNING)
277 		return;
278 
279 	/* wait for any schedule enables/disables to take effect */
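	/* CMD_ASE/CMD_PSE sit 10 bits below STS_ASS/STS_PSS, so shifting the
	 * command value left by 10 gives the status bits we expect to see */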
280 	temp = (ehci->command << 10) & (STS_ASS | STS_PSS);
281 	ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp,
282 			16 * 125);
283 
284 	/* then disable anything that's still active */
285 	spin_lock_irq(&ehci->lock);
286 	ehci->command &= ~(CMD_ASE | CMD_PSE);
287 	ehci_writel(ehci, ehci->command, &ehci->regs->command);
288 	spin_unlock_irq(&ehci->lock);
289 
290 	/* hardware can take 16 microframes to turn off ... */
291 	ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0,
292 			16 * 125);
293 }
294 
295 /*-------------------------------------------------------------------------*/
296 
297 static void end_iaa_cycle(struct ehci_hcd *ehci);
298 static void end_unlink_async(struct ehci_hcd *ehci);
299 static void unlink_empty_async(struct ehci_hcd *ehci);
300 static void ehci_work(struct ehci_hcd *ehci);
301 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
302 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
303 static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable);
304 
305 #include "ehci-timer.c"
306 #include "ehci-hub.c"
307 #include "ehci-mem.c"
308 #include "ehci-q.c"
309 #include "ehci-sched.c"
310 #include "ehci-sysfs.c"
311 
312 /*-------------------------------------------------------------------------*/
313 
314 /* On some systems, leaving remote wakeup enabled prevents system shutdown.
315  * The firmware seems to think that powering off is a wakeup event!
316  * This routine turns off remote wakeup and everything else, on all ports.
317  */
318 static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
319 {
320 	int	port = HCS_N_PORTS(ehci->hcs_params);
321 
322 	while (port--) {
323 		spin_unlock_irq(&ehci->lock);
324 		ehci_port_power(ehci, port, false);
325 		spin_lock_irq(&ehci->lock);
326 		ehci_writel(ehci, PORT_RWC_BITS,
327 				&ehci->regs->port_status[port]);
328 	}
329 }
330 
331 /*
332  * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
333  * Must be called with interrupts enabled and the lock not held.
334  */
335 static void ehci_silence_controller(struct ehci_hcd *ehci)
336 {
337 	ehci_halt(ehci);
338 
339 	spin_lock_irq(&ehci->lock);
340 	ehci->rh_state = EHCI_RH_HALTED;
341 	ehci_turn_off_all_ports(ehci);
342 
343 	/* make BIOS/etc use companion controller during reboot */
344 	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
345 
346 	/* unblock posted writes */
347 	ehci_readl(ehci, &ehci->regs->configured_flag);
348 	spin_unlock_irq(&ehci->lock);
349 }
350 
351 /* ehci_shutdown kicks in for silicon on any bus (not just PCI).
352  * It forcibly disables DMA and IRQs, helping kexec and other cases
353  * where the next system software may expect a clean state.
354  */
355 static void ehci_shutdown(struct usb_hcd *hcd)
356 {
357 	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
358 
359 	/*
360 	 * Protect the system from crashing at system shutdown in cases where
361 	 * the USB host has not been added yet by the OTG controller driver.
362 	 * Since ehci_setup() has not run yet, don't access registers or
363 	 * variables initialized in ehci_setup().
364 	 */
365 	if (!ehci->sbrn)
366 		return;
367 
368 	spin_lock_irq(&ehci->lock);
369 	ehci->shutdown = true;
370 	ehci->rh_state = EHCI_RH_STOPPING;
371 	ehci->enabled_hrtimer_events = 0;
372 	spin_unlock_irq(&ehci->lock);
373 
374 	ehci_silence_controller(ehci);
375 
376 	hrtimer_cancel(&ehci->hrtimer);
377 }
378 
379 /*-------------------------------------------------------------------------*/
380 
381 /*
382  * ehci_work is called from some interrupts, timers, and so on.
383  * it calls driver completion functions, after dropping ehci->lock.
384  */
385 static void ehci_work (struct ehci_hcd *ehci)
386 {
387 	/* another CPU may drop ehci->lock during a schedule scan while
388 	 * it reports urb completions.  this flag guards against bogus
389 	 * attempts at re-entrant schedule scanning.
390 	 */
391 	if (ehci->scanning) {
392 		ehci->need_rescan = true;
393 		return;
394 	}
395 	ehci->scanning = true;
396 
397  rescan:
398 	ehci->need_rescan = false;
399 	if (ehci->async_count)
400 		scan_async(ehci);
401 	if (ehci->intr_count > 0)
402 		scan_intr(ehci);
403 	if (ehci->isoc_count > 0)
404 		scan_isoc(ehci);
405 	if (ehci->need_rescan)
406 		goto rescan;
407 	ehci->scanning = false;
408 
409 	/* the IO watchdog guards against hardware or driver bugs that
410 	 * misplace IRQs, and should let us run completely without IRQs.
411 	 * such lossage has been observed on both VT6202 and VT8235.
412 	 */
413 	turn_on_io_watchdog(ehci);
414 }
415 
416 /*
417  * Called when the ehci_hcd module is removed.
418  */
419 static void ehci_stop (struct usb_hcd *hcd)
420 {
421 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
422 
423 	ehci_dbg (ehci, "stop\n");
424 
425 	/* no more interrupts ... */
426 
427 	spin_lock_irq(&ehci->lock);
428 	ehci->enabled_hrtimer_events = 0;
429 	spin_unlock_irq(&ehci->lock);
430 
431 	ehci_quiesce(ehci);
432 	ehci_silence_controller(ehci);
433 	ehci_reset (ehci);
434 
435 	hrtimer_cancel(&ehci->hrtimer);
436 	remove_sysfs_files(ehci);
437 	remove_debug_files (ehci);
438 
439 	/* root hub is shut down separately (first, when possible) */
440 	spin_lock_irq (&ehci->lock);
441 	end_free_itds(ehci);
442 	spin_unlock_irq (&ehci->lock);
443 	ehci_mem_cleanup (ehci);
444 
445 	if (ehci->amd_pll_fix == 1)
446 		usb_amd_dev_put();
447 
448 	dbg_status (ehci, "ehci_stop completed",
449 		    ehci_readl(ehci, &ehci->regs->status));
450 }
451 
452 /* one-time init, only for memory state */
453 static int ehci_init(struct usb_hcd *hcd)
454 {
455 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
456 	u32			temp;
457 	int			retval;
458 	u32			hcc_params;
459 	struct ehci_qh_hw	*hw;
460 
461 	spin_lock_init(&ehci->lock);
462 
463 	/*
464 	 * keep the IO watchdog enabled by default; well-behaved HCDs can turn it off later
465 	 */
466 	ehci->need_io_watchdog = 1;
467 
468 	hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
469 	ehci->hrtimer.function = ehci_hrtimer_func;
470 	ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
471 
472 	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
473 
474 	/*
475 	 * by default set standard 80% (== 100 usec/uframe) max periodic
476 	 * bandwidth as required by USB 2.0
477 	 */
478 	ehci->uframe_periodic_max = 100;
479 
480 	/*
481 	 * hw default: 1K periodic list heads, one per frame.
482 	 * periodic_size can shrink by USBCMD update if hcc_params allows.
483 	 */
484 	ehci->periodic_size = DEFAULT_I_TDPS;
485 	INIT_LIST_HEAD(&ehci->async_unlink);
486 	INIT_LIST_HEAD(&ehci->async_idle);
487 	INIT_LIST_HEAD(&ehci->intr_unlink_wait);
488 	INIT_LIST_HEAD(&ehci->intr_unlink);
489 	INIT_LIST_HEAD(&ehci->intr_qh_list);
490 	INIT_LIST_HEAD(&ehci->cached_itd_list);
491 	INIT_LIST_HEAD(&ehci->cached_sitd_list);
492 	INIT_LIST_HEAD(&ehci->tt_list);
493 
494 	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
495 		/* periodic schedule size can be smaller than default */
496 		switch (EHCI_TUNE_FLS) {
497 		case 0: ehci->periodic_size = 1024; break;
498 		case 1: ehci->periodic_size = 512; break;
499 		case 2: ehci->periodic_size = 256; break;
500 		default:	BUG();
501 		}
502 	}
503 	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
504 		return retval;
505 
506 	/* controllers may cache some of the periodic schedule ... */
507 	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
508 		ehci->i_thresh = 0;
509 	else					// N microframes cached
510 		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
511 
512 	/*
513 	 * dedicate a qh for the async ring head, since we couldn't unlink
514 	 * a 'real' qh without stopping the async schedule [4.8].  use it
515 	 * as the 'reclamation list head' too.
516 	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
517 	 * from automatically advancing to the next td after short reads.
518 	 */
519 	ehci->async->qh_next.qh = NULL;
520 	hw = ehci->async->hw;
521 	hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
522 	hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
523 #if defined(CONFIG_PPC_PS3)
524 	hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE);
525 #endif
526 	hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
527 	hw->hw_qtd_next = EHCI_LIST_END(ehci);
528 	ehci->async->qh_state = QH_STATE_LINKED;
529 	hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
530 
531 	/* clear interrupt enables, set irq latency */
532 	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
533 		log2_irq_thresh = 0;
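	/* the Interrupt Threshold Control field occupies USBCMD bits 23:16,
	 * expressed in microframes (1-64) */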
534 	temp = 1 << (16 + log2_irq_thresh);
535 	if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
536 		ehci->has_ppcd = 1;
537 		ehci_dbg(ehci, "enable per-port change event\n");
538 		temp |= CMD_PPCEE;
539 	}
540 	if (HCC_CANPARK(hcc_params)) {
541 		/* HW default park == 3, on hardware that supports it (like
542 		 * NVidia and ALI silicon), maximizes throughput on the async
543 		 * schedule by avoiding QH fetches between transfers.
544 		 *
545 	 * With fast USB storage devices and NForce2, "park" seems to
546 	 * cause problems: throughput reduction (!), data errors...
547 		 */
548 		if (park) {
549 			park = min(park, (unsigned) 3);
550 			temp |= CMD_PARK;
551 			temp |= park << 8;
552 		}
553 		ehci_dbg(ehci, "park %d\n", park);
554 	}
555 	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
556 		/* periodic schedule size can be smaller than default */
557 		temp &= ~(3 << 2);
558 		temp |= (EHCI_TUNE_FLS << 2);
559 	}
560 	ehci->command = temp;
561 
562 	/* Accept arbitrarily long scatter-gather lists */
563 	if (!hcd->localmem_pool)
564 		hcd->self.sg_tablesize = ~0;
565 
566 	/* Prepare for unlinking active QHs */
567 	ehci->old_current = ~0;
568 	return 0;
569 }
570 
571 /* start HC running; it's halted, ehci_init() has been run (once) */
572 static int ehci_run (struct usb_hcd *hcd)
573 {
574 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
575 	u32			temp;
576 	u32			hcc_params;
577 	int			rc;
578 
579 	hcd->uses_new_polling = 1;
580 
581 	/* EHCI spec section 4.1 */
582 
583 	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
584 	ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
585 
586 	/*
587 	 * hcc_params controls whether ehci->regs->segment must (!!!)
588 	 * be used; it constrains QH/ITD/SITD and QTD locations.
589 	 * dma_pool consistent memory always uses segment zero.
590 	 * streaming mappings for I/O buffers, like pci_map_single(),
591 	 * can return segments above 4GB, if the device allows.
592 	 *
593 	 * NOTE:  the dma mask is visible through dev->dma_mask, so
594 	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
595 	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
596 	 * host side drivers though.
597 	 */
598 	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
599 	if (HCC_64BIT_ADDR(hcc_params)) {
600 		ehci_writel(ehci, 0, &ehci->regs->segment);
601 #if 0
602 // this is deeply broken on almost all architectures
603 		if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
604 			ehci_info(ehci, "enabled 64bit DMA\n");
605 #endif
606 	}
607 
608 
609 	// Philips, Intel, and maybe others need CMD_RUN before the
610 	// root hub will detect new devices (why?); NEC doesn't
611 	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
612 	ehci->command |= CMD_RUN;
613 	ehci_writel(ehci, ehci->command, &ehci->regs->command);
614 	dbg_cmd (ehci, "init", ehci->command);
615 
616 	/*
617 	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
618 	 * are explicitly handed to companion controller(s), so no TT is
619 	 * involved with the root hub.  (Except where one is integrated,
620 	 * and there's no companion controller unless maybe for USB OTG.)
621 	 *
622 	 * Turning on the CF flag will transfer ownership of all ports
623 	 * from the companions to the EHCI controller.  If any of the
624 	 * companions are in the middle of a port reset at the time, it
625 	 * could cause trouble.  Write-locking ehci_cf_port_reset_rwsem
626 	 * guarantees that no resets are in progress.  After we set CF,
627 	 * a short delay lets the hardware catch up; new resets shouldn't
628 	 * be started before the port switching actions could complete.
629 	 */
630 	down_write(&ehci_cf_port_reset_rwsem);
631 	ehci->rh_state = EHCI_RH_RUNNING;
632 	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
633 
634 	/* Wait until the HC becomes operational */
635 	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */
636 	msleep(5);
637 
638 	/* For Aspeed, STS_HALT also depends on ASS/PSS status.
639 	 * Check CMD_RUN instead.
640 	 */
641 	if (ehci->is_aspeed)
642 		rc = ehci_handshake(ehci, &ehci->regs->command, CMD_RUN,
643 				    1, 100 * 1000);
644 	else
645 		rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT,
646 				    0, 100 * 1000);
647 
648 	up_write(&ehci_cf_port_reset_rwsem);
649 
650 	if (rc) {
651 		ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
652 			 ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
653 		return rc;
654 	}
655 
656 	ehci->last_periodic_enable = ktime_get_real();
657 
658 	temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
659 	ehci_info (ehci,
660 		"USB %x.%x started, EHCI %x.%02x%s\n",
661 		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
662 		temp >> 8, temp & 0xff,
663 		ignore_oc ? ", overcurrent ignored" : "");
664 
665 	ehci_writel(ehci, INTR_MASK,
666 		    &ehci->regs->intr_enable); /* Turn On Interrupts */
667 
668 	/* GRR this is run-once init(), being done every time the HC starts.
669 	 * So long as they're part of class devices, we can't do it in init()
670 	 * since the class device isn't created that early.
671 	 */
672 	create_debug_files(ehci);
673 	create_sysfs_files(ehci);
674 
675 	return 0;
676 }
677 
678 int ehci_setup(struct usb_hcd *hcd)
679 {
680 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
681 	int retval;
682 
683 	ehci->regs = (void __iomem *)ehci->caps +
684 	    HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
685 	dbg_hcs_params(ehci, "reset");
686 	dbg_hcc_params(ehci, "reset");
687 
688 	/* cache this readonly data; minimize chip reads */
689 	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
690 
691 	ehci->sbrn = HCD_USB2;
692 
693 	/* data structure init */
694 	retval = ehci_init(hcd);
695 	if (retval)
696 		return retval;
697 
698 	retval = ehci_halt(ehci);
699 	if (retval) {
700 		ehci_mem_cleanup(ehci);
701 		return retval;
702 	}
703 
704 	ehci_reset(ehci);
705 
706 	return 0;
707 }
708 EXPORT_SYMBOL_GPL(ehci_setup);
709 
710 /*-------------------------------------------------------------------------*/
711 
712 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
713 {
714 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
715 	u32			status, current_status, masked_status, pcd_status = 0;
716 	u32			cmd;
717 	int			bh;
718 	unsigned long		flags;
719 
720 	/*
721 	 * For threadirqs option we use spin_lock_irqsave() variant to prevent
722 	 * deadlock with ehci hrtimer callback, because hrtimer callbacks run
723 	 * in interrupt context even when threadirqs is specified. We can go
724 	 * back to spin_lock() variant when hrtimer callbacks become threaded.
725 	 */
726 	spin_lock_irqsave(&ehci->lock, flags);
727 
728 	status = 0;
729 	current_status = ehci_readl(ehci, &ehci->regs->status);
730 restart:
731 
732 	/* e.g. cardbus physical eject */
733 	if (current_status == ~(u32) 0) {
734 		ehci_dbg (ehci, "device removed\n");
735 		goto dead;
736 	}
737 	status |= current_status;
738 
739 	/*
740 	 * We don't use STS_FLR, but some controllers don't like it to
741 	 * remain on, so mask it out along with the other status bits.
742 	 */
743 	masked_status = current_status & (INTR_MASK | STS_FLR);
744 
745 	/* Shared IRQ? */
746 	if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
747 		spin_unlock_irqrestore(&ehci->lock, flags);
748 		return IRQ_NONE;
749 	}
750 
751 	/* clear (just) interrupts */
752 	ehci_writel(ehci, masked_status, &ehci->regs->status);
753 
754 	/* For edge interrupts, don't race with an interrupt bit being raised */
755 	current_status = ehci_readl(ehci, &ehci->regs->status);
756 	if (current_status & INTR_MASK)
757 		goto restart;
758 
759 	cmd = ehci_readl(ehci, &ehci->regs->command);
760 	bh = 0;
761 
762 	/* normal [4.15.1.2] or error [4.15.1.1] completion */
763 	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
764 		if (likely ((status & STS_ERR) == 0))
765 			INCR(ehci->stats.normal);
766 		else
767 			INCR(ehci->stats.error);
768 		bh = 1;
769 	}
770 
771 	/* complete the unlinking of some qh [4.15.2.3] */
772 	if (status & STS_IAA) {
773 
774 		/* Turn off the IAA watchdog */
775 		ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG);
776 
777 		/*
778 		 * Mild optimization: Allow another IAAD to reset the
779 		 * hrtimer, if one occurs before the next expiration.
780 		 * In theory we could always cancel the hrtimer, but
781 		 * tests show that about half the time it will be reset
782 		 * for some other event anyway.
783 		 */
784 		if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG)
785 			++ehci->next_hrtimer_event;
786 
787 		/* guard against (alleged) silicon errata */
788 		if (cmd & CMD_IAAD)
789 			ehci_dbg(ehci, "IAA with IAAD still set?\n");
790 		if (ehci->iaa_in_progress)
791 			INCR(ehci->stats.iaa);
792 		end_iaa_cycle(ehci);
793 	}
794 
795 	/* remote wakeup [4.3.1] */
796 	if (status & STS_PCD) {
797 		unsigned	i = HCS_N_PORTS (ehci->hcs_params);
798 		u32		ppcd = ~0;
799 
800 		/* kick root hub later */
801 		pcd_status = status;
802 
803 		/* resume root hub? */
804 		if (ehci->rh_state == EHCI_RH_SUSPENDED)
805 			usb_hcd_resume_root_hub(hcd);
806 
807 		/* get per-port change detect bits */
808 		if (ehci->has_ppcd)
809 			ppcd = status >> 16;
810 
811 		while (i--) {
812 			int pstatus;
813 
814 			/* leverage per-port change bits feature */
815 			if (!(ppcd & (1 << i)))
816 				continue;
817 			pstatus = ehci_readl(ehci,
818 					 &ehci->regs->port_status[i]);
819 
820 			if (pstatus & PORT_OWNER)
821 				continue;
822 			if (!(test_bit(i, &ehci->suspended_ports) &&
823 					((pstatus & PORT_RESUME) ||
824 						!(pstatus & PORT_SUSPEND)) &&
825 					(pstatus & PORT_PE) &&
826 					ehci->reset_done[i] == 0))
827 				continue;
828 
829 			/* start USB_RESUME_TIMEOUT msec resume signaling from
830 			 * this port, and make hub_wq collect
831 			 * PORT_STAT_C_SUSPEND to stop that signaling.
832 			 */
833 			ehci->reset_done[i] = jiffies +
834 				msecs_to_jiffies(USB_RESUME_TIMEOUT);
835 			set_bit(i, &ehci->resuming_ports);
836 			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
837 			usb_hcd_start_port_resume(&hcd->self, i);
838 			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
839 		}
840 	}
841 
842 	/* PCI errors [4.15.2.4] */
843 	if (unlikely ((status & STS_FATAL) != 0)) {
844 		ehci_err(ehci, "fatal error\n");
845 		dbg_cmd(ehci, "fatal", cmd);
846 		dbg_status(ehci, "fatal", status);
847 dead:
848 		usb_hc_died(hcd);
849 
850 		/* Don't let the controller do anything more */
851 		ehci->shutdown = true;
852 		ehci->rh_state = EHCI_RH_STOPPING;
853 		ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
854 		ehci_writel(ehci, ehci->command, &ehci->regs->command);
855 		ehci_writel(ehci, 0, &ehci->regs->intr_enable);
856 		ehci_handle_controller_death(ehci);
857 
858 		/* Handle completions when the controller stops */
859 		bh = 0;
860 	}
861 
862 	if (bh)
863 		ehci_work (ehci);
864 	spin_unlock_irqrestore(&ehci->lock, flags);
865 	if (pcd_status)
866 		usb_hcd_poll_rh_status(hcd);
867 	return IRQ_HANDLED;
868 }
869 
870 /*-------------------------------------------------------------------------*/
871 
872 /*
873  * non-error returns are a promise to giveback() the urb later
874  * we drop ownership so next owner (or urb unlink) can get it
875  *
876  * urb + dev is in hcd.self.controller.urb_list
877  * we're queueing TDs onto software and hardware lists
878  *
879  * hcd-specific init for hcpriv hasn't been done yet
880  *
881  * NOTE:  control, bulk, and interrupt share the same code to append TDs
882  * to a (possibly active) QH, and the same QH scanning code.
883  */
884 static int ehci_urb_enqueue (
885 	struct usb_hcd	*hcd,
886 	struct urb	*urb,
887 	gfp_t		mem_flags
888 ) {
889 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
890 	struct list_head	qtd_list;
891 
892 	INIT_LIST_HEAD (&qtd_list);
893 
894 	switch (usb_pipetype (urb->pipe)) {
895 	case PIPE_CONTROL:
896 		/* qh_completions() code doesn't handle all the fault cases
897 		 * in multi-TD control transfers.  Even 1KB is rare anyway.
898 		 */
899 		if (urb->transfer_buffer_length > (16 * 1024))
900 			return -EMSGSIZE;
901 		/* FALLTHROUGH */
902 	/* case PIPE_BULK: */
903 	default:
904 		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
905 			return -ENOMEM;
906 		return submit_async(ehci, urb, &qtd_list, mem_flags);
907 
908 	case PIPE_INTERRUPT:
909 		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
910 			return -ENOMEM;
911 		return intr_submit(ehci, urb, &qtd_list, mem_flags);
912 
913 	case PIPE_ISOCHRONOUS:
914 		if (urb->dev->speed == USB_SPEED_HIGH)
915 			return itd_submit (ehci, urb, mem_flags);
916 		else
917 			return sitd_submit (ehci, urb, mem_flags);
918 	}
919 }
920 
921 /* remove from hardware lists
922  * completions normally happen asynchronously
923  */
924 
925 static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
926 {
927 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
928 	struct ehci_qh		*qh;
929 	unsigned long		flags;
930 	int			rc;
931 
932 	spin_lock_irqsave (&ehci->lock, flags);
933 	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
934 	if (rc)
935 		goto done;
936 
937 	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
938 		/*
939 		 * We don't expedite dequeue for isochronous URBs.
940 		 * Just wait until they complete normally or their
941 		 * time slot expires.
942 		 */
943 	} else {
944 		qh = (struct ehci_qh *) urb->hcpriv;
945 		qh->unlink_reason |= QH_UNLINK_REQUESTED;
946 		switch (qh->qh_state) {
947 		case QH_STATE_LINKED:
948 			if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)
949 				start_unlink_intr(ehci, qh);
950 			else
951 				start_unlink_async(ehci, qh);
952 			break;
953 		case QH_STATE_COMPLETING:
954 			qh->dequeue_during_giveback = 1;
955 			break;
956 		case QH_STATE_UNLINK:
957 		case QH_STATE_UNLINK_WAIT:
958 			/* already started */
959 			break;
960 		case QH_STATE_IDLE:
961 			/* QH might be waiting for a Clear-TT-Buffer */
962 			qh_completions(ehci, qh);
963 			break;
964 		}
965 	}
966 done:
967 	spin_unlock_irqrestore (&ehci->lock, flags);
968 	return rc;
969 }
970 
971 /*-------------------------------------------------------------------------*/
972 
973 // bulk qh holds the data toggle
974 
975 static void
976 ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
977 {
978 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
979 	unsigned long		flags;
980 	struct ehci_qh		*qh;
981 
982 	/* ASSERT:  any requests/urbs are being unlinked */
983 	/* ASSERT:  nobody can be submitting urbs for this any more */
984 
985 rescan:
986 	spin_lock_irqsave (&ehci->lock, flags);
987 	qh = ep->hcpriv;
988 	if (!qh)
989 		goto done;
990 
991 	/* endpoints can be iso streams.  for now, we don't
992 	 * accelerate iso completions ... so spin a while.
993 	 */
994 	if (qh->hw == NULL) {
995 		struct ehci_iso_stream	*stream = ep->hcpriv;
996 
997 		if (!list_empty(&stream->td_list))
998 			goto idle_timeout;
999 
1000 		/* BUG_ON(!list_empty(&stream->free_list)); */
1001 		reserve_release_iso_bandwidth(ehci, stream, -1);
1002 		kfree(stream);
1003 		goto done;
1004 	}
1005 
1006 	qh->unlink_reason |= QH_UNLINK_REQUESTED;
1007 	switch (qh->qh_state) {
1008 	case QH_STATE_LINKED:
1009 		if (list_empty(&qh->qtd_list))
1010 			qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
1011 		else
1012 			WARN_ON(1);
1013 		if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
1014 			start_unlink_async(ehci, qh);
1015 		else
1016 			start_unlink_intr(ehci, qh);
1017 		fallthrough;
1018 	case QH_STATE_COMPLETING:	/* already in unlinking */
1019 	case QH_STATE_UNLINK:		/* wait for hw to finish? */
1020 	case QH_STATE_UNLINK_WAIT:
1021 idle_timeout:
1022 		spin_unlock_irqrestore (&ehci->lock, flags);
1023 		schedule_timeout_uninterruptible(1);
1024 		goto rescan;
1025 	case QH_STATE_IDLE:		/* fully unlinked */
1026 		if (qh->clearing_tt)
1027 			goto idle_timeout;
1028 		if (list_empty (&qh->qtd_list)) {
1029 			if (qh->ps.bw_uperiod)
1030 				reserve_release_intr_bandwidth(ehci, qh, -1);
1031 			qh_destroy(ehci, qh);
1032 			break;
1033 		}
1034 		fallthrough;
1035 	default:
1036 		/* caller was supposed to have unlinked any requests;
1037 		 * that's not our job.  just leak this memory.
1038 		 */
1039 		ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
1040 			qh, ep->desc.bEndpointAddress, qh->qh_state,
1041 			list_empty (&qh->qtd_list) ? "" : "(has tds)");
1042 		break;
1043 	}
1044  done:
1045 	ep->hcpriv = NULL;
1046 	spin_unlock_irqrestore (&ehci->lock, flags);
1047 }
1048 
1049 static void
1050 ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1051 {
1052 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
1053 	struct ehci_qh		*qh;
1054 	int			eptype = usb_endpoint_type(&ep->desc);
1055 	int			epnum = usb_endpoint_num(&ep->desc);
1056 	int			is_out = usb_endpoint_dir_out(&ep->desc);
1057 	unsigned long		flags;
1058 
1059 	if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
1060 		return;
1061 
1062 	spin_lock_irqsave(&ehci->lock, flags);
1063 	qh = ep->hcpriv;
1064 
1065 	/* For Bulk and Interrupt endpoints we maintain the toggle state
1066 	 * in the hardware; the toggle bits in udev aren't used at all.
1067 	 * When an endpoint is reset by usb_clear_halt() we must reset
1068 	 * the toggle bit in the QH.
1069 	 */
1070 	if (qh) {
1071 		if (!list_empty(&qh->qtd_list)) {
1072 			WARN_ONCE(1, "clear_halt for a busy endpoint\n");
1073 		} else {
1074 			/* The toggle value in the QH can't be updated
1075 			 * while the QH is active.  Unlink it now;
1076 			 * re-linking will call qh_refresh().
1077 			 */
1078 			usb_settoggle(qh->ps.udev, epnum, is_out, 0);
1079 			qh->unlink_reason |= QH_UNLINK_REQUESTED;
1080 			if (eptype == USB_ENDPOINT_XFER_BULK)
1081 				start_unlink_async(ehci, qh);
1082 			else
1083 				start_unlink_intr(ehci, qh);
1084 		}
1085 	}
1086 	spin_unlock_irqrestore(&ehci->lock, flags);
1087 }
1088 
1089 static int ehci_get_frame (struct usb_hcd *hcd)
1090 {
1091 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
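	/* FRINDEX counts microframes; shift right by 3 for the frame number,
	 * then wrap to the length of the periodic schedule */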
1092 	return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
1093 }
1094 
1095 /*-------------------------------------------------------------------------*/
1096 
1097 /* Device addition and removal */
1098 
1099 static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
1100 {
1101 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
1102 
1103 	spin_lock_irq(&ehci->lock);
1104 	drop_tt(udev);
1105 	spin_unlock_irq(&ehci->lock);
1106 }
1107 
1108 /*-------------------------------------------------------------------------*/
1109 
1110 #ifdef	CONFIG_PM
1111 
1112 /* suspend/resume, section 4.3 */
1113 
1114 /* These routines handle the generic parts of controller suspend/resume */
1115 
1116 int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup)
1117 {
1118 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
1119 
1120 	if (time_before(jiffies, ehci->next_statechange))
1121 		msleep(10);
1122 
1123 	/*
1124 	 * Root hub was already suspended.  Disable IRQ emission and
1125 	 * mark HW inaccessible.  The PM and USB cores make sure that
1126 	 * the root hub is either suspended or stopped.
1127 	 */
1128 	ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
1129 
1130 	spin_lock_irq(&ehci->lock);
1131 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
1132 	(void) ehci_readl(ehci, &ehci->regs->intr_enable);
1133 
1134 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1135 	spin_unlock_irq(&ehci->lock);
1136 
1137 	synchronize_irq(hcd->irq);
1138 
1139 	/* Check for race with a wakeup request */
1140 	if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
1141 		ehci_resume(hcd, false);
1142 		return -EBUSY;
1143 	}
1144 
1145 	return 0;
1146 }
1147 EXPORT_SYMBOL_GPL(ehci_suspend);
1148 
1149 /* Returns 0 if power was preserved, 1 if power was lost */
1150 int ehci_resume(struct usb_hcd *hcd, bool force_reset)
1151 {
1152 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
1153 
1154 	if (time_before(jiffies, ehci->next_statechange))
1155 		msleep(100);
1156 
1157 	/* Mark hardware accessible again as we are back to full power by now */
1158 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1159 
1160 	if (ehci->shutdown)
1161 		return 0;		/* Controller is dead */
1162 
1163 	/*
1164 	 * If CF is still set and reset isn't forced
1165 	 * then we maintained suspend power.
1166 	 * Just undo the effect of ehci_suspend().
1167 	 */
1168 	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
1169 			!force_reset) {
1170 		int	mask = INTR_MASK;
1171 
1172 		ehci_prepare_ports_for_controller_resume(ehci);
1173 
1174 		spin_lock_irq(&ehci->lock);
1175 		if (ehci->shutdown)
1176 			goto skip;
1177 
1178 		if (!hcd->self.root_hub->do_remote_wakeup)
1179 			mask &= ~STS_PCD;
1180 		ehci_writel(ehci, mask, &ehci->regs->intr_enable);
1181 		ehci_readl(ehci, &ehci->regs->intr_enable);
1182  skip:
1183 		spin_unlock_irq(&ehci->lock);
1184 		return 0;
1185 	}
1186 
1187 	/*
1188 	 * Else reset, to cope with power loss or resume from hibernation
1189 	 * having let the firmware kick in during reboot.
1190 	 */
1191 	usb_root_hub_lost_power(hcd->self.root_hub);
1192 	(void) ehci_halt(ehci);
1193 	(void) ehci_reset(ehci);
1194 
1195 	spin_lock_irq(&ehci->lock);
1196 	if (ehci->shutdown)
1197 		goto skip;
1198 
1199 	ehci_writel(ehci, ehci->command, &ehci->regs->command);
1200 	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
1201 	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */
1202 
1203 	ehci->rh_state = EHCI_RH_SUSPENDED;
1204 	spin_unlock_irq(&ehci->lock);
1205 
1206 	return 1;
1207 }
1208 EXPORT_SYMBOL_GPL(ehci_resume);
1209 
1210 #endif
1211 
1212 /*-------------------------------------------------------------------------*/
1213 
1214 /*
1215  * Generic structure: This gets copied for platform drivers so that
1216  * individual entries can be overridden as needed.
1217  */
1218 
1219 static const struct hc_driver ehci_hc_driver = {
1220 	.description =		hcd_name,
1221 	.product_desc =		"EHCI Host Controller",
1222 	.hcd_priv_size =	sizeof(struct ehci_hcd),
1223 
1224 	/*
1225 	 * generic hardware linkage
1226 	 */
1227 	.irq =			ehci_irq,
1228 	.flags =		HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
1229 
1230 	/*
1231 	 * basic lifecycle operations
1232 	 */
1233 	.reset =		ehci_setup,
1234 	.start =		ehci_run,
1235 	.stop =			ehci_stop,
1236 	.shutdown =		ehci_shutdown,
1237 
1238 	/*
1239 	 * managing i/o requests and associated device resources
1240 	 */
1241 	.urb_enqueue =		ehci_urb_enqueue,
1242 	.urb_dequeue =		ehci_urb_dequeue,
1243 	.endpoint_disable =	ehci_endpoint_disable,
1244 	.endpoint_reset =	ehci_endpoint_reset,
1245 	.clear_tt_buffer_complete =	ehci_clear_tt_buffer_complete,
1246 
1247 	/*
1248 	 * scheduling support
1249 	 */
1250 	.get_frame_number =	ehci_get_frame,
1251 
1252 	/*
1253 	 * root hub support
1254 	 */
1255 	.hub_status_data =	ehci_hub_status_data,
1256 	.hub_control =		ehci_hub_control,
1257 	.bus_suspend =		ehci_bus_suspend,
1258 	.bus_resume =		ehci_bus_resume,
1259 	.relinquish_port =	ehci_relinquish_port,
1260 	.port_handed_over =	ehci_port_handed_over,
1261 	.get_resuming_ports =	ehci_get_resuming_ports,
1262 
1263 	/*
1264 	 * device support
1265 	 */
1266 	.free_dev =		ehci_remove_device,
1267 };
1268 
1269 void ehci_init_driver(struct hc_driver *drv,
1270 		const struct ehci_driver_overrides *over)
1271 {
1272 	/* Copy the generic table to drv and then apply the overrides */
1273 	*drv = ehci_hc_driver;
1274 
1275 	if (over) {
1276 		drv->hcd_priv_size += over->extra_priv_size;
1277 		if (over->reset)
1278 			drv->reset = over->reset;
1279 		if (over->port_power)
1280 			drv->port_power = over->port_power;
1281 	}
1282 }
1283 EXPORT_SYMBOL_GPL(ehci_init_driver);
1284 
1285 /*-------------------------------------------------------------------------*/
1286 
1287 MODULE_DESCRIPTION(DRIVER_DESC);
1288 MODULE_AUTHOR (DRIVER_AUTHOR);
1289 MODULE_LICENSE ("GPL");
1290 
1291 #ifdef CONFIG_USB_EHCI_SH
1292 #include "ehci-sh.c"
1293 #define PLATFORM_DRIVER		ehci_hcd_sh_driver
1294 #endif
1295 
1296 #ifdef CONFIG_PPC_PS3
1297 #include "ehci-ps3.c"
1298 #define	PS3_SYSTEM_BUS_DRIVER	ps3_ehci_driver
1299 #endif
1300 
1301 #ifdef CONFIG_USB_EHCI_HCD_PPC_OF
1302 #include "ehci-ppc-of.c"
1303 #define OF_PLATFORM_DRIVER	ehci_hcd_ppc_of_driver
1304 #endif
1305 
1306 #ifdef CONFIG_XPS_USB_HCD_XILINX
1307 #include "ehci-xilinx-of.c"
1308 #define XILINX_OF_PLATFORM_DRIVER	ehci_hcd_xilinx_of_driver
1309 #endif
1310 
1311 #ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
1312 #include "ehci-pmcmsp.c"
1313 #define	PLATFORM_DRIVER		ehci_hcd_msp_driver
1314 #endif
1315 
1316 #ifdef CONFIG_SPARC_LEON
1317 #include "ehci-grlib.c"
1318 #define PLATFORM_DRIVER		ehci_grlib_driver
1319 #endif
1320 
1321 static int __init ehci_hcd_init(void)
1322 {
1323 	int retval = 0;
1324 
1325 	if (usb_disabled())
1326 		return -ENODEV;
1327 
1328 	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
1329 	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1330 	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
1331 			test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
1332 		printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
1333 				" before uhci_hcd and ohci_hcd, not after\n");
1334 
1335 	pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd sitd %zd\n",
1336 		 hcd_name,
1337 		 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
1338 		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
1339 
1340 #ifdef CONFIG_DYNAMIC_DEBUG
1341 	ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
1342 #endif
1343 
1344 #ifdef PLATFORM_DRIVER
1345 	retval = platform_driver_register(&PLATFORM_DRIVER);
1346 	if (retval < 0)
1347 		goto clean0;
1348 #endif
1349 
1350 #ifdef PS3_SYSTEM_BUS_DRIVER
1351 	retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
1352 	if (retval < 0)
1353 		goto clean2;
1354 #endif
1355 
1356 #ifdef OF_PLATFORM_DRIVER
1357 	retval = platform_driver_register(&OF_PLATFORM_DRIVER);
1358 	if (retval < 0)
1359 		goto clean3;
1360 #endif
1361 
1362 #ifdef XILINX_OF_PLATFORM_DRIVER
1363 	retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
1364 	if (retval < 0)
1365 		goto clean4;
1366 #endif
1367 	return retval;
1368 
1369 #ifdef XILINX_OF_PLATFORM_DRIVER
1370 	/* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
1371 clean4:
1372 #endif
1373 #ifdef OF_PLATFORM_DRIVER
1374 	platform_driver_unregister(&OF_PLATFORM_DRIVER);
1375 clean3:
1376 #endif
1377 #ifdef PS3_SYSTEM_BUS_DRIVER
1378 	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1379 clean2:
1380 #endif
1381 #ifdef PLATFORM_DRIVER
1382 	platform_driver_unregister(&PLATFORM_DRIVER);
1383 clean0:
1384 #endif
1385 #ifdef CONFIG_DYNAMIC_DEBUG
1386 	debugfs_remove(ehci_debug_root);
1387 	ehci_debug_root = NULL;
1388 #endif
1389 	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1390 	return retval;
1391 }
1392 module_init(ehci_hcd_init);
1393 
1394 static void __exit ehci_hcd_cleanup(void)
1395 {
1396 #ifdef XILINX_OF_PLATFORM_DRIVER
1397 	platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
1398 #endif
1399 #ifdef OF_PLATFORM_DRIVER
1400 	platform_driver_unregister(&OF_PLATFORM_DRIVER);
1401 #endif
1402 #ifdef PLATFORM_DRIVER
1403 	platform_driver_unregister(&PLATFORM_DRIVER);
1404 #endif
1405 #ifdef PS3_SYSTEM_BUS_DRIVER
1406 	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1407 #endif
1408 #ifdef CONFIG_DYNAMIC_DEBUG
1409 	debugfs_remove(ehci_debug_root);
1410 #endif
1411 	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1412 }
1413 module_exit(ehci_hcd_cleanup);
1414