Lines Matching refs:xhci

87 void xhci_quiesce(struct xhci_hcd *xhci) in xhci_quiesce() argument
94 halted = readl(&xhci->op_regs->status) & STS_HALT; in xhci_quiesce()
98 cmd = readl(&xhci->op_regs->command); in xhci_quiesce()
100 writel(cmd, &xhci->op_regs->command); in xhci_quiesce()
111 int xhci_halt(struct xhci_hcd *xhci) in xhci_halt() argument
114 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); in xhci_halt()
115 xhci_quiesce(xhci); in xhci_halt()
117 ret = xhci_handshake(&xhci->op_regs->status, in xhci_halt()
120 xhci_warn(xhci, "Host halt failed, %d\n", ret); in xhci_halt()
123 xhci->xhc_state |= XHCI_STATE_HALTED; in xhci_halt()
124 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in xhci_halt()
131 int xhci_start(struct xhci_hcd *xhci) in xhci_start() argument
136 temp = readl(&xhci->op_regs->command); in xhci_start()
138 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", in xhci_start()
140 writel(temp, &xhci->op_regs->command); in xhci_start()
146 ret = xhci_handshake(&xhci->op_regs->status, in xhci_start()
149 xhci_err(xhci, "Host took too long to start, " in xhci_start()
154 xhci->xhc_state = 0; in xhci_start()
166 int xhci_reset(struct xhci_hcd *xhci) in xhci_reset() argument
172 state = readl(&xhci->op_regs->status); in xhci_reset()
175 xhci_warn(xhci, "Host not accessible, reset failed.\n"); in xhci_reset()
180 xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); in xhci_reset()
184 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); in xhci_reset()
185 command = readl(&xhci->op_regs->command); in xhci_reset()
187 writel(command, &xhci->op_regs->command); in xhci_reset()
196 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_reset()
199 ret = xhci_handshake(&xhci->op_regs->command, in xhci_reset()
204 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) in xhci_reset()
205 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller)); in xhci_reset()
207 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_reset()
213 ret = xhci_handshake(&xhci->op_regs->status, in xhci_reset()
216 xhci->usb2_rhub.bus_state.port_c_suspend = 0; in xhci_reset()
217 xhci->usb2_rhub.bus_state.suspended_ports = 0; in xhci_reset()
218 xhci->usb2_rhub.bus_state.resuming_ports = 0; in xhci_reset()
219 xhci->usb3_rhub.bus_state.port_c_suspend = 0; in xhci_reset()
220 xhci->usb3_rhub.bus_state.suspended_ports = 0; in xhci_reset()
221 xhci->usb3_rhub.bus_state.resuming_ports = 0; in xhci_reset()
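The reset path above sets CMD_RESET and then performs two handshakes: one on the command register for HCRST to self-clear, and one on the status register for Controller Not Ready (STS_CNR) to drop, before clearing the per-hub bus state. A hedged reconstruction of the sequence elided between the matched lines (the 10-second timeout is an assumption):

static int xhci_reset_handshake_sketch(struct xhci_hcd *xhci)
{
	u32 command;
	int ret;

	/* Request a Host Controller Reset (HCRST). */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* HCRST self-clears once the reset completes. */
	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0,
			     10 * 1000 * 1000);
	if (ret)
		return ret;

	/* The HC is usable again once Controller Not Ready drops. */
	return xhci_handshake(&xhci->op_regs->status, STS_CNR, 0,
			      10 * 1000 * 1000);
}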
226 static void xhci_zero_64b_regs(struct xhci_hcd *xhci) in xhci_zero_64b_regs() argument
228 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; in xhci_zero_64b_regs()
246 if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev)) in xhci_zero_64b_regs()
249 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n"); in xhci_zero_64b_regs()
252 val = readl(&xhci->op_regs->command); in xhci_zero_64b_regs()
254 writel(val, &xhci->op_regs->command); in xhci_zero_64b_regs()
257 val = readl(&xhci->op_regs->status); in xhci_zero_64b_regs()
259 writel(val, &xhci->op_regs->status); in xhci_zero_64b_regs()
262 val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_zero_64b_regs()
264 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); in xhci_zero_64b_regs()
265 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_zero_64b_regs()
267 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); in xhci_zero_64b_regs()
269 for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) { in xhci_zero_64b_regs()
272 ir = &xhci->run_regs->ir_set[i]; in xhci_zero_64b_regs()
273 val = xhci_read_64(xhci, &ir->erst_base); in xhci_zero_64b_regs()
275 xhci_write_64(xhci, 0, &ir->erst_base); in xhci_zero_64b_regs()
276 val = xhci_read_64(xhci, &ir->erst_dequeue); in xhci_zero_64b_regs()
278 xhci_write_64(xhci, 0, &ir->erst_dequeue); in xhci_zero_64b_regs()
282 err = xhci_handshake(&xhci->op_regs->status, in xhci_zero_64b_regs()
286 xhci_info(xhci, "Fault detected\n"); in xhci_zero_64b_regs()
293 static int xhci_setup_msi(struct xhci_hcd *xhci) in xhci_setup_msi() argument
299 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_setup_msi()
303 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
309 0, "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msi()
311 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
322 static int xhci_setup_msix(struct xhci_hcd *xhci) in xhci_setup_msix() argument
325 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_setup_msix()
335 xhci->msix_count = min(num_online_cpus() + 1, in xhci_setup_msix()
336 HCS_MAX_INTRS(xhci->hcs_params1)); in xhci_setup_msix()
338 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count, in xhci_setup_msix()
341 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msix()
346 for (i = 0; i < xhci->msix_count; i++) { in xhci_setup_msix()
348 "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msix()
357 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt"); in xhci_setup_msix()
359 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); in xhci_setup_msix()
365 static void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
367 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_cleanup_msix()
370 if (xhci->quirks & XHCI_PLAT) in xhci_cleanup_msix()
380 for (i = 0; i < xhci->msix_count; i++) in xhci_cleanup_msix()
381 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); in xhci_cleanup_msix()
383 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci)); in xhci_cleanup_msix()
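xhci_setup_msix() and xhci_cleanup_msix() above follow the standard pci_alloc_irq_vectors() pattern: size the vector count from the online CPUs, allocate exactly that many MSI-X vectors, request one IRQ per vector, and unwind in reverse on failure. A condensed sketch of that flow (xhci_msi_irq as the handler and the unwind label are assumptions, not shown in the matches):

static int xhci_setup_msix_sketch(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
	int i, ret;

	/* One vector per CPU plus one, capped by what the HC advertises. */
	xhci->msix_count = min(num_online_cpus() + 1,
			       HCS_MAX_INTRS(xhci->hcs_params1));

	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
				    PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq,
				  0, "xhci_hcd", hcd);
		if (ret)
			goto disable_msix;
	}
	return 0;

disable_msix:
	/* Free in reverse the vectors already requested. */
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), hcd);
	pci_free_irq_vectors(pdev);
	return ret;
}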
390 static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
392 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_msix_sync_irqs()
398 for (i = 0; i < xhci->msix_count; i++) in xhci_msix_sync_irqs()
405 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_try_enable_msi() local
410 if (xhci->quirks & XHCI_PLAT) in xhci_try_enable_msi()
413 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_try_enable_msi()
418 if (xhci->quirks & XHCI_BROKEN_MSI) in xhci_try_enable_msi()
426 ret = xhci_setup_msix(xhci); in xhci_try_enable_msi()
429 ret = xhci_setup_msi(xhci); in xhci_try_enable_msi()
437 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); in xhci_try_enable_msi()
450 xhci_err(xhci, "request interrupt %d failed\n", in xhci_try_enable_msi()
465 static inline void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
469 static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
477 struct xhci_hcd *xhci; in compliance_mode_recovery() local
483 xhci = from_timer(xhci, t, comp_mode_recovery_timer); in compliance_mode_recovery()
484 rhub = &xhci->usb3_rhub; in compliance_mode_recovery()
493 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
496 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
498 hcd = xhci->shared_hcd; in compliance_mode_recovery()
507 if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1)) in compliance_mode_recovery()
508 mod_timer(&xhci->comp_mode_recovery_timer, in compliance_mode_recovery()
522 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) in compliance_mode_recovery_timer_init() argument
524 xhci->port_status_u0 = 0; in compliance_mode_recovery_timer_init()
525 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery, in compliance_mode_recovery_timer_init()
527 xhci->comp_mode_recovery_timer.expires = jiffies + in compliance_mode_recovery_timer_init()
530 add_timer(&xhci->comp_mode_recovery_timer); in compliance_mode_recovery_timer_init()
531 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery_timer_init()
562 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) in xhci_all_ports_seen_u0() argument
564 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); in xhci_all_ports_seen_u0()
577 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_init() local
580 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); in xhci_init()
581 spin_lock_init(&xhci->lock); in xhci_init()
582 if (xhci->hci_version == 0x95 && link_quirk) { in xhci_init()
583 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_init()
585 xhci->quirks |= XHCI_LINK_TRB_QUIRK; in xhci_init()
587 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_init()
590 retval = xhci_mem_init(xhci, GFP_KERNEL); in xhci_init()
591 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); in xhci_init()
595 xhci->quirks |= XHCI_COMP_MODE_QUIRK; in xhci_init()
596 compliance_mode_recovery_timer_init(xhci); in xhci_init()
605 static int xhci_run_finished(struct xhci_hcd *xhci) in xhci_run_finished() argument
607 if (xhci_start(xhci)) { in xhci_run_finished()
608 xhci_halt(xhci); in xhci_run_finished()
611 xhci->shared_hcd->state = HC_STATE_RUNNING; in xhci_run_finished()
612 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; in xhci_run_finished()
614 if (xhci->quirks & XHCI_NEC_HOST) in xhci_run_finished()
615 xhci_ring_cmd_db(xhci); in xhci_run_finished()
617 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run_finished()
639 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_run() local
647 return xhci_run_finished(xhci); in xhci_run()
649 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); in xhci_run()
655 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_run()
657 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
660 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
662 temp = readl(&xhci->ir_set->irq_control); in xhci_run()
664 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; in xhci_run()
665 writel(temp, &xhci->ir_set->irq_control); in xhci_run()
668 temp = readl(&xhci->op_regs->command); in xhci_run()
670 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
672 writel(temp, &xhci->op_regs->command); in xhci_run()
674 temp = readl(&xhci->ir_set->irq_pending); in xhci_run()
675 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
677 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); in xhci_run()
678 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); in xhci_run()
680 if (xhci->quirks & XHCI_NEC_HOST) { in xhci_run()
683 command = xhci_alloc_command(xhci, false, GFP_KERNEL); in xhci_run()
687 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0, in xhci_run()
690 xhci_free_command(xhci, command); in xhci_run()
692 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
695 xhci_dbc_init(xhci); in xhci_run()
697 xhci_debugfs_init(xhci); in xhci_run()
715 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_stop() local
717 mutex_lock(&xhci->mutex); in xhci_stop()
721 mutex_unlock(&xhci->mutex); in xhci_stop()
725 xhci_dbc_exit(xhci); in xhci_stop()
727 spin_lock_irq(&xhci->lock); in xhci_stop()
728 xhci->xhc_state |= XHCI_STATE_HALTED; in xhci_stop()
729 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in xhci_stop()
730 xhci_halt(xhci); in xhci_stop()
731 xhci_reset(xhci); in xhci_stop()
732 spin_unlock_irq(&xhci->lock); in xhci_stop()
734 xhci_cleanup_msix(xhci); in xhci_stop()
737 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_stop()
738 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_stop()
739 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_stop()
740 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_stop()
745 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_stop()
748 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
750 temp = readl(&xhci->op_regs->status); in xhci_stop()
751 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); in xhci_stop()
752 temp = readl(&xhci->ir_set->irq_pending); in xhci_stop()
753 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_stop()
755 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); in xhci_stop()
756 xhci_mem_cleanup(xhci); in xhci_stop()
757 xhci_debugfs_exit(xhci); in xhci_stop()
758 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
760 readl(&xhci->op_regs->status)); in xhci_stop()
761 mutex_unlock(&xhci->mutex); in xhci_stop()
775 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_shutdown() local
777 if (xhci->quirks & XHCI_SPURIOUS_REBOOT) in xhci_shutdown()
780 spin_lock_irq(&xhci->lock); in xhci_shutdown()
781 xhci_halt(xhci); in xhci_shutdown()
783 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) in xhci_shutdown()
784 xhci_reset(xhci); in xhci_shutdown()
785 spin_unlock_irq(&xhci->lock); in xhci_shutdown()
787 xhci_cleanup_msix(xhci); in xhci_shutdown()
789 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_shutdown()
791 readl(&xhci->op_regs->status)); in xhci_shutdown()
796 static void xhci_save_registers(struct xhci_hcd *xhci) in xhci_save_registers() argument
798 xhci->s3.command = readl(&xhci->op_regs->command); in xhci_save_registers()
799 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); in xhci_save_registers()
800 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_save_registers()
801 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); in xhci_save_registers()
802 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); in xhci_save_registers()
803 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); in xhci_save_registers()
804 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_save_registers()
805 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); in xhci_save_registers()
806 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); in xhci_save_registers()
809 static void xhci_restore_registers(struct xhci_hcd *xhci) in xhci_restore_registers() argument
811 writel(xhci->s3.command, &xhci->op_regs->command); in xhci_restore_registers()
812 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); in xhci_restore_registers()
813 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); in xhci_restore_registers()
814 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); in xhci_restore_registers()
815 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); in xhci_restore_registers()
816 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); in xhci_restore_registers()
817 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); in xhci_restore_registers()
818 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); in xhci_restore_registers()
819 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); in xhci_restore_registers()
822 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) in xhci_set_cmd_ring_deq() argument
827 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
829 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, in xhci_set_cmd_ring_deq()
830 xhci->cmd_ring->dequeue) & in xhci_set_cmd_ring_deq()
832 xhci->cmd_ring->cycle_state; in xhci_set_cmd_ring_deq()
833 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_set_cmd_ring_deq()
836 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
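xhci_set_cmd_ring_deq() above rebuilds the 64-bit Command Ring Control Register from three pieces: the reserved low bits read back from hardware, the DMA address of the current dequeue TRB, and the ring's cycle state. A sketch filling the gaps between the matched lines (the CMD_RING_RSVD_BITS masking is inferred from context):

static void xhci_set_cmd_ring_deq_sketch(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* Keep the reserved bits, splice in the dequeue DMA address
	 * and the ring cycle state. */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		  (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}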
848 static void xhci_clear_command_ring(struct xhci_hcd *xhci) in xhci_clear_command_ring() argument
853 ring = xhci->cmd_ring; in xhci_clear_command_ring()
883 xhci_set_cmd_ring_deq(xhci); in xhci_clear_command_ring()
886 static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) in xhci_disable_port_wake_on_bits() argument
893 spin_lock_irqsave(&xhci->lock, flags); in xhci_disable_port_wake_on_bits()
896 port_index = xhci->usb3_rhub.num_ports; in xhci_disable_port_wake_on_bits()
897 ports = xhci->usb3_rhub.ports; in xhci_disable_port_wake_on_bits()
905 xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", in xhci_disable_port_wake_on_bits()
906 xhci->usb3_rhub.hcd->self.busnum, in xhci_disable_port_wake_on_bits()
912 port_index = xhci->usb2_rhub.num_ports; in xhci_disable_port_wake_on_bits()
913 ports = xhci->usb2_rhub.ports; in xhci_disable_port_wake_on_bits()
921 xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", in xhci_disable_port_wake_on_bits()
922 xhci->usb2_rhub.hcd->self.busnum, in xhci_disable_port_wake_on_bits()
926 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_port_wake_on_bits()
929 static bool xhci_pending_portevent(struct xhci_hcd *xhci) in xhci_pending_portevent() argument
936 status = readl(&xhci->op_regs->status); in xhci_pending_portevent()
945 port_index = xhci->usb2_rhub.num_ports; in xhci_pending_portevent()
946 ports = xhci->usb2_rhub.ports; in xhci_pending_portevent()
953 port_index = xhci->usb3_rhub.num_ports; in xhci_pending_portevent()
954 ports = xhci->usb3_rhub.ports; in xhci_pending_portevent()
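xhci_pending_portevent() above first checks STS_EINT in the status register, then walks each root hub's port array; the per-port loop body is elided from the matches. A sketch of that scan (PORT_CHANGE_MASK and XDEV_RESUME as the criteria for a pending event are assumptions drawn from the driver's port handling):

static bool xhci_port_scan_sketch(struct xhci_port **ports, int port_index)
{
	u32 portsc;

	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		/* Any latched change bit, or a link in resume, counts. */
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}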
970 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) in xhci_suspend() argument
974 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_suspend()
982 xhci->shared_hcd->state != HC_STATE_SUSPENDED) in xhci_suspend()
985 xhci_dbc_suspend(xhci); in xhci_suspend()
989 xhci_disable_port_wake_on_bits(xhci); in xhci_suspend()
992 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); in xhci_suspend()
995 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_suspend()
996 del_timer_sync(&xhci->shared_hcd->rh_timer); in xhci_suspend()
998 if (xhci->quirks & XHCI_SUSPEND_DELAY) in xhci_suspend()
1001 spin_lock_irq(&xhci->lock); in xhci_suspend()
1003 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_suspend()
1008 command = readl(&xhci->op_regs->command); in xhci_suspend()
1010 writel(command, &xhci->op_regs->command); in xhci_suspend()
1013 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1; in xhci_suspend()
1015 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
1017 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); in xhci_suspend()
1018 spin_unlock_irq(&xhci->lock); in xhci_suspend()
1021 xhci_clear_command_ring(xhci); in xhci_suspend()
1024 xhci_save_registers(xhci); in xhci_suspend()
1027 command = readl(&xhci->op_regs->command); in xhci_suspend()
1029 writel(command, &xhci->op_regs->command); in xhci_suspend()
1030 xhci->broken_suspend = 0; in xhci_suspend()
1031 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
1042 res = readl(&xhci->op_regs->status); in xhci_suspend()
1043 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) && in xhci_suspend()
1046 xhci->broken_suspend = 1; in xhci_suspend()
1048 xhci_warn(xhci, "WARN: xHC save state timeout\n"); in xhci_suspend()
1049 spin_unlock_irq(&xhci->lock); in xhci_suspend()
1053 spin_unlock_irq(&xhci->lock); in xhci_suspend()
1059 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_suspend()
1060 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_suspend()
1061 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_suspend()
1062 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_suspend()
1069 xhci_msix_sync_irqs(xhci); in xhci_suspend()
1081 int xhci_resume(struct xhci_hcd *xhci, bool hibernated) in xhci_resume() argument
1084 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_resume()
1096 if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) || in xhci_resume()
1097 time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange)) in xhci_resume()
1101 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_resume()
1103 spin_lock_irq(&xhci->lock); in xhci_resume()
1104 if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) in xhci_resume()
1112 retval = xhci_handshake(&xhci->op_regs->status, in xhci_resume()
1115 xhci_warn(xhci, "Controller not ready at resume %d\n", in xhci_resume()
1117 spin_unlock_irq(&xhci->lock); in xhci_resume()
1121 xhci_restore_registers(xhci); in xhci_resume()
1123 xhci_set_cmd_ring_deq(xhci); in xhci_resume()
1126 command = readl(&xhci->op_regs->command); in xhci_resume()
1128 writel(command, &xhci->op_regs->command); in xhci_resume()
1134 if (xhci_handshake(&xhci->op_regs->status, in xhci_resume()
1136 xhci_warn(xhci, "WARN: xHC restore state timeout\n"); in xhci_resume()
1137 spin_unlock_irq(&xhci->lock); in xhci_resume()
1140 temp = readl(&xhci->op_regs->status); in xhci_resume()
1146 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_resume()
1147 !(xhci_all_ports_seen_u0(xhci))) { in xhci_resume()
1148 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_resume()
1149 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_resume()
1154 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); in xhci_resume()
1155 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); in xhci_resume()
1157 xhci_dbg(xhci, "Stop HCD\n"); in xhci_resume()
1158 xhci_halt(xhci); in xhci_resume()
1159 xhci_zero_64b_regs(xhci); in xhci_resume()
1160 xhci_reset(xhci); in xhci_resume()
1161 spin_unlock_irq(&xhci->lock); in xhci_resume()
1162 xhci_cleanup_msix(xhci); in xhci_resume()
1164 xhci_dbg(xhci, "// Disabling event ring interrupts\n"); in xhci_resume()
1165 temp = readl(&xhci->op_regs->status); in xhci_resume()
1166 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); in xhci_resume()
1167 temp = readl(&xhci->ir_set->irq_pending); in xhci_resume()
1168 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_resume()
1170 xhci_dbg(xhci, "cleaning up memory\n"); in xhci_resume()
1171 xhci_mem_cleanup(xhci); in xhci_resume()
1172 xhci_debugfs_exit(xhci); in xhci_resume()
1173 xhci_dbg(xhci, "xhci_stop completed - status = %x\n", in xhci_resume()
1174 readl(&xhci->op_regs->status)); in xhci_resume()
1183 secondary_hcd = xhci->shared_hcd; in xhci_resume()
1185 xhci_dbg(xhci, "Initialize the xhci_hcd\n"); in xhci_resume()
1191 xhci_dbg(xhci, "Start the primary HCD\n"); in xhci_resume()
1194 xhci_dbg(xhci, "Start the secondary HCD\n"); in xhci_resume()
1198 xhci->shared_hcd->state = HC_STATE_SUSPENDED; in xhci_resume()
1203 command = readl(&xhci->op_regs->command); in xhci_resume()
1205 writel(command, &xhci->op_regs->command); in xhci_resume()
1206 xhci_handshake(&xhci->op_regs->status, STS_HALT, in xhci_resume()
1218 spin_unlock_irq(&xhci->lock); in xhci_resume()
1220 xhci_dbc_resume(xhci); in xhci_resume()
1225 if (xhci_pending_portevent(xhci)) { in xhci_resume()
1226 usb_hcd_resume_root_hub(xhci->shared_hcd); in xhci_resume()
1237 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) in xhci_resume()
1238 compliance_mode_recovery_timer_init(xhci); in xhci_resume()
1240 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) in xhci_resume()
1244 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); in xhci_resume()
1245 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_resume()
1246 usb_hcd_poll_rh_status(xhci->shared_hcd); in xhci_resume()
1338 struct xhci_hcd *xhci; in xhci_check_args() local
1350 xhci = hcd_to_xhci(hcd); in xhci_check_args()
1352 if (!udev->slot_id || !xhci->devs[udev->slot_id]) { in xhci_check_args()
1353 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", in xhci_check_args()
1358 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_args()
1360 xhci_dbg(xhci, "xHCI %s called with udev and " in xhci_check_args()
1366 if (xhci->xhc_state & XHCI_STATE_HALTED) in xhci_check_args()
1372 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1382 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, in xhci_check_maxpacket() argument
1393 out_ctx = xhci->devs[slot_id]->out_ctx; in xhci_check_maxpacket()
1394 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_check_maxpacket()
1398 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1400 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1403 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1406 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1414 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_check_maxpacket()
1418 command->in_ctx = xhci->devs[slot_id]->in_ctx; in xhci_check_maxpacket()
1421 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_maxpacket()
1427 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, in xhci_check_maxpacket()
1428 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_check_maxpacket()
1430 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_check_maxpacket()
1437 ret = xhci_configure_endpoint(xhci, urb->dev, command, in xhci_check_maxpacket()
1457 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_urb_enqueue() local
1471 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_urb_enqueue()
1475 xhci_dbg(xhci, "urb submitted during PCI suspend\n"); in xhci_urb_enqueue()
1478 if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { in xhci_urb_enqueue()
1479 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); in xhci_urb_enqueue()
1508 ret = xhci_check_maxpacket(xhci, slot_id, in xhci_urb_enqueue()
1518 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1520 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_urb_enqueue()
1521 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n", in xhci_urb_enqueue()
1527 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n", in xhci_urb_enqueue()
1533 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n"); in xhci_urb_enqueue()
1541 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1545 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1549 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1553 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1562 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1602 struct xhci_hcd *xhci; in xhci_urb_dequeue() local
1611 xhci = hcd_to_xhci(hcd); in xhci_urb_dequeue()
1612 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_dequeue()
1622 vdev = xhci->devs[urb->dev->slot_id]; in xhci_urb_dequeue()
1629 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_urb_dequeue()
1634 temp = readl(&xhci->op_regs->status); in xhci_urb_dequeue()
1635 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) { in xhci_urb_dequeue()
1636 xhci_hc_died(xhci); in xhci_urb_dequeue()
1646 xhci_err(xhci, "Canceled URB td not found on endpoint ring"); in xhci_urb_dequeue()
1655 if (xhci->xhc_state & XHCI_STATE_HALTED) { in xhci_urb_dequeue()
1656 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1672 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1690 command = xhci_alloc_command(xhci, false, GFP_ATOMIC); in xhci_urb_dequeue()
1699 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, in xhci_urb_dequeue()
1701 xhci_ring_cmd_db(xhci); in xhci_urb_dequeue()
1704 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1711 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1732 struct xhci_hcd *xhci; in xhci_drop_endpoint() local
1744 xhci = hcd_to_xhci(hcd); in xhci_drop_endpoint()
1745 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_drop_endpoint()
1748 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_drop_endpoint()
1751 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", in xhci_drop_endpoint()
1756 in_ctx = xhci->devs[udev->slot_id]->in_ctx; in xhci_drop_endpoint()
1757 out_ctx = xhci->devs[udev->slot_id]->out_ctx; in xhci_drop_endpoint()
1760 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_drop_endpoint()
1766 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_drop_endpoint()
1774 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) in xhci_drop_endpoint()
1775 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", in xhci_drop_endpoint()
1786 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); in xhci_drop_endpoint()
1788 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); in xhci_drop_endpoint()
1790 if (xhci->quirks & XHCI_MTK_HOST) in xhci_drop_endpoint()
1793 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_drop_endpoint()
1817 struct xhci_hcd *xhci; in xhci_add_endpoint() local
1833 xhci = hcd_to_xhci(hcd); in xhci_add_endpoint()
1834 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_add_endpoint()
1843 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", in xhci_add_endpoint()
1848 virt_dev = xhci->devs[udev->slot_id]; in xhci_add_endpoint()
1852 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_add_endpoint()
1863 xhci_warn(xhci, "Trying to add endpoint 0x%x " in xhci_add_endpoint()
1873 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", in xhci_add_endpoint()
1883 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { in xhci_add_endpoint()
1889 if (xhci->quirks & XHCI_MTK_HOST) { in xhci_add_endpoint()
1892 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring); in xhci_add_endpoint()
1912 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); in xhci_add_endpoint()
1915 xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index); in xhci_add_endpoint()
1917 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_add_endpoint()
1925 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) in xhci_zero_in_ctx() argument
1934 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_zero_in_ctx()
1946 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_zero_in_ctx()
1951 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); in xhci_zero_in_ctx()
1959 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, in xhci_configure_endpoint_result() argument
1967 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); in xhci_configure_endpoint_result()
1996 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint_result()
2001 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_configure_endpoint_result()
2009 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, in xhci_evaluate_context_result() argument
2017 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); in xhci_evaluate_context_result()
2046 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_evaluate_context_result()
2051 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_evaluate_context_result()
2059 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, in xhci_count_num_new_endpoints() argument
2080 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, in xhci_count_num_dropped_endpoints() argument
2106 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, in xhci_reserve_host_resources() argument
2111 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_reserve_host_resources()
2112 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { in xhci_reserve_host_resources()
2113 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2116 xhci->num_active_eps, added_eps, in xhci_reserve_host_resources()
2117 xhci->limit_active_eps); in xhci_reserve_host_resources()
2120 xhci->num_active_eps += added_eps; in xhci_reserve_host_resources()
2121 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2123 xhci->num_active_eps); in xhci_reserve_host_resources()
2133 static void xhci_free_host_resources(struct xhci_hcd *xhci, in xhci_free_host_resources() argument
2138 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_free_host_resources()
2139 xhci->num_active_eps -= num_failed_eps; in xhci_free_host_resources()
2140 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_host_resources()
2143 xhci->num_active_eps); in xhci_free_host_resources()
2152 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, in xhci_finish_resource_reservation() argument
2157 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); in xhci_finish_resource_reservation()
2158 xhci->num_active_eps -= num_dropped_eps; in xhci_finish_resource_reservation()
2160 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_finish_resource_reservation()
2163 xhci->num_active_eps); in xhci_finish_resource_reservation()
2199 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, in xhci_check_tt_bw_table() argument
2207 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; in xhci_check_tt_bw_table()
2229 static int xhci_check_ss_bw(struct xhci_hcd *xhci, in xhci_check_ss_bw() argument
2286 static int xhci_check_bw_table(struct xhci_hcd *xhci, in xhci_check_bw_table() argument
2302 return xhci_check_ss_bw(xhci, virt_dev); in xhci_check_bw_table()
2323 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2326 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_check_bw_table()
2327 xhci_warn(xhci, "Not enough bandwidth on HS bus for " in xhci_check_bw_table()
2331 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2336 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2419 xhci_warn(xhci, "Not enough bandwidth. " in xhci_check_bw_table()
2442 xhci->rh_bw[port_index].num_active_tts; in xhci_check_bw_table()
2445 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2454 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", in xhci_check_bw_table()
2489 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, in xhci_drop_ep_from_interval_table() argument
2504 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= in xhci_drop_ep_from_interval_table()
2507 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= in xhci_drop_ep_from_interval_table()
2553 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, in xhci_add_ep_to_interval_table() argument
2569 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += in xhci_add_ep_to_interval_table()
2572 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += in xhci_add_ep_to_interval_table()
2627 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, in xhci_update_tt_active_eps() argument
2635 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; in xhci_update_tt_active_eps()
2647 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, in xhci_reserve_bandwidth() argument
2661 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_reserve_bandwidth()
2677 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2685 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); in xhci_reserve_bandwidth()
2689 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2697 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_reserve_bandwidth()
2701 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_reserve_bandwidth()
2714 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2726 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2740 static int xhci_configure_endpoint(struct xhci_hcd *xhci, in xhci_configure_endpoint() argument
2754 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2756 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_configure_endpoint()
2757 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2761 virt_dev = xhci->devs[udev->slot_id]; in xhci_configure_endpoint()
2765 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2766 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_configure_endpoint()
2771 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && in xhci_configure_endpoint()
2772 xhci_reserve_host_resources(xhci, ctrl_ctx)) { in xhci_configure_endpoint()
2773 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2774 xhci_warn(xhci, "Not enough host resources, " in xhci_configure_endpoint()
2776 xhci->num_active_eps); in xhci_configure_endpoint()
2779 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && in xhci_configure_endpoint()
2780 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { in xhci_configure_endpoint()
2781 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2782 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2783 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2784 xhci_warn(xhci, "Not enough bandwidth\n"); in xhci_configure_endpoint()
2788 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); in xhci_configure_endpoint()
2794 ret = xhci_queue_configure_endpoint(xhci, command, in xhci_configure_endpoint()
2798 ret = xhci_queue_evaluate_context(xhci, command, in xhci_configure_endpoint()
2802 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2803 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2804 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2805 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint()
2809 xhci_ring_cmd_db(xhci); in xhci_configure_endpoint()
2810 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2816 ret = xhci_configure_endpoint_result(xhci, udev, in xhci_configure_endpoint()
2819 ret = xhci_evaluate_context_result(xhci, udev, in xhci_configure_endpoint()
2822 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_configure_endpoint()
2823 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2828 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2830 xhci_finish_resource_reservation(xhci, ctrl_ctx); in xhci_configure_endpoint()
2831 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2836 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, in xhci_check_bw_drop_ep_streams() argument
2842 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", in xhci_check_bw_drop_ep_streams()
2844 xhci_free_stream_info(xhci, ep->stream_info); in xhci_check_bw_drop_ep_streams()
2864 struct xhci_hcd *xhci; in xhci_check_bandwidth() local
2873 xhci = hcd_to_xhci(hcd); in xhci_check_bandwidth()
2874 if ((xhci->xhc_state & XHCI_STATE_DYING) || in xhci_check_bandwidth()
2875 (xhci->xhc_state & XHCI_STATE_REMOVING)) in xhci_check_bandwidth()
2878 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_check_bandwidth()
2879 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_bandwidth()
2881 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_check_bandwidth()
2890 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_bandwidth()
2906 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_check_bandwidth()
2918 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_check_bandwidth()
2928 xhci_free_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2929 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2932 xhci_zero_in_ctx(xhci, virt_dev); in xhci_check_bandwidth()
2944 xhci_free_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2946 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2959 struct xhci_hcd *xhci; in xhci_reset_bandwidth() local
2966 xhci = hcd_to_xhci(hcd); in xhci_reset_bandwidth()
2968 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_reset_bandwidth()
2969 virt_dev = xhci->devs[udev->slot_id]; in xhci_reset_bandwidth()
2973 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); in xhci_reset_bandwidth()
2974 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); in xhci_reset_bandwidth()
2978 xhci_zero_in_ctx(xhci, virt_dev); in xhci_reset_bandwidth()
2981 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, in xhci_setup_input_ctx_for_config_ep() argument
2989 xhci_slot_copy(xhci, in_ctx, out_ctx); in xhci_setup_input_ctx_for_config_ep()
2993 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, in xhci_setup_input_ctx_for_quirk() argument
3003 in_ctx = xhci->devs[slot_id]->in_ctx; in xhci_setup_input_ctx_for_quirk()
3006 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_setup_input_ctx_for_quirk()
3011 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, in xhci_setup_input_ctx_for_quirk()
3012 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_setup_input_ctx_for_quirk()
3013 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); in xhci_setup_input_ctx_for_quirk()
3017 xhci_warn(xhci, "WARN Cannot submit config ep after " in xhci_setup_input_ctx_for_quirk()
3019 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", in xhci_setup_input_ctx_for_quirk()
3027 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, in xhci_setup_input_ctx_for_quirk()
3028 xhci->devs[slot_id]->out_ctx, ctrl_ctx, in xhci_setup_input_ctx_for_quirk()
3032 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, in xhci_cleanup_stalled_ring() argument
3038 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_cleanup_stalled_ring()
3043 xhci_find_new_dequeue_state(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
3052 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { in xhci_cleanup_stalled_ring()
3053 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_cleanup_stalled_ring()
3055 xhci_queue_new_dequeue_state(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
3063 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_cleanup_stalled_ring()
3066 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
3074 struct xhci_hcd *xhci; in xhci_endpoint_disable() local
3081 xhci = hcd_to_xhci(hcd); in xhci_endpoint_disable()
3083 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_disable()
3089 vdev = xhci->devs[udev->slot_id]; in xhci_endpoint_disable()
3100 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_disable()
3106 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", in xhci_endpoint_disable()
3110 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_disable()
3128 struct xhci_hcd *xhci; in xhci_endpoint_reset() local
3139 xhci = hcd_to_xhci(hcd); in xhci_endpoint_reset()
3143 vdev = xhci->devs[udev->slot_id]; in xhci_endpoint_reset()
3172 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT); in xhci_endpoint_reset()
3176 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT); in xhci_endpoint_reset()
3180 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_reset()
3193 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3194 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3198 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, in xhci_endpoint_reset()
3201 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3202 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3203 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", in xhci_endpoint_reset()
3208 xhci_ring_cmd_db(xhci); in xhci_endpoint_reset()
3209 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3213 spin_lock_irqsave(&xhci->lock, flags); in xhci_endpoint_reset()
3217 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, in xhci_endpoint_reset()
3219 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); in xhci_endpoint_reset()
3221 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, in xhci_endpoint_reset()
3224 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3225 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3226 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", in xhci_endpoint_reset()
3231 xhci_ring_cmd_db(xhci); in xhci_endpoint_reset()
3232 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_endpoint_reset()
3237 xhci_free_command(xhci, cfg_cmd); in xhci_endpoint_reset()
3239 xhci_free_command(xhci, stop_cmd); in xhci_endpoint_reset()
3242 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, in xhci_check_streams_endpoint() argument
3252 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); in xhci_check_streams_endpoint()
3256 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" in xhci_check_streams_endpoint()
3263 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_check_streams_endpoint()
3266 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " in xhci_check_streams_endpoint()
3269 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " in xhci_check_streams_endpoint()
3273 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { in xhci_check_streams_endpoint()
3274 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " in xhci_check_streams_endpoint()
3282 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, in xhci_calculate_streams_entries() argument
3295 max_streams = HCC_MAX_PSA(xhci->hcc_params); in xhci_calculate_streams_entries()
3297 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", in xhci_calculate_streams_entries()
3308 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, in xhci_calculate_streams_and_bitmask() argument
3319 ret = xhci_check_streams_endpoint(xhci, udev, in xhci_calculate_streams_and_bitmask()
3326 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", in xhci_calculate_streams_and_bitmask()
3340 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, in xhci_calculate_no_streams_bitmask() argument
3351 if (!xhci->devs[slot_id]) in xhci_calculate_no_streams_bitmask()
3356 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_calculate_no_streams_bitmask()
3359 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3368 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3372 xhci_warn(xhci, "WARN xhci_free_streams() called " in xhci_calculate_no_streams_bitmask()
3402 struct xhci_hcd *xhci; in xhci_alloc_streams() local
3419 xhci = hcd_to_xhci(hcd); in xhci_alloc_streams()
3420 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", in xhci_alloc_streams()
3424 if ((xhci->quirks & XHCI_BROKEN_STREAMS) || in xhci_alloc_streams()
3425 HCC_MAX_PSA(xhci->hcc_params) < 4) { in xhci_alloc_streams()
3426 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); in xhci_alloc_streams()
3430 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); in xhci_alloc_streams()
3436 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_alloc_streams()
3438 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3446 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3447 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, in xhci_alloc_streams()
3450 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3451 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3455 xhci_warn(xhci, "WARN: endpoints can't handle " in xhci_alloc_streams()
3457 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3458 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3461 vdev = xhci->devs[udev->slot_id]; in xhci_alloc_streams()
3469 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3475 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); in xhci_alloc_streams()
3476 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", in xhci_alloc_streams()
3482 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, in xhci_alloc_streams()
3498 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); in xhci_alloc_streams()
3500 xhci_endpoint_copy(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3502 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, in xhci_alloc_streams()
3508 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3513 ret = xhci_configure_endpoint(xhci, udev, config_cmd, in xhci_alloc_streams()
3523 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3527 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", in xhci_alloc_streams()
3531 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3532 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3541 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_alloc_streams()
3548 xhci_endpoint_zero(xhci, vdev, eps[i]); in xhci_alloc_streams()
3550 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3565 struct xhci_hcd *xhci; in xhci_free_streams() local
3573 xhci = hcd_to_xhci(hcd); in xhci_free_streams()
3574 vdev = xhci->devs[udev->slot_id]; in xhci_free_streams()
3577 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3578 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, in xhci_free_streams()
3581 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3593 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3594 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_free_streams()
3603 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_free_streams()
3604 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= in xhci_free_streams()
3607 xhci_endpoint_copy(xhci, command->in_ctx, in xhci_free_streams()
3612 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, in xhci_free_streams()
3615 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3620 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_free_streams()
3629 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3632 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_free_streams()
3640 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3652 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, in xhci_free_device_endpoint_resources() argument
3665 xhci->num_active_eps -= num_dropped_eps; in xhci_free_device_endpoint_resources()
3667 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_device_endpoint_resources()
3671 xhci->num_active_eps); in xhci_free_device_endpoint_resources()
3697 struct xhci_hcd *xhci; in xhci_discover_or_reset_device() local
3707 xhci = hcd_to_xhci(hcd); in xhci_discover_or_reset_device()
3709 virt_dev = xhci->devs[slot_id]; in xhci_discover_or_reset_device()
3711 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3728 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3739 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_discover_or_reset_device()
3746 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); in xhci_discover_or_reset_device()
3753 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO); in xhci_discover_or_reset_device()
3755 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); in xhci_discover_or_reset_device()
3760 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3762 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); in xhci_discover_or_reset_device()
3764 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_discover_or_reset_device()
3765 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3768 xhci_ring_cmd_db(xhci); in xhci_discover_or_reset_device()
3769 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3782 xhci_warn(xhci, "Timeout waiting for reset device command\n"); in xhci_discover_or_reset_device()
3787 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", in xhci_discover_or_reset_device()
3789 xhci_get_slot_state(xhci, virt_dev->out_ctx)); in xhci_discover_or_reset_device()
3790 xhci_dbg(xhci, "Not freeing device rings.\n"); in xhci_discover_or_reset_device()
3795 xhci_dbg(xhci, "Successful reset device command.\n"); in xhci_discover_or_reset_device()
3798 if (xhci_is_vendor_info_code(xhci, ret)) in xhci_discover_or_reset_device()
3800 xhci_warn(xhci, "Unknown completion code %u for " in xhci_discover_or_reset_device()
3807 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_discover_or_reset_device()
3808 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3810 xhci_free_device_endpoint_resources(xhci, virt_dev, false); in xhci_discover_or_reset_device()
3811 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3819 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", in xhci_discover_or_reset_device()
3821 xhci_free_stream_info(xhci, ep->stream_info); in xhci_discover_or_reset_device()
3827 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); in xhci_discover_or_reset_device()
3828 xhci_free_endpoint_ring(xhci, virt_dev, i); in xhci_discover_or_reset_device()
3831 xhci_drop_ep_from_interval_table(xhci, in xhci_discover_or_reset_device()
3840 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_discover_or_reset_device()
3845 xhci_free_command(xhci, reset_device_cmd); in xhci_discover_or_reset_device()
3856 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_free_dev() local
3867 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_free_dev()
3878 virt_dev = xhci->devs[udev->slot_id]; in xhci_free_dev()
3879 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_free_dev()
3888 ret = xhci_disable_slot(xhci, udev->slot_id); in xhci_free_dev()
3890 xhci_free_virt_device(xhci, udev->slot_id); in xhci_free_dev()
3893 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) in xhci_disable_slot() argument
3900 command = xhci_alloc_command(xhci, false, GFP_KERNEL); in xhci_disable_slot()
3904 xhci_debugfs_remove_slot(xhci, slot_id); in xhci_disable_slot()
3906 spin_lock_irqsave(&xhci->lock, flags); in xhci_disable_slot()
3908 state = readl(&xhci->op_regs->status); in xhci_disable_slot()
3909 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || in xhci_disable_slot()
3910 (xhci->xhc_state & XHCI_STATE_HALTED)) { in xhci_disable_slot()
3911 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_slot()
3916 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, in xhci_disable_slot()
3919 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_slot()
3923 xhci_ring_cmd_db(xhci); in xhci_disable_slot()
3924 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_slot()
3934 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) in xhci_reserve_host_control_ep_resources() argument
3936 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { in xhci_reserve_host_control_ep_resources()
3937 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3940 xhci->num_active_eps, xhci->limit_active_eps); in xhci_reserve_host_control_ep_resources()
3943 xhci->num_active_eps += 1; in xhci_reserve_host_control_ep_resources()
3944 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3946 xhci->num_active_eps); in xhci_reserve_host_control_ep_resources()
3957 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_alloc_dev() local
3964 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_alloc_dev()
3968 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3969 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); in xhci_alloc_dev()
3971 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3972 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_alloc_dev()
3973 xhci_free_command(xhci, command); in xhci_alloc_dev()
3976 xhci_ring_cmd_db(xhci); in xhci_alloc_dev()
3977 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3983 xhci_err(xhci, "Error while assigning device slot ID\n"); in xhci_alloc_dev()
3984 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", in xhci_alloc_dev()
3986 readl(&xhci->cap_regs->hcs_params1))); in xhci_alloc_dev()
3987 xhci_free_command(xhci, command); in xhci_alloc_dev()
3991 xhci_free_command(xhci, command); in xhci_alloc_dev()
3993 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_alloc_dev()
3994 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3995 ret = xhci_reserve_host_control_ep_resources(xhci); in xhci_alloc_dev()
3997 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3998 xhci_warn(xhci, "Not enough host resources, " in xhci_alloc_dev()
4000 xhci->num_active_eps); in xhci_alloc_dev()
4003 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
4009 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { in xhci_alloc_dev()
4010 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); in xhci_alloc_dev()
4013 vdev = xhci->devs[slot_id]; in xhci_alloc_dev()
4014 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); in xhci_alloc_dev()
4019 xhci_debugfs_create_slot(xhci, slot_id); in xhci_alloc_dev()
4026 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_alloc_dev()
4035 ret = xhci_disable_slot(xhci, udev->slot_id); in xhci_alloc_dev()
4037 xhci_free_virt_device(xhci, udev->slot_id); in xhci_alloc_dev()
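The failure message at source line 3984 reports the controller's slot ceiling, decoded from HCSPARAMS1. A sketch of that decoding, with the shifts and masks written out per the xHCI capability-register layout rather than via the driver's HCS_MAX_* macros:

    #include <stdint.h>

    /* HCSPARAMS1 per the xHCI spec: MaxSlots in bits 7:0,
     * MaxIntrs in bits 18:8. */
    static unsigned int max_slots(uint32_t hcs_params1)
    {
        return hcs_params1 & 0xff;
    }

    static unsigned int max_intrs(uint32_t hcs_params1)
    {
        return (hcs_params1 >> 8) & 0x7ff;
    }
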
4053 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_setup_device() local
4059 mutex_lock(&xhci->mutex); in xhci_setup_device()
4061 if (xhci->xhc_state) { /* dying, removing or halted */ in xhci_setup_device()
4067 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4073 virt_dev = xhci->devs[udev->slot_id]; in xhci_setup_device()
4081 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", in xhci_setup_device()
4086 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_setup_device()
4092 xhci_dbg(xhci, "Slot already in default state\n"); in xhci_setup_device()
4097 command = xhci_alloc_command(xhci, true, GFP_KERNEL); in xhci_setup_device()
4105 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_setup_device()
4108 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_setup_device()
4119 xhci_setup_addressable_virt_dev(xhci, udev); in xhci_setup_device()
4122 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); in xhci_setup_device()
4126 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
4130 spin_lock_irqsave(&xhci->lock, flags); in xhci_setup_device()
4132 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, in xhci_setup_device()
4135 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
4136 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4140 xhci_ring_cmd_db(xhci); in xhci_setup_device()
4141 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
4153 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); in xhci_setup_device()
4158 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", in xhci_setup_device()
4165 mutex_unlock(&xhci->mutex); in xhci_setup_device()
4166 ret = xhci_disable_slot(xhci, udev->slot_id); in xhci_setup_device()
4178 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4182 xhci_err(xhci, in xhci_setup_device()
4185 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); in xhci_setup_device()
4191 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_setup_device()
4192 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4194 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4197 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], in xhci_setup_device()
4199 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); in xhci_setup_device()
4200 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4203 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
4209 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, in xhci_setup_device()
4214 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_setup_device()
4217 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
4221 mutex_unlock(&xhci->mutex); in xhci_setup_device()
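xhci_setup_device() serves both a full SET_ADDRESS and the "enable only" path; both queue the same Address Device command, and the difference is whether the BSR (Block Set Address Request) flag is set in the command TRB. A hedged sketch of that choice; the bit position follows the xHCI TRB layout, and SETUP_CONTEXT_ONLY mirrors the driver's enum:

    #include <stdint.h>

    enum setup_dev { SETUP_CONTEXT_ONLY, SETUP_CONTEXT_ADDRESS };

    /* BSR, bit 9 of the command TRB's control word per the xHCI TRB
     * layout: when set, the controller loads the device context but
     * sends no SET_ADDRESS on the wire. */
    #define TRB_BSR (1u << 9)

    static uint32_t address_device_flags(enum setup_dev setup)
    {
        return (setup == SETUP_CONTEXT_ONLY) ? TRB_BSR : 0;
    }
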
4257 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, in xhci_change_max_exit_latency() argument
4267 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4269 virt_dev = xhci->devs[udev->slot_id]; in xhci_change_max_exit_latency()
4278 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4283 command = xhci->lpm_command; in xhci_change_max_exit_latency()
4286 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4287 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_change_max_exit_latency()
4292 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); in xhci_change_max_exit_latency()
4293 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4296 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); in xhci_change_max_exit_latency()
4301 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_change_max_exit_latency()
4305 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_change_max_exit_latency()
4309 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4311 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
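xhci_change_max_exit_latency() copies the live output slot context into the input context and rewrites only the max-exit-latency field before issuing the Evaluate Context command. A sketch of that read-modify-write; the 16-bit field sits in bits 15:0 of the slot context's second dword, and the real driver performs it on little-endian context memory via cpu_to_le32:

    #include <stdint.h>

    #define MAX_EXIT_LATENCY_MASK 0xffffu  /* bits 15:0 of slot ctx dword 1 */

    /* Replace only the Max Exit Latency field (microseconds), leaving
     * the rest of the dword (root-hub port number etc.) untouched. */
    static uint32_t set_max_exit_latency(uint32_t dev_info2, uint16_t mel_us)
    {
        return (dev_info2 & ~MAX_EXIT_LATENCY_MASK) | mel_us;
    }
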
4323 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, in xhci_calculate_hird_besl() argument
4330 u2del = HCS_U2_LATENCY(xhci->hcs_params3); in xhci_calculate_hird_besl()
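xhci_calculate_hird_besl() turns the host's U2 exit latency from HCSPARAMS3 (plus any device-side BESL baseline) into the 4-bit HIRD/BESL code programmed into the port. A sketch of the host-side half, using the BESL encoding table from the USB 2.0 LPM errata (values in microseconds), which the driver keeps as xhci_besl_encoding[]:

    /* BESL encoding: index -> latency in microseconds. */
    static const int besl_us[16] = {
        125, 150, 200, 300, 400, 500, 1000, 2000,
        3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000,
    };

    /* Pick the smallest BESL index whose latency covers the host's U2
     * exit latency (u2del, microseconds, from HCSPARAMS3); clamps at
     * the deepest encoding when nothing is large enough. */
    static int host_besl(int u2del)
    {
        int i;

        for (i = 0; i < 15; i++)
            if (besl_us[i] >= u2del)
                break;
        return i;
    }
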
4382 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_set_usb2_hardware_lpm() local
4391 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || in xhci_set_usb2_hardware_lpm()
4402 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4404 ports = xhci->usb2_rhub.ports; in xhci_set_usb2_hardware_lpm()
4410 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", in xhci_set_usb2_hardware_lpm()
4413 if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) { in xhci_set_usb2_hardware_lpm()
4428 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4438 ret = xhci_change_max_exit_latency(xhci, udev, in xhci_set_usb2_hardware_lpm()
4444 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4451 hird = xhci_calculate_hird_besl(xhci, udev); in xhci_set_usb2_hardware_lpm()
4468 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4470 xhci_change_max_exit_latency(xhci, udev, 0); in xhci_set_usb2_hardware_lpm()
4476 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
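When hardware LPM is switched on, the HIRD/BESL code, the device's slot ID, and the hardware-LPM-enable bit are composed into the port's USB2 PORTPMSC register; the disable path clears the same fields. A hedged sketch of that composition, with field positions taken from the xHCI PORTPMSC layout (HIRD/BESL in bits 7:4, L1 device slot in 15:8, HLE at bit 16):

    #include <stdint.h>

    #define PM_HIRD(h)  (((uint32_t)(h) & 0xf) << 4)   /* bits 7:4   */
    #define PM_L1DS(s)  (((uint32_t)(s) & 0xff) << 8)  /* bits 15:8  */
    #define PM_HLE      (1u << 16)                     /* HW LPM enable */

    /* Program HIRD/BESL and the L1 device slot ID, then set HLE. */
    static uint32_t portpmsc_enable(uint32_t pmsc, int hird, int slot_id)
    {
        pmsc &= ~(PM_HIRD(0xf) | PM_L1DS(0xff) | PM_HLE);
        return pmsc | PM_HIRD(hird) | PM_L1DS(slot_id) | PM_HLE;
    }
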
4484 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, in xhci_check_usb2_port_capability() argument
4490 for (i = 0; i < xhci->num_ext_caps; i++) { in xhci_check_usb2_port_capability()
4491 if (xhci->ext_caps[i] & capability) { in xhci_check_usb2_port_capability()
4493 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; in xhci_check_usb2_port_capability()
4494 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); in xhci_check_usb2_port_capability()
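The capability check walks the cached extended-capability words: a port supports, say, hardware LPM (HLC) or BESL (BLC) if some matching "supported protocol" entry covers its port range. A sketch with the same arithmetic; the offset/count extraction mirrors XHCI_EXT_PORT_OFF()/XHCI_EXT_PORT_COUNT():

    #include <stdint.h>

    /* Supported-protocol dword: compatible port offset in bits 7:0
     * (1-based), compatible port count in bits 15:8. */
    #define EXT_PORT_OFF(x)    ((x) & 0xffu)
    #define EXT_PORT_COUNT(x)  (((x) >> 8) & 0xffu)

    /* Does 0-based port index `port` fall inside a cached capability
     * word that carries the wanted capability bit? */
    static int port_has_cap(const uint32_t *caps, int n, int port, uint32_t cap)
    {
        int i;

        for (i = 0; i < n; i++) {
            uint32_t off = EXT_PORT_OFF(caps[i]) - 1; /* offsets are 1-based */

            if ((caps[i] & cap) && port >= (int)off &&
                port < (int)(off + EXT_PORT_COUNT(caps[i])))
                return 1;
        }
        return 0;
    }
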
4505 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_device() local
4516 if (xhci->hw_lpm_support == 1 && in xhci_update_device()
4518 xhci, portnum, XHCI_HLC)) { in xhci_update_device()
4522 if (xhci_check_usb2_port_capability(xhci, portnum, in xhci_update_device()
4627 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, in xhci_calculate_u1_timeout() argument
4641 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u1_timeout()
4691 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, in xhci_calculate_u2_timeout() argument
4705 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u2_timeout()
4722 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_call_host_update_timeout_for_endpoint() argument
4729 return xhci_calculate_u1_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
4731 return xhci_calculate_u2_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
4736 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_update_timeout_for_endpoint() argument
4744 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_endpoint()
4761 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, in xhci_update_timeout_for_interface() argument
4770 if (xhci_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_interface()
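The U1/U2 helpers above boil down to unit conversion: a desired inactivity timeout in nanoseconds becomes a U1 field counted in 1 µs units (capped at 0x7F) or a U2 field counted in 256 µs units (capped at 0xFE), and anything larger means the state must stay disabled. A hedged sketch, with LPM_DISABLED standing in for the USB3_LPM_DISABLED sentinel:

    #include <stdint.h>

    #define LPM_DISABLED    0x00  /* sentinel: don't enable this state */
    #define U1_MAX_TIMEOUT  0x7f  /* U1 field counts 1 us units   */
    #define U2_MAX_TIMEOUT  0xfe  /* U2 field counts 256 us units */

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    static uint16_t u1_timeout(uint64_t timeout_ns)
    {
        uint64_t t = DIV_ROUND_UP(timeout_ns, 1000);       /* -> 1 us units */

        return t > U1_MAX_TIMEOUT ? LPM_DISABLED : (uint16_t)t;
    }

    static uint16_t u2_timeout(uint64_t timeout_ns)
    {
        uint64_t t = DIV_ROUND_UP(timeout_ns, 256 * 1000); /* -> 256 us units */

        return t > U2_MAX_TIMEOUT ? LPM_DISABLED : (uint16_t)t;
    }
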
4802 static int xhci_check_tier_policy(struct xhci_hcd *xhci, in xhci_check_tier_policy() argument
4806 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_check_tier_policy()
4820 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_calculate_lpm_timeout() local
4836 if (xhci_check_tier_policy(xhci, udev, state) < 0) in xhci_calculate_lpm_timeout()
4842 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, in xhci_calculate_lpm_timeout()
4876 if (xhci_update_timeout_for_interface(xhci, udev, in xhci_calculate_lpm_timeout()
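xhci_calculate_lpm_timeout() starts from ep0 and then folds in every interface's endpoints: the slowest endpoint sets the device-wide timeout, and any endpoint that cannot tolerate the state at all vetoes LPM for the whole device. A sketch of that max-with-veto fold:

    #include <stddef.h>
    #include <stdint.h>

    #define LPM_DISABLED 0x00

    /* Fold per-endpoint timeouts into one device-wide value: a
     * "disabled" endpoint vetoes all, otherwise the largest wins. */
    static uint16_t fold_lpm_timeouts(const uint16_t *ep_timeout, size_t n)
    {
        uint16_t timeout = 0;
        size_t i;

        for (i = 0; i < n; i++) {
            if (ep_timeout[i] == LPM_DISABLED)
                return LPM_DISABLED;
            if (ep_timeout[i] > timeout)
                timeout = ep_timeout[i];
        }
        return timeout;
    }
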
4933 struct xhci_hcd *xhci; in xhci_enable_usb3_lpm_timeout() local
4938 xhci = hcd_to_xhci(hcd); in xhci_enable_usb3_lpm_timeout()
4943 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_enable_usb3_lpm_timeout()
4944 !xhci->devs[udev->slot_id]) in xhci_enable_usb3_lpm_timeout()
4955 ret = xhci_change_max_exit_latency(xhci, udev, mel); in xhci_enable_usb3_lpm_timeout()
4964 struct xhci_hcd *xhci; in xhci_disable_usb3_lpm_timeout() local
4967 xhci = hcd_to_xhci(hcd); in xhci_disable_usb3_lpm_timeout()
4968 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_disable_usb3_lpm_timeout()
4969 !xhci->devs[udev->slot_id]) in xhci_disable_usb3_lpm_timeout()
4973 return xhci_change_max_exit_latency(xhci, udev, mel); in xhci_disable_usb3_lpm_timeout()
5009 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_hub_device() local
5022 vdev = xhci->devs[hdev->slot_id]; in xhci_update_hub_device()
5024 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); in xhci_update_hub_device()
5028 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); in xhci_update_hub_device()
5034 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_update_hub_device()
5036 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
5040 spin_lock_irqsave(&xhci->lock, flags); in xhci_update_hub_device()
5042 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { in xhci_update_hub_device()
5043 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); in xhci_update_hub_device()
5044 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
5045 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
5049 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); in xhci_update_hub_device()
5051 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); in xhci_update_hub_device()
5063 if (xhci->hci_version > 0x95) { in xhci_update_hub_device()
5064 xhci_dbg(xhci, "xHCI version %x needs hub " in xhci_update_hub_device()
5066 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
5078 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) in xhci_update_hub_device()
5082 xhci_dbg(xhci, "xHCI version %x doesn't need hub " in xhci_update_hub_device()
5084 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
5087 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
5089 xhci_dbg(xhci, "Set up %s for hub device.\n", in xhci_update_hub_device()
5090 (xhci->hci_version > 0x95) ? in xhci_update_hub_device()
5096 if (xhci->hci_version > 0x95) in xhci_update_hub_device()
5097 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
5100 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
5103 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
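For hubs, xhci_update_hub_device() marks the slot context as a hub and, on pre-1.0 hosts or for high-speed hubs, fills in the TT think time, converting the hub descriptor's nanosecond value into the context's 0-3 encoding (8/16/24/32 FS bit times, where 8 bit times is roughly 666 ns). A sketch of that conversion:

    /* Hub-descriptor think time arrives in nanoseconds; the slot
     * context wants 0 = 8 FS bit times, 1 = 16, 2 = 24, 3 = 32. */
    static unsigned int tt_think_time_field(unsigned int think_time_ns)
    {
        if (think_time_ns == 0)
            return 0;
        return (think_time_ns / 666) - 1;
    }
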
5109 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_get_frame() local
5111 return readl(&xhci->run_regs->microframe_index) >> 3; in xhci_get_frame()
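xhci_get_frame() reads MFINDEX, which counts 125 µs microframes; shifting right by three divides by eight, yielding the 1 ms frame number the USB core expects:

    /* MFINDEX counts 125 us microframes; >> 3 gives 1 ms frames. */
    static unsigned int frame_from_mfindex(unsigned int mfindex)
    {
        return mfindex >> 3;
    }
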
5116 struct xhci_hcd *xhci; in xhci_gen_setup() local
5134 xhci = hcd_to_xhci(hcd); in xhci_gen_setup()
5137 xhci->main_hcd = hcd; in xhci_gen_setup()
5138 xhci->usb2_rhub.hcd = hcd; in xhci_gen_setup()
5160 if (xhci->usb3_rhub.min_rev == 0x1) in xhci_gen_setup()
5163 minor_rev = xhci->usb3_rhub.min_rev / 0x10; in xhci_gen_setup()
5177 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", in xhci_gen_setup()
5181 xhci->usb3_rhub.hcd = hcd; in xhci_gen_setup()
5188 mutex_init(&xhci->mutex); in xhci_gen_setup()
5189 xhci->cap_regs = hcd->regs; in xhci_gen_setup()
5190 xhci->op_regs = hcd->regs + in xhci_gen_setup()
5191 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); in xhci_gen_setup()
5192 xhci->run_regs = hcd->regs + in xhci_gen_setup()
5193 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); in xhci_gen_setup()
5195 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); in xhci_gen_setup()
5196 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); in xhci_gen_setup()
5197 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); in xhci_gen_setup()
5198 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); in xhci_gen_setup()
5199 xhci->hci_version = HC_VERSION(xhci->hcc_params); in xhci_gen_setup()
5200 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); in xhci_gen_setup()
5201 if (xhci->hci_version > 0x100) in xhci_gen_setup()
5202 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); in xhci_gen_setup()
5204 xhci->quirks |= quirks; in xhci_gen_setup()
5206 get_quirks(dev, xhci); in xhci_gen_setup()
5212 if (xhci->hci_version > 0x96) in xhci_gen_setup()
5213 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; in xhci_gen_setup()
5216 retval = xhci_halt(xhci); in xhci_gen_setup()
5220 xhci_zero_64b_regs(xhci); in xhci_gen_setup()
5222 xhci_dbg(xhci, "Resetting HCD\n"); in xhci_gen_setup()
5224 retval = xhci_reset(xhci); in xhci_gen_setup()
5227 xhci_dbg(xhci, "Reset complete\n"); in xhci_gen_setup()
5236 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) in xhci_gen_setup()
5237 xhci->hcc_params &= ~BIT(0); in xhci_gen_setup()
5241 if (HCC_64BIT_ADDR(xhci->hcc_params) && in xhci_gen_setup()
5243 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); in xhci_gen_setup()
5253 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); in xhci_gen_setup()
5257 xhci_dbg(xhci, "Calling HCD init\n"); in xhci_gen_setup()
5262 xhci_dbg(xhci, "Called HCD init\n"); in xhci_gen_setup()
5264 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", in xhci_gen_setup()
5265 xhci->hcc_params, xhci->hci_version, xhci->quirks); in xhci_gen_setup()
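xhci_gen_setup() derives the other register banks from the capability area: CAPLENGTH (the low byte of the first capability dword) is the offset to the operational registers, RTSOFF (32-byte aligned) locates the runtime registers, and the same first dword carries HCIVERSION in its top half. A sketch of the pointer math:

    #include <stdint.h>

    /* First capability dword: CAPLENGTH in bits 7:0, HCIVERSION in 31:16. */
    #define HC_LENGTH(cap)   ((cap) & 0xffu)
    #define HC_VERSION(cap)  (((cap) >> 16) & 0xffffu)
    #define RTSOFF_MASK      (~0x1fu)  /* runtime regs are 32-byte aligned */

    struct xhci_bases {
        uintptr_t op_regs;
        uintptr_t run_regs;
    };

    /* Both banks are offsets from the mapped capability registers. */
    static struct xhci_bases map_banks(uintptr_t regs, uint32_t hc_capbase,
                                       uint32_t run_regs_off)
    {
        struct xhci_bases b = {
            .op_regs  = regs + HC_LENGTH(hc_capbase),
            .run_regs = regs + (run_regs_off & RTSOFF_MASK),
        };
        return b;
    }
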
5274 struct xhci_hcd *xhci; in xhci_clear_tt_buffer_complete() local
5280 xhci = hcd_to_xhci(hcd); in xhci_clear_tt_buffer_complete()
5282 spin_lock_irqsave(&xhci->lock, flags); in xhci_clear_tt_buffer_complete()
5287 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; in xhci_clear_tt_buffer_complete()
5288 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_clear_tt_buffer_complete()
5289 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_clear_tt_buffer_complete()
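xhci_clear_tt_buffer_complete() runs when the hub driver finishes a Clear-TT-Buffer request: under the lock it drops the endpoint's "TT clear in flight" state bit and rings the doorbell so queued transfers resume. A hedged sketch of the flag handling, with a stand-in bit value:

    #define EP_CLEARING_TT (1u << 8)  /* stand-in flag value */

    /* Once the hub's Clear-TT-Buffer completes, clear the in-flight
     * marker so the endpoint's rings may be rung again. */
    static unsigned int tt_clear_done(unsigned int ep_state)
    {
        return ep_state & ~EP_CLEARING_TT;
    }
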