Lines matching refs: xhci
61 static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
143 static void next_trb(struct xhci_hcd *xhci, in next_trb() argument
159 void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) in inc_deq() argument
179 xhci_warn(xhci, "Missing link TRB at end of segment\n"); in inc_deq()
191 xhci_warn(xhci, "Ring is an endless link TRB loop\n"); in inc_deq()
217 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, in inc_enq() argument
230 xhci_err(xhci, "Tried to move enqueue past ring segment\n"); in inc_enq()
254 (xhci->quirks & XHCI_AMD_0x96_HOST)) && in inc_enq()
255 !xhci_link_trb_quirk(xhci)) { in inc_enq()
272 xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__); in inc_enq()
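
The inc_enq()/inc_deq() matches above implement the xHCI cycle-bit ownership scheme: a TRB belongs to the consumer only while its cycle bit equals the consumer's cycle state, and crossing a toggle-cycle link TRB flips the producer's state. A self-contained model of that scheme, with names local to the sketch (the driver's real rings are multi-segment and DMA-visible, which this deliberately ignores):

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8                     /* last slot stands in for the link TRB */

struct model_trb { unsigned v; bool cycle; };

struct model_ring {
        struct model_trb trb[RING_SIZE];
        int enq;                        /* producer (enqueue) index */
        bool cycle_state;               /* producer cycle bit */
};

static void model_inc_enq(struct model_ring *r)
{
        if (++r->enq == RING_SIZE - 1) {        /* reached the link TRB */
                r->enq = 0;                     /* follow link to segment start */
                r->cycle_state = !r->cycle_state; /* toggle-cycle link */
        }
}

int main(void)
{
        struct model_ring r = { .cycle_state = true };

        for (unsigned i = 0; i < 10; i++) {
                r.trb[r.enq] = (struct model_trb){ .v = i, .cycle = r.cycle_state };
                model_inc_enq(&r);
        }
        /* A consumer owns trb[i] only while trb[i].cycle == its own state. */
        printf("enq=%d cycle=%d\n", r.enq, (int)r.cycle_state);
        return 0;
}
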
304 static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, in room_on_ring() argument
322 void xhci_ring_cmd_db(struct xhci_hcd *xhci) in xhci_ring_cmd_db() argument
324 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) in xhci_ring_cmd_db()
327 xhci_dbg(xhci, "// Ding dong!\n"); in xhci_ring_cmd_db()
331 writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]); in xhci_ring_cmd_db()
333 readl(&xhci->dba->doorbell[0]); in xhci_ring_cmd_db()
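
The doorbell write in xhci_ring_cmd_db() is posted MMIO; the readl() straight after forces it out to the device, so the xHC starts fetching commands before the caller drops xhci->lock. Condensed from the same calls matched above:

        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
                return;                                 /* nothing to ring */
        writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]); /* doorbell 0 targets the host controller */
        readl(&xhci->dba->doorbell[0]);                 /* flush the posted write */
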
337 static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay) in xhci_mod_cmd_timer() argument
339 return mod_delayed_work(system_wq, &xhci->cmd_timer, delay); in xhci_mod_cmd_timer()
342 static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci) in xhci_next_queued_cmd() argument
344 return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command, in xhci_next_queued_cmd()
353 static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, in xhci_handle_stopped_cmd_ring() argument
359 list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) { in xhci_handle_stopped_cmd_ring()
366 xhci_dbg(xhci, "Turn aborted command %p to no-op\n", in xhci_handle_stopped_cmd_ring()
377 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; in xhci_handle_stopped_cmd_ring()
380 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && in xhci_handle_stopped_cmd_ring()
381 !(xhci->xhc_state & XHCI_STATE_DYING)) { in xhci_handle_stopped_cmd_ring()
382 xhci->current_cmd = cur_cmd; in xhci_handle_stopped_cmd_ring()
383 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); in xhci_handle_stopped_cmd_ring()
384 xhci_ring_cmd_db(xhci); in xhci_handle_stopped_cmd_ring()
389 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) in xhci_abort_cmd_ring() argument
391 struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg; in xhci_abort_cmd_ring()
392 union xhci_trb *new_deq = xhci->cmd_ring->dequeue; in xhci_abort_cmd_ring()
396 xhci_dbg(xhci, "Abort command ring\n"); in xhci_abort_cmd_ring()
398 reinit_completion(&xhci->cmd_ring_stop_completion); in xhci_abort_cmd_ring()
408 next_trb(xhci, NULL, &new_seg, &new_deq); in xhci_abort_cmd_ring()
410 next_trb(xhci, NULL, &new_seg, &new_deq); in xhci_abort_cmd_ring()
413 xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); in xhci_abort_cmd_ring()
421 ret = xhci_handshake(&xhci->op_regs->cmd_ring, in xhci_abort_cmd_ring()
424 xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret); in xhci_abort_cmd_ring()
425 xhci_halt(xhci); in xhci_abort_cmd_ring()
426 xhci_hc_died(xhci); in xhci_abort_cmd_ring()
435 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_abort_cmd_ring()
436 ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion, in xhci_abort_cmd_ring()
438 spin_lock_irqsave(&xhci->lock, flags); in xhci_abort_cmd_ring()
440 xhci_dbg(xhci, "No stop event for abort, ring start fail?\n"); in xhci_abort_cmd_ring()
441 xhci_cleanup_command_queue(xhci); in xhci_abort_cmd_ring()
443 xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci)); in xhci_abort_cmd_ring()
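
xhci_abort_cmd_ring() sets CMD_RING_ABORT in CRCR, then polls with xhci_handshake() for the Command Ring Running bit to clear; the spec allows the xHC up to five seconds. The polling pattern itself is simple. A self-contained model, names local to the sketch (the real helper reads MMIO and delays a microsecond between polls):

static int handshake_model(volatile unsigned int *reg, unsigned int mask,
                           unsigned int done, int usec)
{
        while (usec-- > 0) {
                if ((*reg & mask) == done)
                        return 0;       /* condition met */
                /* real code: udelay(1); */
        }
        return -1;                      /* -ETIMEDOUT in the driver */
}
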
448 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, in xhci_ring_ep_doorbell() argument
453 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; in xhci_ring_ep_doorbell()
454 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_ring_ep_doorbell()
475 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, in ring_doorbell_for_active_rings() argument
482 ep = &xhci->devs[slot_id]->eps[ep_index]; in ring_doorbell_for_active_rings()
487 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); in ring_doorbell_for_active_rings()
495 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, in ring_doorbell_for_active_rings()
500 void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, in xhci_ring_doorbell_for_active_rings() argument
504 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_ring_doorbell_for_active_rings()
507 static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci, in xhci_get_virt_ep() argument
512 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); in xhci_get_virt_ep()
516 xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index); in xhci_get_virt_ep()
519 if (!xhci->devs[slot_id]) { in xhci_get_virt_ep()
520 xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id); in xhci_get_virt_ep()
524 return &xhci->devs[slot_id]->eps[ep_index]; in xhci_get_virt_ep()
527 static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci, in xhci_virt_ep_to_ring() argument
539 xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n", in xhci_virt_ep_to_ring()
551 struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, in xhci_triad_to_transfer_ring() argument
557 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_triad_to_transfer_ring()
561 return xhci_virt_ep_to_ring(xhci, ep, stream_id); in xhci_triad_to_transfer_ring()
571 static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev, in xhci_get_hw_deq() argument
584 ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); in xhci_get_hw_deq()
588 static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, in xhci_move_dequeue_past_td() argument
592 struct xhci_virt_device *dev = xhci->devs[slot_id]; in xhci_move_dequeue_past_td()
606 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, in xhci_move_dequeue_past_td()
609 xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n", in xhci_move_dequeue_past_td()
625 xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue"); in xhci_move_dequeue_past_td()
628 xhci_warn(xhci, "Can't find new dequeue state, missing td\n"); in xhci_move_dequeue_past_td()
633 hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id); in xhci_move_dequeue_past_td()
658 next_trb(xhci, ep_ring, &new_seg, &new_deq); in xhci_move_dequeue_past_td()
662 xhci_err(xhci, "Error: Failed finding new dequeue state\n"); in xhci_move_dequeue_past_td()
673 xhci_warn(xhci, "Can't find dma of new dequeue ptr\n"); in xhci_move_dequeue_past_td()
674 xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq); in xhci_move_dequeue_past_td()
679 xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n", in xhci_move_dequeue_past_td()
685 cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC); in xhci_move_dequeue_past_td()
687 xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr); in xhci_move_dequeue_past_td()
693 ret = queue_command(xhci, cmd, in xhci_move_dequeue_past_td()
699 xhci_free_command(xhci, cmd); in xhci_move_dequeue_past_td()
705 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_move_dequeue_past_td()
714 xhci_ring_cmd_db(xhci); in xhci_move_dequeue_past_td()
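
Pieced together, xhci_move_dequeue_past_td() submits a Set TR Dequeue Pointer command: allocate a command, pack the new dequeue address and target into the four TRB fields, ring the host doorbell. A hedged sketch assuming the field macros from xhci.h; addr, new_cycle, and trb_sct (stream context type, zero for non-stream endpoints) come from the TD walk in the function:

        cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
        if (!cmd)
                return -ENOMEM;
        ret = queue_command(xhci, cmd,
                            lower_32_bits(addr) | trb_sct | new_cycle,
                            upper_32_bits(addr),
                            STREAM_ID_FOR_TRB(stream_id),
                            SLOT_ID_FOR_TRB(slot_id) | EP_ID_FOR_TRB(ep_index) |
                            TRB_TYPE(TRB_SET_DEQ), false);
        if (ret < 0) {
                xhci_free_command(xhci, cmd);
                return ret;
        }
        xhci_ring_cmd_db(xhci);
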
722 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, in td_to_noop() argument
738 next_trb(xhci, ep_ring, &seg, &trb); in td_to_noop()
742 static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, in xhci_stop_watchdog_timer_in_irq() argument
754 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, in xhci_giveback_urb_in_irq() argument
762 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; in xhci_giveback_urb_in_irq()
763 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_giveback_urb_in_irq()
764 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_giveback_urb_in_irq()
774 static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, in xhci_unmap_td_bounce_buffer() argument
777 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; in xhci_unmap_td_bounce_buffer()
798 xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", in xhci_unmap_td_bounce_buffer()
808 static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, in xhci_td_cleanup() argument
817 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); in xhci_td_cleanup()
825 xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", in xhci_td_cleanup()
843 xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", in xhci_td_cleanup()
850 xhci_giveback_urb_in_irq(xhci, td, status); in xhci_td_cleanup()
866 ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); in xhci_giveback_invalidated_tds()
869 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", in xhci_giveback_invalidated_tds()
871 xhci_td_cleanup(ep->xhci, td, ring, td->status); in xhci_giveback_invalidated_tds()
873 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", in xhci_giveback_invalidated_tds()
876 if (ep->xhci->xhc_state & XHCI_STATE_DYING) in xhci_giveback_invalidated_tds()
881 static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id, in xhci_reset_halted_ep() argument
887 command = xhci_alloc_command(xhci, false, GFP_ATOMIC); in xhci_reset_halted_ep()
893 xhci_dbg(xhci, "%s-reset ep %u, slot %u\n", in xhci_reset_halted_ep()
897 ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type); in xhci_reset_halted_ep()
900 xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n", in xhci_reset_halted_ep()
905 static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci, in xhci_handle_halted_endpoint() argument
930 xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n", in xhci_handle_halted_endpoint()
935 err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type); in xhci_handle_halted_endpoint()
941 xhci_ring_cmd_db(xhci); in xhci_handle_halted_endpoint()
957 struct xhci_hcd *xhci; in xhci_invalidate_cancelled_tds() local
966 xhci = ep->xhci; in xhci_invalidate_cancelled_tds()
969 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_invalidate_cancelled_tds()
975 ring = xhci_urb_to_transfer_ring(xhci, td->urb); in xhci_invalidate_cancelled_tds()
977 xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n", in xhci_invalidate_cancelled_tds()
987 hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index, in xhci_invalidate_cancelled_tds()
992 trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) { in xhci_invalidate_cancelled_tds()
1002 xhci_dbg(xhci, in xhci_invalidate_cancelled_tds()
1010 td_to_noop(xhci, ring, td, false); in xhci_invalidate_cancelled_tds()
1019 err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index, in xhci_invalidate_cancelled_tds()
1027 xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", in xhci_invalidate_cancelled_tds()
1029 td_to_noop(xhci, ring, td, false); in xhci_invalidate_cancelled_tds()
1046 hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0); in find_halted_td()
1049 if (trb_in_td(ep->xhci, td->start_seg, td->first_trb, in find_halted_td()
1066 static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_stop_ep() argument
1078 if (!xhci->devs[slot_id]) in xhci_handle_cmd_stop_ep()
1079 xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n", in xhci_handle_cmd_stop_ep()
1085 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_handle_cmd_stop_ep()
1089 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in xhci_handle_cmd_stop_ep()
1110 xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n"); in xhci_handle_cmd_stop_ep()
1120 err = xhci_handle_halted_endpoint(xhci, ep, 0, td, in xhci_handle_cmd_stop_ep()
1124 xhci_stop_watchdog_timer_in_irq(xhci, ep); in xhci_handle_cmd_stop_ep()
1128 xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n"); in xhci_handle_cmd_stop_ep()
1130 command = xhci_alloc_command(xhci, false, GFP_ATOMIC); in xhci_handle_cmd_stop_ep()
1132 xhci_stop_watchdog_timer_in_irq(xhci, ep); in xhci_handle_cmd_stop_ep()
1136 xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0); in xhci_handle_cmd_stop_ep()
1137 xhci_ring_cmd_db(xhci); in xhci_handle_cmd_stop_ep()
1146 xhci_stop_watchdog_timer_in_irq(xhci, ep); in xhci_handle_cmd_stop_ep()
1150 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_stop_ep()
1153 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) in xhci_kill_ring_urbs() argument
1164 xhci_unmap_td_bounce_buffer(xhci, ring, cur_td); in xhci_kill_ring_urbs()
1168 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); in xhci_kill_ring_urbs()
1172 static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, in xhci_kill_endpoint_urbs() argument
1180 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_kill_endpoint_urbs()
1194 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_kill_endpoint_urbs()
1197 xhci_kill_ring_urbs(xhci, ring); in xhci_kill_endpoint_urbs()
1203 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_kill_endpoint_urbs()
1206 xhci_kill_ring_urbs(xhci, ring); in xhci_kill_endpoint_urbs()
1215 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); in xhci_kill_endpoint_urbs()
1228 void xhci_hc_died(struct xhci_hcd *xhci) in xhci_hc_died() argument
1232 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_hc_died()
1235 xhci_err(xhci, "xHCI host controller not responding, assume dead\n"); in xhci_hc_died()
1236 xhci->xhc_state |= XHCI_STATE_DYING; in xhci_hc_died()
1238 xhci_cleanup_command_queue(xhci); in xhci_hc_died()
1241 for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { in xhci_hc_died()
1242 if (!xhci->devs[i]) in xhci_hc_died()
1245 xhci_kill_endpoint_urbs(xhci, i, j); in xhci_hc_died()
1249 if (!(xhci->xhc_state & XHCI_STATE_REMOVING)) in xhci_hc_died()
1250 usb_hc_died(xhci_to_hcd(xhci)); in xhci_hc_died()
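
The sweep in xhci_hc_died() visits every endpoint of every active slot; the inner bound, which does not appear in the matches because that line never mentions "xhci", is 31 entries (EP0 plus fifteen IN/OUT pairs in the xHCI device context):

        for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; j++)
                        xhci_kill_endpoint_urbs(xhci, i, j);
        }
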
1273 struct xhci_hcd *xhci = ep->xhci; in xhci_stop_endpoint_command_watchdog() local
1278 spin_lock_irqsave(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
1283 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
1284 xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit"); in xhci_stop_endpoint_command_watchdog()
1287 usbsts = readl(&xhci->op_regs->status); in xhci_stop_endpoint_command_watchdog()
1289 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); in xhci_stop_endpoint_command_watchdog()
1290 xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(str, usbsts)); in xhci_stop_endpoint_command_watchdog()
1294 xhci_halt(xhci); in xhci_stop_endpoint_command_watchdog()
1301 xhci_hc_died(xhci); in xhci_stop_endpoint_command_watchdog()
1303 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
1304 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_stop_endpoint_command_watchdog()
1308 static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci, in update_ring_for_set_deq_completion() argument
1349 xhci_dbg(xhci, "Unable to find new dequeue pointer\n"); in update_ring_for_set_deq_completion()
1361 static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_set_deq() argument
1374 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_handle_cmd_set_deq()
1378 ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id); in xhci_handle_cmd_set_deq()
1380 xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n", in xhci_handle_cmd_set_deq()
1386 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in xhci_handle_cmd_set_deq()
1387 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); in xhci_handle_cmd_set_deq()
1397 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n"); in xhci_handle_cmd_set_deq()
1400 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); in xhci_handle_cmd_set_deq()
1404 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_handle_cmd_set_deq()
1409 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n", in xhci_handle_cmd_set_deq()
1413 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n", in xhci_handle_cmd_set_deq()
1433 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_handle_cmd_set_deq()
1440 update_ring_for_set_deq_completion(xhci, ep->vdev, in xhci_handle_cmd_set_deq()
1443 xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); in xhci_handle_cmd_set_deq()
1444 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", in xhci_handle_cmd_set_deq()
1451 ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); in xhci_handle_cmd_set_deq()
1454 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", in xhci_handle_cmd_set_deq()
1456 xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); in xhci_handle_cmd_set_deq()
1458 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", in xhci_handle_cmd_set_deq()
1467 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_set_deq()
1470 static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_reset_ep() argument
1478 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_handle_cmd_reset_ep()
1482 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in xhci_handle_cmd_reset_ep()
1488 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_handle_cmd_reset_ep()
1494 if (xhci->quirks & XHCI_RESET_EP_QUIRK) in xhci_handle_cmd_reset_ep()
1495 xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw"); in xhci_handle_cmd_reset_ep()
1503 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_reset_ep()
1506 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_enable_slot() argument
1515 static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) in xhci_handle_cmd_disable_slot() argument
1520 virt_dev = xhci->devs[slot_id]; in xhci_handle_cmd_disable_slot()
1524 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_handle_cmd_disable_slot()
1527 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) in xhci_handle_cmd_disable_slot()
1529 xhci_free_device_endpoint_resources(xhci, virt_dev, true); in xhci_handle_cmd_disable_slot()
1532 static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_config_ep() argument
1550 virt_dev = xhci->devs[slot_id]; in xhci_handle_cmd_config_ep()
1555 xhci_warn(xhci, "Could not get input context, bad type.\n"); in xhci_handle_cmd_config_ep()
1564 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index); in xhci_handle_cmd_config_ep()
1573 if (xhci->quirks & XHCI_RESET_EP_QUIRK && in xhci_handle_cmd_config_ep()
1579 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_handle_cmd_config_ep()
1585 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_config_ep()
1591 static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id) in xhci_handle_cmd_addr_dev() argument
1596 vdev = xhci->devs[slot_id]; in xhci_handle_cmd_addr_dev()
1599 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); in xhci_handle_cmd_addr_dev()
1603 static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id) in xhci_handle_cmd_reset_dev() argument
1608 vdev = xhci->devs[slot_id]; in xhci_handle_cmd_reset_dev()
1610 xhci_warn(xhci, "Reset device command completion for disabled slot %u\n", in xhci_handle_cmd_reset_dev()
1614 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); in xhci_handle_cmd_reset_dev()
1617 xhci_dbg(xhci, "Completed reset device command.\n"); in xhci_handle_cmd_reset_dev()
1620 static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, in xhci_handle_cmd_nec_get_fw() argument
1623 if (!(xhci->quirks & XHCI_NEC_HOST)) { in xhci_handle_cmd_nec_get_fw()
1624 xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n"); in xhci_handle_cmd_nec_get_fw()
1627 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_handle_cmd_nec_get_fw()
1645 void xhci_cleanup_command_queue(struct xhci_hcd *xhci) in xhci_cleanup_command_queue() argument
1648 xhci->current_cmd = NULL; in xhci_cleanup_command_queue()
1649 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) in xhci_cleanup_command_queue()
1655 struct xhci_hcd *xhci; in xhci_handle_command_timeout() local
1659 xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer); in xhci_handle_command_timeout()
1661 spin_lock_irqsave(&xhci->lock, flags); in xhci_handle_command_timeout()
1667 if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) { in xhci_handle_command_timeout()
1668 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_handle_command_timeout()
1672 xhci->current_cmd->status = COMP_COMMAND_ABORTED; in xhci_handle_command_timeout()
1675 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_handle_command_timeout()
1677 xhci_hc_died(xhci); in xhci_handle_command_timeout()
1681 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && in xhci_handle_command_timeout()
1684 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; in xhci_handle_command_timeout()
1685 xhci_dbg(xhci, "Command timeout\n"); in xhci_handle_command_timeout()
1686 xhci_abort_cmd_ring(xhci, flags); in xhci_handle_command_timeout()
1691 if (xhci->xhc_state & XHCI_STATE_REMOVING) { in xhci_handle_command_timeout()
1692 xhci_dbg(xhci, "host removed, ring start fail?\n"); in xhci_handle_command_timeout()
1693 xhci_cleanup_command_queue(xhci); in xhci_handle_command_timeout()
1699 xhci_dbg(xhci, "Command timeout on stopped ring\n"); in xhci_handle_command_timeout()
1700 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); in xhci_handle_command_timeout()
1703 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_handle_command_timeout()
1707 static void handle_cmd_completion(struct xhci_hcd *xhci, in handle_cmd_completion() argument
1719 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); in handle_cmd_completion()
1724 cmd_trb = xhci->cmd_ring->dequeue; in handle_cmd_completion()
1726 trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic); in handle_cmd_completion()
1728 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, in handle_cmd_completion()
1735 xhci_warn(xhci, in handle_cmd_completion()
1740 cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list); in handle_cmd_completion()
1742 cancel_delayed_work(&xhci->cmd_timer); in handle_cmd_completion()
1748 complete_all(&xhci->cmd_ring_stop_completion); in handle_cmd_completion()
1752 if (cmd->command_trb != xhci->cmd_ring->dequeue) { in handle_cmd_completion()
1753 xhci_err(xhci, in handle_cmd_completion()
1765 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in handle_cmd_completion()
1767 if (xhci->current_cmd == cmd) in handle_cmd_completion()
1768 xhci->current_cmd = NULL; in handle_cmd_completion()
1776 xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code); in handle_cmd_completion()
1779 xhci_handle_cmd_disable_slot(xhci, slot_id); in handle_cmd_completion()
1783 xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code); in handle_cmd_completion()
1788 xhci_handle_cmd_addr_dev(xhci, slot_id); in handle_cmd_completion()
1794 xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, in handle_cmd_completion()
1800 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code); in handle_cmd_completion()
1810 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); in handle_cmd_completion()
1818 xhci_handle_cmd_reset_dev(xhci, slot_id); in handle_cmd_completion()
1821 xhci_handle_cmd_nec_get_fw(xhci, event); in handle_cmd_completion()
1825 xhci_info(xhci, "INFO unknown command type %d\n", cmd_type); in handle_cmd_completion()
1830 if (!list_is_singular(&xhci->cmd_list)) { in handle_cmd_completion()
1831 xhci->current_cmd = list_first_entry(&cmd->cmd_list, in handle_cmd_completion()
1833 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); in handle_cmd_completion()
1834 } else if (xhci->current_cmd == cmd) { in handle_cmd_completion()
1835 xhci->current_cmd = NULL; in handle_cmd_completion()
1841 inc_deq(xhci, xhci->cmd_ring); in handle_cmd_completion()
1844 static void handle_vendor_event(struct xhci_hcd *xhci, in handle_vendor_event() argument
1847 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); in handle_vendor_event()
1848 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) in handle_vendor_event()
1849 handle_cmd_completion(xhci, &event->event_cmd); in handle_vendor_event()
1852 static void handle_device_notification(struct xhci_hcd *xhci, in handle_device_notification() argument
1859 if (!xhci->devs[slot_id]) { in handle_device_notification()
1860 xhci_warn(xhci, "Device Notification event for " in handle_device_notification()
1865 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n", in handle_device_notification()
1867 udev = xhci->devs[slot_id]->udev; in handle_device_notification()
1884 static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci) in xhci_cavium_reset_phy_quirk() argument
1886 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_cavium_reset_phy_quirk()
1901 static void handle_port_status(struct xhci_hcd *xhci, in handle_port_status() argument
1916 xhci_warn(xhci, in handle_port_status()
1920 max_ports = HCS_MAX_PORTS(xhci->hcs_params1); in handle_port_status()
1923 xhci_warn(xhci, "Port change event with invalid port ID %d\n", in handle_port_status()
1925 inc_deq(xhci, xhci->event_ring); in handle_port_status()
1929 port = &xhci->hw_ports[port_id - 1]; in handle_port_status()
1931 xhci_warn(xhci, "Port change event, no port for port ID %u\n", in handle_port_status()
1938 if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) { in handle_port_status()
1939 xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n"); in handle_port_status()
1949 xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n", in handle_port_status()
1955 xhci_dbg(xhci, "resume root hub\n"); in handle_port_status()
1961 slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1); in handle_port_status()
1962 if (slot_id && xhci->devs[slot_id]) in handle_port_status()
1963 xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR; in handle_port_status()
1967 xhci_dbg(xhci, "port resume event for port %d\n", port_id); in handle_port_status()
1969 cmd_reg = readl(&xhci->op_regs->command); in handle_port_status()
1971 xhci_warn(xhci, "xHC is not running.\n"); in handle_port_status()
1976 xhci_dbg(xhci, "remote wake SS port %d\n", port_id); in handle_port_status()
1982 xhci_test_and_clear_bit(xhci, port, PORT_PLC); in handle_port_status()
1984 xhci_set_link_state(xhci, port, XDEV_U0); in handle_port_status()
1991 xhci_dbg(xhci, "resume HS port %d\n", port_id); in handle_port_status()
2012 xhci_dbg(xhci, "resume SS port %d finished\n", port_id); in handle_port_status()
2021 slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1); in handle_port_status()
2022 if (slot_id && xhci->devs[slot_id]) in handle_port_status()
2023 xhci_ring_device(xhci, slot_id); in handle_port_status()
2025 xhci_test_and_clear_bit(xhci, port, PORT_PLC); in handle_port_status()
2047 xhci_test_and_clear_bit(xhci, port, PORT_PLC); in handle_port_status()
2048 if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) && in handle_port_status()
2050 xhci_cavium_reset_phy_quirk(xhci); in handle_port_status()
2055 inc_deq(xhci, xhci->event_ring); in handle_port_status()
2071 xhci_dbg(xhci, "%s: starting usb%d port polling.\n", in handle_port_status()
2074 spin_unlock(&xhci->lock); in handle_port_status()
2077 spin_lock(&xhci->lock); in handle_port_status()
2086 struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, in trb_in_td() argument
2111 xhci_warn(xhci, in trb_in_td()
2147 static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, in xhci_clear_hub_tt_buffer() argument
2155 (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) && in xhci_clear_hub_tt_buffer()
2170 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, in xhci_requires_manual_halt_cleanup() argument
2190 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) in xhci_is_vendor_info_code() argument
2196 xhci_dbg(xhci, "Vendor defined info completion code %u\n", in xhci_is_vendor_info_code()
2198 xhci_dbg(xhci, "Treating code as success.\n"); in xhci_is_vendor_info_code()
2204 static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in finish_td() argument
2211 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); in finish_td()
2245 xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n", in finish_td()
2254 xhci_clear_hub_tt_buffer(xhci, td, ep); in finish_td()
2255 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, in finish_td()
2270 xhci_clear_hub_tt_buffer(xhci, td, ep); in finish_td()
2272 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, in finish_td()
2285 xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n"); in finish_td()
2290 inc_deq(xhci, ep_ring); in finish_td()
2292 return xhci_td_cleanup(xhci, td, ep_ring, td->status); in finish_td()
2296 static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, in sum_trb_lengths() argument
2303 for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) { in sum_trb_lengths()
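
That for-loop is essentially the whole of sum_trb_lengths(): walk from the ring's dequeue to the TRB named by the event, totalling the TRB_LEN field of every real transfer TRB while skipping link and no-op TRBs. Reconstructed body, with helper and macro names per xhci.h:

        for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb))
                if (!trb_is_noop(trb) && !trb_is_link(trb))
                        sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
        return sum;
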
2313 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_ctrl_td() argument
2323 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); in process_ctrl_td()
2331 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", in process_ctrl_td()
2345 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); in process_ctrl_td()
2360 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", in process_ctrl_td()
2367 if (!xhci_requires_manual_halt_cleanup(xhci, in process_ctrl_td()
2370 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", in process_ctrl_td()
2394 xhci_dbg(xhci, "Waiting for status stage event\n"); in process_ctrl_td()
2403 return finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_ctrl_td()
2409 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_isoc_td() argument
2436 if (xhci->quirks & XHCI_TRUST_TX_LENGTH) in process_isoc_td()
2481 frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + in process_isoc_td()
2488 return finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_isoc_td()
2491 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, in skip_isoc_td() argument
2512 inc_deq(xhci, ep->ring); in skip_isoc_td()
2514 return xhci_td_cleanup(xhci, td, ep->ring, status); in skip_isoc_td()
2520 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_bulk_intr_td() argument
2528 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); in process_bulk_intr_td()
2539 xhci_warn(xhci, "WARN Successful completion on short TX\n"); in process_bulk_intr_td()
2540 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", in process_bulk_intr_td()
2547 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", in process_bulk_intr_td()
2561 if (xhci->quirks & XHCI_NO_SOFT_RETRY || in process_bulk_intr_td()
2568 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, in process_bulk_intr_td()
2580 sum_trb_lengths(xhci, ep_ring, ep_trb) + in process_bulk_intr_td()
2584 xhci_warn(xhci, "bad transfer trb length %d in event trb\n", in process_bulk_intr_td()
2589 return finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_bulk_intr_td()
2597 static int handle_tx_event(struct xhci_hcd *xhci, in handle_tx_event() argument
2620 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in handle_tx_event()
2622 xhci_err(xhci, "ERROR Invalid Transfer event\n"); in handle_tx_event()
2627 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in handle_tx_event()
2630 xhci_err(xhci, in handle_tx_event()
2643 xhci_handle_halted_endpoint(xhci, ep, 0, NULL, in handle_tx_event()
2651 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", in handle_tx_event()
2671 if (xhci->quirks & XHCI_TRUST_TX_LENGTH || in handle_tx_event()
2675 xhci_warn_ratelimited(xhci, in handle_tx_event()
2683 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n", in handle_tx_event()
2687 xhci_dbg(xhci, in handle_tx_event()
2692 xhci_dbg(xhci, in handle_tx_event()
2698 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id, in handle_tx_event()
2703 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n", in handle_tx_event()
2708 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n", in handle_tx_event()
2713 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n", in handle_tx_event()
2719 xhci_warn(xhci, in handle_tx_event()
2726 xhci_warn(xhci, in handle_tx_event()
2732 xhci_warn(xhci, in handle_tx_event()
2737 xhci_warn(xhci, in handle_tx_event()
2747 xhci_dbg(xhci, "underrun event on endpoint\n"); in handle_tx_event()
2749 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " in handle_tx_event()
2755 xhci_dbg(xhci, "overrun event on endpoint\n"); in handle_tx_event()
2757 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " in handle_tx_event()
2770 xhci_dbg(xhci, in handle_tx_event()
2776 xhci_dbg(xhci, in handle_tx_event()
2783 xhci_warn(xhci, in handle_tx_event()
2789 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { in handle_tx_event()
2793 xhci_warn(xhci, in handle_tx_event()
2815 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", in handle_tx_event()
2821 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n", in handle_tx_event()
2825 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, in handle_tx_event()
2827 xhci_handle_halted_endpoint(xhci, ep, in handle_tx_event()
2838 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n", in handle_tx_event()
2849 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, in handle_tx_event()
2872 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && in handle_tx_event()
2878 xhci_err(xhci, in handle_tx_event()
2883 trb_in_td(xhci, ep_ring->deq_seg, in handle_tx_event()
2889 skip_isoc_td(xhci, td, ep, status); in handle_tx_event()
2898 xhci_dbg(xhci, in handle_tx_event()
2920 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, in handle_tx_event()
2922 xhci_handle_halted_endpoint(xhci, ep, in handle_tx_event()
2932 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2934 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2936 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2947 inc_deq(xhci, xhci->event_ring); in handle_tx_event()
2960 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", in handle_tx_event()
2962 xhci->event_ring->deq_seg, in handle_tx_event()
2963 xhci->event_ring->dequeue), in handle_tx_event()
2977 static int xhci_handle_event(struct xhci_hcd *xhci) in xhci_handle_event() argument
2985 if (!xhci->event_ring || !xhci->event_ring->dequeue) { in xhci_handle_event()
2986 xhci_err(xhci, "ERROR event ring not ready\n"); in xhci_handle_event()
2990 event = xhci->event_ring->dequeue; in xhci_handle_event()
2993 xhci->event_ring->cycle_state) in xhci_handle_event()
2996 trace_xhci_handle_event(xhci->event_ring, &event->generic); in xhci_handle_event()
3008 handle_cmd_completion(xhci, &event->event_cmd); in xhci_handle_event()
3011 handle_port_status(xhci, event); in xhci_handle_event()
3015 ret = handle_tx_event(xhci, &event->trans_event); in xhci_handle_event()
3020 handle_device_notification(xhci, event); in xhci_handle_event()
3024 handle_vendor_event(xhci, event, trb_type); in xhci_handle_event()
3026 xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type); in xhci_handle_event()
3031 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_handle_event()
3032 xhci_dbg(xhci, "xHCI host dying, returning from " in xhci_handle_event()
3039 inc_deq(xhci, xhci->event_ring); in xhci_handle_event()
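
The dequeue fetch and the cycle_state comparison above are the consumer-side ownership test: an event TRB is software's only once its cycle bit matches the event ring's cycle state. Roughly:

        if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
            xhci->event_ring->cycle_state)
                return 0;       /* xHC still owns this slot; ring is empty */
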
3052 static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, in xhci_update_erst_dequeue() argument
3058 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_update_erst_dequeue()
3060 if (event_ring_deq != xhci->event_ring->dequeue) { in xhci_update_erst_dequeue()
3061 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, in xhci_update_erst_dequeue()
3062 xhci->event_ring->dequeue); in xhci_update_erst_dequeue()
3064 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n"); in xhci_update_erst_dequeue()
3080 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); in xhci_update_erst_dequeue()
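
That 64-bit write does double duty: it advances the hardware's view of the event ring dequeue pointer and clears the Event Handler Busy flag, which is write-1-to-clear. A hedged sketch of the bit splicing (ERST_PTR_MASK covers the register's low flag bits):

        temp_64 &= ERST_PTR_MASK;               /* keep DESI/EHB flag bits */
        temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
        temp_64 |= ERST_EHB;                    /* RW1C: clear handler busy */
        xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
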
3090 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_irq() local
3097 spin_lock(&xhci->lock); in xhci_irq()
3099 status = readl(&xhci->op_regs->status); in xhci_irq()
3101 xhci_hc_died(xhci); in xhci_irq()
3110 xhci_warn(xhci, "WARNING: Host System Error\n"); in xhci_irq()
3111 xhci_halt(xhci); in xhci_irq()
3122 writel(status, &xhci->op_regs->status); in xhci_irq()
3126 irq_pending = readl(&xhci->ir_set->irq_pending); in xhci_irq()
3128 writel(irq_pending, &xhci->ir_set->irq_pending); in xhci_irq()
3131 if (xhci->xhc_state & XHCI_STATE_DYING || in xhci_irq()
3132 xhci->xhc_state & XHCI_STATE_HALTED) { in xhci_irq()
3133 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " in xhci_irq()
3138 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_irq()
3139 xhci_write_64(xhci, temp_64 | ERST_EHB, in xhci_irq()
3140 &xhci->ir_set->erst_dequeue); in xhci_irq()
3145 event_ring_deq = xhci->event_ring->dequeue; in xhci_irq()
3149 while (xhci_handle_event(xhci) > 0) { in xhci_irq()
3152 xhci_update_erst_dequeue(xhci, event_ring_deq); in xhci_irq()
3153 event_ring_deq = xhci->event_ring->dequeue; in xhci_irq()
3156 if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN) in xhci_irq()
3157 xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2; in xhci_irq()
3162 xhci_update_erst_dequeue(xhci, event_ring_deq); in xhci_irq()
3166 spin_unlock(&xhci->lock); in xhci_irq()
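
Two acknowledgements bracket the event loop in xhci_irq(), both write-1-to-clear: USBSTS.EINT via the status write, and, when the host is not using MSI, IMAN.IP via the irq_pending write. Condensed from the matches above:

        status |= STS_EINT;
        writel(status, &xhci->op_regs->status);          /* ack USBSTS.EINT */
        irq_pending = readl(&xhci->ir_set->irq_pending);
        irq_pending |= IMAN_IP;
        writel(irq_pending, &xhci->ir_set->irq_pending); /* ack IMAN.IP */
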
3185 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, in queue_trb() argument
3201 inc_enq(xhci, ring, more_trbs_coming); in queue_trb()
3208 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, in prepare_ring() argument
3221 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); in prepare_ring()
3224 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); in prepare_ring()
3229 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); in prepare_ring()
3235 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); in prepare_ring()
3244 if (room_on_ring(xhci, ep_ring, num_trbs)) in prepare_ring()
3247 if (ep_ring == xhci->cmd_ring) { in prepare_ring()
3248 xhci_err(xhci, "Do not support expand command ring\n"); in prepare_ring()
3252 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, in prepare_ring()
3255 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, in prepare_ring()
3257 xhci_err(xhci, "Ring expansion failed\n"); in prepare_ring()
3266 if (!xhci_link_trb_quirk(xhci) && in prepare_ring()
3268 (xhci->quirks & XHCI_AMD_0x96_HOST))) in prepare_ring()
3287 xhci_warn(xhci, "Ring is an endless link TRB loop\n"); in prepare_ring()
3293 xhci_warn(xhci, "Missing link TRB at end of ring segment\n"); in prepare_ring()
3300 static int prepare_transfer(struct xhci_hcd *xhci, in prepare_transfer() argument
3313 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in prepare_transfer()
3315 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index, in prepare_transfer()
3318 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", in prepare_transfer()
3323 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), in prepare_transfer()
3407 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, in giveback_first_trb() argument
3420 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); in giveback_first_trb()
3423 static void check_interval(struct xhci_hcd *xhci, struct urb *urb, in check_interval() argument
3459 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_intr_tx() argument
3464 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index); in xhci_queue_intr_tx()
3465 check_interval(xhci, urb, ep_ctx); in xhci_queue_intr_tx()
3467 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_intr_tx()
3490 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, in xhci_td_remainder() argument
3497 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) in xhci_td_remainder()
3506 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) in xhci_td_remainder()
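
Worked example of the TD Size math behind those two version checks: on a 1.0+ host, a 3000-byte TD on a 512-byte max-packet endpoint has DIV_ROUND_UP(3000, 512) = 6 packets; after a first TRB covering 1024 bytes, the remainder written into that TRB is 6 - 1024/512 = 4 packets still outstanding. Pre-1.0 hosts (without the MTK quirk) instead report (td_total_len - transferred) >> 10.
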
3517 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, in xhci_align_td() argument
3520 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; in xhci_align_td()
3533 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n", in xhci_align_td()
3539 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len); in xhci_align_td()
3559 xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", in xhci_align_td()
3574 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n"); in xhci_align_td()
3581 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len); in xhci_align_td()
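
The bounce buffer is the fallback; xhci_align_td() first tries to restore alignment by shortening the last normal TRB so the running total lands on a max-packet boundary. A hedged sketch of that split test, where enqd_len is the byte count already queued for this TD:

        max_pkt = usb_endpoint_maxp(&urb->ep->desc);
        unalign = (enqd_len + *trb_buff_len) % max_pkt;
        if (unalign == 0)
                return 0;               /* already packet-aligned */
        if (*trb_buff_len > unalign) {
                *trb_buff_len -= unalign;       /* split; no bounce needed */
                return 0;
        }
        /* otherwise fall through to the bounce-buffer copy shown above */
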
3587 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_bulk_tx() argument
3605 ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_bulk_tx()
3622 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3671 if (xhci_align_td(xhci, urb, enqd_len, in xhci_queue_bulk_tx()
3699 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len, in xhci_queue_bulk_tx()
3706 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt, in xhci_queue_bulk_tx()
3731 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3737 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); in xhci_queue_bulk_tx()
3742 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3748 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_ctrl_tx() argument
3761 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_ctrl_tx()
3781 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_ctrl_tx()
3808 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { in xhci_queue_ctrl_tx()
3817 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3844 remainder = xhci_td_remainder(xhci, 0, in xhci_queue_ctrl_tx()
3853 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3870 queue_trb(xhci, ep_ring, false, in xhci_queue_ctrl_tx()
3877 giveback_first_trb(xhci, slot_id, ep_index, 0, in xhci_queue_ctrl_tx()
3890 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, in xhci_get_burst_count() argument
3895 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER) in xhci_get_burst_count()
3910 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, in xhci_get_last_burst_packet_count() argument
3916 if (xhci->hci_version < 0x100) in xhci_get_last_burst_packet_count()
3942 static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, in xhci_get_isoc_frame_id() argument
3962 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_get_isoc_frame_id()
3963 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_get_isoc_frame_id()
3979 current_frame_id = readl(&xhci->run_regs->microframe_index); in xhci_get_isoc_frame_id()
3987 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n", in xhci_get_isoc_frame_id()
3988 __func__, index, readl(&xhci->run_regs->microframe_index), in xhci_get_isoc_frame_id()
4016 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n", in xhci_get_isoc_frame_id()
4019 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n"); in xhci_get_isoc_frame_id()
4027 static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i) in trb_block_event_intr() argument
4029 if (xhci->hci_version < 0x100) in trb_block_event_intr()
4038 if (i && xhci->quirks & XHCI_AVOID_BEI) in trb_block_event_intr()
4039 return !!(i % xhci->isoc_bei_interval); in trb_block_event_intr()
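
Read together with the surrounding checks (not all of which match "xhci"): pre-1.0 hosts never set BEI, the final TD of an URB always interrupts so the URB can complete, and on XHCI_AVOID_BEI hosts the modulo above leaves BEI clear on every isoc_bei_interval-th TD so the event ring gets drained; xhci_irq() halves that interval when it finds the ring backing up (see the AVOID_BEI_INTERVAL_MIN lines above).
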
4045 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx() argument
4063 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx()
4064 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; in xhci_queue_isoc_tx()
4068 xhci_dbg(xhci, "Isoc URB with zero packets?\n"); in xhci_queue_isoc_tx()
4093 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count); in xhci_queue_isoc_tx()
4094 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci, in xhci_queue_isoc_tx()
4099 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, in xhci_queue_isoc_tx()
4111 HCC_CFC(xhci->hcc_params)) { in xhci_queue_isoc_tx()
4112 frame_id = xhci_get_isoc_frame_id(xhci, urb, i); in xhci_queue_isoc_tx()
4152 if (trb_block_event_intr(xhci, num_tds, i)) in xhci_queue_isoc_tx()
4161 remainder = xhci_td_remainder(xhci, running_total, in xhci_queue_isoc_tx()
4175 queue_trb(xhci, ep_ring, more_trbs_coming, in xhci_queue_isoc_tx()
4188 xhci_err(xhci, "ISOC TD length unmatch\n"); in xhci_queue_isoc_tx()
4195 if (HCC_CFC(xhci->hcc_params)) in xhci_queue_isoc_tx()
4198 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_queue_isoc_tx()
4199 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_queue_isoc_tx()
4202 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; in xhci_queue_isoc_tx()
4204 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_isoc_tx()
4220 td_to_noop(xhci, ep_ring, &urb_priv->td[0], true); in xhci_queue_isoc_tx()
4238 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx_prepare() argument
4250 xdev = xhci->devs[slot_id]; in xhci_queue_isoc_tx_prepare()
4251 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx_prepare()
4253 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in xhci_queue_isoc_tx_prepare()
4263 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), in xhci_queue_isoc_tx_prepare()
4272 check_interval(xhci, urb, ep_ctx); in xhci_queue_isoc_tx_prepare()
4275 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { in xhci_queue_isoc_tx_prepare()
4282 start_frame = readl(&xhci->run_regs->microframe_index); in xhci_queue_isoc_tx_prepare()
4288 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_queue_isoc_tx_prepare()
4289 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_queue_isoc_tx_prepare()
4310 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_isoc_tx_prepare()
4323 static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in queue_command() argument
4327 int reserved_trbs = xhci->cmd_ring_reserved_trbs; in queue_command()
4330 if ((xhci->xhc_state & XHCI_STATE_DYING) || in queue_command()
4331 (xhci->xhc_state & XHCI_STATE_HALTED)) { in queue_command()
4332 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); in queue_command()
4339 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, in queue_command()
4342 xhci_err(xhci, "ERR: No room for command on command ring\n"); in queue_command()
4344 xhci_err(xhci, "ERR: Reserved TRB counting for " in queue_command()
4349 cmd->command_trb = xhci->cmd_ring->enqueue; in queue_command()
4352 if (list_empty(&xhci->cmd_list)) { in queue_command()
4353 xhci->current_cmd = cmd; in queue_command()
4354 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); in queue_command()
4357 list_add_tail(&cmd->cmd_list, &xhci->cmd_list); in queue_command()
4359 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, in queue_command()
4360 field4 | xhci->cmd_ring->cycle_state); in queue_command()
4365 int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_slot_control() argument
4368 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_slot_control()
4373 int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_address_device() argument
4376 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_address_device()
4382 int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_vendor_command() argument
4385 return queue_command(xhci, cmd, field1, field2, field3, field4, false); in xhci_queue_vendor_command()
4389 int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_device() argument
4392 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_device()
4398 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, in xhci_queue_configure_endpoint() argument
4402 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_configure_endpoint()
4409 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_evaluate_context() argument
4412 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_evaluate_context()
4422 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_stop_endpoint() argument
4430 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_stop_endpoint()
4435 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_ep() argument
4446 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_ep()
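
For context, a typical caller strings the helpers above together like this (hedged sketch; passing true to xhci_alloc_command() allocates a completion to wait on, and the locking around queueing is elided):

        cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;
        ret = xhci_queue_reset_ep(xhci, cmd, slot_id, ep_index, EP_HARD_RESET);
        if (!ret) {
                xhci_ring_cmd_db(xhci);                 /* start the xHC fetching */
                wait_for_completion(cmd->completion);   /* signalled from the command completion path */
        }
        xhci_free_command(xhci, cmd);
        return ret;
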