Lines Matching +full:ps +full:- +full:speed

1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (c) 2001-2004 by David Brownell
4 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
7 /* this file is part of ehci-hcd.c */
9 /*-------------------------------------------------------------------------*/
21 * pre-calculated schedule data to make appending to the queue be quick.
27 * periodic_next_shadow - return "next" pointer on shadow list
37 return &periodic->qh->qh_next; in periodic_next_shadow()
39 return &periodic->fstn->fstn_next; in periodic_next_shadow()
41 return &periodic->itd->itd_next; in periodic_next_shadow()
44 return &periodic->sitd->sitd_next; in periodic_next_shadow()
55 return &periodic->qh->hw->hw_next; in shadow_next_periodic()
58 return periodic->hw_next; in shadow_next_periodic()
62 /* caller must hold ehci->lock */
65 union ehci_shadow *prev_p = &ehci->pshadow[frame]; in periodic_unlink()
66 __hc32 *hw_p = &ehci->periodic[frame]; in periodic_unlink()
87 if (!ehci->use_dummy_qh || in periodic_unlink()
93 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma); in periodic_unlink()
96 /*-------------------------------------------------------------------------*/
103 struct usb_tt *utt = udev->tt; in find_tt()
117 if (utt->multi) { in find_tt()
118 tt_index = utt->hcpriv; in find_tt()
120 tt_index = kcalloc(utt->hub->maxchild, in find_tt()
124 return ERR_PTR(-ENOMEM); in find_tt()
125 utt->hcpriv = tt_index; in find_tt()
128 port = udev->ttport - 1; in find_tt()
132 ptt = (struct ehci_tt **) &utt->hcpriv; in find_tt()
138 hcd_to_ehci(bus_to_hcd(udev->bus)); in find_tt()
143 utt->hcpriv = NULL; in find_tt()
146 return ERR_PTR(-ENOMEM); in find_tt()
148 list_add_tail(&tt->tt_list, &ehci->tt_list); in find_tt()
149 INIT_LIST_HEAD(&tt->ps_list); in find_tt()
150 tt->usb_tt = utt; in find_tt()
151 tt->tt_port = port; in find_tt()
161 struct usb_tt *utt = udev->tt; in drop_tt()
165 if (!utt || !utt->hcpriv) in drop_tt()
169 if (utt->multi) { in drop_tt()
170 tt_index = utt->hcpriv; in drop_tt()
171 ptt = &tt_index[udev->ttport - 1]; in drop_tt()
174 for (i = 0; i < utt->hub->maxchild; ++i) in drop_tt()
178 ptt = (struct ehci_tt **) &utt->hcpriv; in drop_tt()
182 if (!tt || !list_empty(&tt->ps_list)) in drop_tt()
185 list_del(&tt->tt_list); in drop_tt()
189 utt->hcpriv = NULL; in drop_tt()
195 struct ehci_per_sched *ps) in bandwidth_dbg() argument
197 dev_dbg(&ps->udev->dev, in bandwidth_dbg()
199 ps->ep->desc.bEndpointAddress, in bandwidth_dbg()
201 (ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod, in bandwidth_dbg()
202 ps->phase, ps->phase_uf, ps->period, in bandwidth_dbg()
203 ps->usecs, ps->c_usecs, ps->cs_mask); in bandwidth_dbg()
211 int usecs = qh->ps.usecs; in reserve_release_intr_bandwidth()
212 int c_usecs = qh->ps.c_usecs; in reserve_release_intr_bandwidth()
213 int tt_usecs = qh->ps.tt_usecs; in reserve_release_intr_bandwidth()
216 if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */ in reserve_release_intr_bandwidth()
218 start_uf = qh->ps.bw_phase << 3; in reserve_release_intr_bandwidth()
220 bandwidth_dbg(ehci, sign, "intr", &qh->ps); in reserve_release_intr_bandwidth()
223 usecs = -usecs; in reserve_release_intr_bandwidth()
224 c_usecs = -c_usecs; in reserve_release_intr_bandwidth()
225 tt_usecs = -tt_usecs; in reserve_release_intr_bandwidth()
228 /* Entire transaction (high speed) or start-split (full/low speed) */ in reserve_release_intr_bandwidth()
229 for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE; in reserve_release_intr_bandwidth()
230 i += qh->ps.bw_uperiod) in reserve_release_intr_bandwidth()
231 ehci->bandwidth[i] += usecs; in reserve_release_intr_bandwidth()
233 /* Complete-split (full/low speed) */ in reserve_release_intr_bandwidth()
234 if (qh->ps.c_usecs) { in reserve_release_intr_bandwidth()
237 i += qh->ps.bw_uperiod) { in reserve_release_intr_bandwidth()
239 if (qh->ps.cs_mask & m) in reserve_release_intr_bandwidth()
240 ehci->bandwidth[i+j] += c_usecs; in reserve_release_intr_bandwidth()
247 tt = find_tt(qh->ps.udev); in reserve_release_intr_bandwidth()
249 list_add_tail(&qh->ps.ps_list, &tt->ps_list); in reserve_release_intr_bandwidth()
251 list_del(&qh->ps.ps_list); in reserve_release_intr_bandwidth()
254 i += qh->ps.bw_period) in reserve_release_intr_bandwidth()
255 tt->bandwidth[i] += tt_usecs; in reserve_release_intr_bandwidth()
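
The loops above charge the per-microframe bandwidth table at a fixed phase within the endpoint's bandwidth period (and, for full/low-speed endpoints, also the complete-split microframes and the per-frame TT load); releasing repeats the same walk with negated times. A standalone sketch of just the high-speed branch follows, with an invented table size and made-up endpoint numbers.

#include <stdio.h>

#define DEMO_BW_SIZE	64	/* stand-in for the driver's bandwidth table size */

/* mirror of the "start_uf + phase_uf, step bw_uperiod" walk above;
 * a negative usecs value releases the same slots again */
static void demo_reserve_intr(int bandwidth[DEMO_BW_SIZE],
			      unsigned bw_phase, unsigned phase_uf,
			      unsigned bw_uperiod, int usecs)
{
	unsigned start_uf = bw_phase << 3;
	unsigned i;

	for (i = start_uf + phase_uf; i < DEMO_BW_SIZE; i += bw_uperiod)
		bandwidth[i] += usecs;
}

int main(void)
{
	int bandwidth[DEMO_BW_SIZE] = { 0 };
	unsigned i;

	/* phase = frame 2, uframe 1 within it, repeating every 16 uframes */
	demo_reserve_intr(bandwidth, 2, 1, 16, 25);

	for (i = 0; i < DEMO_BW_SIZE; i++)
		if (bandwidth[i])
			printf("uframe %2u: %d us reserved\n", i, bandwidth[i]);
	return 0;
}
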
259 /*-------------------------------------------------------------------------*/
264 struct ehci_per_sched *ps; in compute_tt_budget() local
273 list_for_each_entry(ps, &tt->ps_list, ps_list) { in compute_tt_budget()
274 for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE; in compute_tt_budget()
275 uframe += ps->bw_uperiod) { in compute_tt_budget()
277 x = ps->tt_usecs; in compute_tt_budget()
280 for (uf = ps->phase_uf; uf < 8; ++uf) { in compute_tt_budget()
289 x -= 125; in compute_tt_budget()
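
compute_tt_budget() above spreads each full/low-speed transfer's time on the transaction translator across consecutive 125 us microframe slots, starting at the transfer's phase. Below is a simplified single-transfer sketch of that slicing; the array size and sample tt_usecs value are invented, and the real routine loops over every endpoint on the TT (the list_for_each_entry above) rather than a single one.

#include <stdio.h>

int main(void)
{
	unsigned budget[8] = { 0 };	/* one frame, 8 uframes */
	unsigned phase_uf = 1;		/* transfer starts in uframe 1 */
	int x = 300;			/* sample tt_usecs for this transfer */
	unsigned uf;

	/* consume 125 us per uframe until the remainder fits in one slot */
	for (uf = phase_uf; uf < 8 && x > 0; ++uf) {
		budget[uf] = x < 125 ? x : 125;
		x -= 125;
	}

	for (uf = 0; uf < 8; ++uf)
		printf("uframe %u: %u us of TT time\n", uf, budget[uf]);
	return 0;
}
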
298 if (!dev1->tt || !dev2->tt) in same_tt()
300 if (dev1->tt != dev2->tt) in same_tt()
302 if (dev1->tt->multi) in same_tt()
303 return dev1->ttport == dev2->ttport; in same_tt()
312 * The parameter is the mask of ssplits in "H-frame" terms
313 * and this returns the transfer start uframe in "B-frame" terms,
314 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
315 * will cause a transfer in "B-frame" uframe 0. "B-frames" lag
316 * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7.
327 return ffs(smask) - 1; in tt_start_uframe()
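
The comment above describes the H-frame/B-frame relationship; tt_start_uframe() reduces it to taking the lowest set bit of the start-split mask. A tiny demo of that mapping using POSIX ffs(); the sample mask is invented.

#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned smask = 0x04;	/* start-split scheduled with bit 2 set */

	/* lowest set bit -> the B-frame uframe in which the transfer runs */
	printf("transfer starts in B-frame uframe %d\n", ffs(smask) - 1);
	return 0;
}
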
340 tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i]; in carryover_tt_bandwidth()
354 * should be executed in "B-frame" terms, which is the same as the
355 * highspeed ssplit's uframe (which is in "H-frame" terms). For example
356 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
361 * in USB 2.0 spec section 11.18.1 fig 11-60.
369 struct ehci_per_sched *ps, in tt_available() argument
375 unsigned period = ps->bw_period; in tt_available()
376 unsigned usecs = ps->tt_usecs; in tt_available()
381 for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES; in tt_available()
386 if (tt->bandwidth[frame] + usecs > 900) in tt_available()
391 tt_usecs[i] = ehci->tt_budget[uf]; in tt_available()
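
tt_available() above rejects a candidate frame as soon as the transaction translator's running total plus the new transfer would exceed 900 us of the 1000 us frame. A minimal sketch of that admission rule; the helper name and sample numbers are invented.

#include <stdio.h>
#include <stdbool.h>

static bool demo_tt_frame_fits(unsigned reserved_us, unsigned new_us)
{
	return reserved_us + new_us <= 900;	/* 900 us per TT frame, as above */
}

int main(void)
{
	printf("850 + 40 us fits: %d\n", demo_tt_frame_fits(850, 40));	/* 1 */
	printf("880 + 40 us fits: %d\n", demo_tt_frame_fits(880, 40));	/* 0 */
	return 0;
}
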
442 for (; frame < ehci->periodic_size; frame += period) { in tt_no_collision()
447 here = ehci->pshadow[frame]; in tt_no_collision()
448 type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]); in tt_no_collision()
452 type = Q_NEXT_TYPE(ehci, here.itd->hw_next); in tt_no_collision()
453 here = here.itd->itd_next; in tt_no_collision()
456 hw = here.qh->hw; in tt_no_collision()
457 if (same_tt(dev, here.qh->ps.udev)) { in tt_no_collision()
461 hw->hw_info2); in tt_no_collision()
467 type = Q_NEXT_TYPE(ehci, hw->hw_next); in tt_no_collision()
468 here = here.qh->qh_next; in tt_no_collision()
471 if (same_tt(dev, here.sitd->urb->dev)) { in tt_no_collision()
475 ->hw_uframe); in tt_no_collision()
481 type = Q_NEXT_TYPE(ehci, here.sitd->hw_next); in tt_no_collision()
482 here = here.sitd->sitd_next; in tt_no_collision()
502 /*-------------------------------------------------------------------------*/
506 if (ehci->periodic_count++) in enable_periodic()
510 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC); in enable_periodic()
519 if (--ehci->periodic_count) in disable_periodic()
526 /*-------------------------------------------------------------------------*/
537 unsigned period = qh->ps.period; in qh_link_periodic()
539 dev_dbg(&qh->ps.udev->dev, in qh_link_periodic()
540 "link qh%d-%04x/%p start %d [%d/%d us]\n", in qh_link_periodic()
541 period, hc32_to_cpup(ehci, &qh->hw->hw_info2) in qh_link_periodic()
543 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs); in qh_link_periodic()
549 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) { in qh_link_periodic()
550 union ehci_shadow *prev = &ehci->pshadow[i]; in qh_link_periodic()
551 __hc32 *hw_p = &ehci->periodic[i]; in qh_link_periodic()
565 /* sorting each branch by period (slow-->fast) in qh_link_periodic()
569 if (qh->ps.period > here.qh->ps.period) in qh_link_periodic()
571 prev = &here.qh->qh_next; in qh_link_periodic()
572 hw_p = &here.qh->hw->hw_next; in qh_link_periodic()
577 qh->qh_next = here; in qh_link_periodic()
579 qh->hw->hw_next = *hw_p; in qh_link_periodic()
581 prev->qh = qh; in qh_link_periodic()
582 *hw_p = QH_NEXT(ehci, qh->qh_dma); in qh_link_periodic()
585 qh->qh_state = QH_STATE_LINKED; in qh_link_periodic()
586 qh->xacterrs = 0; in qh_link_periodic()
587 qh->unlink_reason = 0; in qh_link_periodic()
589 /* update per-qh bandwidth for debugfs */ in qh_link_periodic()
590 ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period in qh_link_periodic()
591 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period) in qh_link_periodic()
592 : (qh->ps.usecs * 8); in qh_link_periodic()
594 list_add(&qh->intr_node, &ehci->intr_qh_list); in qh_link_periodic()
597 ++ehci->intr_count; in qh_link_periodic()
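
The bandwidth_allocated update above averages the endpoint's microseconds over its bandwidth period in frames, with a period of 0 standing for "every microframe". A worked standalone example of the same formula; the helper name and sample endpoints are invented.

#include <stdio.h>

static unsigned demo_avg_us_per_frame(unsigned usecs, unsigned c_usecs,
				      unsigned bw_period)
{
	return bw_period ? (usecs + c_usecs) / bw_period : usecs * 8;
}

int main(void)
{
	/* full/low-speed interrupt: 40 us SSPLIT + 20 us CSPLIT every 4 frames */
	printf("%u us/frame\n", demo_avg_us_per_frame(40, 20, 4));	/* 15 */

	/* high-speed interrupt polled every microframe */
	printf("%u us/frame\n", demo_avg_us_per_frame(10, 0, 0));	/* 80 */
	return 0;
}
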
607 * If qh is for a low/full-speed device, simply unlinking it in qh_unlink_periodic()
622 period = qh->ps.period ? : 1; in qh_unlink_periodic()
624 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) in qh_unlink_periodic()
627 /* update per-qh bandwidth for debugfs */ in qh_unlink_periodic()
628 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period in qh_unlink_periodic()
629 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period) in qh_unlink_periodic()
630 : (qh->ps.usecs * 8); in qh_unlink_periodic()
632 dev_dbg(&qh->ps.udev->dev, in qh_unlink_periodic()
633 "unlink qh%d-%04x/%p start %d [%d/%d us]\n", in qh_unlink_periodic()
634 qh->ps.period, in qh_unlink_periodic()
635 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK), in qh_unlink_periodic()
636 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs); in qh_unlink_periodic()
638 /* qh->qh_next still "live" to HC */ in qh_unlink_periodic()
639 qh->qh_state = QH_STATE_UNLINK; in qh_unlink_periodic()
640 qh->qh_next.ptr = NULL; in qh_unlink_periodic()
642 if (ehci->qh_scan_next == qh) in qh_unlink_periodic()
643 ehci->qh_scan_next = list_entry(qh->intr_node.next, in qh_unlink_periodic()
645 list_del(&qh->intr_node); in qh_unlink_periodic()
650 if (qh->qh_state != QH_STATE_LINKED || in cancel_unlink_wait_intr()
651 list_empty(&qh->unlink_node)) in cancel_unlink_wait_intr()
654 list_del_init(&qh->unlink_node); in cancel_unlink_wait_intr()
665 if (qh->qh_state != QH_STATE_LINKED) in start_unlink_intr()
681 qh->unlink_cycle = ehci->intr_unlink_cycle; in start_unlink_intr()
684 list_add_tail(&qh->unlink_node, &ehci->intr_unlink); in start_unlink_intr()
686 if (ehci->intr_unlinking) in start_unlink_intr()
688 else if (ehci->rh_state < EHCI_RH_RUNNING) in start_unlink_intr()
690 else if (ehci->intr_unlink.next == &qh->unlink_node) { in start_unlink_intr()
692 ++ehci->intr_unlink_cycle; in start_unlink_intr()
704 qh->unlink_cycle = ehci->intr_unlink_wait_cycle; in start_unlink_intr_wait()
707 list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait); in start_unlink_intr_wait()
709 if (ehci->rh_state < EHCI_RH_RUNNING) in start_unlink_intr_wait()
711 else if (ehci->intr_unlink_wait.next == &qh->unlink_node) { in start_unlink_intr_wait()
713 ++ehci->intr_unlink_wait_cycle; in start_unlink_intr_wait()
719 struct ehci_qh_hw *hw = qh->hw; in end_unlink_intr()
722 qh->qh_state = QH_STATE_IDLE; in end_unlink_intr()
723 hw->hw_next = EHCI_LIST_END(ehci); in end_unlink_intr()
725 if (!list_empty(&qh->qtd_list)) in end_unlink_intr()
729 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) { in end_unlink_intr()
740 * FIXME kill the now-dysfunctional queued urbs in end_unlink_intr()
749 --ehci->intr_count; in end_unlink_intr()
753 /*-------------------------------------------------------------------------*/
769 usecs = ehci->uframe_periodic_max - usecs; in check_period()
773 if (ehci->bandwidth[uframe] > usecs) in check_period()
790 int retval = -ENOSPC; in check_intr_schedule()
793 if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */ in check_intr_schedule()
796 if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs)) in check_intr_schedule()
798 if (!qh->ps.c_usecs) { in check_intr_schedule()
805 if (tt_available(ehci, &qh->ps, tt, frame, uframe)) { in check_intr_schedule()
811 qh->ps.bw_uperiod, qh->ps.c_usecs)) in check_intr_schedule()
822 * We pessimize a bit; probably the typical full speed case in check_intr_schedule()
828 mask = 0x03 << (uframe + qh->gap_uf); in check_intr_schedule()
832 if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) { in check_intr_schedule()
833 if (!check_period(ehci, frame, uframe + qh->gap_uf + 1, in check_intr_schedule()
834 qh->ps.bw_uperiod, qh->ps.c_usecs)) in check_intr_schedule()
836 if (!check_period(ehci, frame, uframe + qh->gap_uf, in check_intr_schedule()
837 qh->ps.bw_uperiod, qh->ps.c_usecs)) in check_intr_schedule()
847 * or when the previous schedule slot can't be re-used.
854 struct ehci_qh_hw *hw = qh->hw; in qh_schedule()
857 hw->hw_next = EHCI_LIST_END(ehci); in qh_schedule()
860 if (qh->ps.phase != NO_FRAME) { in qh_schedule()
867 tt = find_tt(qh->ps.udev); in qh_schedule()
872 compute_tt_budget(ehci->tt_budget, tt); in qh_schedule()
878 if (qh->ps.bw_period) { in qh_schedule()
882 for (i = qh->ps.bw_period; i > 0; --i) { in qh_schedule()
883 frame = ++ehci->random_frame & (qh->ps.bw_period - 1); in qh_schedule()
892 /* qh->ps.bw_period == 0 means every uframe */ in qh_schedule()
900 qh->ps.phase = (qh->ps.period ? ehci->random_frame & in qh_schedule()
901 (qh->ps.period - 1) : 0); in qh_schedule()
902 qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1); in qh_schedule()
903 qh->ps.phase_uf = uframe; in qh_schedule()
904 qh->ps.cs_mask = qh->ps.period ? in qh_schedule()
908 /* reset S-frame and (maybe) C-frame masks */ in qh_schedule()
909 hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); in qh_schedule()
910 hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask); in qh_schedule()
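
qh_schedule() above picks a phase by masking a frame counter with (period - 1) and derives the bandwidth phase the same way. With power-of-two values, used here for illustration, that mask is an exact modulo; the numbers below are invented.

#include <stdio.h>

int main(void)
{
	unsigned random_frame = 27;
	unsigned period = 16;		/* endpoint polled every 16 frames */
	unsigned bw_period = 4;		/* bandwidth tracked over 4 frames */
	unsigned phase, bw_phase;

	phase = period ? random_frame & (period - 1) : 0;	/* 27 mod 16 = 11 */
	bw_phase = phase & (bw_period - 1);			/* 11 mod 4  = 3  */

	printf("phase %u, bw_phase %u\n", phase, bw_phase);
	return 0;
}
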
930 epnum = urb->ep->desc.bEndpointAddress; in intr_submit()
932 spin_lock_irqsave(&ehci->lock, flags); in intr_submit()
935 status = -ESHUTDOWN; in intr_submit()
944 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv); in intr_submit()
946 status = -ENOMEM; in intr_submit()
949 if (qh->qh_state == QH_STATE_IDLE) { in intr_submit()
956 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); in intr_submit()
960 if (qh->qh_state == QH_STATE_IDLE) { in intr_submit()
969 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++; in intr_submit()
975 spin_unlock_irqrestore(&ehci->lock, flags); in intr_submit()
986 list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list, in scan_intr()
990 if (!list_empty(&qh->qtd_list)) { in scan_intr()
995 * drops the lock. That's why ehci->qh_scan_next in scan_intr()
997 * gets unlinked then ehci->qh_scan_next is adjusted in scan_intr()
1003 else if (unlikely(list_empty(&qh->qtd_list) && in scan_intr()
1004 qh->qh_state == QH_STATE_LINKED)) in scan_intr()
1010 /*-------------------------------------------------------------------------*/
1021 INIT_LIST_HEAD(&stream->td_list); in iso_stream_alloc()
1022 INIT_LIST_HEAD(&stream->free_list); in iso_stream_alloc()
1023 stream->next_uframe = NO_FRAME; in iso_stream_alloc()
1024 stream->ps.phase = NO_FRAME; in iso_stream_alloc()
1038 struct usb_device *dev = urb->dev; in iso_stream_init()
1048 epnum = usb_pipeendpoint(urb->pipe); in iso_stream_init()
1049 is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0; in iso_stream_init()
1050 maxp = usb_endpoint_maxp(&urb->ep->desc); in iso_stream_init()
1054 if (dev->speed == USB_SPEED_HIGH) { in iso_stream_init()
1055 unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc); in iso_stream_init()
1057 stream->highspeed = 1; in iso_stream_init()
1062 stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum); in iso_stream_init()
1063 stream->buf1 = cpu_to_hc32(ehci, buf1); in iso_stream_init()
1064 stream->buf2 = cpu_to_hc32(ehci, multi); in iso_stream_init()
1069 stream->ps.usecs = HS_USECS_ISO(maxp); in iso_stream_init()
1073 1 << (urb->ep->desc.bInterval - 1)); in iso_stream_init()
1075 /* Allow urb->interval to override */ in iso_stream_init()
1076 stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval); in iso_stream_init()
1078 stream->uperiod = urb->interval; in iso_stream_init()
1079 stream->ps.period = urb->interval >> 3; in iso_stream_init()
1080 stream->bandwidth = stream->ps.usecs * 8 / in iso_stream_init()
1081 stream->ps.bw_uperiod; in iso_stream_init()
1088 addr = dev->ttport << 24; in iso_stream_init()
1090 || (dev->tt->hub != in iso_stream_init()
1091 ehci_to_hcd(ehci)->self.root_hub)) in iso_stream_init()
1092 addr |= dev->tt->hub->devnum << 16; in iso_stream_init()
1094 addr |= dev->devnum; in iso_stream_init()
1095 stream->ps.usecs = HS_USECS_ISO(maxp); in iso_stream_init()
1096 think_time = dev->tt->think_time; in iso_stream_init()
1097 stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time( in iso_stream_init()
1098 dev->speed, is_input, 1, maxp)); in iso_stream_init()
1104 stream->ps.c_usecs = stream->ps.usecs; in iso_stream_init()
1105 stream->ps.usecs = HS_USECS_ISO(1); in iso_stream_init()
1106 stream->ps.cs_mask = 1; in iso_stream_init()
1108 /* c-mask as specified in USB 2.0 11.18.4 3.c */ in iso_stream_init()
1109 tmp = (1 << (hs_transfers + 2)) - 1; in iso_stream_init()
1110 stream->ps.cs_mask |= tmp << (8 + 2); in iso_stream_init()
1112 stream->ps.cs_mask = smask_out[hs_transfers - 1]; in iso_stream_init()
1116 1 << (urb->ep->desc.bInterval - 1)); in iso_stream_init()
1118 /* Allow urb->interval to override */ in iso_stream_init()
1119 stream->ps.bw_period = min_t(unsigned, tmp, urb->interval); in iso_stream_init()
1120 stream->ps.bw_uperiod = stream->ps.bw_period << 3; in iso_stream_init()
1122 stream->ps.period = urb->interval; in iso_stream_init()
1123 stream->uperiod = urb->interval << 3; in iso_stream_init()
1124 stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) / in iso_stream_init()
1125 stream->ps.bw_period; in iso_stream_init()
1127 /* stream->splits gets created from cs_mask later */ in iso_stream_init()
1128 stream->address = cpu_to_hc32(ehci, addr); in iso_stream_init()
1131 stream->ps.udev = dev; in iso_stream_init()
1132 stream->ps.ep = urb->ep; in iso_stream_init()
1134 stream->bEndpointAddress = is_input | epnum; in iso_stream_init()
1135 stream->maxp = maxp; in iso_stream_init()
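
In the full-speed branch above, cs_mask packs both split masks into one word: the start-split mask sits in the low byte and the complete-split mask in the byte above it (the same layout later written into hw_info2 and stream->splits). The sketch below shows the "tmp << (8 + 2)" packing with a sample hs_transfers value; how the driver derives hs_transfers is not part of the matched lines.

#include <stdio.h>

int main(void)
{
	unsigned hs_transfers = 2;			/* sample value only */
	unsigned cs_mask = 1;				/* SSPLIT in uframe 0 */
	unsigned tmp = (1 << (hs_transfers + 2)) - 1;	/* 0x0f */

	cs_mask |= tmp << (8 + 2);			/* CSPLITs in uframes 2..5 */

	printf("packed mask 0x%04x (smask 0x%02x, cmask 0x%02x)\n",
	       cs_mask, cs_mask & 0xff, (cs_mask >> 8) & 0xff);
	return 0;
}
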
1146 epnum = usb_pipeendpoint (urb->pipe); in iso_stream_find()
1147 if (usb_pipein(urb->pipe)) in iso_stream_find()
1148 ep = urb->dev->ep_in[epnum]; in iso_stream_find()
1150 ep = urb->dev->ep_out[epnum]; in iso_stream_find()
1152 spin_lock_irqsave(&ehci->lock, flags); in iso_stream_find()
1153 stream = ep->hcpriv; in iso_stream_find()
1158 ep->hcpriv = stream; in iso_stream_find()
1162 /* if dev->ep [epnum] is a QH, hw is set */ in iso_stream_find()
1163 } else if (unlikely(stream->hw != NULL)) { in iso_stream_find()
1165 urb->dev->devpath, epnum, in iso_stream_find()
1166 usb_pipein(urb->pipe) ? "in" : "out"); in iso_stream_find()
1170 spin_unlock_irqrestore(&ehci->lock, flags); in iso_stream_find()
1174 /*-------------------------------------------------------------------------*/
1176 /* ehci_iso_sched ops can be ITD-only or SITD-only */
1187 INIT_LIST_HEAD(&iso_sched->td_list); in iso_sched_alloc()
1201 dma_addr_t dma = urb->transfer_dma; in itd_sched_init()
1204 iso_sched->span = urb->number_of_packets * stream->uperiod; in itd_sched_init()
1206 /* figure out per-uframe itd fields that we'll need later in itd_sched_init()
1209 for (i = 0; i < urb->number_of_packets; i++) { in itd_sched_init()
1210 struct ehci_iso_packet *uframe = &iso_sched->packet[i]; in itd_sched_init()
1215 length = urb->iso_frame_desc[i].length; in itd_sched_init()
1216 buf = dma + urb->iso_frame_desc[i].offset; in itd_sched_init()
1220 if (unlikely(((i + 1) == urb->number_of_packets)) in itd_sched_init()
1221 && !(urb->transfer_flags & URB_NO_INTERRUPT)) in itd_sched_init()
1224 uframe->transaction = cpu_to_hc32(ehci, trans); in itd_sched_init()
1227 uframe->bufp = (buf & ~(u64)0x0fff); in itd_sched_init()
1229 if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff)))) in itd_sched_init()
1230 uframe->cross = 1; in itd_sched_init()
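
itd_sched_init() above records each packet's 4 KiB page base in bufp and flags packets whose buffer spills into the next page; the listing skips the step in between that advances buf past the packet, which is why the two matched comparisons look identical. A standalone sketch of the page-crossing test with invented addresses and lengths:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool demo_crosses_page(uint64_t buf, unsigned length)
{
	uint64_t start_page = buf & ~(uint64_t)0x0fff;
	uint64_t end_page = (buf + length) & ~(uint64_t)0x0fff;

	return start_page != end_page;
}

int main(void)
{
	printf("%d\n", demo_crosses_page(0x12340f00, 0x080));	/* 0: stays in the page */
	printf("%d\n", demo_crosses_page(0x12340f00, 0x200));	/* 1: spills into the next page */
	return 0;
}
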
1242 /* caller must hold ehci->lock! */ in iso_sched_free()
1243 list_splice(&iso_sched->td_list, &stream->free_list); in iso_sched_free()
1262 sched = iso_sched_alloc(urb->number_of_packets, mem_flags); in itd_urb_transaction()
1264 return -ENOMEM; in itd_urb_transaction()
1268 if (urb->interval < 8) in itd_urb_transaction()
1269 num_itds = 1 + (sched->span + 7) / 8; in itd_urb_transaction()
1271 num_itds = urb->number_of_packets; in itd_urb_transaction()
1274 spin_lock_irqsave(&ehci->lock, flags); in itd_urb_transaction()
1281 if (likely(!list_empty(&stream->free_list))) { in itd_urb_transaction()
1282 itd = list_first_entry(&stream->free_list, in itd_urb_transaction()
1284 if (itd->frame == ehci->now_frame) in itd_urb_transaction()
1286 list_del(&itd->itd_list); in itd_urb_transaction()
1287 itd_dma = itd->itd_dma; in itd_urb_transaction()
1290 spin_unlock_irqrestore(&ehci->lock, flags); in itd_urb_transaction()
1291 itd = dma_pool_alloc(ehci->itd_pool, mem_flags, in itd_urb_transaction()
1293 spin_lock_irqsave(&ehci->lock, flags); in itd_urb_transaction()
1296 spin_unlock_irqrestore(&ehci->lock, flags); in itd_urb_transaction()
1297 return -ENOMEM; in itd_urb_transaction()
1302 itd->itd_dma = itd_dma; in itd_urb_transaction()
1303 itd->frame = NO_FRAME; in itd_urb_transaction()
1304 list_add(&itd->itd_list, &sched->td_list); in itd_urb_transaction()
1306 spin_unlock_irqrestore(&ehci->lock, flags); in itd_urb_transaction()
1309 urb->hcpriv = sched; in itd_urb_transaction()
1310 urb->error_count = 0; in itd_urb_transaction()
1314 /*-------------------------------------------------------------------------*/
1322 int usecs = stream->ps.usecs; in reserve_release_iso_bandwidth()
1323 int c_usecs = stream->ps.c_usecs; in reserve_release_iso_bandwidth()
1324 int tt_usecs = stream->ps.tt_usecs; in reserve_release_iso_bandwidth()
1327 if (stream->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */ in reserve_release_iso_bandwidth()
1329 uframe = stream->ps.bw_phase << 3; in reserve_release_iso_bandwidth()
1331 bandwidth_dbg(ehci, sign, "iso", &stream->ps); in reserve_release_iso_bandwidth()
1334 usecs = -usecs; in reserve_release_iso_bandwidth()
1335 c_usecs = -c_usecs; in reserve_release_iso_bandwidth()
1336 tt_usecs = -tt_usecs; in reserve_release_iso_bandwidth()
1339 if (!stream->splits) { /* High speed */ in reserve_release_iso_bandwidth()
1340 for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE; in reserve_release_iso_bandwidth()
1341 i += stream->ps.bw_uperiod) in reserve_release_iso_bandwidth()
1342 ehci->bandwidth[i] += usecs; in reserve_release_iso_bandwidth()
1344 } else { /* Full speed */ in reserve_release_iso_bandwidth()
1345 s_mask = stream->ps.cs_mask; in reserve_release_iso_bandwidth()
1350 i += stream->ps.bw_uperiod) { in reserve_release_iso_bandwidth()
1351 for ((j = stream->ps.phase_uf, m = 1 << j); j < 8; in reserve_release_iso_bandwidth()
1354 ehci->bandwidth[i+j] += usecs; in reserve_release_iso_bandwidth()
1356 ehci->bandwidth[i+j] += c_usecs; in reserve_release_iso_bandwidth()
1360 tt = find_tt(stream->ps.udev); in reserve_release_iso_bandwidth()
1362 list_add_tail(&stream->ps.ps_list, &tt->ps_list); in reserve_release_iso_bandwidth()
1364 list_del(&stream->ps.ps_list); in reserve_release_iso_bandwidth()
1367 i += stream->ps.bw_period) in reserve_release_iso_bandwidth()
1368 tt->bandwidth[i] += tt_usecs; in reserve_release_iso_bandwidth()
1382 usecs = ehci->uframe_periodic_max - stream->ps.usecs; in itd_slot_ok()
1384 for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE; in itd_slot_ok()
1385 uframe += stream->ps.bw_uperiod) { in itd_slot_ok()
1386 if (ehci->bandwidth[uframe] > usecs) in itd_slot_ok()
1404 mask = stream->ps.cs_mask << (uframe & 7); in sitd_slot_ok()
1406 /* for OUT, don't wrap SSPLIT into H-microframe 7 */ in sitd_slot_ok()
1407 if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7)) in sitd_slot_ok()
1415 uframe &= stream->ps.bw_uperiod - 1; in sitd_slot_ok()
1423 if (!tt_available(ehci, &stream->ps, tt, frame, uf)) in sitd_slot_ok()
1429 if (!tt_no_collision(ehci, stream->ps.bw_period, in sitd_slot_ok()
1430 stream->ps.udev, frame, mask)) in sitd_slot_ok()
1440 max_used = ehci->uframe_periodic_max - stream->ps.usecs; in sitd_slot_ok()
1441 for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) { in sitd_slot_ok()
1442 if (ehci->bandwidth[uf] > max_used) in sitd_slot_ok()
1447 if (stream->ps.c_usecs) { in sitd_slot_ok()
1448 max_used = ehci->uframe_periodic_max - in sitd_slot_ok()
1449 stream->ps.c_usecs; in sitd_slot_ok()
1453 if ((stream->ps.cs_mask & tmp) == 0) in sitd_slot_ok()
1455 if (ehci->bandwidth[uf+i] > max_used) in sitd_slot_ok()
1460 uframe += stream->ps.bw_uperiod; in sitd_slot_ok()
1463 stream->ps.cs_mask <<= uframe & 7; in sitd_slot_ok()
1464 stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask); in sitd_slot_ok()
1471 * "as small as possible" to be cache-friendlier.) That limits the size
1474 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
1489 unsigned mod = ehci->periodic_size << 3; in iso_stream_schedule()
1490 struct ehci_iso_sched *sched = urb->hcpriv; in iso_stream_schedule()
1491 bool empty = list_empty(&stream->td_list); in iso_stream_schedule()
1494 period = stream->uperiod; in iso_stream_schedule()
1495 span = sched->span; in iso_stream_schedule()
1496 if (!stream->highspeed) in iso_stream_schedule()
1501 ehci_to_hcd(ehci), urb->ep))) { in iso_stream_schedule()
1504 if (stream->ps.phase == NO_FRAME) { in iso_stream_schedule()
1506 struct ehci_tt *tt = find_tt(stream->ps.udev); in iso_stream_schedule()
1512 compute_tt_budget(ehci->tt_budget, tt); in iso_stream_schedule()
1514 start = ((-(++ehci->random_frame)) << 3) & (period - 1); in iso_stream_schedule()
1517 * Early uframes are more precious because full-speed in iso_stream_schedule()
1524 start--; in iso_stream_schedule()
1526 if (stream->highspeed) { in iso_stream_schedule()
1541 status = -ENOSPC; in iso_stream_schedule()
1544 stream->ps.phase = (start >> 3) & in iso_stream_schedule()
1545 (stream->ps.period - 1); in iso_stream_schedule()
1546 stream->ps.bw_phase = stream->ps.phase & in iso_stream_schedule()
1547 (stream->ps.bw_period - 1); in iso_stream_schedule()
1548 stream->ps.phase_uf = start & 7; in iso_stream_schedule()
1554 start = (stream->ps.phase << 3) + stream->ps.phase_uf; in iso_stream_schedule()
1557 stream->next_uframe = start; in iso_stream_schedule()
1561 now = ehci_read_frame_index(ehci) & (mod - 1); in iso_stream_schedule()
1564 if (ehci->i_thresh) in iso_stream_schedule()
1565 next = now + ehci->i_thresh; /* uframe cache */ in iso_stream_schedule()
1570 if (ehci->isoc_count == 0) in iso_stream_schedule()
1571 ehci->last_iso_frame = now >> 3; in iso_stream_schedule()
1574 * Use ehci->last_iso_frame as the base. There can't be any in iso_stream_schedule()
1577 base = ehci->last_iso_frame << 3; in iso_stream_schedule()
1578 next = (next - base) & (mod - 1); in iso_stream_schedule()
1579 start = (stream->next_uframe - base) & (mod - 1); in iso_stream_schedule()
1590 now2 = (now - base) & (mod - 1); in iso_stream_schedule()
1594 ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n", in iso_stream_schedule()
1595 urb, stream->next_uframe, base, period, mod); in iso_stream_schedule()
1596 status = -EFBIG; in iso_stream_schedule()
1605 (urb->transfer_flags & URB_ISO_ASAP))) in iso_stream_schedule()
1614 if (urb->transfer_flags & URB_ISO_ASAP) in iso_stream_schedule()
1621 skip = (now2 - start + period - 1) & -period; in iso_stream_schedule()
1624 urb, start + base, span - period, now2 + base, in iso_stream_schedule()
1628 skip = span - period; in iso_stream_schedule()
1638 urb->error_count = skip / period; in iso_stream_schedule()
1640 sched->first_packet = urb->error_count; in iso_stream_schedule()
1645 start = next + ((start - next) & (period - 1)); in iso_stream_schedule()
1649 if (unlikely(start + span - period >= mod + wrap)) { in iso_stream_schedule()
1651 urb, start, span - period, mod + wrap); in iso_stream_schedule()
1652 status = -EFBIG; in iso_stream_schedule()
1657 stream->next_uframe = (start + skip) & (mod - 1); in iso_stream_schedule()
1659 /* report high speed start in uframes; full speed, in frames */ in iso_stream_schedule()
1660 urb->start_frame = start & (mod - 1); in iso_stream_schedule()
1661 if (!stream->highspeed) in iso_stream_schedule()
1662 urb->start_frame >>= 3; in iso_stream_schedule()
1667 urb->hcpriv = NULL; in iso_stream_schedule()
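
iso_stream_schedule() above does its arithmetic modulo the schedule length (a power of two, so "& (mod - 1)" is the modulo) and, when a stream has fallen behind, advances the start by a whole number of periods using the round-up mask trick in the "skip" computation. A small sketch with invented frame numbers:

#include <stdio.h>

int main(void)
{
	unsigned mod = 1024 << 3;	/* schedule length in uframes */
	unsigned period = 64;		/* stream period in uframes */
	unsigned base = 8000, now = 8190, start = 8010;
	unsigned now2, skip;

	now2 = (now - base) & (mod - 1);		/* 190 */
	start = (start - base) & (mod - 1);		/* 10  */

	/* whole periods to skip so the start catches up to "now" */
	skip = (now2 - start + period - 1) & -period;	/* 192 = 3 periods */

	printf("start %u -> %u (skipped %u uframes, %u packets)\n",
	       start, start + skip, skip, skip / period);
	return 0;
}
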
1671 /*-------------------------------------------------------------------------*/
1680 itd->hw_next = EHCI_LIST_END(ehci); in itd_init()
1681 itd->hw_bufp[0] = stream->buf0; in itd_init()
1682 itd->hw_bufp[1] = stream->buf1; in itd_init()
1683 itd->hw_bufp[2] = stream->buf2; in itd_init()
1686 itd->index[i] = -1; in itd_init()
1700 struct ehci_iso_packet *uf = &iso_sched->packet[index]; in itd_patch()
1701 unsigned pg = itd->pg; in itd_patch()
1703 /* BUG_ON(pg == 6 && uf->cross); */ in itd_patch()
1706 itd->index[uframe] = index; in itd_patch()
1708 itd->hw_transaction[uframe] = uf->transaction; in itd_patch()
1709 itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12); in itd_patch()
1710 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0); in itd_patch()
1711 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32)); in itd_patch()
1714 if (unlikely(uf->cross)) { in itd_patch()
1715 u64 bufp = uf->bufp + 4096; in itd_patch()
1717 itd->pg = ++pg; in itd_patch()
1718 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0); in itd_patch()
1719 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32)); in itd_patch()
1726 union ehci_shadow *prev = &ehci->pshadow[frame]; in itd_link()
1727 __hc32 *hw_p = &ehci->periodic[frame]; in itd_link()
1741 itd->itd_next = here; in itd_link()
1742 itd->hw_next = *hw_p; in itd_link()
1743 prev->itd = itd; in itd_link()
1744 itd->frame = frame; in itd_link()
1746 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); in itd_link()
1759 struct ehci_iso_sched *iso_sched = urb->hcpriv; in itd_link_urb()
1762 next_uframe = stream->next_uframe & (mod - 1); in itd_link_urb()
1764 if (unlikely(list_empty(&stream->td_list))) in itd_link_urb()
1765 ehci_to_hcd(ehci)->self.bandwidth_allocated in itd_link_urb()
1766 += stream->bandwidth; in itd_link_urb()
1768 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { in itd_link_urb()
1769 if (ehci->amd_pll_fix == 1) in itd_link_urb()
1773 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; in itd_link_urb()
1776 for (packet = iso_sched->first_packet, itd = NULL; in itd_link_urb()
1777 packet < urb->number_of_packets;) { in itd_link_urb()
1780 /* BUG_ON(list_empty(&iso_sched->td_list)); */ in itd_link_urb()
1784 itd = list_entry(iso_sched->td_list.next, in itd_link_urb()
1786 list_move_tail(&itd->itd_list, &stream->td_list); in itd_link_urb()
1787 itd->stream = stream; in itd_link_urb()
1788 itd->urb = urb; in itd_link_urb()
1797 next_uframe += stream->uperiod; in itd_link_urb()
1798 next_uframe &= mod - 1; in itd_link_urb()
1803 || packet == urb->number_of_packets) { in itd_link_urb()
1804 itd_link(ehci, frame & (ehci->periodic_size - 1), itd); in itd_link_urb()
1808 stream->next_uframe = next_uframe; in itd_link_urb()
1812 urb->hcpriv = stream; in itd_link_urb()
1814 ++ehci->isoc_count; in itd_link_urb()
1832 struct urb *urb = itd->urb; in itd_complete()
1836 int urb_index = -1; in itd_complete()
1837 struct ehci_iso_stream *stream = itd->stream; in itd_complete()
1842 if (likely(itd->index[uframe] == -1)) in itd_complete()
1844 urb_index = itd->index[uframe]; in itd_complete()
1845 desc = &urb->iso_frame_desc[urb_index]; in itd_complete()
1847 t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]); in itd_complete()
1848 itd->hw_transaction[uframe] = 0; in itd_complete()
1852 urb->error_count++; in itd_complete()
1854 desc->status = usb_pipein(urb->pipe) in itd_complete()
1855 ? -ENOSR /* hc couldn't read */ in itd_complete()
1856 : -ECOMM; /* hc couldn't write */ in itd_complete()
1858 desc->status = -EOVERFLOW; in itd_complete()
1860 desc->status = -EPROTO; in itd_complete()
1864 desc->actual_length = EHCI_ITD_LENGTH(t); in itd_complete()
1865 urb->actual_length += desc->actual_length; in itd_complete()
1868 desc->status = 0; in itd_complete()
1869 desc->actual_length = EHCI_ITD_LENGTH(t); in itd_complete()
1870 urb->actual_length += desc->actual_length; in itd_complete()
1873 urb->error_count++; in itd_complete()
1878 if (likely((urb_index + 1) != urb->number_of_packets)) in itd_complete()
1883 * list_for_each_entry (itd, &stream->td_list, itd_list) in itd_complete()
1884 * BUG_ON(itd->urb == urb); in itd_complete()
1892 --ehci->isoc_count; in itd_complete()
1895 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; in itd_complete()
1896 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { in itd_complete()
1897 if (ehci->amd_pll_fix == 1) in itd_complete()
1901 if (unlikely(list_is_singular(&stream->td_list))) in itd_complete()
1902 ehci_to_hcd(ehci)->self.bandwidth_allocated in itd_complete()
1903 -= stream->bandwidth; in itd_complete()
1906 itd->urb = NULL; in itd_complete()
1909 list_move_tail(&itd->itd_list, &stream->free_list); in itd_complete()
1912 if (list_empty(&stream->td_list)) { in itd_complete()
1913 list_splice_tail_init(&stream->free_list, in itd_complete()
1914 &ehci->cached_itd_list); in itd_complete()
1921 /*-------------------------------------------------------------------------*/
1926 int status = -EINVAL; in itd_submit()
1934 return -ENOMEM; in itd_submit()
1936 if (unlikely(urb->interval != stream->uperiod)) { in itd_submit()
1937 ehci_dbg(ehci, "can't change iso interval %d --> %d\n", in itd_submit()
1938 stream->uperiod, urb->interval); in itd_submit()
1945 __func__, urb->dev->devpath, urb, in itd_submit()
1946 usb_pipeendpoint(urb->pipe), in itd_submit()
1947 usb_pipein(urb->pipe) ? "in" : "out", in itd_submit()
1948 urb->transfer_buffer_length, in itd_submit()
1949 urb->number_of_packets, urb->interval, in itd_submit()
1961 spin_lock_irqsave(&ehci->lock, flags); in itd_submit()
1963 status = -ESHUTDOWN; in itd_submit()
1971 itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream); in itd_submit()
1979 spin_unlock_irqrestore(&ehci->lock, flags); in itd_submit()
1984 /*-------------------------------------------------------------------------*/
2000 dma_addr_t dma = urb->transfer_dma; in sitd_sched_init()
2003 iso_sched->span = urb->number_of_packets * stream->ps.period; in sitd_sched_init()
2005 /* figure out per-frame sitd fields that we'll need later in sitd_sched_init()
2008 for (i = 0; i < urb->number_of_packets; i++) { in sitd_sched_init()
2009 struct ehci_iso_packet *packet = &iso_sched->packet[i]; in sitd_sched_init()
2014 length = urb->iso_frame_desc[i].length & 0x03ff; in sitd_sched_init()
2015 buf = dma + urb->iso_frame_desc[i].offset; in sitd_sched_init()
2018 if (((i + 1) == urb->number_of_packets) in sitd_sched_init()
2019 && !(urb->transfer_flags & URB_NO_INTERRUPT)) in sitd_sched_init()
2022 packet->transaction = cpu_to_hc32(ehci, trans); in sitd_sched_init()
2025 packet->bufp = buf; in sitd_sched_init()
2026 packet->buf1 = (buf + length) & ~0x0fff; in sitd_sched_init()
2027 if (packet->buf1 != (buf & ~(u64)0x0fff)) in sitd_sched_init()
2028 packet->cross = 1; in sitd_sched_init()
2030 /* OUT uses multiple start-splits */ in sitd_sched_init()
2031 if (stream->bEndpointAddress & USB_DIR_IN) in sitd_sched_init()
2036 packet->buf1 |= length; in sitd_sched_init()
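
The "OUT uses multiple start-splits" comment above reflects the TT's limit of 188 bytes of full-speed data per microframe: an OUT isochronous packet goes out as ceil(length / 188) start-splits, and that count ends up folded into buf1 alongside the page-adjusted buffer address. The sketch below shows only the chunk count; the exact siTD field encoding is not in the matched lines and the sample lengths are invented.

#include <stdio.h>

static unsigned demo_out_start_splits(unsigned length)
{
	return (length + 187) / 188;	/* round up to whole 188-byte chunks */
}

int main(void)
{
	printf("188-byte packet: %u start-split(s)\n", demo_out_start_splits(188));	/* 1 */
	printf("940-byte packet: %u start-split(s)\n", demo_out_start_splits(940));	/* 5 */
	return 0;
}
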
2054 iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags); in sitd_urb_transaction()
2056 return -ENOMEM; in sitd_urb_transaction()
2061 spin_lock_irqsave(&ehci->lock, flags); in sitd_urb_transaction()
2062 for (i = 0; i < urb->number_of_packets; i++) { in sitd_urb_transaction()
2065 * for IN (using sitd->hw_backpointer, like a FSTN), which in sitd_urb_transaction()
2066 * means we never need two sitds for full speed packets. in sitd_urb_transaction()
2073 if (likely(!list_empty(&stream->free_list))) { in sitd_urb_transaction()
2074 sitd = list_first_entry(&stream->free_list, in sitd_urb_transaction()
2076 if (sitd->frame == ehci->now_frame) in sitd_urb_transaction()
2078 list_del(&sitd->sitd_list); in sitd_urb_transaction()
2079 sitd_dma = sitd->sitd_dma; in sitd_urb_transaction()
2082 spin_unlock_irqrestore(&ehci->lock, flags); in sitd_urb_transaction()
2083 sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags, in sitd_urb_transaction()
2085 spin_lock_irqsave(&ehci->lock, flags); in sitd_urb_transaction()
2088 spin_unlock_irqrestore(&ehci->lock, flags); in sitd_urb_transaction()
2089 return -ENOMEM; in sitd_urb_transaction()
2094 sitd->sitd_dma = sitd_dma; in sitd_urb_transaction()
2095 sitd->frame = NO_FRAME; in sitd_urb_transaction()
2096 list_add(&sitd->sitd_list, &iso_sched->td_list); in sitd_urb_transaction()
2100 urb->hcpriv = iso_sched; in sitd_urb_transaction()
2101 urb->error_count = 0; in sitd_urb_transaction()
2103 spin_unlock_irqrestore(&ehci->lock, flags); in sitd_urb_transaction()
2107 /*-------------------------------------------------------------------------*/
2118 struct ehci_iso_packet *uf = &iso_sched->packet[index]; in sitd_patch()
2121 sitd->hw_next = EHCI_LIST_END(ehci); in sitd_patch()
2122 sitd->hw_fullspeed_ep = stream->address; in sitd_patch()
2123 sitd->hw_uframe = stream->splits; in sitd_patch()
2124 sitd->hw_results = uf->transaction; in sitd_patch()
2125 sitd->hw_backpointer = EHCI_LIST_END(ehci); in sitd_patch()
2127 bufp = uf->bufp; in sitd_patch()
2128 sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp); in sitd_patch()
2129 sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32); in sitd_patch()
2131 sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1); in sitd_patch()
2132 if (uf->cross) in sitd_patch()
2134 sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32); in sitd_patch()
2135 sitd->index = index; in sitd_patch()
2142 sitd->sitd_next = ehci->pshadow[frame]; in sitd_link()
2143 sitd->hw_next = ehci->periodic[frame]; in sitd_link()
2144 ehci->pshadow[frame].sitd = sitd; in sitd_link()
2145 sitd->frame = frame; in sitd_link()
2147 ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD); in sitd_link()
2160 struct ehci_iso_sched *sched = urb->hcpriv; in sitd_link_urb()
2163 next_uframe = stream->next_uframe; in sitd_link_urb()
2165 if (list_empty(&stream->td_list)) in sitd_link_urb()
2167 ehci_to_hcd(ehci)->self.bandwidth_allocated in sitd_link_urb()
2168 += stream->bandwidth; in sitd_link_urb()
2170 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { in sitd_link_urb()
2171 if (ehci->amd_pll_fix == 1) in sitd_link_urb()
2175 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; in sitd_link_urb()
2178 for (packet = sched->first_packet, sitd = NULL; in sitd_link_urb()
2179 packet < urb->number_of_packets; in sitd_link_urb()
2183 BUG_ON(list_empty(&sched->td_list)); in sitd_link_urb()
2187 sitd = list_entry(sched->td_list.next, in sitd_link_urb()
2189 list_move_tail(&sitd->sitd_list, &stream->td_list); in sitd_link_urb()
2190 sitd->stream = stream; in sitd_link_urb()
2191 sitd->urb = urb; in sitd_link_urb()
2194 sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1), in sitd_link_urb()
2197 next_uframe += stream->uperiod; in sitd_link_urb()
2199 stream->next_uframe = next_uframe & (mod - 1); in sitd_link_urb()
2203 urb->hcpriv = stream; in sitd_link_urb()
2205 ++ehci->isoc_count; in sitd_link_urb()
2209 /*-------------------------------------------------------------------------*/
2226 struct urb *urb = sitd->urb; in sitd_complete()
2230 struct ehci_iso_stream *stream = sitd->stream; in sitd_complete()
2233 urb_index = sitd->index; in sitd_complete()
2234 desc = &urb->iso_frame_desc[urb_index]; in sitd_complete()
2235 t = hc32_to_cpup(ehci, &sitd->hw_results); in sitd_complete()
2239 urb->error_count++; in sitd_complete()
2241 desc->status = usb_pipein(urb->pipe) in sitd_complete()
2242 ? -ENOSR /* hc couldn't read */ in sitd_complete()
2243 : -ECOMM; /* hc couldn't write */ in sitd_complete()
2245 desc->status = -EOVERFLOW; in sitd_complete()
2247 desc->status = -EPROTO; in sitd_complete()
2250 urb->error_count++; in sitd_complete()
2252 desc->status = 0; in sitd_complete()
2253 desc->actual_length = desc->length - SITD_LENGTH(t); in sitd_complete()
2254 urb->actual_length += desc->actual_length; in sitd_complete()
2258 if ((urb_index + 1) != urb->number_of_packets) in sitd_complete()
2263 * list_for_each_entry (sitd, &stream->td_list, sitd_list) in sitd_complete()
2264 * BUG_ON(sitd->urb == urb); in sitd_complete()
2272 --ehci->isoc_count; in sitd_complete()
2275 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; in sitd_complete()
2276 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { in sitd_complete()
2277 if (ehci->amd_pll_fix == 1) in sitd_complete()
2281 if (list_is_singular(&stream->td_list)) in sitd_complete()
2282 ehci_to_hcd(ehci)->self.bandwidth_allocated in sitd_complete()
2283 -= stream->bandwidth; in sitd_complete()
2286 sitd->urb = NULL; in sitd_complete()
2289 list_move_tail(&sitd->sitd_list, &stream->free_list); in sitd_complete()
2292 if (list_empty(&stream->td_list)) { in sitd_complete()
2293 list_splice_tail_init(&stream->free_list, in sitd_complete()
2294 &ehci->cached_sitd_list); in sitd_complete()
2305 int status = -EINVAL; in sitd_submit()
2313 return -ENOMEM; in sitd_submit()
2315 if (urb->interval != stream->ps.period) { in sitd_submit()
2316 ehci_dbg(ehci, "can't change iso interval %d --> %d\n", in sitd_submit()
2317 stream->ps.period, urb->interval); in sitd_submit()
2323 "submit %p dev%s ep%d%s-iso len %d\n", in sitd_submit()
2324 urb, urb->dev->devpath, in sitd_submit()
2325 usb_pipeendpoint(urb->pipe), in sitd_submit()
2326 usb_pipein(urb->pipe) ? "in" : "out", in sitd_submit()
2327 urb->transfer_buffer_length); in sitd_submit()
2338 spin_lock_irqsave(&ehci->lock, flags); in sitd_submit()
2340 status = -ESHUTDOWN; in sitd_submit()
2348 sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream); in sitd_submit()
2356 spin_unlock_irqrestore(&ehci->lock, flags); in sitd_submit()
2361 /*-------------------------------------------------------------------------*/
2366 unsigned fmask = ehci->periodic_size - 1; in scan_isoc()
2374 * Touches as few pages as possible: cache-friendly. in scan_isoc()
2376 if (ehci->rh_state >= EHCI_RH_RUNNING) { in scan_isoc()
2381 now_frame = (ehci->last_iso_frame - 1) & fmask; in scan_isoc()
2384 ehci->now_frame = now_frame; in scan_isoc()
2386 frame = ehci->last_iso_frame; in scan_isoc()
2390 q_p = &ehci->pshadow[frame]; in scan_isoc()
2391 hw_p = &ehci->periodic[frame]; in scan_isoc()
2392 q.ptr = q_p->ptr; in scan_isoc()
2408 if (q.itd->hw_transaction[uf] & in scan_isoc()
2413 q_p = &q.itd->itd_next; in scan_isoc()
2414 hw_p = &q.itd->hw_next; in scan_isoc()
2416 q.itd->hw_next); in scan_isoc()
2428 *q_p = q.itd->itd_next; in scan_isoc()
2429 if (!ehci->use_dummy_qh || in scan_isoc()
2430 q.itd->hw_next != EHCI_LIST_END(ehci)) in scan_isoc()
2431 *hw_p = q.itd->hw_next; in scan_isoc()
2433 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma); in scan_isoc()
2434 type = Q_NEXT_TYPE(ehci, q.itd->hw_next); in scan_isoc()
2449 && (q.sitd->hw_results & SITD_ACTIVE(ehci))) { in scan_isoc()
2451 q_p = &q.sitd->sitd_next; in scan_isoc()
2452 hw_p = &q.sitd->hw_next; in scan_isoc()
2453 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); in scan_isoc()
2463 *q_p = q.sitd->sitd_next; in scan_isoc()
2464 if (!ehci->use_dummy_qh || in scan_isoc()
2465 q.sitd->hw_next != EHCI_LIST_END(ehci)) in scan_isoc()
2466 *hw_p = q.sitd->hw_next; in scan_isoc()
2468 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma); in scan_isoc()
2469 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); in scan_isoc()
2487 if (unlikely(modified && ehci->isoc_count > 0)) in scan_isoc()
2496 ehci->last_iso_frame = frame; in scan_isoc()