Lines matching refs: dd
Each entry below is a line from the ipath SendDMA code that references dd, the per-device struct ipath_devdata pointer; the leading number is the line in the source file and the trailing "in func()" names the enclosing function.
42 static void vl15_watchdog_enq(struct ipath_devdata *dd) in vl15_watchdog_enq() argument
45 if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) { in vl15_watchdog_enq()
47 dd->ipath_sdma_vl15_timer.expires = jiffies + interval; in vl15_watchdog_enq()
48 add_timer(&dd->ipath_sdma_vl15_timer); in vl15_watchdog_enq()
52 static void vl15_watchdog_deq(struct ipath_devdata *dd) in vl15_watchdog_deq() argument
55 if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) { in vl15_watchdog_deq()
57 mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval); in vl15_watchdog_deq()
59 del_timer(&dd->ipath_sdma_vl15_timer); in vl15_watchdog_deq()
65 struct ipath_devdata *dd = (struct ipath_devdata *)opaque; in vl15_watchdog_timeout() local
67 if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) { in vl15_watchdog_timeout()
69 ipath_cancel_sends(dd, 1); in vl15_watchdog_timeout()
70 ipath_hol_down(dd); in vl15_watchdog_timeout()
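The three vl15_watchdog_* entries above form a reference-counted watchdog: the first VL15 packet queued arms a timer, further packets keep it running, and retiring the last one deletes it; if the timer fires with packets still outstanding, ipath_cancel_sends() and ipath_hol_down() are called. A minimal sketch of the enqueue/dequeue pair, assuming a locally computed interval (the listing does not show how interval is derived):

    static void vl15_watchdog_enq(struct ipath_devdata *dd)
    {
        /* assumed interval; the real value is not visible in the listing */
        unsigned long interval = msecs_to_jiffies(1000);

        /* first VL15 packet in flight: arm the watchdog */
        if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
            dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
            add_timer(&dd->ipath_sdma_vl15_timer);
        }
    }

    static void vl15_watchdog_deq(struct ipath_devdata *dd)
    {
        unsigned long interval = msecs_to_jiffies(1000);

        if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
            /* still packets outstanding: push the deadline out */
            mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
        } else {
            /* last one retired: stop the watchdog */
            del_timer(&dd->ipath_sdma_vl15_timer);
        }
    }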
77 static void unmap_desc(struct ipath_devdata *dd, unsigned head) in unmap_desc() argument
79 __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0]; in unmap_desc()
89 dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE); in unmap_desc()
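unmap_desc() recovers the bus address and length stashed in the descriptor at 'head' and releases the streaming mapping. A hedged sketch; only the descqp lookup and the dma_unmap_single() call appear in the listing, so the field extraction below is an assumed layout, not the confirmed one:

    static void unmap_desc(struct ipath_devdata *dd, unsigned head)
    {
        __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
        u64 desc0 = le64_to_cpu(descqp[0]);
        u64 desc1 = le64_to_cpu(descqp[1]);
        dma_addr_t addr;
        size_t len;

        /* assumed layout: address split across the two qwords,
         * length (in dwords) carried in the first qword */
        addr = (desc1 << 32) | (desc0 >> 32);
        len  = ((desc0 >> 14) & 0x7ff) << 2;

        dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
    }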
95 int ipath_sdma_make_progress(struct ipath_devdata *dd) in ipath_sdma_make_progress() argument
103 if (!list_empty(&dd->ipath_sdma_activelist)) { in ipath_sdma_make_progress()
104 lp = dd->ipath_sdma_activelist.next; in ipath_sdma_make_progress()
115 dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead); in ipath_sdma_make_progress()
117 if (dmahead >= dd->ipath_sdma_descq_cnt) in ipath_sdma_make_progress()
120 while (dd->ipath_sdma_descq_head != dmahead) { in ipath_sdma_make_progress()
122 dd->ipath_sdma_descq_head == start_idx) { in ipath_sdma_make_progress()
123 unmap_desc(dd, dd->ipath_sdma_descq_head); in ipath_sdma_make_progress()
125 if (start_idx == dd->ipath_sdma_descq_cnt) in ipath_sdma_make_progress()
130 dd->ipath_sdma_descq_removed++; in ipath_sdma_make_progress()
131 if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt) in ipath_sdma_make_progress()
132 dd->ipath_sdma_descq_head = 0; in ipath_sdma_make_progress()
134 if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) { in ipath_sdma_make_progress()
137 vl15_watchdog_deq(dd); in ipath_sdma_make_progress()
138 list_move_tail(lp, &dd->ipath_sdma_notifylist); in ipath_sdma_make_progress()
139 if (!list_empty(&dd->ipath_sdma_activelist)) { in ipath_sdma_make_progress()
140 lp = dd->ipath_sdma_activelist.next; in ipath_sdma_make_progress()
153 tasklet_hi_schedule(&dd->ipath_sdma_notify_task); in ipath_sdma_make_progress()
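ipath_sdma_make_progress() reads the hardware head register, advances the software head to match, unmaps descriptors whose buffers were singly mapped, and moves each fully consumed txreq from the active list to the notify list before scheduling the notify tasklet. A condensed sketch of that retirement loop, assuming the txreq flag and start_idx field names (they are not visible in the listing); the caller holds dd->ipath_sdma_lock:

    dmahead = (u16) ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
    if (dmahead >= dd->ipath_sdma_descq_cnt)
        goto done;              /* ignore a bogus hardware value */

    while (dd->ipath_sdma_descq_head != dmahead) {
        if (txp && (txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC) &&
            dd->ipath_sdma_descq_head == start_idx) {
            /* this descriptor owns a streaming mapping */
            unmap_desc(dd, dd->ipath_sdma_descq_head);
            if (++start_idx == dd->ipath_sdma_descq_cnt)
                start_idx = 0;
        }

        dd->ipath_sdma_descq_removed++;
        if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
            dd->ipath_sdma_descq_head = 0;

        if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
            /* every descriptor of this request is now consumed */
            if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
                vl15_watchdog_deq(dd);
            list_move_tail(lp, &dd->ipath_sdma_notifylist);
            if (!list_empty(&dd->ipath_sdma_activelist)) {
                lp = dd->ipath_sdma_activelist.next;
                txp = list_entry(lp, struct ipath_sdma_txreq, list);
                start_idx = txp->start_idx;
            } else {
                lp = NULL;
                txp = NULL;
            }
        }
        progress = 1;
    }

    if (progress)
        tasklet_hi_schedule(&dd->ipath_sdma_notify_task);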
159 static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list) in ipath_sdma_notify() argument
172 static void sdma_notify_taskbody(struct ipath_devdata *dd) in sdma_notify_taskbody() argument
179 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in sdma_notify_taskbody()
181 list_splice_init(&dd->ipath_sdma_notifylist, &list); in sdma_notify_taskbody()
183 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in sdma_notify_taskbody()
185 ipath_sdma_notify(dd, &list); in sdma_notify_taskbody()
194 ipath_ib_piobufavail(dd->verbs_dev); in sdma_notify_taskbody()
199 struct ipath_devdata *dd = (struct ipath_devdata *)opaque; in sdma_notify_task() local
201 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) in sdma_notify_task()
202 sdma_notify_taskbody(dd); in sdma_notify_task()
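sdma_notify_taskbody() and its tasklet wrapper show the usual splice-then-callback pattern: everything queued for notification is moved to a private list under ipath_sdma_lock, the lock is dropped, and only then are the per-txreq callbacks run and the verbs layer told that send resources may be free again. A sketch grounded in the matched lines (only the local list setup is filled in):

    static void sdma_notify_taskbody(struct ipath_devdata *dd)
    {
        unsigned long flags;
        struct list_head list;

        INIT_LIST_HEAD(&list);

        /* grab the queued notifications, then drop the lock so the
         * callbacks can run (and queue new work) without holding it */
        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
        list_splice_init(&dd->ipath_sdma_notifylist, &list);
        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

        ipath_sdma_notify(dd, &list);

        /* progress was made, so poke the verbs layer: PIO buffers or
         * SDMA descriptors may be available again */
        ipath_ib_piobufavail(dd->verbs_dev);
    }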
205 static void dump_sdma_state(struct ipath_devdata *dd) in dump_sdma_state() argument
209 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus); in dump_sdma_state()
212 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl); in dump_sdma_state()
215 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0); in dump_sdma_state()
218 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1); in dump_sdma_state()
221 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2); in dump_sdma_state()
224 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail); in dump_sdma_state()
227 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead); in dump_sdma_state()
233 struct ipath_devdata *dd = (struct ipath_devdata *) opaque; in sdma_abort_task() local
237 if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) in sdma_abort_task()
240 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in sdma_abort_task()
242 status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK; in sdma_abort_task()
250 if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout)) in sdma_abort_task()
254 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status); in sdma_abort_task()
264 hwstatus = ipath_read_kreg64(dd, in sdma_abort_task()
265 dd->ipath_kregs->kr_senddmastatus); in sdma_abort_task()
271 if (dd->ipath_sdma_reset_wait > 0) { in sdma_abort_task()
273 --dd->ipath_sdma_reset_wait; in sdma_abort_task()
278 dump_sdma_state(dd); in sdma_abort_task()
283 &dd->ipath_sdma_activelist, list) { in sdma_abort_task()
286 vl15_watchdog_deq(dd); in sdma_abort_task()
287 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist); in sdma_abort_task()
291 tasklet_hi_schedule(&dd->ipath_sdma_notify_task); in sdma_abort_task()
294 dd->ipath_sdma_descq_tail = 0; in sdma_abort_task()
295 dd->ipath_sdma_descq_head = 0; in sdma_abort_task()
296 dd->ipath_sdma_head_dma[0] = 0; in sdma_abort_task()
297 dd->ipath_sdma_generation = 0; in sdma_abort_task()
298 dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added; in sdma_abort_task()
301 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, in sdma_abort_task()
302 (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18)); in sdma_abort_task()
305 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in sdma_abort_task()
318 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); in sdma_abort_task()
319 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE; in sdma_abort_task()
320 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, in sdma_abort_task()
321 dd->ipath_sendctrl); in sdma_abort_task()
322 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); in sdma_abort_task()
323 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); in sdma_abort_task()
326 dd->ipath_sdma_abort_jiffies = 0; in sdma_abort_task()
332 if (dd->ipath_flags & IPATH_LINKACTIVE) in sdma_abort_task()
333 ipath_restart_sdma(dd); in sdma_abort_task()
344 if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) { in sdma_abort_task()
346 dd->ipath_sdma_status); in sdma_abort_task()
347 dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ; in sdma_abort_task()
350 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in sdma_abort_task()
351 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) in sdma_abort_task()
352 tasklet_hi_schedule(&dd->ipath_sdma_abort_task); in sdma_abort_task()
356 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in sdma_abort_task()
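Once sdma_abort_task() sees the engine idle (or gives up waiting), it hands every pending request back for notification, resets the software view of the ring, and reprograms the length register before SDMA is re-enabled. A sketch of that cleanup step, assuming the txreq flag name; everything else appears in the matched lines (ipath_sdma_lock is held):

    list_for_each_entry_safe(txp, txpnext,
                             &dd->ipath_sdma_activelist, list) {
        if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
            vl15_watchdog_deq(dd);
        list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
    }
    tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

    /* reset the software ring state to match the (now idle) engine */
    dd->ipath_sdma_descq_tail = 0;
    dd->ipath_sdma_descq_head = 0;
    dd->ipath_sdma_head_dma[0] = 0;
    dd->ipath_sdma_generation = 0;
    dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;

    /* reprogram the ring length; bit 18 is assumed to enable head
     * writeback, mirroring the tmp64 setup in setup_sdma() below */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
                     (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));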
364 void ipath_sdma_intr(struct ipath_devdata *dd) in ipath_sdma_intr() argument
368 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in ipath_sdma_intr()
370 (void) ipath_sdma_make_progress(dd); in ipath_sdma_intr()
372 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in ipath_sdma_intr()
375 static int alloc_sdma(struct ipath_devdata *dd) in alloc_sdma() argument
380 dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev, in alloc_sdma()
381 SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL); in alloc_sdma()
383 if (!dd->ipath_sdma_descq) { in alloc_sdma()
384 ipath_dev_err(dd, "failed to allocate SendDMA descriptor " in alloc_sdma()
390 dd->ipath_sdma_descq_cnt = in alloc_sdma()
394 dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev, in alloc_sdma()
395 PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL); in alloc_sdma()
396 if (!dd->ipath_sdma_head_dma) { in alloc_sdma()
397 ipath_dev_err(dd, "failed to allocate SendDMA head memory\n"); in alloc_sdma()
401 dd->ipath_sdma_head_dma[0] = 0; in alloc_sdma()
403 init_timer(&dd->ipath_sdma_vl15_timer); in alloc_sdma()
404 dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout; in alloc_sdma()
405 dd->ipath_sdma_vl15_timer.data = (unsigned long)dd; in alloc_sdma()
406 atomic_set(&dd->ipath_sdma_vl15_count, 0); in alloc_sdma()
411 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ, in alloc_sdma()
412 (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys); in alloc_sdma()
413 dd->ipath_sdma_descq = NULL; in alloc_sdma()
414 dd->ipath_sdma_descq_phys = 0; in alloc_sdma()
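alloc_sdma() takes two coherent allocations, one block for the descriptor ring and one page the engine DMAs its head index into, initializes the VL15 watchdog timer, and frees the ring again if the head page cannot be allocated. A sketch assuming the descriptor-count derivation, the error-path labels, and the tail of the first error message (the matched lines show the allocations, timer setup, and cleanup, but not the flow between them):

    static int alloc_sdma(struct ipath_devdata *dd)
    {
        int ret = -ENOMEM;

        /* coherent memory for the descriptor ring */
        dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
            SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
        if (!dd->ipath_sdma_descq) {
            /* message continuation assumed; only the first string is matched */
            ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
                "FIFO memory\n");
            goto done;
        }

        /* assumed: each descriptor is a pair of 64-bit words */
        dd->ipath_sdma_descq_cnt =
            SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);

        /* page the engine writes the current head index into */
        dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
            PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
        if (!dd->ipath_sdma_head_dma) {
            ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
            goto cleanup_descq;
        }
        dd->ipath_sdma_head_dma[0] = 0;

        /* old-style timer setup, as in the matched lines */
        init_timer(&dd->ipath_sdma_vl15_timer);
        dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
        dd->ipath_sdma_vl15_timer.data = (unsigned long) dd;
        atomic_set(&dd->ipath_sdma_vl15_count, 0);

        ret = 0;
        goto done;

    cleanup_descq:
        dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
            (void *) dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
        dd->ipath_sdma_descq = NULL;
        dd->ipath_sdma_descq_phys = 0;
    done:
        return ret;
    }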
419 int setup_sdma(struct ipath_devdata *dd) in setup_sdma() argument
427 ret = alloc_sdma(dd); in setup_sdma()
431 if (!dd->ipath_sdma_descq) { in setup_sdma()
432 ipath_dev_err(dd, "SendDMA memory not allocated\n"); in setup_sdma()
441 dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED; in setup_sdma()
442 dd->ipath_sdma_abort_jiffies = 0; in setup_sdma()
443 dd->ipath_sdma_generation = 0; in setup_sdma()
444 dd->ipath_sdma_descq_tail = 0; in setup_sdma()
445 dd->ipath_sdma_descq_head = 0; in setup_sdma()
446 dd->ipath_sdma_descq_removed = 0; in setup_sdma()
447 dd->ipath_sdma_descq_added = 0; in setup_sdma()
450 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, in setup_sdma()
451 dd->ipath_sdma_descq_phys); in setup_sdma()
453 tmp64 = dd->ipath_sdma_descq_cnt; in setup_sdma()
455 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64); in setup_sdma()
457 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, in setup_sdma()
458 dd->ipath_sdma_descq_tail); in setup_sdma()
460 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, in setup_sdma()
461 dd->ipath_sdma_head_phys); in setup_sdma()
467 n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; in setup_sdma()
468 i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved; in setup_sdma()
469 ipath_chg_pioavailkernel(dd, i, n - i, 0); in setup_sdma()
476 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, in setup_sdma()
478 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, in setup_sdma()
480 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, in setup_sdma()
483 INIT_LIST_HEAD(&dd->ipath_sdma_activelist); in setup_sdma()
484 INIT_LIST_HEAD(&dd->ipath_sdma_notifylist); in setup_sdma()
486 tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task, in setup_sdma()
487 (unsigned long) dd); in setup_sdma()
488 tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task, in setup_sdma()
489 (unsigned long) dd); in setup_sdma()
497 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); in setup_sdma()
498 dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE; in setup_sdma()
499 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); in setup_sdma()
500 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); in setup_sdma()
501 __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status); in setup_sdma()
502 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); in setup_sdma()
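setup_sdma() resets the bookkeeping, tells the chip where the ring lives, reserves the tail of the PIO buffer space for SDMA via the bufmask registers, wires up the lists and tasklets, and finally turns the SDMA interrupt on. A sketch of the register-programming and enable steps; the bit-18 comment and the flush role of the kr_scratch read are assumptions based on how the rest of this listing uses them:

    /* tell the chip where the ring lives and how big it is */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
                     dd->ipath_sdma_descq_phys);

    tmp64 = dd->ipath_sdma_descq_cnt;
    tmp64 |= 1ULL << 18;            /* assumed: enable head writeback */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);

    /* empty ring; point the engine at the head-index page */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
                     dd->ipath_sdma_descq_tail);
    ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
                     dd->ipath_sdma_head_phys);

    /* enable the SDMA interrupt; the kr_scratch read is used throughout
     * this code to flush the preceding register write */
    spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
    dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
    ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
    ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
    __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
    spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);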
508 void teardown_sdma(struct ipath_devdata *dd) in teardown_sdma() argument
517 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in teardown_sdma()
518 __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status); in teardown_sdma()
519 __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status); in teardown_sdma()
520 __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status); in teardown_sdma()
521 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in teardown_sdma()
523 tasklet_kill(&dd->ipath_sdma_abort_task); in teardown_sdma()
524 tasklet_kill(&dd->ipath_sdma_notify_task); in teardown_sdma()
527 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); in teardown_sdma()
528 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE; in teardown_sdma()
529 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, in teardown_sdma()
530 dd->ipath_sendctrl); in teardown_sdma()
531 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); in teardown_sdma()
532 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); in teardown_sdma()
534 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in teardown_sdma()
536 list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist, in teardown_sdma()
540 vl15_watchdog_deq(dd); in teardown_sdma()
541 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist); in teardown_sdma()
543 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in teardown_sdma()
545 sdma_notify_taskbody(dd); in teardown_sdma()
547 del_timer_sync(&dd->ipath_sdma_vl15_timer); in teardown_sdma()
549 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in teardown_sdma()
551 dd->ipath_sdma_abort_jiffies = 0; in teardown_sdma()
553 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0); in teardown_sdma()
554 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0); in teardown_sdma()
555 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0); in teardown_sdma()
556 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0); in teardown_sdma()
557 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0); in teardown_sdma()
558 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0); in teardown_sdma()
559 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0); in teardown_sdma()
561 if (dd->ipath_sdma_head_dma) { in teardown_sdma()
562 sdma_head_dma = (void *) dd->ipath_sdma_head_dma; in teardown_sdma()
563 sdma_head_phys = dd->ipath_sdma_head_phys; in teardown_sdma()
564 dd->ipath_sdma_head_dma = NULL; in teardown_sdma()
565 dd->ipath_sdma_head_phys = 0; in teardown_sdma()
568 if (dd->ipath_sdma_descq) { in teardown_sdma()
569 sdma_descq = dd->ipath_sdma_descq; in teardown_sdma()
570 sdma_descq_phys = dd->ipath_sdma_descq_phys; in teardown_sdma()
571 dd->ipath_sdma_descq = NULL; in teardown_sdma()
572 dd->ipath_sdma_descq_phys = 0; in teardown_sdma()
575 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in teardown_sdma()
578 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, in teardown_sdma()
582 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ, in teardown_sdma()
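teardown_sdma() runs the shutdown in a fixed order: mark the engine SHUTDOWN/ABORTING under ipath_sdma_lock, kill both tasklets, clear INFINIPATH_S_SDMAENABLE, move every pending txreq to the notify list and run the notifications synchronously, stop the VL15 timer, zero the SDMA registers, and only then release the coherent memory. The matched lines show that the buffer pointers are snapshotted while the lock is held and freed after it is dropped; a sketch of that handoff (local variable declarations assumed):

    /* snapshot the buffers under the lock, free them after dropping it,
     * so nothing can observe half-freed pointers in dd */
    spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
    sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
    sdma_head_phys = dd->ipath_sdma_head_phys;
    dd->ipath_sdma_head_dma = NULL;
    dd->ipath_sdma_head_phys = 0;

    sdma_descq = dd->ipath_sdma_descq;
    sdma_descq_phys = dd->ipath_sdma_descq_phys;
    dd->ipath_sdma_descq = NULL;
    dd->ipath_sdma_descq_phys = 0;
    spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

    if (sdma_head_dma)
        dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                          sdma_head_dma, sdma_head_phys);
    if (sdma_descq)
        dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
                          sdma_descq, sdma_descq_phys);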
591 void ipath_restart_sdma(struct ipath_devdata *dd) in ipath_restart_sdma() argument
596 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) in ipath_restart_sdma()
604 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in ipath_restart_sdma()
605 if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status) in ipath_restart_sdma()
606 || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) in ipath_restart_sdma()
609 __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status); in ipath_restart_sdma()
610 __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status); in ipath_restart_sdma()
611 __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status); in ipath_restart_sdma()
613 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in ipath_restart_sdma()
616 dd->ipath_sdma_status); in ipath_restart_sdma()
619 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); in ipath_restart_sdma()
624 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE; in ipath_restart_sdma()
625 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); in ipath_restart_sdma()
626 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); in ipath_restart_sdma()
627 dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE; in ipath_restart_sdma()
628 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); in ipath_restart_sdma()
629 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); in ipath_restart_sdma()
630 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); in ipath_restart_sdma()
633 ipath_ib_piobufavail(dd->verbs_dev); in ipath_restart_sdma()
639 static inline void make_sdma_desc(struct ipath_devdata *dd, in make_sdma_desc() argument
648 sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30; in make_sdma_desc()
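make_sdma_desc() builds the descriptor qwords; the only line matched here stores the low two bits of the ring generation at bits 31:30 of the first qword. Combined with the generation increment on every tail wrap in ipath_sdma_verbs_send() below, this lets the engine distinguish freshly written descriptors from stale ones after the ring wraps. The matched fragment, with a comment added:

    /* fold the 2-bit ring generation into qword 0 so the engine can
     * tell a newly written descriptor from a stale one after a wrap */
    sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;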
664 int ipath_sdma_verbs_send(struct ipath_devdata *dd, in ipath_sdma_verbs_send() argument
678 if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) { in ipath_sdma_verbs_send()
680 tx->map_len + (dwords<<2), dd->ipath_ibmaxlen); in ipath_sdma_verbs_send()
685 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in ipath_sdma_verbs_send()
688 if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) { in ipath_sdma_verbs_send()
693 if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) { in ipath_sdma_verbs_send()
694 if (ipath_sdma_make_progress(dd)) in ipath_sdma_verbs_send()
700 addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, in ipath_sdma_verbs_send()
702 if (dma_mapping_error(&dd->pcidev->dev, addr)) in ipath_sdma_verbs_send()
706 make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0); in ipath_sdma_verbs_send()
714 tail = dd->ipath_sdma_descq_tail; in ipath_sdma_verbs_send()
715 descqp = &dd->ipath_sdma_descq[tail].qw[0]; in ipath_sdma_verbs_send()
723 if (++tail == dd->ipath_sdma_descq_cnt) { in ipath_sdma_verbs_send()
725 descqp = &dd->ipath_sdma_descq[0].qw[0]; in ipath_sdma_verbs_send()
726 ++dd->ipath_sdma_generation; in ipath_sdma_verbs_send()
741 addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2, in ipath_sdma_verbs_send()
743 if (dma_mapping_error(&dd->pcidev->dev, addr)) in ipath_sdma_verbs_send()
745 make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset); in ipath_sdma_verbs_send()
754 if (++tail == dd->ipath_sdma_descq_cnt) { in ipath_sdma_verbs_send()
756 descqp = &dd->ipath_sdma_descq[0].qw[0]; in ipath_sdma_verbs_send()
757 ++dd->ipath_sdma_generation; in ipath_sdma_verbs_send()
782 descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0]; in ipath_sdma_verbs_send()
793 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail); in ipath_sdma_verbs_send()
797 dd->ipath_sdma_descq_tail = tail; in ipath_sdma_verbs_send()
798 dd->ipath_sdma_descq_added += tx->txreq.sg_count; in ipath_sdma_verbs_send()
799 list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist); in ipath_sdma_verbs_send()
801 vl15_watchdog_enq(dd); in ipath_sdma_verbs_send()
805 while (tail != dd->ipath_sdma_descq_tail) { in ipath_sdma_verbs_send()
807 tail = dd->ipath_sdma_descq_cnt - 1; in ipath_sdma_verbs_send()
810 unmap_desc(dd, tail); in ipath_sdma_verbs_send()
815 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in ipath_sdma_verbs_send()
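ipath_sdma_verbs_send() is the submission path: check the length against ipath_ibmaxlen, take ipath_sdma_lock, bail out if an abort is in flight, try ipath_sdma_make_progress() when the ring is short of descriptors, dma_map the header and each SGE, write descriptors at the tail (wrapping and bumping the generation), then write kr_senddmatail, update the counters, and put the txreq on the active list; a mapping failure unwinds by walking the tail backwards and unmapping whatever was already written. A condensed sketch of the wrap, kick, and unwind fragments (the surrounding flow and the VL15 flag name are assumptions):

    /* advance the tail, wrapping and bumping the generation so the
     * hardware can detect the wrap (see make_sdma_desc() above) */
    if (++tail == dd->ipath_sdma_descq_cnt) {
        tail = 0;
        descqp = &dd->ipath_sdma_descq[0].qw[0];
        ++dd->ipath_sdma_generation;
    }

    /* success path: kick the engine and book-keep the submission */
    ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
    dd->ipath_sdma_descq_tail = tail;
    dd->ipath_sdma_descq_added += tx->txreq.sg_count;
    list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
    if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
        vl15_watchdog_enq(dd);

    /* error path after a dma_map_single() failure: walk back from the
     * new tail to the old one, unmapping each descriptor written */
    while (tail != dd->ipath_sdma_descq_tail) {
        if (!tail)
            tail = dd->ipath_sdma_descq_cnt - 1;
        else
            tail--;
        unmap_desc(dd, tail);
    }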