Lines matching refs:dd (cross-reference hits in the PIO send-buffer code of the Linux InfiniBand qib driver, qib_tx.c)

61 void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)  in qib_disarm_piobufs()  argument
68 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_disarm_piobufs()
70 __clear_bit(i, dd->pio_need_disarm); in qib_disarm_piobufs()
71 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs()
73 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_disarm_piobufs()
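
The qib_disarm_piobufs() hits above show this file's basic locking pattern: everything that touches the send-buffer bitmaps happens under pioavail_lock, and a direct disarm also clears any deferred request for the same buffer. Below is a minimal user-space sketch of that pattern, with pthread_mutex_t standing in for the kernel spinlock and a hypothetical disarm_buf() in place of the dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)) chip call.

#include <pthread.h>
#include <stdio.h>

#define NBUFS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BM_WORDS ((NBUFS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static pthread_mutex_t pioavail_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pio_need_disarm[BM_WORDS];

static void bm_clear(unsigned long *bm, unsigned i)
{
        bm[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

/* Hypothetical stand-in for the f_sendctrl() disarm chip op. */
static void disarm_buf(unsigned i)
{
        printf("disarm buffer %u\n", i);
}

/* Disarm buffers [first, first + cnt) under the availability lock,
 * clearing any deferred-disarm request that this satisfies. */
static void disarm_piobufs(unsigned first, unsigned cnt)
{
        unsigned i, last = first + cnt;

        pthread_mutex_lock(&pioavail_lock);
        for (i = first; i < last; i++) {
                bm_clear(pio_need_disarm, i);
                disarm_buf(i);
        }
        pthread_mutex_unlock(&pioavail_lock);
}

int main(void)
{
        disarm_piobufs(3, 4);
        return 0;
}
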
82 struct qib_devdata *dd = rcd->dd; in qib_disarm_piobufs_ifneeded() local
103 spin_lock_irq(&dd->pioavail_lock); in qib_disarm_piobufs_ifneeded()
105 if (__test_and_clear_bit(i, dd->pio_need_disarm)) { in qib_disarm_piobufs_ifneeded()
107 dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs_ifneeded()
110 spin_unlock_irq(&dd->pioavail_lock); in qib_disarm_piobufs_ifneeded()
114 static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i) in is_sdma_buf() argument
119 for (pidx = 0; pidx < dd->num_pports; pidx++) { in is_sdma_buf()
120 ppd = dd->pport + pidx; in is_sdma_buf()
132 static int find_ctxt(struct qib_devdata *dd, unsigned bufn) in find_ctxt() argument
138 spin_lock(&dd->uctxt_lock); in find_ctxt()
139 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { in find_ctxt()
140 rcd = dd->rcd[ctxt]; in find_ctxt()
159 spin_unlock(&dd->uctxt_lock); in find_ctxt()
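
find_ctxt() answers one question for the disarm logic: does buffer bufn currently belong to a user context? It walks dd->rcd[] under uctxt_lock. A simplified sketch follows, with a hypothetical struct ctxt standing in for struct qib_ctxtdata.

#include <pthread.h>

struct ctxt {
        unsigned pio_base;      /* first PIO buffer owned by this context */
        unsigned piocnt;        /* how many buffers it owns */
};

#define NCTXTS 8
static pthread_mutex_t uctxt_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctxt *rcd[NCTXTS];        /* NULL = context slot unused */

/* Return nonzero if buffer bufn belongs to an active user context. */
static int find_ctxt(unsigned bufn)
{
        unsigned ctxt;
        int ret = 0;

        pthread_mutex_lock(&uctxt_lock);
        for (ctxt = 0; ctxt < NCTXTS; ctxt++) {
                struct ctxt *c = rcd[ctxt];

                if (c && bufn >= c->pio_base &&
                    bufn < c->pio_base + c->piocnt) {
                        ret = 1;
                        break;
                }
        }
        pthread_mutex_unlock(&uctxt_lock);
        return ret;
}

int main(void)
{
        struct ctxt c0 = { .pio_base = 16, .piocnt = 8 };

        rcd[0] = &c0;
        return find_ctxt(20) ? 0 : 1;   /* 20 falls inside [16, 24) */
}
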
171 void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask, in qib_disarm_piobufs_set() argument
178 for (i = 0; i < dd->num_pports; i++) in qib_disarm_piobufs_set()
189 ppd = is_sdma_buf(dd, i); in qib_disarm_piobufs_set()
198 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_disarm_piobufs_set()
199 if (test_bit(i, dd->pio_writing) || in qib_disarm_piobufs_set()
200 (!test_bit(i << 1, dd->pioavailkernel) && in qib_disarm_piobufs_set()
201 find_ctxt(dd, i))) { in qib_disarm_piobufs_set()
202 __set_bit(i, dd->pio_need_disarm); in qib_disarm_piobufs_set()
206 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs_set()
208 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_disarm_piobufs_set()
212 for (i = 0; i < dd->num_pports; i++) in qib_disarm_piobufs_set()
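
qib_disarm_piobufs_set() is where the deferral decision is made: a buffer that the CPU is still writing (pio_writing) or that is owned by a user context (not marked in pioavailkernel, and claimed by find_ctxt()) must not be disarmed out from under its owner, so the request is parked in pio_need_disarm and replayed later. A self-contained sketch of that decision; buffer_is_user_owned() is a made-up stand-in for the pioavailkernel/find_ctxt() test.

#include <pthread.h>
#include <stdio.h>

#define NBUFS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BM_WORDS ((NBUFS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static pthread_mutex_t pioavail_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pio_writing[BM_WORDS];
static unsigned long pio_need_disarm[BM_WORDS];

static int bm_test(const unsigned long *bm, unsigned i)
{
        return (bm[i / BITS_PER_LONG] >> (i % BITS_PER_LONG)) & 1;
}

static void bm_set(unsigned long *bm, unsigned i)
{
        bm[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

/* Hypothetical stand-ins for the chip disarm op and for the
 * "not kernel-owned, and some user context claims it" test. */
static void disarm_buf(unsigned i)
{
        printf("disarm buffer %u now\n", i);
}

static int buffer_is_user_owned(unsigned i)
{
        return i >= 48; /* pretend the top buffers belong to user contexts */
}

/* For each buffer set in mask: disarm immediately if idle, otherwise
 * park the request in pio_need_disarm for a later replay. */
static void disarm_piobufs_set(const unsigned long *mask)
{
        unsigned i;

        pthread_mutex_lock(&pioavail_lock);
        for (i = 0; i < NBUFS; i++) {
                if (!bm_test(mask, i))
                        continue;
                if (bm_test(pio_writing, i) || buffer_is_user_owned(i))
                        bm_set(pio_need_disarm, i);     /* defer */
                else
                        disarm_buf(i);                  /* safe right now */
        }
        pthread_mutex_unlock(&pioavail_lock);
}

int main(void)
{
        unsigned long mask[BM_WORDS] = { 0 };

        bm_set(mask, 2);
        bm_set(mask, 50);       /* user-owned: gets deferred */
        disarm_piobufs_set(mask);
        return 0;
}
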
223 static void update_send_bufs(struct qib_devdata *dd) in update_send_bufs() argument
227 const unsigned piobregs = dd->pioavregs; in update_send_bufs()
247 if (!dd->pioavailregs_dma) in update_send_bufs()
249 spin_lock_irqsave(&dd->pioavail_lock, flags); in update_send_bufs()
253 piov = le64_to_cpu(dd->pioavailregs_dma[i]); in update_send_bufs()
254 pchg = dd->pioavailkernel[i] & in update_send_bufs()
255 ~(dd->pioavailshadow[i] ^ piov); in update_send_bufs()
257 if (pchg && (pchbusy & dd->pioavailshadow[i])) { in update_send_bufs()
258 pnew = dd->pioavailshadow[i] & ~pchbusy; in update_send_bufs()
260 dd->pioavailshadow[i] = pnew; in update_send_bufs()
263 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in update_send_bufs()
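
update_send_bufs() syncs the driver with the chip: the hardware DMAs little-endian availability words into pioavailregs_dma, and the driver folds changes into its CPU-order pioavailshadow, restricted to bits covering kernel-owned buffers. The real function additionally confines the update to the busy half of a two-bit-per-buffer encoding (the pchbusy shift); the sketch below keeps only the core idea, adopting the chip's value wherever a kernel-owned bit changed.

#include <pthread.h>
#include <stdint.h>

#define PIOAVREGS 4

static pthread_mutex_t pioavail_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t pioavailregs_dma[PIOAVREGS];    /* chip DMAs here (LE) */
static uint64_t pioavailshadow[PIOAVREGS];      /* driver's CPU-order copy */
static uint64_t pioavailkernel[PIOAVREGS];      /* 1 = bit tracks a kernel buf */

/* Portable read of a little-endian 64-bit value (kernel: le64_to_cpu). */
static uint64_t le64_read(const uint64_t *p)
{
        const unsigned char *b = (const unsigned char *)p;
        uint64_t v = 0;
        int i;

        for (i = 7; i >= 0; i--)
                v = (v << 8) | b[i];
        return v;
}

static void update_send_bufs(void)
{
        unsigned i;

        pthread_mutex_lock(&pioavail_lock);
        for (i = 0; i < PIOAVREGS; i++) {
                uint64_t piov = le64_read(&pioavailregs_dma[i]);
                /* kernel-owned bits where the chip and shadow disagree */
                uint64_t pchg = pioavailkernel[i] &
                                (pioavailshadow[i] ^ piov);

                if (pchg) {
                        pioavailshadow[i] &= ~pchg;       /* drop stale bits */
                        pioavailshadow[i] |= piov & pchg; /* adopt chip view */
                }
        }
        pthread_mutex_unlock(&pioavail_lock);
}

int main(void)
{
        update_send_bufs();
        return 0;
}
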
269 static noinline void no_send_bufs(struct qib_devdata *dd) in no_send_bufs() argument
271 dd->upd_pio_shadow = 1; in no_send_bufs()
284 u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum, in qib_getsendbuf_range() argument
290 unsigned long *shadow = dd->pioavailshadow; in qib_getsendbuf_range()
293 if (!(dd->flags & QIB_PRESENT)) in qib_getsendbuf_range()
297 if (dd->upd_pio_shadow) { in qib_getsendbuf_range()
304 update_send_bufs(dd); in qib_getsendbuf_range()
313 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_getsendbuf_range()
314 if (dd->last_pio >= first && dd->last_pio <= last) in qib_getsendbuf_range()
315 i = dd->last_pio + 1; in qib_getsendbuf_range()
318 nbufs = last - dd->min_kernel_pio + 1; in qib_getsendbuf_range()
321 i = !first ? dd->min_kernel_pio : first; in qib_getsendbuf_range()
327 __set_bit(i, dd->pio_writing); in qib_getsendbuf_range()
329 dd->last_pio = i; in qib_getsendbuf_range()
332 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_getsendbuf_range()
341 no_send_bufs(dd); in qib_getsendbuf_range()
344 if (i < dd->piobcnt2k) in qib_getsendbuf_range()
345 buf = (u32 __iomem *)(dd->pio2kbase + in qib_getsendbuf_range()
346 i * dd->palign); in qib_getsendbuf_range()
347 else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base) in qib_getsendbuf_range()
348 buf = (u32 __iomem *)(dd->pio4kbase + in qib_getsendbuf_range()
349 (i - dd->piobcnt2k) * dd->align4k); in qib_getsendbuf_range()
351 buf = (u32 __iomem *)(dd->piovl15base + in qib_getsendbuf_range()
352 (i - (dd->piobcnt2k + dd->piobcnt4k)) * in qib_getsendbuf_range()
353 dd->align4k); in qib_getsendbuf_range()
356 dd->upd_pio_shadow = 0; in qib_getsendbuf_range()
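
qib_getsendbuf_range() is the allocator. Under pioavail_lock it scans the availability shadow, preferring to resume just past last_pio (a cheap round-robin), claims the winner by setting its pio_writing bit, and converts the index into an MMIO address by tier: the first piobcnt2k buffers sit at pio2kbase with stride palign, the next piobcnt4k at pio4kbase with stride align4k, and any remainder in the VL15 region. When the scan fails, no_send_bufs() sets upd_pio_shadow so the next call refreshes from the DMA area first. A standalone sketch of the scan-claim-translate flow; the counts, strides, and base addresses are made-up placeholders, and the upd_pio_shadow refresh and VL15 tier are omitted.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NBUFS           32
#define PIOBCNT2K       24              /* first 24 buffers are 2 KB */
#define PALIGN          2048            /* stride of a 2 KB buffer */
#define ALIGN4K         4096            /* stride of a 4 KB buffer */
#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define BM_WORDS        ((NBUFS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static pthread_mutex_t pioavail_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long avail[BM_WORDS];           /* 1 = buffer is free */
static unsigned long pio_writing[BM_WORDS];     /* 1 = CPU copying into it */
static unsigned last_pio;

static const uintptr_t pio2kbase = 0x100000;    /* placeholder "MMIO" bases */
static const uintptr_t pio4kbase = 0x200000;

static int bm_test(const unsigned long *bm, unsigned i)
{
        return (bm[i / BITS_PER_LONG] >> (i % BITS_PER_LONG)) & 1;
}

static void bm_set(unsigned long *bm, unsigned i)
{
        bm[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static void bm_clear(unsigned long *bm, unsigned i)
{
        bm[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

/* Claim a free buffer and return its (fake) address, or 0 if none. */
static uintptr_t get_send_buf(unsigned *bufnum)
{
        unsigned i, tries;
        int found = -1;

        pthread_mutex_lock(&pioavail_lock);
        i = (last_pio + 1) % NBUFS;             /* round-robin start point */
        for (tries = 0; tries < NBUFS; tries++, i = (i + 1) % NBUFS) {
                if (bm_test(avail, i)) {
                        bm_clear(avail, i);     /* no longer free */
                        bm_set(pio_writing, i); /* CPU owns it for the copy */
                        last_pio = i;
                        found = (int)i;
                        break;
                }
        }
        pthread_mutex_unlock(&pioavail_lock);

        if (found < 0)
                return 0;                       /* caller must retry later */
        *bufnum = (unsigned)found;
        return (unsigned)found < PIOBCNT2K
                ? pio2kbase + (uintptr_t)found * PALIGN
                : pio4kbase + (uintptr_t)(found - PIOBCNT2K) * ALIGN4K;
}

int main(void)
{
        unsigned n;
        uintptr_t addr;

        for (n = 0; n < NBUFS; n++)
                bm_set(avail, n);               /* everything starts free */
        addr = get_send_buf(&n);
        printf("got buffer %u at %#lx\n", n, (unsigned long)addr);
        return 0;
}
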
366 void qib_sendbuf_done(struct qib_devdata *dd, unsigned n) in qib_sendbuf_done() argument
370 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_sendbuf_done()
371 __clear_bit(n, dd->pio_writing); in qib_sendbuf_done()
372 if (__test_and_clear_bit(n, dd->pio_need_disarm)) in qib_sendbuf_done()
373 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n)); in qib_sendbuf_done()
374 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_sendbuf_done()
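
qib_sendbuf_done() is the other half of the deferral handshake: once the CPU has finished copying into buffer n it drops the pio_writing bit and, still under the lock, tests-and-clears pio_need_disarm so a disarm that arrived mid-write is replayed exactly once. A self-contained sketch:

#include <pthread.h>
#include <stdio.h>

#define NBUFS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BM_WORDS ((NBUFS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static pthread_mutex_t pioavail_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pio_writing[BM_WORDS];
static unsigned long pio_need_disarm[BM_WORDS];

static int bm_test(const unsigned long *bm, unsigned i)
{
        return (bm[i / BITS_PER_LONG] >> (i % BITS_PER_LONG)) & 1;
}

static void bm_set(unsigned long *bm, unsigned i)
{
        bm[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static void bm_clear(unsigned long *bm, unsigned i)
{
        bm[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

static void disarm_buf(unsigned i)      /* hypothetical chip op */
{
        printf("deferred disarm of buffer %u\n", i);
}

/* CPU finished writing buffer n: drop the writing bit and replay a
 * disarm that was deferred while the write was in flight. */
static void sendbuf_done(unsigned n)
{
        pthread_mutex_lock(&pioavail_lock);
        bm_clear(pio_writing, n);
        if (bm_test(pio_need_disarm, n)) {
                bm_clear(pio_need_disarm, n);   /* consume the request ... */
                disarm_buf(n);                  /* ... exactly once */
        }
        pthread_mutex_unlock(&pioavail_lock);
}

int main(void)
{
        bm_set(pio_writing, 7);
        bm_set(pio_need_disarm, 7);     /* a disarm arrived mid-write */
        sendbuf_done(7);
        return 0;
}
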
384 void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start, in qib_chg_pioavailkernel() argument
395 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_chg_pioavailkernel()
417 dd->pioavailshadow); in qib_chg_pioavailkernel()
419 le64_to_cpu(dd->pioavailregs_dma[i]); in qib_chg_pioavailkernel()
423 start, dd->pioavailshadow); in qib_chg_pioavailkernel()
426 + start, dd->pioavailshadow); in qib_chg_pioavailkernel()
427 __set_bit(start, dd->pioavailkernel); in qib_chg_pioavailkernel()
428 if ((start >> 1) < dd->min_kernel_pio) in qib_chg_pioavailkernel()
429 dd->min_kernel_pio = start >> 1; in qib_chg_pioavailkernel()
432 dd->pioavailshadow); in qib_chg_pioavailkernel()
433 __clear_bit(start, dd->pioavailkernel); in qib_chg_pioavailkernel()
434 if ((start >> 1) > dd->min_kernel_pio) in qib_chg_pioavailkernel()
435 dd->min_kernel_pio = start >> 1; in qib_chg_pioavailkernel()
440 if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1) in qib_chg_pioavailkernel()
441 dd->last_pio = dd->min_kernel_pio - 1; in qib_chg_pioavailkernel()
442 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_chg_pioavailkernel()
444 dd->f_txchk_change(dd, ostart, len, avail, rcd); in qib_chg_pioavailkernel()
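
qib_chg_pioavailkernel() moves a range of buffers between kernel and user ownership. In the driver the shadow holds two bits per buffer, which is why start appears both doubled and shifted back with >> 1 in the hits above; the function also maintains min_kernel_pio, the watermark below which buffers belong to user contexts, and finishes with an f_txchk_change() notification. The sketch below keeps only the ownership bitmap and a simplified reading of the watermark update; the two-bit busy/avail shadow and the notification are left out.

#include <pthread.h>

#define NBUFS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BM_WORDS ((NBUFS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static pthread_mutex_t pioavail_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pioavailkernel[BM_WORDS];  /* 1 = kernel-owned */
static unsigned min_kernel_pio = NBUFS; /* lowest buffer kernel may use */

static void bm_set(unsigned long *bm, unsigned i)
{
        bm[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static void bm_clear(unsigned long *bm, unsigned i)
{
        bm[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

/* Move buffers [start, start + len) to kernel or user ownership and
 * keep the min_kernel_pio watermark consistent with the new split. */
static void chg_pioavailkernel(unsigned start, unsigned len, int to_kernel)
{
        unsigned i;

        pthread_mutex_lock(&pioavail_lock);
        for (i = start; i < start + len; i++) {
                if (to_kernel)
                        bm_set(pioavailkernel, i);
                else
                        bm_clear(pioavailkernel, i);
        }
        if (to_kernel && start < min_kernel_pio)
                min_kernel_pio = start;         /* kernel range grew down */
        else if (!to_kernel && start + len > min_kernel_pio)
                min_kernel_pio = start + len;   /* low buffers went to user */
        pthread_mutex_unlock(&pioavail_lock);
}

int main(void)
{
        chg_pioavailkernel(0, NBUFS, 1);        /* boot: all kernel-owned */
        chg_pioavailkernel(0, 16, 0);           /* hand 16 to a user context */
        return 0;
}
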
458 struct qib_devdata *dd = ppd->dd; in qib_cancel_sends() local
473 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { in qib_cancel_sends()
474 spin_lock_irqsave(&dd->uctxt_lock, flags); in qib_cancel_sends()
475 rcd = dd->rcd[ctxt]; in qib_cancel_sends()
491 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in qib_cancel_sends()
492 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_cancel_sends()
494 __set_bit(i, dd->pio_need_disarm); in qib_cancel_sends()
495 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_cancel_sends()
497 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in qib_cancel_sends()
500 if (!(dd->flags & QIB_HAS_SEND_DMA)) in qib_cancel_sends()
501 dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL | in qib_cancel_sends()
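
qib_cancel_sends() sweeps every user context and flags all of that context's buffers in pio_need_disarm. Note the locking in the hits above: the buffer range is found under uctxt_lock, which is released (line 491) before pioavail_lock is taken, so the two locks never nest; on chips without send DMA a single DISARM_ALL follows. A sketch of the sweep, with the hypothetical struct ctxt from the find_ctxt() sketch:

#include <pthread.h>

#define NBUFS 64
#define NCTXTS 8
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BM_WORDS ((NBUFS + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct ctxt {
        unsigned pio_base;      /* first PIO buffer owned by this context */
        unsigned piocnt;        /* how many buffers it owns */
};

static pthread_mutex_t uctxt_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pioavail_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctxt *rcd[NCTXTS];        /* NULL = context slot unused */
static unsigned long pio_need_disarm[BM_WORDS];

static void bm_set(unsigned long *bm, unsigned i)
{
        bm[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

/* Flag every buffer of every active user context for a deferred
 * disarm. The range is captured under uctxt_lock, which is dropped
 * before pioavail_lock is taken, matching the driver's lock order. */
static void cancel_sends(void)
{
        unsigned ctxt, i, base, last;

        for (ctxt = 0; ctxt < NCTXTS; ctxt++) {
                struct ctxt *c;

                pthread_mutex_lock(&uctxt_lock);
                c = rcd[ctxt];
                if (!c) {
                        pthread_mutex_unlock(&uctxt_lock);
                        continue;
                }
                base = c->pio_base;
                last = c->pio_base + c->piocnt;
                pthread_mutex_unlock(&uctxt_lock);

                pthread_mutex_lock(&pioavail_lock);
                for (i = base; i < last; i++)
                        bm_set(pio_need_disarm, i);
                pthread_mutex_unlock(&pioavail_lock);
        }
        /* a chip without send DMA would now get a single DISARM_ALL */
}

int main(void)
{
        struct ctxt c0 = { .pio_base = 16, .piocnt = 8 };

        rcd[0] = &c0;
        cancel_sends();
        return 0;
}
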
512 void qib_force_pio_avail_update(struct qib_devdata *dd) in qib_force_pio_avail_update() argument
514 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_force_pio_avail_update()
559 if (!(ppd->dd->flags & QIB_INITTED)) in qib_hol_event()