Lines matching refs:qp — every line in the QIB verbs layer (apparently drivers/infiniband/hw/qib/qib_verbs.c) that references the identifier qp, listed with its source line number and the enclosing function.
336 static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, in qib_post_one_send() argument
349 spin_lock_irqsave(&qp->s_lock, flags); in qib_post_one_send()
352 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) in qib_post_one_send()
356 if (wr->num_sge > qp->s_max_sge) in qib_post_one_send()
365 if (qib_fast_reg_mr(qp, wr)) in qib_post_one_send()
367 } else if (qp->ibqp.qp_type == IB_QPT_UC) { in qib_post_one_send()
370 } else if (qp->ibqp.qp_type != IB_QPT_RC) { in qib_post_one_send()
376 if (qp->ibqp.pd != wr->wr.ud.ah->pd) in qib_post_one_send()
385 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) in qib_post_one_send()
388 next = qp->s_head + 1; in qib_post_one_send()
389 if (next >= qp->s_size) in qib_post_one_send()
391 if (next == qp->s_last) { in qib_post_one_send()
396 rkt = &to_idev(qp->ibqp.device)->lk_table; in qib_post_one_send()
397 pd = to_ipd(qp->ibqp.pd); in qib_post_one_send()
398 wqe = get_swqe_ptr(qp, qp->s_head); in qib_post_one_send()
420 if (qp->ibqp.qp_type == IB_QPT_UC || in qib_post_one_send()
421 qp->ibqp.qp_type == IB_QPT_RC) { in qib_post_one_send()
424 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + in qib_post_one_send()
425 qp->port_num - 1)->ibmtu) in qib_post_one_send()
429 wqe->ssn = qp->s_ssn++; in qib_post_one_send()
430 qp->s_head = next; in qib_post_one_send()
446 dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) { in qib_post_one_send()
447 qib_schedule_send(qp); in qib_post_one_send()
450 spin_unlock_irqrestore(&qp->s_lock, flags); in qib_post_one_send()
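
The listing only shows the lines that touch qp, but the s_head/s_size/s_last arithmetic at lines 388-391 and 429-430 is a one-slot-open ring buffer for the send queue. A minimal stand-alone sketch of just that check, with the structure reduced to the fields visible above (the struct name and the -ENOMEM return are illustrative assumptions, not the driver's code):

/* needs <linux/types.h> for u32 */
struct swq_ring {			/* hypothetical reduction of qib_qp */
	u32 s_head;			/* next slot to fill (producer) */
	u32 s_last;			/* oldest uncompleted slot (consumer) */
	u32 s_size;			/* number of slots in the ring */
	u32 s_ssn;			/* send sequence number */
};

static int ring_reserve_slot(struct swq_ring *r)
{
	u32 next = r->s_head + 1;

	if (next >= r->s_size)		/* wrap around */
		next = 0;
	if (next == r->s_last)		/* one slot kept open: queue full */
		return -ENOMEM;

	/* ... fill the WQE at s_head, as get_swqe_ptr() does above ... */
	r->s_ssn++;			/* line 429: wqe->ssn = qp->s_ssn++ */
	r->s_head = next;		/* line 430: publish the new head */
	return 0;
}
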
465 struct qib_qp *qp = to_iqp(ibqp); in qib_post_send() local
470 err = qib_post_one_send(qp, wr, &scheduled); in qib_post_send()
479 qib_do_send(&qp->s_work); in qib_post_send()
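
A hedged sketch of how qib_post_send() wraps qib_post_one_send(): walk the chained work requests, stop at the first failure, and only kick the send engine directly if nothing was scheduled along the way. The bad_wr reporting and the int type of "scheduled" are assumptions; only the two calls shown above are taken from the file.

static int post_send_sketch(struct ib_qp *ibqp, struct ib_send_wr *wr,
			    struct ib_send_wr **bad_wr)
{
	struct qib_qp *qp = to_iqp(ibqp);
	int scheduled = 0;
	int err = 0;

	for (; wr; wr = wr->next) {
		err = qib_post_one_send(qp, wr, &scheduled);
		if (err) {
			*bad_wr = wr;	/* report the failing WR */
			break;
		}
	}

	/* Nothing queued the work item yet: try to send from this context. */
	if (!scheduled)
		qib_do_send(&qp->s_work);

	return err;
}
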
496 struct qib_qp *qp = to_iqp(ibqp); in qib_post_receive() local
497 struct qib_rwq *wq = qp->r_rq.wq; in qib_post_receive()
502 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) { in qib_post_receive()
513 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { in qib_post_receive()
519 spin_lock_irqsave(&qp->r_rq.lock, flags); in qib_post_receive()
521 if (next >= qp->r_rq.size) in qib_post_receive()
524 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in qib_post_receive()
530 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); in qib_post_receive()
538 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in qib_post_receive()
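
The receive side uses the same ring shape, but the head lives in the shared qib_rwq and is updated under r_rq.lock (lines 519-538). A sketch of posting one WR, where the tail comparison, the memory barrier and the qib_rwqe type are assumptions inferred from the fragments:

static int post_one_recv_sketch(struct qib_qp *qp, struct ib_recv_wr *wr)
{
	struct qib_rwq *wq = qp->r_rq.wq;
	struct qib_rwqe *wqe;
	unsigned long flags;
	u32 next;

	spin_lock_irqsave(&qp->r_rq.lock, flags);
	next = wq->head + 1;
	if (next >= qp->r_rq.size)
		next = 0;			/* wrap (line 521) */
	if (next == wq->tail) {			/* ring full */
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		return -ENOMEM;
	}

	wqe = get_rwqe_ptr(&qp->r_rq, wq->head);	/* line 530 */
	/* ... copy wr->wr_id, num_sge and sg_list into wqe ... */
	smp_wmb();				/* WQE visible before the head */
	wq->head = next;
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	return 0;
}
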
560 int has_grh, void *data, u32 tlen, struct qib_qp *qp) in qib_qp_rcv() argument
564 spin_lock(&qp->r_lock); in qib_qp_rcv()
567 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { in qib_qp_rcv()
572 switch (qp->ibqp.qp_type) { in qib_qp_rcv()
579 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); in qib_qp_rcv()
583 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp); in qib_qp_rcv()
587 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp); in qib_qp_rcv()
595 spin_unlock(&qp->r_lock); in qib_qp_rcv()
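
qib_qp_rcv() fans the packet out by QP type under r_lock. A sketch of that dispatch, with the SMI/GSI grouping and the dropped-packet counter added as assumptions (the leading parameters of the function are not shown in the listing):

	spin_lock(&qp->r_lock);

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;		/* counter name is an assumption */
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);	/* line 579 */
		break;
	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);	/* line 583 */
		break;
	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);	/* line 587 */
		break;
	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
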
614 struct qib_qp *qp; in qib_ib_rcv() local
665 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); in qib_ib_rcv()
683 qp = qib_lookup_qpn(ibp, qp_num); in qib_ib_rcv()
684 if (!qp) in qib_ib_rcv()
686 rcd->lookaside_qp = qp; in qib_ib_rcv()
689 qp = rcd->lookaside_qp; in qib_ib_rcv()
691 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); in qib_ib_rcv()
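
Lines 683-691 show qib_ib_rcv() caching the last resolved QP per receive context so back-to-back packets for the same QPN skip the qib_lookup_qpn() hash walk. A sketch of that lookaside logic; the lookaside_qpn field and the reference drop when the cached entry goes stale are assumptions:

	if (rcd->lookaside_qp && rcd->lookaside_qpn != qp_num) {
		/* cached QP no longer matches: release it */
		if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
			wake_up(&rcd->lookaside_qp->wait);
		rcd->lookaside_qp = NULL;
	}
	if (!rcd->lookaside_qp) {
		qp = qib_lookup_qpn(ibp, qp_num);	/* takes a reference */
		if (!qp)
			goto drop;			/* unknown QPN */
		rcd->lookaside_qp = qp;			/* line 686 */
		rcd->lookaside_qpn = qp_num;
	} else {
		qp = rcd->lookaside_qp;			/* line 689: cache hit */
	}
	qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
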
707 struct qib_qp *qp = NULL; in mem_timer() local
712 qp = list_entry(list->next, struct qib_qp, iowait); in mem_timer()
713 list_del_init(&qp->iowait); in mem_timer()
714 atomic_inc(&qp->refcount); in mem_timer()
720 if (qp) { in mem_timer()
721 spin_lock_irqsave(&qp->s_lock, flags); in mem_timer()
722 if (qp->s_flags & QIB_S_WAIT_KMEM) { in mem_timer()
723 qp->s_flags &= ~QIB_S_WAIT_KMEM; in mem_timer()
724 qib_schedule_send(qp); in mem_timer()
726 spin_unlock_irqrestore(&qp->s_lock, flags); in mem_timer()
727 if (atomic_dec_and_test(&qp->refcount)) in mem_timer()
728 wake_up(&qp->wait); in mem_timer()
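
mem_timer() is the first of several places in this file that use the same two-lock wakeup: dequeue one waiter and take a reference under a device-wide lock, then re-check and clear the wait flag under the QP's own s_lock. The same shape recurs in qib_put_txreq(), qib_verbs_sdma_desc_avail() and qib_ib_piobufavail() below. A hypothetical helper that captures it (the pending_lock name is an assumption):

static void wake_one_waiter(struct qib_ibdev *dev, struct list_head *list,
			    u32 wait_flag)
{
	struct qib_qp *qp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(list)) {
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);	/* keep qp alive past the unlock */
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & wait_flag) {	/* still waiting on this event? */
			qp->s_flags &= ~wait_flag;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);	/* QP destroy may be blocked */
	}
}
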
926 struct qib_qp *qp) in __get_txreq() argument
931 spin_lock_irqsave(&qp->s_lock, flags); in __get_txreq()
939 spin_unlock_irqrestore(&qp->s_lock, flags); in __get_txreq()
942 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK && in __get_txreq()
943 list_empty(&qp->iowait)) { in __get_txreq()
945 qp->s_flags |= QIB_S_WAIT_TX; in __get_txreq()
946 list_add_tail(&qp->iowait, &dev->txwait); in __get_txreq()
948 qp->s_flags &= ~QIB_S_BUSY; in __get_txreq()
950 spin_unlock_irqrestore(&qp->s_lock, flags); in __get_txreq()
957 struct qib_qp *qp) in get_txreq() argument
973 tx = __get_txreq(dev, qp); in get_txreq()
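
__get_txreq() is the slow path taken when the tx-descriptor pool is empty: the QP parks itself on dev->txwait with QIB_S_WAIT_TX set and drops QIB_S_BUSY so a later qib_put_txreq() can restart it. A sketch of that branch; the device-level lock, the free-pool list name and the ERR_PTR-style return are assumptions:

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->pending_lock);
	if (list_empty(&dev->txreq_free)) {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
		    list_empty(&qp->iowait)) {
			qp->s_flags |= QIB_S_WAIT_TX;	/* remember why we stopped */
			list_add_tail(&qp->iowait, &dev->txwait);
		}
		qp->s_flags &= ~QIB_S_BUSY;		/* allow the next kick */
		spin_unlock(&dev->pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	} else {
		/* ... pop a descriptor off the free pool and return it ... */
	}
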
981 struct qib_qp *qp; in qib_put_txreq() local
984 qp = tx->qp; in qib_put_txreq()
985 dev = to_idev(qp->ibqp.device); in qib_put_txreq()
987 if (atomic_dec_and_test(&qp->refcount)) in qib_put_txreq()
988 wake_up(&qp->wait); in qib_put_txreq()
1008 qp = list_entry(dev->txwait.next, struct qib_qp, iowait); in qib_put_txreq()
1009 list_del_init(&qp->iowait); in qib_put_txreq()
1010 atomic_inc(&qp->refcount); in qib_put_txreq()
1013 spin_lock_irqsave(&qp->s_lock, flags); in qib_put_txreq()
1014 if (qp->s_flags & QIB_S_WAIT_TX) { in qib_put_txreq()
1015 qp->s_flags &= ~QIB_S_WAIT_TX; in qib_put_txreq()
1016 qib_schedule_send(qp); in qib_put_txreq()
1018 spin_unlock_irqrestore(&qp->s_lock, flags); in qib_put_txreq()
1020 if (atomic_dec_and_test(&qp->refcount)) in qib_put_txreq()
1021 wake_up(&qp->wait); in qib_put_txreq()
1034 struct qib_qp *qp, *nqp; in qib_verbs_sdma_desc_avail() local
1044 list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) { in qib_verbs_sdma_desc_avail()
1045 if (qp->port_num != ppd->port) in qib_verbs_sdma_desc_avail()
1049 if (qp->s_tx->txreq.sg_count > avail) in qib_verbs_sdma_desc_avail()
1051 avail -= qp->s_tx->txreq.sg_count; in qib_verbs_sdma_desc_avail()
1052 list_del_init(&qp->iowait); in qib_verbs_sdma_desc_avail()
1053 atomic_inc(&qp->refcount); in qib_verbs_sdma_desc_avail()
1054 qps[n++] = qp; in qib_verbs_sdma_desc_avail()
1060 qp = qps[i]; in qib_verbs_sdma_desc_avail()
1061 spin_lock(&qp->s_lock); in qib_verbs_sdma_desc_avail()
1062 if (qp->s_flags & QIB_S_WAIT_DMA_DESC) { in qib_verbs_sdma_desc_avail()
1063 qp->s_flags &= ~QIB_S_WAIT_DMA_DESC; in qib_verbs_sdma_desc_avail()
1064 qib_schedule_send(qp); in qib_verbs_sdma_desc_avail()
1066 spin_unlock(&qp->s_lock); in qib_verbs_sdma_desc_avail()
1067 if (atomic_dec_and_test(&qp->refcount)) in qib_verbs_sdma_desc_avail()
1068 wake_up(&qp->wait); in qib_verbs_sdma_desc_avail()
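
When the SDMA engine frees descriptors, qib_verbs_sdma_desc_avail() hands them back out under a strict budget: QPs waiting on other ports are skipped, and the scan stops as soon as the next waiter needs more descriptors than remain. The harvested QPs are then woken with the usual flag-clear under s_lock outside the device lock (lines 1060-1068). A sketch of the scan; the qps[] batch-size check is an assumption:

	list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
		if (qp->port_num != ppd->port)
			continue;			/* waiter on another port */
		if (n == ARRAY_SIZE(qps))
			break;				/* wake at most one batch */
		if (qp->s_tx->txreq.sg_count > avail)
			break;				/* not enough descriptors yet */
		avail -= qp->s_tx->txreq.sg_count;
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);		/* dropped after the wakeup */
		qps[n++] = qp;
	}
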
1079 struct qib_qp *qp = tx->qp; in sdma_complete() local
1081 spin_lock(&qp->s_lock); in sdma_complete()
1083 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS); in sdma_complete()
1084 else if (qp->ibqp.qp_type == IB_QPT_RC) { in sdma_complete()
1090 struct qib_ibdev *dev = to_idev(qp->ibqp.device); in sdma_complete()
1094 qib_rc_send_complete(qp, hdr); in sdma_complete()
1096 if (atomic_dec_and_test(&qp->s_dma_busy)) { in sdma_complete()
1097 if (qp->state == IB_QPS_RESET) in sdma_complete()
1098 wake_up(&qp->wait_dma); in sdma_complete()
1099 else if (qp->s_flags & QIB_S_WAIT_DMA) { in sdma_complete()
1100 qp->s_flags &= ~QIB_S_WAIT_DMA; in sdma_complete()
1101 qib_schedule_send(qp); in sdma_complete()
1104 spin_unlock(&qp->s_lock); in sdma_complete()
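
In sdma_complete(), qp->s_dma_busy counts packets still owned by the SDMA engine; the last completion either wakes a QP that is being reset (wait_dma) or un-throttles a sender that stopped on QIB_S_WAIT_DMA. A sketch of just that epilogue, omitting the RC header handling above it:

	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&qp->wait_dma);		/* teardown can proceed */
		else if (qp->s_flags & QIB_S_WAIT_DMA) {
			qp->s_flags &= ~QIB_S_WAIT_DMA;	/* sender was throttled */
			qib_schedule_send(qp);
		}
	}
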
1109 static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) in wait_kmem() argument
1114 spin_lock_irqsave(&qp->s_lock, flags); in wait_kmem()
1115 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { in wait_kmem()
1117 if (list_empty(&qp->iowait)) { in wait_kmem()
1120 qp->s_flags |= QIB_S_WAIT_KMEM; in wait_kmem()
1121 list_add_tail(&qp->iowait, &dev->memwait); in wait_kmem()
1124 qp->s_flags &= ~QIB_S_BUSY; in wait_kmem()
1127 spin_unlock_irqrestore(&qp->s_lock, flags); in wait_kmem()
1132 static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, in qib_verbs_send_dma() argument
1136 struct qib_ibdev *dev = to_idev(qp->ibqp.device); in qib_verbs_send_dma()
1138 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in qib_verbs_send_dma()
1146 tx = qp->s_tx; in qib_verbs_send_dma()
1148 qp->s_tx = NULL; in qib_verbs_send_dma()
1154 tx = get_txreq(dev, qp); in qib_verbs_send_dma()
1158 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, in qib_verbs_send_dma()
1160 tx->qp = qp; in qib_verbs_send_dma()
1161 atomic_inc(&qp->refcount); in qib_verbs_send_dma()
1162 tx->wqe = qp->s_wqe; in qib_verbs_send_dma()
1163 tx->mr = qp->s_rdma_mr; in qib_verbs_send_dma()
1164 if (qp->s_rdma_mr) in qib_verbs_send_dma()
1165 qp->s_rdma_mr = NULL; in qib_verbs_send_dma()
1222 ret = wait_kmem(dev, qp); in qib_verbs_send_dma()
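
qib_verbs_send_dma() first tries to resume a request that was deferred earlier (qp->s_tx) and only then allocates a fresh descriptor; ownership of s_wqe and s_rdma_mr moves into the descriptor together with a QP reference that qib_put_txreq() releases. A sketch of that setup; the error-path details are assumptions:

	tx = qp->s_tx;
	if (tx) {
		qp->s_tx = NULL;		/* resume a deferred request */
	} else {
		tx = get_txreq(dev, qp);
		if (IS_ERR(tx))
			return PTR_ERR(tx);	/* QP already parked on txwait */
	}

	tx->qp = qp;
	atomic_inc(&qp->refcount);		/* dropped in qib_put_txreq() */
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;		/* MR now belongs to the tx request */
	/* ... build header/payload mappings; on allocation failure the
	 *     function falls back to wait_kmem(dev, qp), line 1222 ... */
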
1236 static int no_bufs_available(struct qib_qp *qp) in no_bufs_available() argument
1238 struct qib_ibdev *dev = to_idev(qp->ibqp.device); in no_bufs_available()
1249 spin_lock_irqsave(&qp->s_lock, flags); in no_bufs_available()
1250 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { in no_bufs_available()
1252 if (list_empty(&qp->iowait)) { in no_bufs_available()
1254 qp->s_flags |= QIB_S_WAIT_PIO; in no_bufs_available()
1255 list_add_tail(&qp->iowait, &dev->piowait); in no_bufs_available()
1260 qp->s_flags &= ~QIB_S_BUSY; in no_bufs_available()
1263 spin_unlock_irqrestore(&qp->s_lock, flags); in no_bufs_available()
1267 static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, in qib_verbs_send_pio() argument
1271 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); in qib_verbs_send_pio()
1272 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1; in qib_verbs_send_pio()
1282 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, in qib_verbs_send_pio()
1287 return no_bufs_available(qp); in qib_verbs_send_pio()
1347 if (qp->s_rdma_mr) { in qib_verbs_send_pio()
1348 qib_put_mr(qp->s_rdma_mr); in qib_verbs_send_pio()
1349 qp->s_rdma_mr = NULL; in qib_verbs_send_pio()
1351 if (qp->s_wqe) { in qib_verbs_send_pio()
1352 spin_lock_irqsave(&qp->s_lock, flags); in qib_verbs_send_pio()
1353 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); in qib_verbs_send_pio()
1354 spin_unlock_irqrestore(&qp->s_lock, flags); in qib_verbs_send_pio()
1355 } else if (qp->ibqp.qp_type == IB_QPT_RC) { in qib_verbs_send_pio()
1356 spin_lock_irqsave(&qp->s_lock, flags); in qib_verbs_send_pio()
1357 qib_rc_send_complete(qp, ibhdr); in qib_verbs_send_pio()
1358 spin_unlock_irqrestore(&qp->s_lock, flags); in qib_verbs_send_pio()
1374 int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, in qib_verbs_send() argument
1377 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); in qib_verbs_send()
1393 if (qp->ibqp.qp_type == IB_QPT_SMI || in qib_verbs_send()
1395 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len, in qib_verbs_send()
1398 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len, in qib_verbs_send()
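
qib_verbs_send() picks between the two paths above. Only the SMI half of the condition is visible in the listing (line 1393); the chip-capability test is an assumption. A small helper that states the decision:

/* Sketch only: SMI traffic must work before the send-DMA engine is
 * initialized, and some configurations have no send DMA at all. */
static bool qib_use_pio_sketch(struct qib_qp *qp, struct qib_devdata *dd)
{
	return qp->ibqp.qp_type == IB_QPT_SMI ||
	       !(dd->flags & QIB_HAS_SEND_DMA);	/* assumed flag test */
}
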
1508 struct qib_qp *qp; in qib_ib_piobufavail() local
1525 qp = list_entry(list->next, struct qib_qp, iowait); in qib_ib_piobufavail()
1526 list_del_init(&qp->iowait); in qib_ib_piobufavail()
1527 atomic_inc(&qp->refcount); in qib_ib_piobufavail()
1528 qps[n++] = qp; in qib_ib_piobufavail()
1535 qp = qps[i]; in qib_ib_piobufavail()
1537 spin_lock_irqsave(&qp->s_lock, flags); in qib_ib_piobufavail()
1538 if (qp->s_flags & QIB_S_WAIT_PIO) { in qib_ib_piobufavail()
1539 qp->s_flags &= ~QIB_S_WAIT_PIO; in qib_ib_piobufavail()
1540 qib_schedule_send(qp); in qib_ib_piobufavail()
1542 spin_unlock_irqrestore(&qp->s_lock, flags); in qib_ib_piobufavail()
1545 if (atomic_dec_and_test(&qp->refcount)) in qib_ib_piobufavail()
1546 wake_up(&qp->wait); in qib_ib_piobufavail()
2325 void qib_schedule_send(struct qib_qp *qp) in qib_schedule_send() argument
2327 if (qib_send_ok(qp)) { in qib_schedule_send()
2329 to_iport(qp->ibqp.device, qp->port_num); in qib_schedule_send()
2332 queue_work(ppd->qib_wq, &qp->s_work); in qib_schedule_send()
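
qib_schedule_send() is the single funnel every wakeup path above ends in: it re-queues the QP's send work on the per-port workqueue, and qib_send_ok() filters out QPs that are not in a sendable state. The binding of s_work to qib_do_send() at QP-creation time and the ppd_from_ibp() helper are assumptions in this sketch:

void qib_schedule_send(struct qib_qp *qp)
{
	if (qib_send_ok(qp)) {			/* sendable state, not already busy */
		struct qib_ibport *ibp =
			to_iport(qp->ibqp.device, qp->port_num);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);	/* assumed helper */

		/* s_work is assumed to have been bound once with
		 * INIT_WORK(&qp->s_work, qib_do_send) at QP init. */
		queue_work(ppd->qib_wq, &qp->s_work);
	}
}
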