/drivers/infiniband/hw/amso1100/ |
D | c2_qp.c |
    420   struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);   in c2_alloc_qp() local
    460   wr.rq_cq_handle = recv_cq->adapter_handle;   in c2_alloc_qp()
    571   static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)   in c2_lock_cqs() argument
    573   if (send_cq == recv_cq)   in c2_lock_cqs()
    575   else if (send_cq > recv_cq) {   in c2_lock_cqs()
    577   spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);   in c2_lock_cqs()
    579   spin_lock_irq(&recv_cq->lock);   in c2_lock_cqs()
    584   static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)   in c2_unlock_cqs() argument
    586   if (send_cq == recv_cq)   in c2_unlock_cqs()
    588   else if (send_cq > recv_cq) {   in c2_unlock_cqs()
    [all …]
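
The c2_lock_cqs()/c2_unlock_cqs() hits show the usual deadlock-avoidance idiom for a QP whose send and receive CQs must be locked together: take a single lock when both pointers name the same CQ, otherwise always lock in a stable order (amso1100 happens to take the higher pointer first; mthca and mlx4 below take the lower CQ number first) and tag the inner lock with SINGLE_DEPTH_NESTING so lockdep accepts the nesting. A minimal sketch of the pattern; struct my_cq and the my_ names are invented for illustration, not any driver's API:

#include <linux/spinlock.h>

struct my_cq {                          /* hypothetical: only the lock matters here */
	spinlock_t lock;
};

static void my_lock_cqs(struct my_cq *send_cq, struct my_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);  /* same CQ: one lock is enough */
	} else if (send_cq < recv_cq) {         /* any stable order works; lower address first here */
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void my_unlock_cqs(struct my_cq *send_cq, struct my_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq < recv_cq) {
		spin_unlock(&recv_cq->lock);    /* release the inner lock first */
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

Because every caller picks the same order, two QPs sharing the same pair of CQs can never acquire them in opposite orders, which is what rules out the ABBA deadlock.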
|
/drivers/infiniband/ulp/ipoib/ |
D | ipoib_verbs.c |
    168   priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);   in ipoib_transport_dev_init()
    169   if (IS_ERR(priv->recv_cq)) {   in ipoib_transport_dev_init()
    181   if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))   in ipoib_transport_dev_init()
    185   init_attr.recv_cq = priv->recv_cq;   in ipoib_transport_dev_init()
    232   ib_destroy_cq(priv->recv_cq);   in ipoib_transport_dev_init()
    258   if (ib_destroy_cq(priv->recv_cq))   in ipoib_transport_dev_cleanup()
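
The ipoib_transport_dev_init() hits trace the canonical bring-up order: create the CQ, arm it with ib_req_notify_cq() so the first completion raises an event, point the QP's init attributes at it, and destroy it on every failure path. A condensed sketch of that sequence against the verbs API of this era; the my_ names are invented and error handling is trimmed to the recv-CQ parts:

#include <rdma/ib_verbs.h>

/* hypothetical completion handler: runs in interrupt context when an
 * armed CQ gets a new completion; typically schedules polling */
static void my_comp_handler(struct ib_cq *cq, void *ctx)
{
}

static int my_transport_init(struct ib_device *ca, struct ib_pd *pd,
			     void *ctx, int size, struct ib_qp **out_qp)
{
	struct ib_qp_init_attr init_attr = {
		.cap = {
			.max_send_wr  = size,
			.max_recv_wr  = size,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_UD,
	};
	struct ib_cq *recv_cq;
	struct ib_qp *qp;
	int ret;

	recv_cq = ib_create_cq(ca, my_comp_handler, NULL, ctx, size, 0);
	if (IS_ERR(recv_cq))
		return PTR_ERR(recv_cq);	/* ERR_PTR, not NULL, on failure */

	/* arm the CQ so the first completion raises an event */
	ret = ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
	if (ret)
		goto err_cq;

	init_attr.send_cq = recv_cq;	/* one CQ may serve both queues */
	init_attr.recv_cq = recv_cq;

	qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_cq;
	}

	*out_qp = qp;
	return 0;

err_cq:
	ib_destroy_cq(recv_cq);
	return ret;
}

Note that ib_create_cq() reports failure through ERR_PTR rather than NULL, hence the IS_ERR() test in the hits above.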
|
D | ipoib_ethtool.c |
    81   ret = ib_modify_cq(priv->recv_cq, coal->rx_max_coalesced_frames,   in ipoib_set_coalesce()
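
This single hit shows interrupt moderation being pushed down to the CQ: ib_modify_cq(cq, cq_count, cq_period) asks the device to raise a completion event only after cq_count completions or cq_period microseconds, whichever comes first. A sketch of wiring the ethtool RX knobs through; my_set_coalesce() is hypothetical, and the range check reflects ib_modify_cq() taking u16 parameters:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <rdma/ib_verbs.h>

static int my_set_coalesce(struct ib_cq *recv_cq,
			   struct ethtool_coalesce *coal)
{
	if (coal->rx_coalesce_usecs > 0xffff ||
	    coal->rx_max_coalesced_frames > 0xffff)
		return -EINVAL;		/* ib_modify_cq() takes u16 values */

	return ib_modify_cq(recv_cq, coal->rx_max_coalesced_frames,
			    coal->rx_coalesce_usecs);
}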
|
D | ipoib_ib.c |
    426   n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);   in ipoib_poll()
    450   if (unlikely(ib_req_notify_cq(priv->recv_cq,   in ipoib_poll()
    773   n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);   in ipoib_drain_cq()
    887   ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);   in ipoib_ib_dev_stop()
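
The ipoib_poll() hits are the NAPI-style receive loop: batch completions out of the CQ with ib_poll_cq(), and when the CQ runs dry, re-arm it with IB_CQ_REPORT_MISSED_EVENTS, whose positive return value reports that a completion slipped in before the notify took effect, closing the classic poll/arm race. A minimal sketch, assuming a hypothetical my_poll() and batch size:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

#define MY_NUM_WC 8	/* hypothetical polling batch size */

/* drain up to @budget completions, then re-arm the CQ */
static int my_poll(struct ib_cq *recv_cq, int budget)
{
	struct ib_wc wc[MY_NUM_WC];
	int done = 0;

	while (done < budget) {
		int i, max = min(budget - done, MY_NUM_WC);
		int n = ib_poll_cq(recv_cq, max, wc);

		if (n < 0)
			break;		/* poll error: give up */

		for (i = 0; i < n; i++, done++)
			;		/* handle wc[i]: repost buffer, pass packet up */

		if (n < max) {
			/*
			 * CQ drained: re-arm, and ask whether a completion
			 * slipped in before the notify took effect.  A
			 * return value > 0 means yes, so poll once more
			 * instead of going idle and missing it.
			 */
			if (ib_req_notify_cq(recv_cq,
					     IB_CQ_NEXT_COMP |
					     IB_CQ_REPORT_MISSED_EVENTS) > 0)
				continue;
			break;		/* truly empty */
		}
	}
	return done;
}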
|
D | ipoib_cm.c |
    252   .send_cq = priv->recv_cq, /* For drain WR */   in ipoib_cm_create_rx_qp()
    253   .recv_cq = priv->recv_cq,   in ipoib_cm_create_rx_qp()
    1016   .send_cq = priv->recv_cq,   in ipoib_cm_create_tx_qp()
    1017   .recv_cq = priv->recv_cq,   in ipoib_cm_create_tx_qp()
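
The "For drain WR" comment in ipoib_cm_create_rx_qp() explains an easy-to-miss detail: even a receive-only QP needs a send CQ, because draining it means posting one send WR and waiting for its completion, so the receive CQ is reused for that purpose. A sketch of such a QP's init attributes; my_create_rx_qp() is hypothetical and the real function sets additional fields:

#include <rdma/ib_verbs.h>

/* RX-side RC QP that only ever posts a single drain send WR */
static struct ib_qp *my_create_rx_qp(struct ib_pd *pd, struct ib_cq *recv_cq,
				     struct ib_srq *srq)
{
	struct ib_qp_init_attr attr = {
		.send_cq = recv_cq,	/* drain WR completes on the recv CQ */
		.recv_cq = recv_cq,
		.srq = srq,
		.cap.max_send_wr = 1,	/* room for exactly one drain WR */
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
	};

	return ib_create_qp(pd, &attr);
}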
|
D | ipoib.h |
    308   struct ib_cq *recv_cq;   member
|
/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
    764   qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);   in __mthca_modify_qp()
    828   mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,   in __mthca_modify_qp()
    830   if (qp->ibqp.send_cq != qp->ibqp.recv_cq)   in __mthca_modify_qp()
    1152   struct mthca_cq *recv_cq,   in mthca_alloc_qp_common() argument
    1280   struct mthca_cq *recv_cq,   in mthca_alloc_qp() argument
    1306   err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,   in mthca_alloc_qp()
    1321   static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)   in mthca_lock_cqs() argument
    1323   if (send_cq == recv_cq)   in mthca_lock_cqs()
    1325   else if (send_cq->cqn < recv_cq->cqn) {   in mthca_lock_cqs()
    1327   spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);   in mthca_lock_cqs()
    [all …]
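
The __mthca_modify_qp() hits at lines 828-830 show a cleanup that pairs with the shared-CQ case: when a QP is torn back down, its stale CQEs must be purged from the receive CQ, and from the send CQ only if that is a different CQ, or the same queue would be scrubbed twice. A sketch of the shape of that logic, with the driver's CQE-purge routine abstracted behind a hypothetical callback:

#include <rdma/ib_verbs.h>

/* @cq_clean stands in for a driver routine such as mthca_cq_clean() */
static void my_clean_qp_cqes(struct ib_qp *qp,
			     void (*cq_clean)(struct ib_cq *cq, u32 qpn))
{
	cq_clean(qp->recv_cq, qp->qp_num);
	if (qp->send_cq != qp->recv_cq)
		cq_clean(qp->send_cq, qp->qp_num);
}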
|
D | mthca_dev.h |
    544   struct mthca_cq *recv_cq,
    552   struct mthca_cq *recv_cq,
|
D | mthca_provider.c |
    594   to_mcq(init_attr->recv_cq),   in mthca_create_qp()
    629   to_mcq(init_attr->recv_cq),   in mthca_create_qp()
|
/drivers/infiniband/hw/mlx4/ |
D | qp.c |
    617   static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)   in mlx4_ib_lock_cqs() argument
    619   if (send_cq == recv_cq)   in mlx4_ib_lock_cqs()
    621   else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {   in mlx4_ib_lock_cqs()
    623   spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);   in mlx4_ib_lock_cqs()
    625   spin_lock_irq(&recv_cq->lock);   in mlx4_ib_lock_cqs()
    630   static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)   in mlx4_ib_unlock_cqs() argument
    632   if (send_cq == recv_cq)   in mlx4_ib_unlock_cqs()
    634   else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {   in mlx4_ib_unlock_cqs()
    635   spin_unlock(&recv_cq->lock);   in mlx4_ib_unlock_cqs()
    639   spin_unlock_irq(&recv_cq->lock);   in mlx4_ib_unlock_cqs()
    [all …]
|
/drivers/infiniband/hw/ehca/ |
D | ehca_qp.c |
    412   list = &qp->recv_cq->rqp_err_list;   in ehca_add_to_err_list()
    630   if (init_attr->recv_cq)   in internal_create_qp()
    631   my_qp->recv_cq =   in internal_create_qp()
    632   container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);   in internal_create_qp()
    687   if (my_qp->recv_cq)   in internal_create_qp()
    688   parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;   in internal_create_qp()
    827   my_qp->ib_qp.recv_cq = init_attr->recv_cq;   in internal_create_qp()
    1189   spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);   in check_for_left_cqes()
    1192   spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);   in check_for_left_cqes()
    1202   spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);   in check_for_left_cqes()
    [all …]
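
The internal_create_qp() hits at lines 630-632 show how a driver gets back to its private CQ structure from the generic ib_cq the consumer passed in: the ib_cq is embedded in the driver struct and recovered with container_of(). (The cxgb3 hits further down cast the pointer directly instead, which is only safe when the embedded ib_cq happens to be the first member.) A minimal sketch with a hypothetical my_hw_cq:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

struct my_hw_cq {		/* hypothetical driver-private CQ */
	spinlock_t lock;
	struct ib_cq ib_cq;	/* generic CQ embedded in the private one */
};

/* recover the private CQ from the ib_cq pointer a consumer handed in */
static struct my_hw_cq *to_my_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct my_hw_cq, ib_cq);
}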
|
D | ehca_classes.h |
    208   struct ehca_cq *recv_cq;   member
|
D | ehca_main.c |
    555   qp_init_attr.recv_cq = ibcq;   in ehca_create_aqp1()
|
/drivers/infiniband/core/ |
D | verbs.c |
    294   qp->recv_cq = qp_init_attr->recv_cq;   in ib_create_qp()
    302   atomic_inc(&qp_init_attr->recv_cq->usecnt);   in ib_create_qp()
    590   rcq = qp->recv_cq;   in ib_destroy_qp()
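
The ib_create_qp()/ib_destroy_qp() hits show the core's lifetime rule: every QP takes a use count on its send and receive CQs at create time and drops it at destroy time, so a CQ cannot be torn down while a QP still references it (the uverbs_cmd.c hits below do the same bookkeeping for userspace-created QPs). A condensed sketch; the my_ helpers are invented, as in the kernel this logic is inlined into ib_create_qp() and ib_destroy_qp():

#include <rdma/ib_verbs.h>

/* record the caller's CQs on the QP and pin each with a use count */
static void my_qp_grab_cqs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	qp->send_cq = attr->send_cq;
	qp->recv_cq = attr->recv_cq;
	atomic_inc(&attr->send_cq->usecnt);
	atomic_inc(&attr->recv_cq->usecnt);
}

/* counterpart on destroy: drop the references once the driver
 * has destroyed the QP */
static void my_qp_put_cqs(struct ib_cq *scq, struct ib_cq *rcq)
{
	atomic_dec(&scq->usecnt);
	atomic_dec(&rcq->usecnt);
}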
|
D | uverbs_cmd.c |
    1062   attr.recv_cq = rcq;   in ib_uverbs_create_qp()
    1087   qp->recv_cq = attr.recv_cq;   in ib_uverbs_create_qp()
    1095   atomic_inc(&attr.recv_cq->usecnt);   in ib_uverbs_create_qp()
|
/drivers/infiniband/hw/ipath/ |
D | ipath_qp.c |
    404   ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);   in ipath_error_qp()
    427   ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);   in ipath_error_qp()
    657   init_attr->recv_cq = qp->ibqp.recv_cq;   in ipath_query_qp()
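
The ipath_error_qp() hits illustrate what a software-verbs driver does when a QP enters the error state: each outstanding receive WR is completed into the receive CQ as a flushed-in-error work completion (the ipath_ud.c, ipath_ruc.c, ipath_uc.c and ipath_rc.c hits below feed ordinary receives through the same ipath_cq_enter() entry point). A sketch of flushing one receive WR; the cq_enter callback stands in for that driver-internal function, whose signature is assumed from the call sites:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

static void my_flush_one_recv(struct ib_qp *qp, u64 wr_id,
			      void (*cq_enter)(struct ib_cq *cq,
					       struct ib_wc *wc,
					       int solicited))
{
	struct ib_wc wc;

	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wr_id;
	wc.status = IB_WC_WR_FLUSH_ERR;	/* WR flushed, not executed */
	wc.opcode = IB_WC_RECV;
	wc.qp = qp;

	cq_enter(qp->recv_cq, &wc, 1);	/* 1: signal waiting consumers */
}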
|
D | ipath_ud.c |
    226   ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,   in ipath_ud_loopback()
    574   ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,   in ipath_ud_rcv()
|
D | ipath_ruc.c |
    150   ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);   in ipath_init_sge()
    444   ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,   in ipath_ruc_loopback()
|
D | ipath_uc.c |
    420   ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,   in ipath_uc_rcv()
|
D | ipath_rc.c |
    1745   ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,   in ipath_rc_rcv()
|
/drivers/net/ehea/ |
D | ehea_main.c |
    200   arr[i++].fwh = pr->recv_cq->fw_handle;   in ehea_update_firmware_handles()
    834   ehea_reset_cq_ep(pr->recv_cq);   in ehea_poll()
    836   ehea_reset_cq_n1(pr->recv_cq);   in ehea_poll()
    1414   pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,   in ehea_init_port_res()
    1417   if (!pr->recv_cq) {   in ehea_init_port_res()
    1433   pr->recv_cq->attr.act_nr_of_cqes);   in ehea_init_port_res()
    1458   init_attr->recv_cq_handle = pr->recv_cq->fw_handle;   in ehea_init_port_res()
    1517   ehea_destroy_cq(pr->recv_cq);   in ehea_init_port_res()
    1531   ehea_destroy_cq(pr->recv_cq);   in ehea_clean_portres()
|
D | ehea.h |
    385   struct ehea_cq *recv_cq;   member
|
/drivers/infiniband/ulp/iser/ |
D | iser_verbs.c |
    173   init_attr.recv_cq = device->cq;   in iser_create_ib_conn_res()
|
/drivers/infiniband/hw/cxgb3/ |
D | iwch_provider.c |
    884   rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);   in iwch_create_qp()
    940   qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;   in iwch_create_qp()
|
/drivers/infiniband/hw/nes/ |
D | nes_verbs.c |
    1354   nescq = to_nescq(init_attr->recv_cq);   in nes_create_qp()
    2763   init_attr->recv_cq = nesqp->ibqp.recv_cq;   in nes_query_qp()
|