Searched refs: send_cq (Results 1 – 25 of 39), sorted by relevance

/drivers/infiniband/hw/amso1100/
c2_qp.c
422 struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq); in c2_alloc_qp() local
462 wr.sq_cq_handle = send_cq->adapter_handle; in c2_alloc_qp()
574 static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq) in c2_lock_cqs() argument
576 if (send_cq == recv_cq) in c2_lock_cqs()
577 spin_lock_irq(&send_cq->lock); in c2_lock_cqs()
578 else if (send_cq > recv_cq) { in c2_lock_cqs()
579 spin_lock_irq(&send_cq->lock); in c2_lock_cqs()
583 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); in c2_lock_cqs()
587 static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq) in c2_unlock_cqs() argument
589 if (send_cq == recv_cq) in c2_unlock_cqs()
[all …]
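
The c2_lock_cqs()/c2_unlock_cqs() hits above are one instance of a pattern that recurs in the mthca, mlx4 and mlx5 results further down: when a QP's send and receive CQs are distinct objects, the two locks are always taken in one fixed order (by pointer here, by CQ number in mlx4/mlx5) so that concurrent callers locking the same pair cannot deadlock. Below is a minimal user-space sketch of the same idea, using pthread mutexes instead of kernel spinlocks; the demo_* struct and function names are illustrative and not taken from any driver.

    /* Lock a pair of CQs in a fixed order (lowest address first) so two
     * threads working on the same pair can never deadlock. */
    #include <pthread.h>

    struct demo_cq {
        pthread_mutex_t lock;
        /* ... completion queue state would live here ... */
    };

    static void demo_lock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
    {
        if (send_cq == recv_cq) {
            pthread_mutex_lock(&send_cq->lock);     /* same CQ: one lock */
        } else if (send_cq < recv_cq) {
            pthread_mutex_lock(&send_cq->lock);     /* fixed rule: lower */
            pthread_mutex_lock(&recv_cq->lock);     /* address first     */
        } else {
            pthread_mutex_lock(&recv_cq->lock);
            pthread_mutex_lock(&send_cq->lock);
        }
    }

    static void demo_unlock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
    {
        if (send_cq == recv_cq) {
            pthread_mutex_unlock(&send_cq->lock);
            return;
        }
        /* Unlock order does not matter for deadlock avoidance. */
        pthread_mutex_unlock(&send_cq->lock);
        pthread_mutex_unlock(&recv_cq->lock);
    }
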
/drivers/infiniband/ulp/ipoib/
ipoib_verbs.c
176 priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL, in ipoib_transport_dev_init()
178 if (IS_ERR(priv->send_cq)) { in ipoib_transport_dev_init()
186 init_attr.send_cq = priv->send_cq; in ipoib_transport_dev_init()
234 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_init()
260 if (ib_destroy_cq(priv->send_cq)) in ipoib_transport_dev_cleanup()
ipoib_cm.c
254 .send_cq = priv->recv_cq, /* For drain WR */ in ipoib_cm_create_rx_qp()
765 rc = ib_req_notify_cq(priv->send_cq, in ipoib_cm_send()
770 ipoib_send_comp_handler(priv->send_cq, dev); in ipoib_cm_send()
1026 .send_cq = priv->recv_cq, in ipoib_cm_create_tx_qp()
ipoib_ib.c
420 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in poll_tx()
598 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) in ipoib_send()
ipoib.h
335 struct ib_cq *send_cq; member
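
The ipoib_ib.c hits show the usual way a dedicated send CQ is serviced: completed work requests are drained in batches with ib_poll_cq(), and the CQ is then re-armed with ib_req_notify_cq(IB_CQ_NEXT_COMP). The ipoib_cm.c hit above re-runs the completion handler after re-arming for the same reason (completions that slipped in between). A hedged kernel-style sketch of that loop follows; the helper name and batch size are invented, and per-completion error handling is omitted.

    #include <rdma/ib_verbs.h>

    #define DEMO_SEND_BATCH 16      /* arbitrary batch size for this sketch */

    /* Drain completed send work requests from a send CQ, then re-arm it so
     * the completion handler fires again on the next completion. */
    static void demo_drain_send_cq(struct ib_cq *send_cq)
    {
        struct ib_wc wc[DEMO_SEND_BATCH];
        int n, i;

        do {
            n = ib_poll_cq(send_cq, DEMO_SEND_BATCH, wc);
            for (i = 0; i < n; i++) {
                /* wc[i].wr_id identifies the posted send;
                 * wc[i].status is IB_WC_SUCCESS when it completed cleanly. */
            }
        } while (n == DEMO_SEND_BATCH);

        /* Request an interrupt for the next completion; a final poll after
         * re-arming would close the race with completions that arrived
         * while the CQ was disarmed. */
        ib_req_notify_cq(send_cq, IB_CQ_NEXT_COMP);
    }
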
/drivers/infiniband/hw/mthca/
mthca_qp.c
719 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); in __mthca_modify_qp()
821 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) in __mthca_modify_qp()
822 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); in __mthca_modify_qp()
1143 struct mthca_cq *send_cq, in mthca_alloc_qp_common() argument
1271 struct mthca_cq *send_cq, in mthca_alloc_qp() argument
1298 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, in mthca_alloc_qp()
1313 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) in mthca_lock_cqs() argument
1314 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mthca_lock_cqs()
1316 if (send_cq == recv_cq) { in mthca_lock_cqs()
1317 spin_lock_irq(&send_cq->lock); in mthca_lock_cqs()
[all …]
mthca_dev.h
545 struct mthca_cq *send_cq,
553 struct mthca_cq *send_cq,
mthca_provider.c
566 to_mcq(init_attr->send_cq), in mthca_create_qp()
601 to_mcq(init_attr->send_cq), in mthca_create_qp()
/drivers/infiniband/hw/mlx5/
qp.c
910 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); in create_qp_common()
960 if (init_attr->send_cq) in create_qp_common()
961 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); in create_qp_common()
995 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) in mlx5_ib_lock_cqs() argument
996 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mlx5_ib_lock_cqs()
998 if (send_cq) { in mlx5_ib_lock_cqs()
1000 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx5_ib_lock_cqs()
1001 spin_lock_irq(&send_cq->lock); in mlx5_ib_lock_cqs()
1004 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { in mlx5_ib_lock_cqs()
1005 spin_lock_irq(&send_cq->lock); in mlx5_ib_lock_cqs()
[all …]
main.c
1085 init_attr->send_cq = cq; in create_umr_res()
1102 qp->send_cq = init_attr->send_cq; in create_umr_res()
/drivers/infiniband/hw/mlx4/
qp.c
890 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) in mlx4_ib_lock_cqs() argument
891 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mlx4_ib_lock_cqs()
893 if (send_cq == recv_cq) { in mlx4_ib_lock_cqs()
894 spin_lock_irq(&send_cq->lock); in mlx4_ib_lock_cqs()
896 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx4_ib_lock_cqs()
897 spin_lock_irq(&send_cq->lock); in mlx4_ib_lock_cqs()
901 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); in mlx4_ib_lock_cqs()
905 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) in mlx4_ib_unlock_cqs() argument
906 __releases(&send_cq->lock) __releases(&recv_cq->lock) in mlx4_ib_unlock_cqs()
908 if (send_cq == recv_cq) { in mlx4_ib_unlock_cqs()
[all …]
/drivers/infiniband/hw/ehca/
ehca_qp.c
409 list = &qp->send_cq->sqp_err_list; in ehca_add_to_err_list()
635 if (init_attr->send_cq) in internal_create_qp()
636 my_qp->send_cq = in internal_create_qp()
637 container_of(init_attr->send_cq, struct ehca_cq, ib_cq); in internal_create_qp()
683 if (my_qp->send_cq) in internal_create_qp()
684 parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle; in internal_create_qp()
829 my_qp->ib_qp.send_cq = init_attr->send_cq; in internal_create_qp()
878 if (my_qp->send_cq) { in internal_create_qp()
879 ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp); in internal_create_qp()
916 ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num); in internal_create_qp()
[all …]
ehca_classes.h
207 struct ehca_cq *send_cq; member
/drivers/net/ethernet/ibm/ehea/
ehea_main.c
209 arr[i++].fwh = pr->send_cq->fw_handle; in ehea_update_firmware_handles()
816 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes() local
825 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
827 ehea_inc_cq(send_cq); in ehea_proc_cqes()
867 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
870 ehea_update_feca(send_cq, cqe_counter); in ehea_proc_cqes()
905 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
907 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
910 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
1501 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, in ehea_init_port_res()
[all …]
ehea.h
364 struct ehea_cq *send_cq; member
/drivers/infiniband/ulp/srp/
ib_srp.c
450 struct ib_cq *recv_cq, *send_cq; in srp_create_target_ib() local
468 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target, in srp_create_target_ib()
470 if (IS_ERR(send_cq)) { in srp_create_target_ib()
471 ret = PTR_ERR(send_cq); in srp_create_target_ib()
484 init_attr->send_cq = send_cq; in srp_create_target_ib()
525 if (target->send_cq) in srp_create_target_ib()
526 ib_destroy_cq(target->send_cq); in srp_create_target_ib()
530 target->send_cq = send_cq; in srp_create_target_ib()
539 ib_destroy_cq(send_cq); in srp_create_target_ib()
566 ib_destroy_cq(target->send_cq); in srp_free_target_ib()
[all …]
ib_srp.h
140 struct ib_cq *send_cq ____cacheline_aligned_in_smp;
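
The srp_create_target_ib() hits show the full life cycle of a send CQ: it is created next to the receive CQ, wired into ib_qp_init_attr before ib_create_qp(), stored into target->send_cq only once everything has succeeded, and destroyed again on the error path and in srp_free_target_ib(). Below is a hedged sketch of that shape against the verbs API of the kernel generation these drivers come from, where ib_create_cq() still takes the handler, context, CQE count and completion vector directly; all demo_* names and the capacity numbers are invented.

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static void demo_send_done(struct ib_cq *cq, void *ctx)
    {
        /* send completion handling (see the poll sketch earlier) */
    }

    static struct ib_qp *demo_create_qp(struct ib_device *dev, struct ib_pd *pd,
                                        struct ib_cq *recv_cq, void *ctx)
    {
        struct ib_qp_init_attr init_attr = { };
        struct ib_cq *send_cq;
        struct ib_qp *qp;

        send_cq = ib_create_cq(dev, demo_send_done, NULL, ctx,
                               /* cqe */ 128, /* comp_vector */ 0);
        if (IS_ERR(send_cq))
            return ERR_CAST(send_cq);

        init_attr.send_cq = send_cq;        /* send completions land here    */
        init_attr.recv_cq = recv_cq;        /* may be the same CQ or another */
        init_attr.qp_type = IB_QPT_RC;
        init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        init_attr.cap.max_send_wr  = 64;
        init_attr.cap.max_recv_wr  = 64;
        init_attr.cap.max_send_sge = 1;
        init_attr.cap.max_recv_sge = 1;

        qp = ib_create_qp(pd, &init_attr);
        if (IS_ERR(qp))
            ib_destroy_cq(send_cq);         /* undo in reverse order on failure */
        return qp;
    }
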
/drivers/infiniband/core/
verbs.c
462 qp->send_cq = qp->recv_cq = NULL; in ib_create_qp()
490 qp->send_cq = qp_init_attr->send_cq; in ib_create_qp()
494 atomic_inc(&qp_init_attr->send_cq->usecnt); in ib_create_qp()
989 scq = qp->send_cq; in ib_destroy_qp()
uverbs_cmd.c
1661 attr.send_cq = scq; in ib_uverbs_create_qp()
1693 qp->send_cq = attr.send_cq; in ib_uverbs_create_qp()
1701 atomic_inc(&attr.send_cq->usecnt); in ib_uverbs_create_qp()
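
The core verbs.c and uverbs_cmd.c hits show the reference counting that ties a QP to its send CQ: both create paths bump the CQ's usecnt, and ib_destroy_qp() fetches qp->send_cq so the count can be dropped once the QP is gone; a CQ whose usecnt is still non-zero can then be refused teardown. A stripped-down sketch of that bookkeeping follows, using simplified demo_* structs rather than the real ib_qp/ib_cq definitions.

    #include <linux/atomic.h>
    #include <linux/errno.h>

    struct demo_cq {
        atomic_t usecnt;        /* number of QPs (and other users) of this CQ */
    };

    struct demo_qp {
        struct demo_cq *send_cq;
        struct demo_cq *recv_cq;
    };

    static void demo_qp_attach_cqs(struct demo_qp *qp,
                                   struct demo_cq *send_cq, struct demo_cq *recv_cq)
    {
        qp->send_cq = send_cq;
        qp->recv_cq = recv_cq;
        atomic_inc(&send_cq->usecnt);
        atomic_inc(&recv_cq->usecnt);
    }

    static void demo_qp_detach_cqs(struct demo_qp *qp)
    {
        atomic_dec(&qp->send_cq->usecnt);
        atomic_dec(&qp->recv_cq->usecnt);
    }

    static int demo_destroy_cq(struct demo_cq *cq)
    {
        if (atomic_read(&cq->usecnt))
            return -EBUSY;      /* some QP still points at this CQ */
        /* actual teardown would go here */
        return 0;
    }
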
/drivers/infiniband/hw/ipath/
ipath_qp.c
658 init_attr->send_cq = qp->ibqp.send_cq; in ipath_query_qp()
ipath_ruc.c
720 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, in ipath_send_complete()
/drivers/infiniband/hw/qib/
qib_qp.c
892 init_attr->send_cq = qp->ibqp.send_cq; in qib_query_qp()
/drivers/infiniband/hw/cxgb4/
qp.c
1169 schp = to_c4iw_cq(qhp->ibqp.send_cq); in flush_qp()
1629 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); in c4iw_create_qp()
1680 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; in c4iw_create_qp()
/drivers/infiniband/hw/usnic/
usnic_ib_verbs.c
494 cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2; in usnic_ib_create_qp()
/drivers/infiniband/hw/cxgb3/
iwch_provider.c
906 schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid); in iwch_create_qp()
962 qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid; in iwch_create_qp()
