
Searched refs:srq (Results 1 – 25 of 56) sorted by relevance


/drivers/infiniband/hw/mthca/
mthca_srq.c
72 static void *get_wqe(struct mthca_srq *srq, int n) in get_wqe() argument
74 if (srq->is_direct) in get_wqe()
75 return srq->queue.direct.buf + (n << srq->wqe_shift); in get_wqe()
77 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + in get_wqe()
78 ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); in get_wqe()
97 struct mthca_srq *srq, in mthca_tavor_init_srq_context() argument
102 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); in mthca_tavor_init_srq_context()
104 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_tavor_init_srq_context()
115 struct mthca_srq *srq, in mthca_arbel_init_srq_context() argument
126 max = srq->max; in mthca_arbel_init_srq_context()
[all …]
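
The get_wqe() lines above capture mthca's SRQ buffer layout: a WQE's byte offset is its index shifted left by wqe_shift, and when the queue is not one contiguous allocation that offset is split into a page index and an in-page offset. A minimal, self-contained sketch of that arithmetic, using simplified stand-in types rather than the driver's real structures:

/*
 * Model of the lookup shown in mthca_srq.c above. All names here are
 * simplified stand-ins, not the driver's structures.
 */
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct srq_page { void *buf; };

struct srq_model {
	int is_direct;               /* single contiguous buffer?           */
	void *direct_buf;            /* used when is_direct != 0            */
	struct srq_page *page_list;  /* used for the page-list layout       */
	int wqe_shift;               /* log2 of the per-WQE stride in bytes */
};

static void *get_wqe_model(const struct srq_model *srq, int n)
{
	size_t off = (size_t)n << srq->wqe_shift;

	if (srq->is_direct)
		return (char *)srq->direct_buf + off;

	/* Page-list layout: high bits pick the page, low bits the offset. */
	return (char *)srq->page_list[off >> PAGE_SHIFT].buf +
	       (off & (PAGE_SIZE - 1));
}

For example, with wqe_shift = 6 (64-byte WQEs), entry 70 sits at byte offset 4480, which the page-list branch resolves to page 1, offset 384.
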
mthca_provider.c
440 struct mthca_srq *srq; in mthca_create_srq() local
446 srq = kmalloc(sizeof *srq, GFP_KERNEL); in mthca_create_srq()
447 if (!srq) in mthca_create_srq()
465 srq->mr.ibmr.lkey = ucmd.lkey; in mthca_create_srq()
466 srq->db_index = ucmd.db_index; in mthca_create_srq()
470 &init_attr->attr, srq); in mthca_create_srq()
479 if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { in mthca_create_srq()
480 mthca_free_srq(to_mdev(pd->device), srq); in mthca_create_srq()
485 return &srq->ibsrq; in mthca_create_srq()
488 kfree(srq); in mthca_create_srq()
[all …]
mthca_dev.h
249 struct mthca_array srq; member
507 struct mthca_srq *srq);
513 struct ib_srq_attr *attr, struct mthca_srq *srq);
514 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
517 int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
521 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
522 int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
524 int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
mthca_cq.c
279 struct mthca_srq *srq) in mthca_cq_clean() argument
311 if (srq && is_recv_cqe(cqe)) in mthca_cq_clean()
312 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); in mthca_cq_clean()
544 } else if ((*cur_qp)->ibqp.srq) { in mthca_poll_one()
545 struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq); in mthca_poll_one() local
548 wqe_index = wqe >> srq->wqe_shift; in mthca_poll_one()
549 entry->wr_id = srq->wrid[wqe_index]; in mthca_poll_one()
550 mthca_free_srq_wqe(srq, wqe); in mthca_poll_one()
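
The mthca_cq.c lines above (mirrored by the mlx4 and mlx5 cq.c fragments below) show the completion-side counterpart: the CQE carries the receive WQE's address, the driver shifts it down by wqe_shift to recover the index, returns the wr_id the consumer stored when posting, and hands the WQE back to the SRQ. A hedged sketch of that pattern with stand-in types:

#include <stdint.h>

/* Stand-ins for the driver structures; only the fields this pattern needs. */
struct srq_model {
	int wqe_shift;       /* log2 of the per-WQE stride           */
	uint64_t *wrid;      /* wr_id saved when each WQE was posted */
};

struct wc_model { uint64_t wr_id; };

/* Placeholder for mthca_free_srq_wqe()/mlx4_ib_free_srq_wqe(): returns the
 * WQE slot to the SRQ free list so it can be reposted. */
static void free_srq_wqe_model(struct srq_model *srq, uint32_t wqe_addr)
{
	(void)srq;
	(void)wqe_addr;
}

static void complete_srq_recv(struct srq_model *srq, uint32_t wqe_addr,
			      struct wc_model *wc)
{
	uint32_t wqe_index = wqe_addr >> srq->wqe_shift;

	wc->wr_id = srq->wrid[wqe_index]; /* hand the consumer back its cookie */
	free_srq_wqe_model(srq, wqe_addr);
}
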
/drivers/infiniband/hw/mlx4/
srq.c
41 static void *get_wqe(struct mlx4_ib_srq *srq, int n) in get_wqe() argument
43 return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); in get_wqe()
46 static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) in mlx4_ib_srq_event() argument
49 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; in mlx4_ib_srq_event()
53 event.element.srq = ibsrq; in mlx4_ib_srq_event()
63 "on SRQ %06x\n", type, srq->srqn); in mlx4_ib_srq_event()
76 struct mlx4_ib_srq *srq; in mlx4_ib_create_srq() local
91 srq = kmalloc(sizeof *srq, GFP_KERNEL); in mlx4_ib_create_srq()
92 if (!srq) in mlx4_ib_create_srq()
95 mutex_init(&srq->mutex); in mlx4_ib_create_srq()
[all …]
cq.c
601 struct mlx4_ib_srq *srq; in mlx4_ib_poll_one() local
694 } else if ((*cur_qp)->ibqp.srq) { in mlx4_ib_poll_one()
695 srq = to_msrq((*cur_qp)->ibqp.srq); in mlx4_ib_poll_one()
697 wc->wr_id = srq->wrid[wqe_ctr]; in mlx4_ib_poll_one()
698 mlx4_ib_free_srq_wqe(srq, wqe_ctr); in mlx4_ib_poll_one()
700 srq = to_mibsrq(msrq); in mlx4_ib_poll_one()
702 wc->wr_id = srq->wrid[wqe_ctr]; in mlx4_ib_poll_one()
703 mlx4_ib_free_srq_wqe(srq, wqe_ctr); in mlx4_ib_poll_one()
866 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) in __mlx4_ib_cq_clean() argument
894 if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) in __mlx4_ib_cq_clean()
[all …]
/drivers/infiniband/hw/mlx5/
srq.c
46 static void *get_wqe(struct mlx5_ib_srq *srq, int n) in get_wqe() argument
48 return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); in get_wqe()
51 static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) in mlx5_ib_srq_event() argument
54 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; in mlx5_ib_srq_event()
58 event.element.srq = ibsrq; in mlx5_ib_srq_event()
68 type, srq->srqn); in mlx5_ib_srq_event()
76 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, in create_srq_user() argument
103 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); in create_srq_user()
105 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, in create_srq_user()
107 if (IS_ERR(srq->umem)) { in create_srq_user()
[all …]
cq.c
174 struct mlx5_ib_srq *srq; in handle_responder() local
179 if (qp->ibqp.srq || qp->ibqp.xrcd) { in handle_responder()
185 srq = to_mibsrq(msrq); in handle_responder()
187 srq = to_msrq(qp->ibqp.srq); in handle_responder()
189 if (srq) { in handle_responder()
191 wc->wr_id = srq->wrid[wqe_ctr]; in handle_responder()
192 mlx5_ib_free_srq_wqe(srq, wqe_ctr); in handle_responder()
500 struct mlx5_ib_srq *srq; in mlx5_poll_one() local
502 if ((*cur_qp)->ibqp.srq) { in mlx5_poll_one()
503 srq = to_msrq((*cur_qp)->ibqp.srq); in mlx5_poll_one()
[all …]
/drivers/infiniband/hw/qib/
qib_srq.c
51 struct qib_srq *srq = to_isrq(ibsrq); in qib_post_srq_receive() local
61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in qib_post_srq_receive()
67 spin_lock_irqsave(&srq->rq.lock, flags); in qib_post_srq_receive()
68 wq = srq->rq.wq; in qib_post_srq_receive()
70 if (next >= srq->rq.size) in qib_post_srq_receive()
73 spin_unlock_irqrestore(&srq->rq.lock, flags); in qib_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in qib_post_srq_receive()
87 spin_unlock_irqrestore(&srq->rq.lock, flags); in qib_post_srq_receive()
106 struct qib_srq *srq; in qib_create_srq() local
123 srq = kmalloc(sizeof(*srq), GFP_KERNEL); in qib_create_srq()
[all …]
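
qib_post_srq_receive() above is the producer side of the SRQ ring: under srq->rq.lock it rejects requests with too many SGEs, computes the next head index with wraparound, and fails the post when the ring is full. A stand-alone sketch of that ring check; the full-ring test (one slot always kept empty) is not visible in the fragment and is an assumption here:

#include <stdint.h>
#include <errno.h>

/* Simplified model of the SRQ receive ring used by qib/ipath above. */
struct rq_model {
	uint32_t size;      /* number of slots in the ring           */
	uint32_t head;      /* next slot to fill (producer index)    */
	uint32_t tail;      /* next slot to consume (consumer index) */
	uint32_t max_sge;
};

/*
 * Post one receive: fail if the request has too many SGEs or the ring is
 * full. Locking and the actual WQE copy are omitted; in the driver this
 * runs under srq->rq.lock.
 */
static int post_srq_recv_model(struct rq_model *rq, uint32_t num_sge)
{
	uint32_t next;

	if (num_sge > rq->max_sge)
		return -EINVAL;

	next = rq->head + 1;
	if (next >= rq->size)    /* wrap around the end of the ring */
		next = 0;
	if (next == rq->tail)    /* assumed full check: keep one slot empty */
		return -ENOMEM;

	/* ...copy the SGE list into the WQE at rq->head here... */
	rq->head = next;
	return 0;
}
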
qib_ruc.c
90 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); in qib_init_sge()
143 struct qib_srq *srq; in qib_get_rwqe() local
149 if (qp->ibqp.srq) { in qib_get_rwqe()
150 srq = to_isrq(qp->ibqp.srq); in qib_get_rwqe()
151 handler = srq->ibsrq.event_handler; in qib_get_rwqe()
152 rq = &srq->rq; in qib_get_rwqe()
154 srq = NULL; in qib_get_rwqe()
207 if (n < srq->limit) { in qib_get_rwqe()
210 srq->limit = 0; in qib_get_rwqe()
213 ev.element.srq = qp->ibqp.srq; in qib_get_rwqe()
[all …]
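
qib_get_rwqe() above also shows the SRQ limit mechanism: after a WQE is consumed, if the number of still-posted entries drops below srq->limit, the limit is cleared and the SRQ's event handler is invoked (conventionally with IB_EVENT_SRQ_LIMIT_REACHED) so the ULP can repost buffers. A rough sketch of that check with stand-in types; the event constant below is a placeholder for the verbs enum:

#include <stdint.h>

/* Stand-ins modelled on the qib/ipath fragments above. */
struct srq_event { int type; void *srq; };
typedef void (*srq_event_handler)(struct srq_event *ev, void *srq_context);

#define SRQ_LIMIT_REACHED 1   /* placeholder for IB_EVENT_SRQ_LIMIT_REACHED */

struct srq_model {
	uint32_t limit;              /* low watermark; 0 means disarmed */
	srq_event_handler handler;   /* ULP callback, may be NULL       */
	void *srq_context;
};

/*
 * Called after a receive WQE has been consumed, with 'remaining' the number
 * of WQEs still posted on the SRQ. Mirrors the n < srq->limit check above:
 * fire once, then disarm until the ULP rearms the limit.
 */
static void srq_check_limit(struct srq_model *srq, uint32_t remaining)
{
	struct srq_event ev;

	if (srq->limit == 0 || remaining >= srq->limit)
		return;

	srq->limit = 0;                 /* disarm so the event fires only once */
	if (srq->handler) {
		ev.type = SRQ_LIMIT_REACHED;
		ev.srq = srq;
		srq->handler(&ev, srq->srq_context);
	}
}
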
qib_qp.c
870 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; in qib_query_qp()
894 init_attr->srq = qp->ibqp.srq; in qib_query_qp()
915 if (qp->ibqp.srq) { in qib_compute_aeth()
1004 if (!init_attr->srq) { in qib_create_qp()
1041 if (init_attr->srq) { in qib_create_qp()
1042 struct qib_srq *srq = to_isrq(init_attr->srq); in qib_create_qp() local
1044 if (srq->rq.max_sge > 1) in qib_create_qp()
1046 (srq->rq.max_sge - 1); in qib_create_qp()
1064 if (init_attr->srq) in qib_create_qp()
/drivers/infiniband/hw/ipath/
ipath_srq.c
51 struct ipath_srq *srq = to_isrq(ibsrq); in ipath_post_srq_receive() local
61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in ipath_post_srq_receive()
67 spin_lock_irqsave(&srq->rq.lock, flags); in ipath_post_srq_receive()
68 wq = srq->rq.wq; in ipath_post_srq_receive()
70 if (next >= srq->rq.size) in ipath_post_srq_receive()
73 spin_unlock_irqrestore(&srq->rq.lock, flags); in ipath_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in ipath_post_srq_receive()
87 spin_unlock_irqrestore(&srq->rq.lock, flags); in ipath_post_srq_receive()
106 struct ipath_srq *srq; in ipath_create_srq() local
126 srq = kmalloc(sizeof(*srq), GFP_KERNEL); in ipath_create_srq()
[all …]
ipath_ud.c
57 struct ipath_srq *srq; in ipath_ud_loopback() local
107 if (qp->ibqp.srq) { in ipath_ud_loopback()
108 srq = to_isrq(qp->ibqp.srq); in ipath_ud_loopback()
109 handler = srq->ibsrq.event_handler; in ipath_ud_loopback()
110 rq = &srq->rq; in ipath_ud_loopback()
112 srq = NULL; in ipath_ud_loopback()
164 if (n < srq->limit) { in ipath_ud_loopback()
167 srq->limit = 0; in ipath_ud_loopback()
170 ev.element.srq = qp->ibqp.srq; in ipath_ud_loopback()
172 handler(&ev, srq->ibsrq.srq_context); in ipath_ud_loopback()
ipath_ruc.c
171 struct ipath_srq *srq; in ipath_get_rwqe() local
177 if (qp->ibqp.srq) { in ipath_get_rwqe()
178 srq = to_isrq(qp->ibqp.srq); in ipath_get_rwqe()
179 handler = srq->ibsrq.event_handler; in ipath_get_rwqe()
180 rq = &srq->rq; in ipath_get_rwqe()
182 srq = NULL; in ipath_get_rwqe()
231 if (n < srq->limit) { in ipath_get_rwqe()
234 srq->limit = 0; in ipath_get_rwqe()
237 ev.element.srq = qp->ibqp.srq; in ipath_get_rwqe()
239 handler(&ev, srq->ibsrq.srq_context); in ipath_get_rwqe()
ipath_qp.c
636 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; in ipath_query_qp()
660 init_attr->srq = qp->ibqp.srq; in ipath_query_qp()
681 if (qp->ibqp.srq) { in ipath_compute_aeth()
765 if (!init_attr->srq) { in ipath_create_qp()
796 if (init_attr->srq) { in ipath_create_qp()
797 struct ipath_srq *srq = to_isrq(init_attr->srq); in ipath_create_qp() local
799 if (srq->rq.max_sge > 1) in ipath_create_qp()
801 (srq->rq.max_sge - 1); in ipath_create_qp()
820 if (init_attr->srq) { in ipath_create_qp()
/drivers/net/ethernet/mellanox/mlx5/core/
srq.c
44 struct mlx5_core_srq *srq; in mlx5_srq_event() local
48 srq = radix_tree_lookup(&table->tree, srqn); in mlx5_srq_event()
49 if (srq) in mlx5_srq_event()
50 atomic_inc(&srq->refcount); in mlx5_srq_event()
54 if (!srq) { in mlx5_srq_event()
59 srq->event(srq, event_type); in mlx5_srq_event()
61 if (atomic_dec_and_test(&srq->refcount)) in mlx5_srq_event()
62 complete(&srq->free); in mlx5_srq_event()
68 struct mlx5_core_srq *srq; in mlx5_core_get_srq() local
72 srq = radix_tree_lookup(&table->tree, srqn); in mlx5_core_get_srq()
[all …]
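
mlx5_srq_event() above (and the mlx4 twin below) uses the core lookup-and-pin pattern: find the SRQ by number in a radix tree, take a reference before dropping the lock, run the event callback, and let the last reference complete srq->free so a concurrent destroy can proceed. A loose userspace model of that refcount/completion handshake using C11 atomics; the kernel's struct completion is reduced to a flag here:

#include <stdatomic.h>
#include <stdbool.h>

/* Simplified stand-in for the core SRQ object: a refcount pins the object
 * while an event callback runs; the destroy path waits until the count hits
 * zero (the kernel code waits on a struct completion). */
struct core_srq_model {
	atomic_int refcount;
	void (*event)(struct core_srq_model *srq, int event_type);
	bool freed;   /* stands in for complete(&srq->free) */
};

/* Event path: take a reference before calling out, drop it afterwards.
 * In the driver the lookup itself is a radix_tree_lookup() under a lock. */
static void srq_event_model(struct core_srq_model *srq, int event_type)
{
	atomic_fetch_add(&srq->refcount, 1);

	if (srq->event)
		srq->event(srq, event_type);

	/* Last reference gone: let the waiter in the destroy path proceed. */
	if (atomic_fetch_sub(&srq->refcount, 1) == 1)
		srq->freed = true;
}
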
/drivers/net/ethernet/mellanox/mlx4/
srq.c
46 struct mlx4_srq *srq; in mlx4_srq_event() local
50 srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1)); in mlx4_srq_event()
51 if (srq) in mlx4_srq_event()
52 atomic_inc(&srq->refcount); in mlx4_srq_event()
56 if (!srq) { in mlx4_srq_event()
61 srq->event(srq, event_type); in mlx4_srq_event()
63 if (atomic_dec_and_test(&srq->refcount)) in mlx4_srq_event()
64 complete(&srq->free); in mlx4_srq_event()
166 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) in mlx4_srq_alloc() argument
174 err = mlx4_srq_alloc_icm(dev, &srq->srqn); in mlx4_srq_alloc()
[all …]
resource_tracker.c
109 struct res_srq *srq; member
444 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs; in mlx4_init_quotas()
455 dev->quotas.srq = in mlx4_init_quotas()
1464 enum res_srq_states state, struct res_srq **srq) in srq_res_start_move_to() argument
1490 if (srq) in srq_res_start_move_to()
1491 *srq = r; in srq_res_start_move_to()
2468 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; in qp_get_mtt_size() local
2479 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); in qp_get_mtt_size()
2711 struct res_srq *srq; in mlx4_RST2INIT_QP_wrapper() local
2747 err = get_res(dev, slave, srqn, RES_SRQ, &srq); in mlx4_RST2INIT_QP_wrapper()
[all …]
/drivers/infiniband/core/
verbs.c
296 struct ib_srq *srq; in ib_create_srq() local
301 srq = pd->device->create_srq(pd, srq_init_attr, NULL); in ib_create_srq()
303 if (!IS_ERR(srq)) { in ib_create_srq()
304 srq->device = pd->device; in ib_create_srq()
305 srq->pd = pd; in ib_create_srq()
306 srq->uobject = NULL; in ib_create_srq()
307 srq->event_handler = srq_init_attr->event_handler; in ib_create_srq()
308 srq->srq_context = srq_init_attr->srq_context; in ib_create_srq()
309 srq->srq_type = srq_init_attr->srq_type; in ib_create_srq()
310 if (srq->srq_type == IB_SRQT_XRC) { in ib_create_srq()
[all …]
uverbs_cmd.c
266 static void put_srq_read(struct ib_srq *srq) in put_srq_read() argument
268 put_uobj_read(srq->uobject); in put_srq_read()
1595 struct ib_srq *srq = NULL; in ib_uverbs_create_qp() local
1632 srq = idr_read_srq(cmd.srq_handle, file->ucontext); in ib_uverbs_create_qp()
1633 if (!srq || srq->srq_type != IB_SRQT_BASIC) { in ib_uverbs_create_qp()
1663 attr.srq = srq; in ib_uverbs_create_qp()
1695 qp->srq = attr.srq; in ib_uverbs_create_qp()
1704 if (attr.srq) in ib_uverbs_create_qp()
1705 atomic_inc(&attr.srq->usecnt); in ib_uverbs_create_qp()
1742 if (srq) in ib_uverbs_create_qp()
[all …]
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1056 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { in ocrdma_check_qp_params()
1111 int dpp_credit_lmt, int srq) in ocrdma_copy_qp_uresp() argument
1128 if (!srq) { in ocrdma_copy_qp_uresp()
1155 if (!srq) { in ocrdma_copy_qp_uresp()
1283 (attrs->srq != NULL)); in ocrdma_create_qp()
1480 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) in ocrdma_srq_toggle_bit() argument
1485 if (srq->idx_bit_fields[i] & mask) in ocrdma_srq_toggle_bit()
1486 srq->idx_bit_fields[i] &= ~mask; in ocrdma_srq_toggle_bit()
1488 srq->idx_bit_fields[i] |= mask; in ocrdma_srq_toggle_bit()
1550 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp))) in ocrdma_discard_cqes()
[all …]
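
ocrdma_srq_toggle_bit() above flips the "in use" bit for one SRQ index in a bitmap of 32-bit words. The word/mask derivation is elided in the fragment; the sketch below assumes the usual idx/32 and idx%32 split:

#include <stdint.h>

/* Sketch of the toggle in ocrdma_srq_toggle_bit(): one bit per SRQ index. */
static void srq_toggle_bit(uint32_t *idx_bit_fields, int idx)
{
	int i = idx / 32;                  /* assumed word selection  */
	uint32_t mask = 1u << (idx % 32);  /* assumed bit within word */

	if (idx_bit_fields[i] & mask)
		idx_bit_fields[i] &= ~mask;   /* was set: clear it  */
	else
		idx_bit_fields[i] |= mask;    /* was clear: set it  */
}
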
ocrdma_hw.c
713 ib_evt.element.srq = &qp->srq->ibsrq; in ocrdma_dispatch_ibevent()
719 ib_evt.element.srq = &qp->srq->ibsrq; in ocrdma_dispatch_ibevent()
744 if (qp->srq->ibsrq.event_handler) in ocrdma_dispatch_ibevent()
745 qp->srq->ibsrq.event_handler(&ib_evt, in ocrdma_dispatch_ibevent()
746 qp->srq->ibsrq. in ocrdma_dispatch_ibevent()
848 if (qp->srq) in ocrdma_qp_buddy_cq_handler()
1904 if (!qp->srq) { in ocrdma_flush_qp()
2148 if (!attrs->srq) { in ocrdma_get_create_qp_rsp()
2191 if (attrs->srq) { in ocrdma_mbx_create_qp()
2192 struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq); in ocrdma_mbx_create_qp() local
[all …]
/drivers/net/
eql.c
264 static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
265 static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
412 slaving_request_t srq; in eql_enslave() local
414 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) in eql_enslave()
417 slave_dev = __dev_get_by_name(&init_net, srq.slave_name); in eql_enslave()
433 s->priority = srq.priority; in eql_enslave()
434 s->priority_bps = srq.priority; in eql_enslave()
435 s->priority_Bps = srq.priority / 8; in eql_enslave()
455 slaving_request_t srq; in eql_emancipate() local
458 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) in eql_emancipate()
[all …]
/drivers/infiniband/hw/ehca/
ehca_iverbs.h
161 int ehca_post_srq_recv(struct ib_srq *srq,
169 int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
172 int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
174 int ehca_destroy_srq(struct ib_srq *srq);
/drivers/infiniband/ulp/ipoib/
ipoib_cm.c
102 ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr); in ipoib_cm_post_receive_srq()
256 .srq = priv->cm.srq, in ipoib_cm_create_rx_qp()
434 rep.srq = ipoib_cm_has_srq(dev); in ipoib_cm_send_rep()
1028 .srq = priv->cm.srq, in ipoib_cm_create_tx_qp()
1082 req.srq = ipoib_cm_has_srq(dev); in ipoib_cm_send_req()
1509 priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr); in ipoib_cm_create_srq()
1510 if (IS_ERR(priv->cm.srq)) { in ipoib_cm_create_srq()
1511 if (PTR_ERR(priv->cm.srq) != -ENOSYS) in ipoib_cm_create_srq()
1513 priv->ca->name, PTR_ERR(priv->cm.srq)); in ipoib_cm_create_srq()
1514 priv->cm.srq = NULL; in ipoib_cm_create_srq()
[all …]
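
The ipoib fragments show the consumer side end to end: the ULP creates one SRQ per device with ib_create_srq() (treating -ENOSYS as "no SRQ support"), passes that SRQ to its connected-mode QPs, and keeps it stocked with ib_post_srq_recv(). A hedged sketch of that usage against the kernel verbs API as it exists in this tree; the helper names and capacities are illustrative, not taken from ipoib:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Create one SRQ on a PD; capacities here are example values. */
static struct ib_srq *example_create_srq(struct ib_pd *pd, int max_wr,
					 int max_sge)
{
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = max_wr,
			.max_sge = max_sge,
		},
	};

	/* Callers check IS_ERR(); ipoib treats -ENOSYS as "no SRQ support"
	 * and falls back to per-QP receive queues. */
	return ib_create_srq(pd, &srq_init_attr);
}

/* Repost a single receive buffer that has already been DMA-mapped. */
static int example_post_one(struct ib_srq *srq, u64 wr_id, u64 dma_addr,
			    u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}
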
