Searched refs:msrq (Results 1 – 9 of 9) sorted by relevance
/drivers/infiniband/hw/mlx4/
srq.c
      43  return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);  in get_wqe()
      97  srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);  in mlx4_ib_create_srq()
      98  srq->msrq.max_gs = init_attr->attr.max_sge;  in mlx4_ib_create_srq()
     102  srq->msrq.max_gs *  in mlx4_ib_create_srq()
     104  srq->msrq.wqe_shift = ilog2(desc_size);  in mlx4_ib_create_srq()
     106  buf_size = srq->msrq.max * desc_size;  in mlx4_ib_create_srq()
     150  srq->tail = srq->msrq.max - 1;  in mlx4_ib_create_srq()
     153  for (i = 0; i < srq->msrq.max; ++i) {  in mlx4_ib_create_srq()
     156  cpu_to_be16((i + 1) & (srq->msrq.max - 1));  in mlx4_ib_create_srq()
     173  srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);  in mlx4_ib_create_srq()
          [all …]
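The srq.c hits above show the SRQ buffer layout pattern: the descriptor count is rounded up to a power of two, get_wqe() turns an index into an offset with a shift (wqe_shift = ilog2(desc_size)), and each descriptor's next segment links to the following index with a masked wrap, forming the free list. Below is a minimal user-space sketch of that pattern; the toy_srq/toy_next_seg types and the 64-byte descriptor size are illustrative stand-ins rather than the driver's structures, and htons() stands in for cpu_to_be16().

/*
 * Sketch of the SRQ descriptor ring seen in the srq.c hits: every
 * descriptor is 1 << wqe_shift bytes, get_wqe(n) is a shift rather than
 * a multiply, and the "next" indices wrap with (i + 1) & (max - 1).
 * toy_srq / toy_next_seg are illustrative, not driver types.
 */
#include <arpa/inet.h>   /* htons()/ntohs() stand in for cpu_to_be16()/be16_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_next_seg {            /* models the "next WQE" segment at the head of a descriptor */
	uint16_t reserved;
	uint16_t next_wqe_index; /* stored big-endian */
};

struct toy_srq {
	void *buf;
	int   max;               /* power of two, like roundup_pow_of_two(max_wr + 1) */
	int   wqe_shift;         /* ilog2(descriptor size) */
};

static void *get_wqe(struct toy_srq *srq, int n)
{
	return (char *)srq->buf + ((size_t)n << srq->wqe_shift);
}

int main(void)
{
	struct toy_srq srq = { .max = 8, .wqe_shift = 6 };   /* 8 descriptors of 64 bytes */
	int i;

	srq.buf = calloc(srq.max, 1u << srq.wqe_shift);
	if (!srq.buf)
		return 1;

	/* Link every descriptor to the next one, wrapping at max - 1. */
	for (i = 0; i < srq.max; ++i) {
		struct toy_next_seg *next = get_wqe(&srq, i);
		next->next_wqe_index = htons((i + 1) & (srq.max - 1));
	}

	printf("WQE 3 at offset %zu links to index %u\n",
	       (size_t)((char *)get_wqe(&srq, 3) - (char *)srq.buf),
	       (unsigned)ntohs(((struct toy_next_seg *)get_wqe(&srq, 3))->next_wqe_index));

	free(srq.buf);
	return 0;
}

Because max is a power of two, the (i + 1) & (max - 1) wrap needs no modulo, which is why the driver rounds max_wr + 1 up with roundup_pow_of_two().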
mlx4_ib.h
     300  struct mlx4_srq msrq;  member
     615  static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)  in to_mibsrq() argument
     617  return container_of(msrq, struct mlx4_ib_srq, msrq);  in to_mibsrq()
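The mlx4_ib.h hits explain the member name: struct mlx4_ib_srq embeds the core struct mlx4_srq as its msrq field, and to_mibsrq() maps a pointer to the embedded member back to the enclosing IB object via container_of(). A small user-space sketch of the same idiom, with toy type names in place of the driver's:

/*
 * Sketch of the container_of idiom behind to_mibsrq(): the wrapper embeds
 * the core object, so a pointer to the member can be converted back to
 * the enclosing structure. core_srq / ib_srq_wrapper are toy types.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_srq {                /* models mlx4_srq / mlx5_core_srq */
	int srqn;
};

struct ib_srq_wrapper {          /* models mlx4_ib_srq / mlx5_ib_srq */
	int             head, tail;
	struct core_srq msrq;    /* embedded core object, hence the "msrq" member */
};

static struct ib_srq_wrapper *to_mibsrq(struct core_srq *msrq)
{
	return container_of(msrq, struct ib_srq_wrapper, msrq);
}

int main(void)
{
	struct ib_srq_wrapper w = { .head = 1, .tail = 7, .msrq = { .srqn = 42 } };
	struct core_srq *core = &w.msrq;   /* what a lookup by SRQ number hands back */

	printf("srqn %d belongs to wrapper with tail %d\n",
	       core->srqn, to_mibsrq(core)->tail);
	return 0;
}

The mlx5_ib.h hits further down repeat the same pattern with struct mlx5_core_srq as the embedded member, which is why the cq.c code in both drivers can recover the IB-layer SRQ from a core lookup result.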
cq.c
     602  struct mlx4_srq *msrq = NULL;  in mlx4_ib_poll_one() local
     677  msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,  in mlx4_ib_poll_one()
     679  if (unlikely(!msrq)) {  in mlx4_ib_poll_one()
     699  } else if (msrq) {  in mlx4_ib_poll_one()
     700  srq = to_mibsrq(msrq);  in mlx4_ib_poll_one()
qp.c
    1640  context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);  in __mlx4_ib_modify_qp()
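The lone qp.c hit attaches the SRQ to a QP by packing its 24-bit number, together with a flag in bit 24, into one big-endian word of the QP context. A small sketch of that packing; treating bit 24 as simply "an SRQ is attached" is an assumption here, since the hit does not show the hardware definition of the bit.

/*
 * Sketch of the srqn packing in the qp.c hit: a 24-bit SRQ number OR-ed
 * with a flag in bit 24, stored big-endian. htonl() stands in for
 * cpu_to_be32(); the meaning of bit 24 is assumed, not taken from the hit.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t srqn = 0x00ABCDEF;                    /* 24-bit SRQ number */
	uint32_t ctx_word = htonl(1u << 24 | srqn);    /* what lands in context->srqn */
	uint32_t host = ntohl(ctx_word);

	printf("srq number 0x%06x, flag bit %u\n",
	       (unsigned)(host & 0x00FFFFFF), (unsigned)(host >> 24));
	return 0;
}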
/drivers/infiniband/hw/mlx5/
srq.c
      48  return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);  in get_wqe()
     178  srq->tail = srq->msrq.max - 1;  in create_srq_kernel()
     181  for (i = 0; i < srq->msrq.max; i++) {  in create_srq_kernel()
     184  cpu_to_be16((i + 1) & (srq->msrq.max - 1));  in create_srq_kernel()
     198  srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);  in create_srq_kernel()
     201  (unsigned long)(srq->msrq.max * sizeof(u64)));  in create_srq_kernel()
     266  srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);  in mlx5_ib_create_srq()
     267  srq->msrq.max_gs = init_attr->attr.max_sge;  in mlx5_ib_create_srq()
     270  srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);  in mlx5_ib_create_srq()
     273  srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /  in mlx5_ib_create_srq()
          [all …]
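The mlx5 srq.c hits add one sizing detail over the mlx4 entry: the descriptor is built from one next segment plus max_gs data segments, and max_avail_gather then reports how many scatter entries actually fit in the final descriptor size. The sketch below assumes 16-byte segments and a power-of-two descriptor size (implied by the wqe_shift shift in get_wqe()); the real struct sizes are not shown in the hits.

/*
 * Sketch of the descriptor sizing arithmetic in the mlx5 srq.c hits.
 * The 16-byte segment sizes and the power-of-two rounding are assumptions
 * for illustration, not the hardware layout.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int next_seg = 16;    /* assumed sizeof(mlx5_wqe_srq_next_seg) */
	unsigned int data_seg = 16;    /* assumed sizeof(mlx5_wqe_data_seg) */
	unsigned int max_gs = 5;       /* requested scatter entries */

	unsigned int desc_size = roundup_pow_of_two(next_seg + max_gs * data_seg);
	unsigned int max_avail_gather = (desc_size - next_seg) / data_seg;

	/* Rounding up can leave room for more scatter entries than requested. */
	printf("desc_size = %u, max_avail_gather = %u\n", desc_size, max_avail_gather);
	return 0;
}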
mlx5_ib.h
     234  struct mlx5_core_srq msrq;  member
     432  static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)  in to_mibsrq() argument
     434  return container_of(msrq, struct mlx5_ib_srq, msrq);  in to_mibsrq()
cq.c
     180  struct mlx5_core_srq *msrq = NULL;  in handle_responder() local
     183  msrq = mlx5_core_get_srq(dev->mdev,  in handle_responder()
     185  srq = to_mibsrq(msrq);  in handle_responder()
     193  if (msrq && atomic_dec_and_test(&msrq->refcount))  in handle_responder()
     194  complete(&msrq->free);  in handle_responder()
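The mlx5 cq.c hits show the completion poller holding a reference on the SRQ it looked up (mlx5_core_get_srq) and, once done, dropping it with atomic_dec_and_test(); whichever path drops the last reference completes msrq->free so a concurrent destroy can finish. A user-space sketch of that handshake, using C11 atomics and a plain flag in place of the kernel's struct completion:

/*
 * Sketch of the reference/completion handshake in the cq.c hits: the last
 * thread to drop a reference signals that the object may be freed.
 * toy_core_srq and the "freed" flag are illustrative stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_core_srq {
	atomic_int refcount;
	bool       freed;        /* stands in for complete(&msrq->free) */
};

static void put_srq(struct toy_core_srq *srq)
{
	/* Analogue of atomic_dec_and_test(): true only when hitting zero. */
	if (atomic_fetch_sub(&srq->refcount, 1) == 1)
		srq->freed = true;
}

int main(void)
{
	struct toy_core_srq srq = { .freed = false };

	atomic_init(&srq.refcount, 2);   /* one ref for the destroyer, one for the poller */

	put_srq(&srq);                   /* poller is done with the SRQ */
	put_srq(&srq);                   /* destroy path drops the final reference */

	printf("safe to free: %s\n", srq.freed ? "yes" : "no");
	return 0;
}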
qp.c
     942  in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);  in create_qp_common()
     948  in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);  in create_qp_common()
     953  in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);  in create_qp_common()
     956  in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);  in create_qp_common()
/drivers/staging/lustre/lnet/selftest/
conrpc.c
     602  srpc_mksn_reqst_t *msrq;  in lstcon_sesrpc_prep() local
     613  msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst;  in lstcon_sesrpc_prep()
     614  msrq->mksn_sid = console_session.ses_id;  in lstcon_sesrpc_prep()
     615  msrq->mksn_force = console_session.ses_force;  in lstcon_sesrpc_prep()
     616  strncpy(msrq->mksn_name, console_session.ses_name,  in lstcon_sesrpc_prep()