Home
last modified time | relevance | path

Searched refs:msrq (Results 1 – 10 of 10) sorted by relevance

/drivers/infiniband/hw/mlx4/
srq.c:44 return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); in get_wqe()
96 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); in mlx4_ib_create_srq()
97 srq->msrq.max_gs = init_attr->attr.max_sge; in mlx4_ib_create_srq()
101 srq->msrq.max_gs * in mlx4_ib_create_srq()
103 srq->msrq.wqe_shift = ilog2(desc_size); in mlx4_ib_create_srq()
105 buf_size = srq->msrq.max * desc_size; in mlx4_ib_create_srq()
143 srq->tail = srq->msrq.max - 1; in mlx4_ib_create_srq()
146 for (i = 0; i < srq->msrq.max; ++i) { in mlx4_ib_create_srq()
149 cpu_to_be16((i + 1) & (srq->msrq.max - 1)); in mlx4_ib_create_srq()
166 srq->wrid = kvmalloc_array(srq->msrq.max, in mlx4_ib_create_srq()
[all …]
mlx4_ib.h:356 struct mlx4_srq msrq; member
706 static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq) in to_mibsrq() argument
708 return container_of(msrq, struct mlx4_ib_srq, msrq); in to_mibsrq()
cq.c:669 struct mlx4_srq *msrq = NULL; in mlx4_ib_poll_one() local
732 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, in mlx4_ib_poll_one()
749 } else if (msrq) { in mlx4_ib_poll_one()
750 srq = to_mibsrq(msrq); in mlx4_ib_poll_one()
qp.c:2456 to_msrq(ibsrq)->msrq.srqn); in __mlx4_ib_modify_qp()
/drivers/infiniband/hw/mlx5/
srq.c:151 mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max), in create_srq_kernel()
155 srq->tail = srq->msrq.max - 1; in create_srq_kernel()
158 for (i = 0; i < srq->msrq.max; i++) { in create_srq_kernel()
161 cpu_to_be16((i + 1) & (srq->msrq.max - 1)); in create_srq_kernel()
172 srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL); in create_srq_kernel()
239 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); in mlx5_ib_create_srq()
240 srq->msrq.max_gs = init_attr->attr.max_sge; in mlx5_ib_create_srq()
243 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); in mlx5_ib_create_srq()
244 if (desc_size == 0 || srq->msrq.max_gs > desc_size) in mlx5_ib_create_srq()
252 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / in mlx5_ib_create_srq()
[all …]
cq.c:178 struct mlx5_core_srq *msrq = NULL; in handle_responder() local
181 msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn)); in handle_responder()
182 srq = to_mibsrq(msrq); in handle_responder()
190 if (msrq) in handle_responder()
191 mlx5_core_res_put(&msrq->common); in handle_responder()
mlx5_ib.h:534 struct mlx5_core_srq msrq; member
1072 static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq) in to_mibsrq() argument
1074 return container_of(msrq, struct mlx5_ib_srq, msrq); in to_mibsrq()
odp.c:1090 int wqe_size = 1 << srq->msrq.wqe_shift; in mlx5_ib_mr_responder_pfault_handler_srq()
1159 struct mlx5_core_srq *msrq = in res_to_srq() local
1162 return to_mibsrq(msrq); in res_to_srq()
qp.c:272 srq->msrq.max, in mlx5_ib_read_user_wqe_srq()
273 srq->msrq.wqe_shift, in mlx5_ib_read_user_wqe_srq()
2225 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); in create_qp_common()
2231 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); in create_qp_common()
2236 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn); in create_qp_common()
2239 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn); in create_qp_common()
2573 MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn); in mlx5_ib_create_dct()
devx.c:584 struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq); in devx_is_valid_obj_id()
602 to_msrq(uobj->object)->msrq.srqn) == in devx_is_valid_obj_id()