/drivers/infiniband/hw/hns/
D | hns_roce_qp.c |
    450  hr_qp->rq.wqe_cnt = 0;  in set_rq_size()
    452  hr_qp->rq_inl_buf.wqe_cnt = 0;  in set_rq_size()
    481  hr_qp->rq.wqe_cnt = cnt;  in set_rq_size()
    485  hr_qp->rq_inl_buf.wqe_cnt = cnt;  in set_rq_size()
    487  hr_qp->rq_inl_buf.wqe_cnt = 0;  in set_rq_size()
    645  hr_qp->sq.wqe_cnt = cnt;  in set_user_sq_size()
    662  buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,  in set_wqe_buf_attr()
    684  buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,  in set_wqe_buf_attr()
    722  hr_qp->sq.wqe_cnt = cnt;  in set_kernel_sq_size()
    755  u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;  in alloc_rq_inline_buf()  local
    [all …]
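The set_rq_size() entries above show the sizing pass: wqe_cnt is forced to 0 when the QP has no receive queue of its own, otherwise it takes the validated count, and rq_inl_buf.wqe_cnt mirrors it only when inline receive is in play. A minimal userspace sketch of that shape, with hypothetical names (struct rq_state, size_rq) standing in for the driver's types:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical sketch of an RQ-sizing helper in the style of
 * hns_roce's set_rq_size(): a QP without a private receive queue
 * (e.g. one attached to an SRQ) gets every count forced to 0;
 * otherwise the validated count is stored, and the inline-RX
 * shadow buffer only gets a count when inline receive is enabled.
 */
struct rq_state {
	uint32_t wqe_cnt;	/* entries in the receive ring */
	uint32_t inl_wqe_cnt;	/* entries in the inline-RX shadow buffer */
};

static void size_rq(struct rq_state *rq, uint32_t cnt,
		    bool has_rq, bool rx_inline)
{
	if (!has_rq) {		/* SRQ-backed QP: no private RQ at all */
		rq->wqe_cnt = 0;
		rq->inl_wqe_cnt = 0;
		return;
	}
	rq->wqe_cnt = cnt;
	/* the inline buffer mirrors the ring, or is absent */
	rq->inl_wqe_cnt = rx_inline ? cnt : 0;
}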
D | hns_roce_srq.c |
    176  buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,  in alloc_srq_idx()
    191  idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);  in alloc_srq_idx()
    231  buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,  in alloc_srq_wqe_buf()
    254  srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);  in alloc_srq_wrid()
    307  srq->wqe_cnt = roundup_pow_of_two(attr->max_wr);  in set_srq_basic_param()
    310  attr->max_wr = srq->wqe_cnt;  in set_srq_basic_param()
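set_srq_basic_param() rounds the requested max_wr up to a power of two and writes the rounded value back, so the caller learns the real ring capacity and every later index computation can use a mask instead of a modulo. A sketch under that assumption, with roundup_pow_of_two_u32() as a userspace stand-in for the kernel's roundup_pow_of_two():

#include <stdint.h>

/*
 * Sizing pattern from set_srq_basic_param(): round the count up to
 * a power of two so ring indices can be computed with
 * "& (wqe_cnt - 1)", then report the real capacity back.
 */
static uint32_t roundup_pow_of_two_u32(uint32_t n)
{
	uint32_t v = 1;

	while (v < n)
		v <<= 1;
	return v;
}

struct srq { uint32_t wqe_cnt; };

static void set_srq_size(struct srq *srq, uint32_t *max_wr)
{
	srq->wqe_cnt = roundup_pow_of_two_u32(*max_wr);
	*max_wr = srq->wqe_cnt;	/* report the rounded-up capacity back */
}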
D | hns_roce_restrack.c |
    91   if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))  in hns_roce_fill_res_qp_entry()
    97   if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))  in hns_roce_fill_res_qp_entry()
D | hns_roce_hw_v2.c |
    728  wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);  in hns_roce_v2_post_send()
    741  ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);  in hns_roce_v2_post_send()
    833  if (hr_qp->rq_inl_buf.wqe_cnt) {  in fill_rq_wqe()
    880  wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);  in hns_roce_v2_post_recv()
    922  return idx_que->head - idx_que->tail >= srq->wqe_cnt;  in hns_roce_srqwq_overflow()
    951  pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);  in get_srq_wqe_idx()
    952  if (unlikely(pos == srq->wqe_cnt))  in get_srq_wqe_idx()
    966  head = idx_que->head & (srq->wqe_cnt - 1);  in fill_wqe_idx()
    3748 wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);  in hns_roce_handle_recv_inl_wqe()
    3783 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];  in sw_comp()
    [all …]
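Two idioms are visible in hns_roce_v2_post_send(): the ring slot comes from masking the free-running producer position, and the owner bit is derived from the bit just above the index bits, which flips on every pass around the ring so hardware can tell fresh WQEs from stale ones. A hedged sketch of that arithmetic (names are illustrative, and the kernel's ilog2() is replaced by a __builtin_clz()-based helper):

#include <stdint.h>

/*
 * With a power-of-two ring, the absolute producer position splits
 * into a ring index (low bits) and a pass counter (high bits); bit
 * log2(wqe_cnt) of the position flips on every wrap, so its
 * inverted value marks WQEs that belong to the current pass.
 */
static inline uint32_t ilog2_u32(uint32_t v)
{
	return 31 - (uint32_t)__builtin_clz(v);
}

static void sq_slot(uint32_t head, uint32_t nreq, uint32_t wqe_cnt,
		    uint32_t *wqe_idx, uint32_t *owner)
{
	uint32_t pos = head + nreq;	/* absolute position, free-running */

	*wqe_idx = pos & (wqe_cnt - 1);			/* ring slot */
	*owner = (~(pos >> ilog2_u32(wqe_cnt))) & 0x1;	/* flips each wrap */
}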
D | hns_roce_device.h |
    331  u32 wqe_cnt;  /* WQE num */  member
    437  u32 wqe_cnt;  member
    583  u32 wqe_cnt;  member
/drivers/infiniband/hw/mlx5/
D | qp.c |
    184  wq->offset, wq->wqe_cnt,  in mlx5_ib_read_user_wqe_sq()
    210  wq->wqe_cnt, wq->wqe_shift,  in mlx5_ib_read_user_wqe_sq()
    246  wq->offset, wq->wqe_cnt,  in mlx5_ib_read_user_wqe_rq()
    364  qp->rq.wqe_cnt = 0;  in set_rq_size()
    372  qp->rq.wqe_cnt = ucmd->rq_wqe_count;  in set_rq_size()
    384  qp->rq.max_post = qp->rq.wqe_cnt;  in set_rq_size()
    393  qp->rq.wqe_cnt = wq_size / wqe_size;  in set_rq_size()
    406  qp->rq.max_post = qp->rq.wqe_cnt;  in set_rq_size()
    535  qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;  in calc_sq_size()
    536  if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {  in calc_sq_size()
    [all …]
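mlx5's calc_sq_size() works in the other direction from the hns helpers: the SQ buffer is a whole number of 64-byte basic blocks (MLX5_SEND_WQE_BB), so wqe_cnt falls out of a division, and the result is checked against the device's 2^log_max_qp_sz limit. A sketch, treating log_max_qp_sz as a plain parameter rather than the MLX5_CAP_GEN() firmware query:

#include <stdint.h>
#include <errno.h>

#define SEND_WQE_BB 64u	/* stand-in for MLX5_SEND_WQE_BB */

/*
 * Derive the WQE count from the buffer size and reject counts the
 * device cannot represent, in the style of calc_sq_size().
 */
static int calc_sq_wqe_cnt(uint32_t wq_size, uint32_t log_max_qp_sz,
			   uint32_t *wqe_cnt)
{
	if (wq_size % SEND_WQE_BB)
		return -EINVAL;		/* must be whole basic blocks */

	*wqe_cnt = wq_size / SEND_WQE_BB;
	if (*wqe_cnt > (1u << log_max_qp_sz))
		return -EINVAL;		/* exceeds device limit */
	return 0;
}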
D | srq.h |
    21   u32 wqe_cnt;  member
D | cq.c |
    197  wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];  in handle_responder()
    344  idx = tail & (qp->sq.wqe_cnt - 1);  in handle_atomics()
    410  idx &= (wq->wqe_cnt - 1);  in sw_comp()
    501  idx = wqe_ctr & (wq->wqe_cnt - 1);  in mlx5_poll_one()
    533  idx = wqe_ctr & (wq->wqe_cnt - 1);  in mlx5_poll_one()
    546  wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];  in mlx5_poll_one()
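The CQ-side entries all mask a counter with (wqe_cnt - 1) before touching the wrid array: the CQE's wqe_counter keeps counting past the ring size, and the mask recovers the slot whose saved wr_id belongs to the completion. A simplified stand-in for that consumer path:

#include <stdint.h>

/*
 * Consumer-side sketch in the style of mlx5_poll_one(): look up the
 * wr_id saved at post time by masking the CQE's 16-bit counter.
 * The wq layout is a simplified stand-in for the driver's struct.
 */
struct wq {
	uint64_t *wrid;		/* wr_id saved per slot at post time */
	uint32_t wqe_cnt;	/* power of two */
	uint32_t tail;		/* free-running consumer counter */
};

static uint64_t complete_one(struct wq *wq, uint16_t wqe_ctr)
{
	uint32_t idx = wqe_ctr & (wq->wqe_cnt - 1);
	uint64_t wr_id = wq->wrid[idx];

	wq->tail++;		/* slot is free again */
	return wr_id;
}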
D | wr.h |
    59   idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);  in handle_post_send_edge()
D | wr.c |
    716  idx = (idx + 1) & (qp->sq.wqe_cnt - 1);  in dump_wqe()
    732  *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);  in mlx5r_begin_wqe()
    783  (qp->sq.wqe_cnt - 1)) :  in mlx5r_finish_wqe()
    1231 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);  in mlx5_ib_post_recv()
    1266 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);  in mlx5_ib_post_recv()
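mlx5_ib_post_recv() shows the producer side of the same ring: the starting slot is the masked head, and the index is re-masked after each increment so a batch of posts wraps cleanly. A sketch with an illustrative wqe_stride field (the real driver derives the stride from wqe_shift):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Producer-side sketch in the style of mlx5_ib_post_recv(): build a
 * batch of WQEs, re-masking the index after each increment, then
 * publish the new head.
 */
struct rq {
	uint8_t *buf;
	uint64_t *wrid;
	uint32_t wqe_cnt;	/* power of two */
	uint32_t wqe_stride;	/* bytes per WQE slot (illustrative) */
	uint32_t head;		/* free-running producer counter */
};

static void post_recv_batch(struct rq *rq, const uint64_t *wr_ids,
			    unsigned int nreq)
{
	uint32_t ind = rq->head & (rq->wqe_cnt - 1);

	for (unsigned int i = 0; i < nreq; i++) {
		memset(rq->buf + (size_t)ind * rq->wqe_stride, 0,
		       rq->wqe_stride);		/* build the WQE here */
		rq->wrid[ind] = wr_ids[i];	/* remember wr_id per slot */
		ind = (ind + 1) & (rq->wqe_cnt - 1);
	}
	rq->head += nreq;	/* publish; a doorbell write would follow */
}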
D | mem.c |
    118  idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);  in post_send_nop()
D | mlx5_ib.h |
    361  int wqe_cnt;  member
/drivers/infiniband/hw/mlx4/
D | qp.c |
    194  buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));  in stamp_send_wqe()
    314  qp->rq.wqe_cnt = qp->rq.max_gs = 0;  in set_rq_size()
    325  qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr));  in set_rq_size()
    333  cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;  in set_rq_size()
    337  min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);  in set_rq_size()
    381  qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +  in set_kernel_sq_size()
    390  qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +  in set_kernel_sq_size()
    391  (qp->sq.wqe_cnt << qp->sq.wqe_shift);  in set_kernel_sq_size()
    394  qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;  in set_kernel_sq_size()
    396  qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;  in set_kernel_sq_size()
    [all …]
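set_kernel_sq_size() in mlx4 lays both work queues out in one buffer: each WQE is 1 << wqe_shift bytes, so count << shift gives a queue's footprint, and one queue's size becomes the other's offset. The real driver picks the order per QP type; this sketch puts the RQ first unconditionally:

#include <stdint.h>

/*
 * Shared-buffer layout sketch in the style of set_kernel_sq_size():
 * compute per-queue offsets and the total buffer size from
 * (wqe_cnt << wqe_shift).
 */
struct wq_geom { uint32_t wqe_cnt, wqe_shift, offset; };

static uint32_t layout_qp_buf(struct wq_geom *rq, struct wq_geom *sq)
{
	rq->offset = 0;
	sq->offset = rq->wqe_cnt << rq->wqe_shift;	/* SQ follows RQ */

	return (rq->wqe_cnt << rq->wqe_shift) +
	       (sq->wqe_cnt << sq->wqe_shift);		/* total buf_size */
}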
D | cq.c |
    621  wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];  in mlx4_ib_qp_sw_comp()
    737  wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];  in mlx4_ib_poll_one()
    751  tail = wq->tail & (wq->wqe_cnt - 1);  in mlx4_ib_poll_one()
D | mlx4_ib.h |
    165  int wqe_cnt;  member
/drivers/net/ethernet/mellanox/mlx5/core/steering/
D | dr_send.c |
    60   (dr_cq->qp->sq.wqe_cnt - 1);  in dr_parse_cqe()
    66   (dr_cq->qp->sq.wqe_cnt - 1);  in dr_parse_cqe()
    126  dr_qp->rq.wqe_cnt = 4;  in dr_create_rc_qp()
    129  dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);  in dr_create_rc_qp()
    132  MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));  in dr_create_rc_qp()
    133  MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));  in dr_create_rc_qp()
    141  dr_qp->sq.wqe_head = kcalloc(dr_qp->sq.wqe_cnt,  in dr_create_rc_qp()
    172  MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));  in dr_create_rc_qp()
    174  MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));  in dr_create_rc_qp()
    240  idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);  in dr_rdma_segments()
    [all …]
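dr_create_rc_qp() programs the QPC with ilog2(wqe_cnt) rather than the count itself: the hardware context only has room for a log2 size, which is why every wqe_cnt in these drivers is rounded to a power of two first. A sketch, with a fake_qpc struct and program_sizes() standing in for the MLX5_SET(qpc, ...) calls:

#include <stdint.h>

/*
 * A non-power-of-two count cannot be represented as a log2, so it
 * is rejected before being written into the (fake) context.
 */
static inline uint32_t ilog2_u32(uint32_t v)
{
	return 31 - (uint32_t)__builtin_clz(v);
}

struct fake_qpc { uint8_t log_rq_size, log_sq_size; };

static int program_sizes(struct fake_qpc *qpc,
			 uint32_t rq_wqe_cnt, uint32_t sq_wqe_cnt)
{
	if ((rq_wqe_cnt & (rq_wqe_cnt - 1)) ||
	    (sq_wqe_cnt & (sq_wqe_cnt - 1)))
		return -1;	/* not a power of two */

	qpc->log_rq_size = (uint8_t)ilog2_u32(rq_wqe_cnt);
	qpc->log_sq_size = (uint8_t)ilog2_u32(sq_wqe_cnt);
	return 0;
}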
D | dr_types.h |
    1338 unsigned int wqe_cnt;  member
    1344 unsigned int wqe_cnt;  member
/drivers/infiniband/hw/vmw_pvrdma/
D | pvrdma_qp.c |
    140  qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));  in pvrdma_set_rq_size()
    144  req_cap->max_recv_wr = qp->rq.wqe_cnt;  in pvrdma_set_rq_size()
    150  qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /  in pvrdma_set_rq_size()
    165  qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));  in pvrdma_set_sq_size()
    169  req_cap->max_send_wr = qp->sq.wqe_cnt;  in pvrdma_set_sq_size()
    177  (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /  in pvrdma_set_sq_size()
    696  qp->sq.ring, qp->sq.wqe_cnt, &tail))) {  in pvrdma_post_send()
    857  qp->sq.wqe_cnt);  in pvrdma_post_send()
    922  qp->rq.ring, qp->rq.wqe_cnt, &tail))) {  in pvrdma_post_recv()
    948  qp->rq.wqe_cnt);  in pvrdma_post_recv()
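pvrdma sizes its rings in pages: the ring occupies wqe_cnt * wqe_size bytes, and adding PAGE_SIZE - 1 before the division rounds up, so a ring that spills even one byte into the next page still gets that page. A sketch with PAGE_SIZE fixed at 4 KiB for illustration:

#include <stdint.h>

#define PAGE_SIZE 4096u	/* fixed here; the kernel's is per-arch */

/*
 * Round-up page count for a ring, in the style of
 * pvrdma_set_rq_size()'s npages computation.
 */
static uint32_t ring_npages(uint32_t wqe_cnt, uint32_t wqe_size)
{
	return (wqe_cnt * wqe_size + PAGE_SIZE - 1) / PAGE_SIZE;
}

/* e.g. 256 WQEs of 128 bytes = 32768 bytes = exactly 8 pages;
 * 257 WQEs would round up to 9. */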
D | pvrdma.h |
    154  int wqe_cnt;  member
    169  int wqe_cnt;  member
/drivers/vfio/pci/mlx5/
D | cmd.h |
    76   unsigned int wqe_cnt;  member
D | cmd.c |
    772  qp->rq.wqe_cnt = roundup_pow_of_two(max_recv_wr);  in mlx5vf_create_rc_qp()
    774  log_rq_sz = ilog2(qp->rq.wqe_cnt);  in mlx5vf_create_rc_qp()
    846  WARN_ON(qp->rq.pc - qp->rq.cc >= qp->rq.wqe_cnt);  in mlx5vf_post_recv()
    847  ix = qp->rq.pc & (qp->rq.wqe_cnt - 1);  in mlx5vf_post_recv()
    884  for (i = 0; i < qp->rq.wqe_cnt; i++) {  in mlx5vf_activate_qp()
    1246 ix = be16_to_cpu(cqe->wqe_counter) & (qp->rq.wqe_cnt - 1);  in mlx5vf_rq_cqe()
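mlx5vf_post_recv() keeps pc and cc as free-running counters that are never masked: unsigned subtraction makes pc - cc the in-flight count even across wraparound (hence the WARN_ON above), and pc is masked only when a slot index is actually needed. A sketch of that bookkeeping:

#include <stdbool.h>
#include <stdint.h>

/*
 * Wrap-safe producer/consumer bookkeeping: pc - cc is modulo 2^32,
 * so the fullness test works even after either counter wraps.
 */
struct ring {
	uint32_t wqe_cnt;	/* power of two */
	uint32_t pc;		/* producer counter, free-running */
	uint32_t cc;		/* consumer counter, free-running */
};

static bool ring_full(const struct ring *r)
{
	return r->pc - r->cc >= r->wqe_cnt;
}

static int ring_produce(struct ring *r)
{
	if (ring_full(r))
		return -1;
	return (int)(r->pc++ & (r->wqe_cnt - 1));	/* slot index */
}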
/drivers/infiniband/hw/mthca/
D | mthca_srq.c |
    56   __be16 wqe_cnt;  member
    68   __be16 wqe_cnt;  member
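Here (and in mlx4's mlx4.h further down) wqe_cnt sits in a hardware-visible context as a big-endian 16-bit field, so the CPU byte-swaps on every access with be16_to_cpu()/cpu_to_be16(). A userspace stand-in, assuming a little-endian host:

#include <stdint.h>

/*
 * On a big-endian host the kernel's conversions are identity; this
 * little-endian-only sketch always swaps.
 */
typedef uint16_t be16;

static inline uint16_t be16_to_cpu_(be16 v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

static inline be16 cpu_to_be16_(uint16_t v)
{
	return (be16)((v >> 8) | (v << 8));
}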
/drivers/net/ethernet/microsoft/mana/
D | gdma.h |
    81   u64 wqe_cnt : 8;  member
/drivers/infiniband/hw/bnxt_re/
D | qplib_fp.h |
    293  u64 wqe_cnt;  member
/drivers/net/ethernet/mellanox/mlx4/
D | mlx4.h |
    376  __be16 wqe_cnt;  member