/kernel/linux/linux-5.10/drivers/infiniband/ulp/iser/
D | iser_verbs.c |
    243  unsigned int max_send_wr, cq_size;   in iser_create_ib_conn_res() local
    257  cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;   in iser_create_ib_conn_res()
    258  ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);   in iser_create_ib_conn_res()
    263  ib_conn->cq_size = cq_size;   in iser_create_ib_conn_res()
    292  ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);   in iser_create_ib_conn_res()
    406  ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);   in iser_free_ib_conn_res()
|
D | iscsi_iser.h | 382 u32 cq_size; member
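The iser hits above illustrate the shared-CQ pool pattern: the nr_cqe requested from ib_cq_pool_get() is remembered in ib_conn->cq_size so the teardown path can hand the same budget back to ib_cq_pool_put(). Below is a minimal kernel-context sketch of that pairing; struct my_conn, MY_MAX_RECV_DTOS and the sizing are illustrative stand-ins, not the driver's actual definitions.

    #include <rdma/ib_verbs.h>

    /* Illustrative connection state mirroring the cq/cq_size pair kept by iser. */
    struct my_conn {
        struct ib_cq *cq;
        u32 cq_size;            /* nr_cqe handed to ib_cq_pool_get() */
    };

    #define MY_MAX_RECV_DTOS 128    /* stand-in for ISER_QP_MAX_RECV_DTOS */

    static int my_conn_get_cq(struct my_conn *conn, struct ib_device *ib_dev,
                              unsigned int max_send_wr)
    {
        unsigned int cq_size = max_send_wr + MY_MAX_RECV_DTOS;

        /* comp_vector_hint of -1 lets the core pick a completion vector. */
        conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
        if (IS_ERR(conn->cq))
            return PTR_ERR(conn->cq);

        /* Remember the size: the matching put must return the same nr_cqe. */
        conn->cq_size = cq_size;
        return 0;
    }

    static void my_conn_put_cq(struct my_conn *conn)
    {
        ib_cq_pool_put(conn->cq, conn->cq_size);
    }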
|
/kernel/linux/patches/linux-4.19/prebuilts/usr/include/rdma/ |
D | nes-abi.h | 51 __u32 cq_size; member
|
D | i40iw-abi.h | 50 __u32 cq_size; member
|
/kernel/linux/linux-5.10/include/uapi/rdma/ |
D | i40iw-abi.h | 92 __u32 cq_size; member
|
/kernel/linux/patches/linux-5.10/prebuilts/usr/include/rdma/ |
D | i40iw-abi.h | 62 __u32 cq_size; member
|
/kernel/linux/linux-5.10/drivers/infiniband/ulp/rtrs/ |
D | rtrs.c |
    217  static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,   in create_cq() argument
    223  cq = ib_alloc_cq(cm_id->device, con, cq_size,   in create_cq()
    265  u32 max_send_sge, int cq_vector, int cq_size,   in rtrs_cq_qp_create() argument
    271  err = create_cq(con, cq_vector, cq_size, poll_ctx);   in rtrs_cq_qp_create()
|
D | rtrs-pri.h | 313 u32 max_send_sge, int cq_vector, int cq_size,
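rtrs, by contrast, allocates its CQ directly: create_cq() forwards the caller-chosen cq_vector and cq_size to ib_alloc_cq(), and the CQ is later released with ib_free_cq() rather than returned to a pool. A hedged sketch of that call shape follows; the rtrs_con argument is reduced to an opaque pointer here.

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    /* Sketch of the direct-allocation shape used by rtrs' create_cq(). */
    static struct ib_cq *sketch_create_cq(struct rdma_cm_id *cm_id, void *con,
                                          int cq_vector, u16 cq_size,
                                          enum ib_poll_context poll_ctx)
    {
        /* The caller owns this CQ outright; cq_vector pins the completion vector. */
        return ib_alloc_cq(cm_id->device, con, cq_size, cq_vector, poll_ctx);
    }

    static void sketch_destroy_cq(struct ib_cq *cq)
    {
        ib_free_cq(cq);     /* counterpart of ib_alloc_cq(); no size argument needed */
    }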
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/ |
D | i40iw_user.h |
    369  u32 cq_size;   member
    399  u32 cq_size;   member
|
D | i40iw_verbs.h | 108 u16 cq_size; member
|
D | i40iw_uk.c |
    1059  if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||   in i40iw_cq_uk_init()
    1060      (info->cq_size > I40IW_MAX_CQ_SIZE))   in i40iw_cq_uk_init()
    1064  cq->cq_size = info->cq_size;   in i40iw_cq_uk_init()
    1069  I40IW_RING_INIT(cq->cq_ring, cq->cq_size);   in i40iw_cq_uk_init()
|
D | i40iw_puda.c |
    641  set_64bit_val(wqe, 0, cq->cq_uk.cq_size);   in i40iw_puda_cq_wqe()
    683  cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));   in i40iw_puda_cq_create()
    695  info.shadow_read_threshold = rsrc->cq_size >> 2;   in i40iw_puda_cq_create()
    701  init_info->cq_size = rsrc->cq_size;   in i40iw_puda_cq_create()
    939  rsrc->cq_size = info->rq_size + info->sq_size;   in i40iw_puda_create_rsrc()
|
D | i40iw_puda.h | 131 u32 cq_size; member
|
D | i40iw_ctrl.c |
    2045  cq->cq_uk.cq_size = info->num_elem;   in i40iw_sc_ccq_init()
    2101  set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);   in i40iw_sc_ccq_create()
    2160  set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);   in i40iw_sc_ccq_destroy()
    2265  set_64bit_val(wqe, 0, cq->cq_uk.cq_size);   in i40iw_sc_cq_create()
    2317  set_64bit_val(wqe, 0, cq->cq_uk.cq_size);   in i40iw_sc_cq_destroy()
    2358  u32 cq_size, ceq_id, first_pm_pbl_idx;   in i40iw_sc_cq_modify() local
    2381  cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;   in i40iw_sc_cq_modify()
    2398  cq->cq_uk.cq_size = cq_size;   in i40iw_sc_cq_modify()
    2406  set_64bit_val(wqe, 0, cq_size);   in i40iw_sc_cq_modify()
|
D | i40iw_verbs.c |
    1101  ukinfo->cq_size = max(entries, 4);   in i40iw_create_cq()
    1103  iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;   in i40iw_create_cq()
    1148  rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);   in i40iw_create_cq()
    1192  resp.cq_size = info.cq_uk_init_info.cq_size;   in i40iw_create_cq()
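Across the i40iw hits, cq_size starts at the verbs layer (clamped to at least 4 entries in i40iw_create_cq()), is range-checked in i40iw_cq_uk_init(), sizes the CQE ring in bytes, and is finally echoed back to userspace as resp.cq_size. A small standalone sketch of that sizing arithmetic; the min/max limits and the CQE layout are placeholders, not the real I40IW_MIN_CQ_SIZE/I40IW_MAX_CQ_SIZE values.

    #include <stddef.h>
    #include <stdint.h>

    /* Placeholder limits standing in for I40IW_MIN_CQ_SIZE / I40IW_MAX_CQ_SIZE. */
    #define MIN_CQ_SIZE 4U
    #define MAX_CQ_SIZE (1U << 20)

    struct cqe { uint64_t qword[4]; };      /* stand-in for struct i40iw_cqe */

    /* Clamp the requested entry count and return the CQE ring size in bytes
     * (0 on invalid input); *cq_size is what would be reported back to userspace. */
    static size_t cq_ring_bytes(uint32_t entries, uint32_t *cq_size)
    {
        uint32_t n = entries < 4 ? 4 : entries;     /* max(entries, 4) */

        if (n < MIN_CQ_SIZE || n > MAX_CQ_SIZE)
            return 0;

        *cq_size = n;
        return (size_t)n * sizeof(struct cqe);
    }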
|
/kernel/linux/linux-5.10/drivers/infiniband/ulp/isert/ |
D | ib_isert.h | 184 u32 cq_size; member
|
D | ib_isert.c |
    107  u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;   in isert_create_qp() local
    113  isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);   in isert_create_qp()
    119  isert_conn->cq_size = cq_size;   in isert_create_qp()
    141  ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);   in isert_create_qp()
    411  ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);   in isert_destroy_qp()
|
/kernel/linux/linux-5.10/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.h | 304 u32 cq_size; member
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/fpga/ |
D | conn.c |
    411  static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)   in mlx5_fpga_conn_create_cq() argument
    424  cq_size = roundup_pow_of_two(cq_size);   in mlx5_fpga_conn_create_cq()
    425  MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));   in mlx5_fpga_conn_create_cq()
    455  MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));   in mlx5_fpga_conn_create_cq()
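The mlx5 FPGA conn hits show why the requested size is rounded up first: the CQ context only carries log_cq_size, so the driver programs the exponent of a power-of-two entry count. A standalone illustration of the same arithmetic, using userspace stand-ins for the kernel's roundup_pow_of_two() and ilog2() helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for roundup_pow_of_two(). */
    static uint32_t roundup_pow2(uint32_t v)
    {
        uint32_t p = 1;

        while (p < v)
            p <<= 1;
        return p;
    }

    /* Userspace stand-in for ilog2() on a power of two. */
    static unsigned int log2_u32(uint32_t v)
    {
        unsigned int l = 0;

        while (v >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        uint32_t cq_size = 1000;                /* requested entries */

        cq_size = roundup_pow2(cq_size);        /* 1024: hardware wants a power of two */
        printf("log_cq_size = %u\n", log2_u32(cq_size));    /* prints 10 */
        return 0;
    }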
|
/kernel/linux/linux-5.10/drivers/dma/ |
D | hisi_dma.c |
    350  size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;   in hisi_dma_alloc_qps_mem() local
    362  chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,   in hisi_dma_alloc_qps_mem()
|
/kernel/linux/linux-5.10/drivers/net/ethernet/pensando/ionic/ |
D | ionic_lif.c |
    379  dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);   in ionic_qcq_free()
    551  int q_size, cq_size;   in ionic_qcq_alloc() local
    555  cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);   in ionic_qcq_alloc()
    557  new->q_size = PAGE_SIZE + q_size + cq_size;   in ionic_qcq_alloc()
    586  new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);   in ionic_qcq_alloc()
    587  new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,   in ionic_qcq_alloc()
    622  dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);   in ionic_qcq_alloc()
    705  memset(qcq->cq_base, 0, qcq->cq_size);   in ionic_qcq_sanitize()
    2203  swap(a->cq_size, b->cq_size);   in ionic_swap_queues()
|
D | ionic_lif.h | 66 u32 cq_size; member
|
D | ionic_debugfs.c | 133 debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size); in ionic_debugfs_add_qcq()
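The ionic hits follow the usual coherent-DMA discipline: whatever byte count is passed to dma_alloc_coherent() is kept in cq_size so the identical size can be handed back to dma_free_coherent() (and used to zero the ring in ionic_qcq_sanitize()). A minimal kernel-context sketch of that pattern with hypothetical names; the page-aligned sizing mirrors one of the calculations above but is not the driver's exact layout.

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    /* Hypothetical holder mirroring the cq_* fields of ionic_qcq. */
    struct my_qcq {
        void *cq_base;
        dma_addr_t cq_base_pa;
        u32 cq_size;            /* exact byte count given to dma_alloc_coherent() */
    };

    static int my_qcq_alloc_cq(struct device *dev, struct my_qcq *qcq,
                               unsigned int num_descs, size_t cq_desc_size)
    {
        /* Round the descriptor ring up to a page boundary. */
        qcq->cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
        qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
                                          &qcq->cq_base_pa, GFP_KERNEL);
        return qcq->cq_base ? 0 : -ENOMEM;
    }

    static void my_qcq_free_cq(struct device *dev, struct my_qcq *qcq)
    {
        /* Free with exactly the size that was allocated. */
        dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
    }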
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
D | dr_send.c |
    870  int cq_size;   in mlx5dr_send_ring_alloc() local
    878  cq_size = QUEUE_SIZE + 1;   in mlx5dr_send_ring_alloc()
    879  dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);   in mlx5dr_send_ring_alloc()
|
/kernel/linux/linux-5.10/drivers/nvme/host/ |
D | rdma.c |
    99   int cq_size;   member
    420  ib_cq_pool_put(queue->ib_cq, queue->cq_size);   in nvme_rdma_free_cq()
    479  queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,   in nvme_rdma_create_cq()
    483  queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,   in nvme_rdma_create_cq()
    511  queue->cq_size = cq_factor * queue->queue_size + 1;   in nvme_rdma_create_queue_ib()
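nvme-rdma sizes the CQ from the queue depth (cq_factor * queue_size + 1) and then chooses between a private, directly polled CQ and a pooled softirq CQ in nvme_rdma_create_cq(). A hedged kernel-context sketch of that branch; the factor value and the struct are placeholders, and the non-pooled CQ would be released with ib_free_cq() while the pooled one goes back via ib_cq_pool_put().

    #include <rdma/ib_verbs.h>

    /* Hypothetical per-queue state mirroring the nvme_rdma_queue cq fields. */
    struct my_queue {
        struct ib_cq *ib_cq;
        int cq_size;
        bool poll;              /* true for a polling (IB_POLL_DIRECT) queue */
    };

    static int my_create_cq(struct ib_device *ibdev, struct my_queue *q,
                            int queue_size, int comp_vector)
    {
        int cq_factor = 2;      /* placeholder; derived from the driver's WR budget */

        q->cq_size = cq_factor * queue_size + 1;

        if (q->poll)
            /* Polled queues need a private CQ they can spin on. */
            q->ib_cq = ib_alloc_cq(ibdev, q, q->cq_size, comp_vector,
                                   IB_POLL_DIRECT);
        else
            /* Interrupt-driven queues can share a pooled CQ. */
            q->ib_cq = ib_cq_pool_get(ibdev, q->cq_size, comp_vector,
                                      IB_POLL_SOFTIRQ);

        return IS_ERR(q->ib_cq) ? PTR_ERR(q->ib_cq) : 0;
    }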
|