/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/
D | cq.c | 661 int cqe_size) in alloc_cq_frag_buf() argument
    664 u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0); in alloc_cq_frag_buf()
    665 u8 log_wq_sz = ilog2(cqe_size); in alloc_cq_frag_buf()
    669 nent * cqe_size, in alloc_cq_frag_buf()
    677 buf->cqe_size = cqe_size; in alloc_cq_frag_buf()
    707 int *cqe_size, int *index, int *inlen) in create_cq_user() argument
    731 if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) || in create_cq_user()
    735 *cqe_size = ucmd.cqe_size; in create_cq_user()
    739 entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE); in create_cq_user()
    752 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); in create_cq_user()
    [all …]
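Read together, these mlx5 hits show create_cq_user() accepting only 64- or 128-byte CQEs from userspace and alloc_cq_frag_buf() deriving the buffer geometry from that size (a 64-byte stride, doubled for 128-byte CQEs). A minimal stand-alone sketch of the same arithmetic follows; the helper names and the main() driver are invented for illustration and are not part of the kernel code.

#include <stdio.h>

/* Hypothetical stand-alone illustration: a CQE may be 64 or 128 bytes,
 * and the per-entry stride is log2(64) = 6, bumped to 7 for 128-byte CQEs. */
static int cqe_size_is_valid(unsigned int cqe_size)
{
	return cqe_size == 64 || cqe_size == 128;
}

static unsigned int cq_buf_bytes(unsigned int nent, unsigned int cqe_size)
{
	unsigned int log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);

	/* nent entries, each 1 << log_wq_stride bytes, i.e. nent * cqe_size */
	return nent << log_wq_stride;
}

int main(void)
{
	if (!cqe_size_is_valid(128))
		return 1;
	printf("%u bytes for 256 CQEs of 128 bytes\n", cq_buf_bytes(256, 128));
	return 0;
}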
|
D | mlx5_ib.h | 461 int cqe_size; member
    507 int cqe_size; member
|
/kernel/linux/linux-5.10/include/uapi/rdma/ |
D | hns-abi.h | 42 __u32 cqe_size; member
    78 __u32 cqe_size; member
|
D | mlx5-abi.h | 278 __u32 cqe_size; member
    294 __u16 cqe_size; member
|
D | mlx4-abi.h | 71 __u32 cqe_size; member
|
/kernel/linux/patches/linux-5.10/prebuilts/usr/include/rdma/ |
D | hns-abi.h | 25 __u32 cqe_size; member
    55 __u32 cqe_size; member
|
D | mlx5-abi.h | 200 __u32 cqe_size; member
    214 __u16 cqe_size; member
|
D | mlx4-abi.h | 37 __u32 cqe_size; member
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/ |
D | cq.c | 290 static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size) in mlx4_init_user_cqes() argument
    292 int entries_per_copy = PAGE_SIZE / cqe_size; in mlx4_init_user_cqes()
    317 err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ? in mlx4_init_user_cqes()
    329 int cqe_size) in mlx4_init_kernel_cqes() argument
    334 memset(buf->direct.buf, 0xcc, entries * cqe_size); in mlx4_init_kernel_cqes()
    394 dev->caps.cqe_size); in mlx4_cq_alloc()
    399 dev->caps.cqe_size); in mlx4_cq_alloc()
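The first group of hits above initializes newly allocated CQEs with a 0xcc fill pattern, pushing it to user memory at most one page at a time (PAGE_SIZE / cqe_size entries per copy_to_user). Below is a rough userspace analogue of that chunking, assuming entries and cqe_size are powers of two so a buffer larger than a page is an exact multiple of pages; the function name and MY_PAGE_SIZE constant are made up.

#include <string.h>

#define MY_PAGE_SIZE 4096	/* stand-in for the kernel's PAGE_SIZE */

/* Fill `entries` CQEs of `cqe_size` bytes with the 0xcc pattern, copying
 * at most one page per memcpy; assumes power-of-two sizes, so there is
 * no partial-page tail to handle. */
static void init_cqes(void *buf, int entries, int cqe_size)
{
	int entries_per_copy = MY_PAGE_SIZE / cqe_size;
	unsigned char page[MY_PAGE_SIZE];
	char *dst = buf;
	int i;

	memset(page, 0xcc, sizeof(page));

	if (entries < entries_per_copy) {
		memcpy(dst, page, (size_t)entries * cqe_size);
		return;
	}

	for (i = 0; i < entries / entries_per_copy; i++) {
		memcpy(dst, page, MY_PAGE_SIZE);
		dst += MY_PAGE_SIZE;
	}
}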
|
D | fw.h | 203 u16 cqe_size; /* For use only when CQE stride feature enabled */ member
|
D | en_cq.c | 62 cq->buf_size = cq->size * mdev->dev->caps.cqe_size; in mlx4_en_create_cq()
|
D | en_tx.c | 452 cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; in mlx4_en_process_tx_cq()
    505 cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; in mlx4_en_process_tx_cq()
|
D | en_rx.c | 693 cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; in mlx4_en_process_rx_cq()
    900 cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; in mlx4_en_process_rx_cq()
|
D | mlx4_en.h | 585 int cqe_size; member
|
D | fw.c | 1944 dev->caps.cqe_size = 64; in mlx4_INIT_HCA()
    1947 dev->caps.cqe_size = 32; in mlx4_INIT_HCA()
    1954 dev->caps.cqe_size = cache_line_size(); in mlx4_INIT_HCA()
    2178 param->cqe_size = 1 << ((byte_field & in mlx4_QUERY_HCA()
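These fw.c hits pick the CQE size when the HCA is initialized (64 or 32 bytes, or the cache line size when the CQE-stride feature is in use) and later recover it from a QUERY_HCA byte field as a power of two. A hedged sketch of that encode/decode round trip follows; the 3-bit mask and helper names are assumptions for illustration, not the real firmware command layout.

/* Illustrative round trip: the device reports log2(cqe_size) in a few bits
 * of a byte field and the driver reconstructs the size with a shift, as in
 * param->cqe_size = 1 << (byte_field & ...). The 0x7 mask is assumed. */
static unsigned char encode_cqe_size(unsigned int cqe_size)
{
	unsigned char log_size = 0;

	while ((1u << log_size) < cqe_size)
		log_size++;
	return log_size & 0x7;
}

static unsigned int decode_cqe_size(unsigned char byte_field)
{
	return 1u << (byte_field & 0x7);
}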
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/ |
D | hns_roce_cq.c | 153 buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size; in alloc_cq_buf()
    233 if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) in set_cqe_size()
    234 hr_cq->cqe_size = ucmd->cqe_size; in set_cqe_size()
    236 hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE; in set_cqe_size()
    238 hr_cq->cqe_size = hr_dev->caps.cqe_sz; in set_cqe_size()
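The set_cqe_size() hits illustrate a common uapi-compatibility idiom: the CQE size is taken from the user command only when the command buffer is long enough to actually contain that field, and a default is used otherwise. A small sketch of the same offsetofend()-style length check, using a placeholder struct rather than the real hns uapi layout:

#include <stddef.h>

/* offsetofend(): end offset of a struct member, as used by the kernel check */
#define my_offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Placeholder for the real create-CQ user command; field order is assumed. */
struct fake_create_cq_ucmd {
	unsigned long long buf_addr;
	unsigned long long db_addr;
	unsigned int cqe_size;
};

/* Older userspace may pass a command that ends before cqe_size; only
 * trust the field when the passed-in length reaches past it. */
static unsigned int pick_cqe_size(const struct fake_create_cq_ucmd *ucmd,
				  size_t inlen, unsigned int default_size)
{
	if (inlen >= my_offsetofend(struct fake_create_cq_ucmd, cqe_size))
		return ucmd->cqe_size;
	return default_size;
}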
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/ |
D | cq.c | 105 err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size, in mlx4_ib_alloc_cq_buf()
    111 buf->entry_size = dev->dev->caps.cqe_size; in mlx4_ib_alloc_cq_buf()
    143 int cqe_size = dev->dev->caps.cqe_size; in mlx4_ib_get_cq_umem() local
    147 *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem()
    358 int cqe_size = cq->buf.entry_size; in mlx4_ib_cq_resize_copy_cqes() local
    359 int cqe_inc = cqe_size == 64 ? 1 : 0; in mlx4_ib_cq_resize_copy_cqes()
    368 memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size); in mlx4_ib_cq_resize_copy_cqes()
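In the mlx4 IB provider the CQE slot size comes from dev->caps.cqe_size, and cqe_inc reflects that with 64-byte slots the 32 bytes of interest sit in the second half of each slot, so the CQE pointer is advanced by one entry (the same idea as the "+ factor" adjustment in the en_tx.c and en_rx.c hits above). A hedged sketch with an invented 32-byte stand-in for struct mlx4_cqe:

#include <stddef.h>

/* 32-byte stand-in for struct mlx4_cqe (invented for illustration) */
struct fake_cqe {
	unsigned char data[32];
};

/* With 64-byte CQE slots the hardware writes the meaningful 32 bytes in the
 * second half of each slot, so advance by one fake_cqe (cqe_inc = 1); with
 * 32-byte slots the entry is used as-is. */
static struct fake_cqe *get_cqe_at(void *buf, int index, int cqe_size)
{
	struct fake_cqe *cqe =
		(struct fake_cqe *)((char *)buf + (size_t)index * cqe_size);
	int cqe_inc = (cqe_size == 64) ? 1 : 0;

	return cqe + cqe_inc;
}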
|
/kernel/linux/patches/linux-4.19/prebuilts/usr/include/rdma/ |
D | mlx5-abi.h | 182 __u32 cqe_size; member
    193 __u16 cqe_size; member
|
D | mlx4-abi.h | 24 __u32 cqe_size; member
|
/kernel/linux/linux-5.10/drivers/net/ethernet/marvell/octeontx2/nic/ |
D | otx2_txrx.h | 125 u16 cqe_size; member
|
D | otx2_common.c | 832 cq->cqe_size = pfvf->qset.xqe_size; in otx2_cq_init()
    835 err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size); in otx2_cq_init()
|
/kernel/linux/linux-5.10/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_qp.c | 320 size_t cqe_dma_size, cqe_size; in alloc_rq_cqe() local
    324 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
    325 rq->cqe = vzalloc(cqe_size); in alloc_rq_cqe()
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_hw.c | 1790 u32 hw_pages, cqe_size, page_size, cqe_count; in ocrdma_mbx_create_cq() local
    1803 cqe_size = OCRDMA_DPP_CQE_SIZE; in ocrdma_mbx_create_cq()
    1808 cqe_size = sizeof(struct ocrdma_cqe); in ocrdma_mbx_create_cq()
    1812 cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE); in ocrdma_mbx_create_cq()
    1832 cqe_count = cq->len / cqe_size; in ocrdma_mbx_create_cq()
    1861 cmd->cmd.pdid_cqecnt = (cq->len / cqe_size); in ocrdma_mbx_create_cq()
    1863 cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1; in ocrdma_mbx_create_cq()
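The ocrdma hits size the CQ by rounding the requested byte length up to the minimum queue page size and then deriving the CQE count back from the rounded length. A small sketch of that rounding follows; the 4096-byte constant is an assumption for illustration, not necessarily the value of OCRDMA_MIN_Q_PAGE_SIZE.

#define MIN_Q_PAGE_SIZE 4096	/* assumed value, for illustration only */

/* Round the CQ byte length up to the queue page size, then recover how
 * many CQEs fit in the rounded buffer (possibly more than requested). */
static unsigned int cq_len_bytes(unsigned int max_hw_cqe, unsigned int cqe_size)
{
	unsigned int len = max_hw_cqe * cqe_size;

	return (len + MIN_Q_PAGE_SIZE - 1) / MIN_Q_PAGE_SIZE * MIN_Q_PAGE_SIZE;
}

static unsigned int cq_entry_count(unsigned int len, unsigned int cqe_size)
{
	return len / cqe_size;
}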
|
/kernel/linux/linux-5.10/drivers/scsi/bnx2i/ |
D | bnx2i.h | 661 u32 cqe_size; member
|
D | bnx2i_hwi.c | 171 if (cq_index > ep->qp.cqe_size * 2) in bnx2i_arm_cq_event_coalescing()
    172 cq_index -= ep->qp.cqe_size * 2; in bnx2i_arm_cq_event_coalescing()
    1123 ep->qp.cqe_size = hba->max_cqes; in bnx2i_alloc_qp_resc()
    2063 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) in bnx2i_process_new_cqes()
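In bnx2i, cqe_size is really a CQE count (taken from hba->max_cqes), and sequence numbers span two passes over the CQ (1 through 2 * count) before wrapping, which is what both the arming check and the cqe_exp_seq_sn check above implement. A hedged sketch of that wraparound with invented helper names:

/* Sequence numbers run 1 .. 2 * cqe_cnt and then wrap back to 1,
 * mirroring the cqe_exp_seq_sn check above. */
static unsigned int advance_cqe_sn(unsigned int sn, unsigned int cqe_cnt)
{
	sn++;
	if (sn == 2 * cqe_cnt + 1)
		sn = 1;
	return sn;
}

/* Fold a doorbell index back into range, as in the arm-coalescing check. */
static unsigned int fold_cq_index(unsigned int cq_index, unsigned int cqe_cnt)
{
	if (cq_index > 2 * cqe_cnt)
		cq_index -= 2 * cqe_cnt;
	return cq_index;
}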
|