/drivers/net/ethernet/mellanox/mlx4/ |
D | en_cq.c |
    40  static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)  in mlx4_en_cq_event() argument
    52  struct mlx4_en_cq *cq;  in mlx4_en_create_cq() local
    55  cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);  in mlx4_en_create_cq()
    56  if (!cq) {  in mlx4_en_create_cq()
    57  cq = kzalloc(sizeof(*cq), GFP_KERNEL);  in mlx4_en_create_cq()
    58  if (!cq) {  in mlx4_en_create_cq()
    64  cq->size = entries;  in mlx4_en_create_cq()
    65  cq->buf_size = cq->size * mdev->dev->caps.cqe_size;  in mlx4_en_create_cq()
    67  cq->ring = ring;  in mlx4_en_create_cq()
    68  cq->is_tx = mode;  in mlx4_en_create_cq()
    [all …]
|
D | cq.c |
    82  static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)  in mlx4_add_cq_to_tasklet() argument
    85  struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;  in mlx4_add_cq_to_tasklet()
    93  if (list_empty_careful(&cq->tasklet_ctx.list)) {  in mlx4_add_cq_to_tasklet()
    94  atomic_inc(&cq->refcount);  in mlx4_add_cq_to_tasklet()
    95  list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);  in mlx4_add_cq_to_tasklet()
    102  struct mlx4_cq *cq;  in mlx4_cq_completion() local
    105  cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,  in mlx4_cq_completion()
    109  if (!cq) {  in mlx4_cq_completion()
    117  ++cq->arm_sn;  in mlx4_cq_completion()
    119  cq->comp(cq);  in mlx4_cq_completion()
    [all …]
|
/drivers/infiniband/core/ |
D | cq.c |
    28  static int __ib_process_cq(struct ib_cq *cq, int budget)  in __ib_process_cq() argument
    32  while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) {  in __ib_process_cq()
    34  struct ib_wc *wc = &cq->wc[i];  in __ib_process_cq()
    37  wc->wr_cqe->done(cq, wc);  in __ib_process_cq()
    64  int ib_process_cq_direct(struct ib_cq *cq, int budget)  in ib_process_cq_direct() argument
    66  WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);  in ib_process_cq_direct()
    68  return __ib_process_cq(cq, budget);  in ib_process_cq_direct()
    72  static void ib_cq_completion_direct(struct ib_cq *cq, void *private)  in ib_cq_completion_direct() argument
    74  WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);  in ib_cq_completion_direct()
    79  struct ib_cq *cq = container_of(iop, struct ib_cq, iop);  in ib_poll_handler() local
    [all …]
|
/drivers/infiniband/hw/mthca/ |
D | mthca_cq.c |
    169  static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)  in get_cqe() argument
    171  return get_cqe_from_buf(&cq->buf, entry);  in get_cqe()
    179  static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)  in next_cqe_sw() argument
    181  return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));  in next_cqe_sw()
    204  static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,  in update_cons_index() argument
    208  *cq->set_ci_db = cpu_to_be32(cq->cons_index);  in update_cons_index()
    211  mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,  in update_cons_index()
    224  struct mthca_cq *cq;  in mthca_cq_completion() local
    226  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));  in mthca_cq_completion()
    228  if (!cq) {  in mthca_cq_completion()
    [all …]
|
D | mthca_provider.c |
    656  struct mthca_cq *cq;  in mthca_create_cq() local
    683  cq = kmalloc(sizeof *cq, GFP_KERNEL);  in mthca_create_cq()
    684  if (!cq) {  in mthca_create_cq()
    690  cq->buf.mr.ibmr.lkey = ucmd.lkey;  in mthca_create_cq()
    691  cq->set_ci_db_index = ucmd.set_db_index;  in mthca_create_cq()
    692  cq->arm_db_index = ucmd.arm_db_index;  in mthca_create_cq()
    701  cq);  in mthca_create_cq()
    705  if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {  in mthca_create_cq()
    706  mthca_free_cq(to_mdev(ibdev), cq);  in mthca_create_cq()
    711  cq->resize_buf = NULL;  in mthca_create_cq()
    [all …]
|
/drivers/infiniband/sw/rxe/ |
D | rxe_cq.c |
    38  int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,  in rxe_cq_chk_attr() argument
    54  if (cq) {  in rxe_cq_chk_attr()
    55  count = queue_count(cq->queue);  in rxe_cq_chk_attr()
    71  struct rxe_cq *cq = (struct rxe_cq *)data;  in rxe_send_complete() local
    73  cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);  in rxe_send_complete()
    76  int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,  in rxe_cq_from_init() argument
    82  cq->queue = rxe_queue_init(rxe, &cqe,  in rxe_cq_from_init()
    84  if (!cq->queue) {  in rxe_cq_from_init()
    89  err = do_mmap_info(rxe, udata, false, context, cq->queue->buf,  in rxe_cq_from_init()
    90  cq->queue->buf_size, &cq->queue->ip);  in rxe_cq_from_init()
    [all …]
|
/drivers/infiniband/hw/mlx4/ |
D | cq.c |
    42  static void mlx4_ib_cq_comp(struct mlx4_cq *cq)  in mlx4_ib_cq_comp() argument
    44  struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;  in mlx4_ib_cq_comp()
    48  static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)  in mlx4_ib_cq_event() argument
    55  "on CQ %06x\n", type, cq->cqn);  in mlx4_ib_cq_event()
    59  ibcq = &to_mibcq(cq)->ibcq;  in mlx4_ib_cq_event()
    63  event.element.cq = ibcq;  in mlx4_ib_cq_event()
    73  static void *get_cqe(struct mlx4_ib_cq *cq, int n)  in get_cqe() argument
    75  return get_cqe_from_buf(&cq->buf, n);  in get_cqe()
    78  static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)  in get_sw_cqe() argument
    80  struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe()
    [all …]
|
/drivers/infiniband/sw/rdmavt/ |
D | cq.c |
    62  void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)  in rvt_cq_enter() argument
    69  spin_lock_irqsave(&cq->lock, flags);  in rvt_cq_enter()
    75  wc = cq->queue;  in rvt_cq_enter()
    77  if (head >= (unsigned)cq->ibcq.cqe) {  in rvt_cq_enter()
    78  head = cq->ibcq.cqe;  in rvt_cq_enter()
    85  spin_unlock_irqrestore(&cq->lock, flags);  in rvt_cq_enter()
    86  if (cq->ibcq.event_handler) {  in rvt_cq_enter()
    89  ev.device = cq->ibcq.device;  in rvt_cq_enter()
    90  ev.element.cq = &cq->ibcq;  in rvt_cq_enter()
    92  cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);  in rvt_cq_enter()
    [all …]
|
/drivers/infiniband/hw/mlx5/ |
D | cq.c |
    39  static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)  in mlx5_ib_cq_comp() argument
    41  struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;  in mlx5_ib_cq_comp()
    48  struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);  in mlx5_ib_cq_event() local
    49  struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);  in mlx5_ib_cq_event()
    50  struct ib_cq *ibcq = &cq->ibcq;  in mlx5_ib_cq_event()
    62  event.element.cq = ibcq;  in mlx5_ib_cq_event()
    72  static void *get_cqe(struct mlx5_ib_cq *cq, int n)  in get_cqe() argument
    74  return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);  in get_cqe()
    82  static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)  in get_sw_cqe() argument
    84  void *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe()
    [all …]
|
/drivers/net/ethernet/cisco/enic/ |
D | vnic_cq.c |
    29  void vnic_cq_free(struct vnic_cq *cq)  in vnic_cq_free() argument
    31  vnic_dev_free_desc_ring(cq->vdev, &cq->ring);  in vnic_cq_free()
    33  cq->ctrl = NULL;  in vnic_cq_free()
    36  int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,  in vnic_cq_alloc() argument
    41  cq->index = index;  in vnic_cq_alloc()
    42  cq->vdev = vdev;  in vnic_cq_alloc()
    44  cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);  in vnic_cq_alloc()
    45  if (!cq->ctrl) {  in vnic_cq_alloc()
    50  err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);  in vnic_cq_alloc()
    57  void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,  in vnic_cq_init() argument
    [all …]
|
D | vnic_cq.h |
    72  static inline unsigned int vnic_cq_service(struct vnic_cq *cq,  in vnic_cq_service() argument
    83  cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +  in vnic_cq_service()
    84  cq->ring.desc_size * cq->to_clean);  in vnic_cq_service()
    88  while (color != cq->last_color) {  in vnic_cq_service()
    90  if ((*q_service)(cq->vdev, cq_desc, type,  in vnic_cq_service()
    94  cq->to_clean++;  in vnic_cq_service()
    95  if (cq->to_clean == cq->ring.desc_count) {  in vnic_cq_service()
    96  cq->to_clean = 0;  in vnic_cq_service()
    97  cq->last_color = cq->last_color ? 0 : 1;  in vnic_cq_service()
    100  cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +  in vnic_cq_service()
    [all …]
|
/drivers/scsi/snic/ |
D | vnic_cq.c |
    24  void svnic_cq_free(struct vnic_cq *cq)  in svnic_cq_free() argument
    26  svnic_dev_free_desc_ring(cq->vdev, &cq->ring);  in svnic_cq_free()
    28  cq->ctrl = NULL;  in svnic_cq_free()
    31  int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,  in svnic_cq_alloc() argument
    36  cq->index = index;  in svnic_cq_alloc()
    37  cq->vdev = vdev;  in svnic_cq_alloc()
    39  cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index);  in svnic_cq_alloc()
    40  if (!cq->ctrl) {  in svnic_cq_alloc()
    46  err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);  in svnic_cq_alloc()
    53  void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,  in svnic_cq_init() argument
    [all …]
|
D | vnic_cq_fw.h |
    24  vnic_cq_fw_service(struct vnic_cq *cq,  in vnic_cq_fw_service() argument
    35  desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +  in vnic_cq_fw_service()
    36  cq->ring.desc_size * cq->to_clean);  in vnic_cq_fw_service()
    39  while (color != cq->last_color) {  in vnic_cq_fw_service()
    41  if ((*q_service)(cq->vdev, cq->index, desc))  in vnic_cq_fw_service()
    44  cq->to_clean++;  in vnic_cq_fw_service()
    45  if (cq->to_clean == cq->ring.desc_count) {  in vnic_cq_fw_service()
    46  cq->to_clean = 0;  in vnic_cq_fw_service()
    47  cq->last_color = cq->last_color ? 0 : 1;  in vnic_cq_fw_service()
    50  desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +  in vnic_cq_fw_service()
    [all …]
|
D | vnic_cq.h |
    60  static inline unsigned int svnic_cq_service(struct vnic_cq *cq,  in svnic_cq_service() argument
    71  cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +  in svnic_cq_service()
    72  cq->ring.desc_size * cq->to_clean);  in svnic_cq_service()
    76  while (color != cq->last_color) {  in svnic_cq_service()
    78  if ((*q_service)(cq->vdev, cq_desc, type,  in svnic_cq_service()
    82  cq->to_clean++;  in svnic_cq_service()
    83  if (cq->to_clean == cq->ring.desc_count) {  in svnic_cq_service()
    84  cq->to_clean = 0;  in svnic_cq_service()
    85  cq->last_color = cq->last_color ? 0 : 1;  in svnic_cq_service()
    88  cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +  in svnic_cq_service()
    [all …]
|
/drivers/scsi/fnic/ |
D | vnic_cq.c |
    24  void vnic_cq_free(struct vnic_cq *cq)  in vnic_cq_free() argument
    26  vnic_dev_free_desc_ring(cq->vdev, &cq->ring);  in vnic_cq_free()
    28  cq->ctrl = NULL;  in vnic_cq_free()
    31  int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,  in vnic_cq_alloc() argument
    36  cq->index = index;  in vnic_cq_alloc()
    37  cq->vdev = vdev;  in vnic_cq_alloc()
    39  cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);  in vnic_cq_alloc()
    40  if (!cq->ctrl) {  in vnic_cq_alloc()
    45  err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);  in vnic_cq_alloc()
    52  void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,  in vnic_cq_init() argument
    [all …]
|
D | vnic_cq_copy.h |
    24  struct vnic_cq *cq,  in vnic_cq_copy_service() argument
    35  desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +  in vnic_cq_copy_service()
    36  cq->ring.desc_size * cq->to_clean);  in vnic_cq_copy_service()
    39  while (color != cq->last_color) {  in vnic_cq_copy_service()
    41  if ((*q_service)(cq->vdev, cq->index, desc))  in vnic_cq_copy_service()
    44  cq->to_clean++;  in vnic_cq_copy_service()
    45  if (cq->to_clean == cq->ring.desc_count) {  in vnic_cq_copy_service()
    46  cq->to_clean = 0;  in vnic_cq_copy_service()
    47  cq->last_color = cq->last_color ? 0 : 1;  in vnic_cq_copy_service()
    50  desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +  in vnic_cq_copy_service()
    [all …]
|
D | vnic_cq.h |
    70  static inline unsigned int vnic_cq_service(struct vnic_cq *cq,  in vnic_cq_service() argument
    81  cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +  in vnic_cq_service()
    82  cq->ring.desc_size * cq->to_clean);  in vnic_cq_service()
    86  while (color != cq->last_color) {  in vnic_cq_service()
    88  if ((*q_service)(cq->vdev, cq_desc, type,  in vnic_cq_service()
    92  cq->to_clean++;  in vnic_cq_service()
    93  if (cq->to_clean == cq->ring.desc_count) {  in vnic_cq_service()
    94  cq->to_clean = 0;  in vnic_cq_service()
    95  cq->last_color = cq->last_color ? 0 : 1;  in vnic_cq_service()
    98  cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +  in vnic_cq_service()
    [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | cq.c |
    71  static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)  in mlx5_add_cq_to_tasklet() argument
    74  struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;  in mlx5_add_cq_to_tasklet()
    82  if (list_empty_careful(&cq->tasklet_ctx.list)) {  in mlx5_add_cq_to_tasklet()
    83  atomic_inc(&cq->refcount);  in mlx5_add_cq_to_tasklet()
    84  list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);  in mlx5_add_cq_to_tasklet()
    91  struct mlx5_core_cq *cq;  in mlx5_cq_completion() local
    95  cq = radix_tree_lookup(&table->tree, cqn);  in mlx5_cq_completion()
    96  if (likely(cq))  in mlx5_cq_completion()
    97  atomic_inc(&cq->refcount);  in mlx5_cq_completion()
    100  if (!cq) {  in mlx5_cq_completion()
    [all …]
|
D | en_txrx.c |
    35  struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)  in mlx5e_get_cqe() argument
    37  struct mlx5_cqwq *wq = &cq->wq;  in mlx5e_get_cqe()
    52  static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)  in mlx5e_poll_ico_cq() argument
    54  struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);  in mlx5e_poll_ico_cq()
    62  cqe = mlx5e_get_cqe(cq);  in mlx5e_poll_ico_cq()
    77  mlx5_cqwq_pop(&cq->wq);  in mlx5e_poll_ico_cq()
    98  } while ((cqe = mlx5e_get_cqe(cq)));  in mlx5e_poll_ico_cq()
    100  mlx5_cqwq_update_db_record(&cq->wq);  in mlx5e_poll_ico_cq()
    108  static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)  in mlx5e_poll_xdp_tx_cq() argument
    114  sq = container_of(cq, struct mlx5e_sq, cq);  in mlx5e_poll_xdp_tx_cq()
    [all …]
|
D | en_rx.c |
    46  static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,  in mlx5e_read_cqe_slot() argument
    49  u32 ci = cqcc & cq->wq.sz_m1;  in mlx5e_read_cqe_slot()
    51  memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));  in mlx5e_read_cqe_slot()
    55  struct mlx5e_cq *cq, u32 cqcc)  in mlx5e_read_title_slot() argument
    57  mlx5e_read_cqe_slot(cq, cqcc, &cq->title);  in mlx5e_read_title_slot()
    58  cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt);  in mlx5e_read_title_slot()
    59  cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);  in mlx5e_read_title_slot()
    63  static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)  in mlx5e_read_mini_arr_slot() argument
    65  mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);  in mlx5e_read_mini_arr_slot()
    66  cq->mini_arr_idx = 0;  in mlx5e_read_mini_arr_slot()
    [all …]
|
/drivers/infiniband/hw/cxgb4/ |
D | cq.c |
    35  static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,  in destroy_cq() argument
    56  res->u.cq.restype = FW_RI_RES_TYPE_CQ;  in destroy_cq()
    57  res->u.cq.op = FW_RI_RES_OP_RESET;  in destroy_cq()
    58  res->u.cq.iqid = cpu_to_be32(cq->cqid);  in destroy_cq()
    66  kfree(cq->sw_queue);  in destroy_cq()
    68  cq->memsize, cq->queue,  in destroy_cq()
    69  dma_unmap_addr(cq, mapping));  in destroy_cq()
    70  c4iw_put_cqid(rdev, cq->cqid, uctx);  in destroy_cq()
    74  static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,  in create_cq() argument
    85  cq->cqid = c4iw_get_cqid(rdev, uctx);  in create_cq()
    [all …]
|
D | t4.h |
    568  static inline void write_gts(struct t4_cq *cq, u32 val)  in write_gts() argument
    570  if (cq->bar2_va)  in write_gts()
    571  writel(val | INGRESSQID_V(cq->bar2_qid),  in write_gts()
    572  cq->bar2_va + SGE_UDB_GTS);  in write_gts()
    574  writel(val | INGRESSQID_V(cq->cqid), cq->gts);  in write_gts()
    577  static inline int t4_clear_cq_armed(struct t4_cq *cq)  in t4_clear_cq_armed() argument
    579  return test_and_clear_bit(CQ_ARMED, &cq->flags);  in t4_clear_cq_armed()
    582  static inline int t4_arm_cq(struct t4_cq *cq, int se)  in t4_arm_cq() argument
    586  set_bit(CQ_ARMED, &cq->flags);  in t4_arm_cq()
    587  while (cq->cidx_inc > CIDXINC_M) {  in t4_arm_cq()
    [all …]
|
/drivers/infiniband/hw/cxgb3/ |
D | cxio_hal.c |
    71  int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,  in cxio_hal_cq_op() argument
    79  setup.id = cq->cqid;  in cxio_hal_cq_op()
    92  if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {  in cxio_hal_cq_op()
    95  rptr = cq->rptr;  in cxio_hal_cq_op()
    101  while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)  in cxio_hal_cq_op()
    109  cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);  in cxio_hal_cq_op()
    110  while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {  in cxio_hal_cq_op()
    158  int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)  in cxio_create_cq() argument
    161  int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);  in cxio_create_cq()
    164  cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);  in cxio_create_cq()
    [all …]
|
/drivers/isdn/mISDN/ |
D | dsp_hwec.c |
    55  struct mISDN_ctrl_req cq;  in dsp_hwec_enable() local
    97  memset(&cq, 0, sizeof(cq));  in dsp_hwec_enable()
    98  cq.op = MISDN_CTRL_HFC_ECHOCAN_ON;  in dsp_hwec_enable()
    99  cq.p1 = deftaps;  in dsp_hwec_enable()
    100  if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {  in dsp_hwec_enable()
    109  struct mISDN_ctrl_req cq;  in dsp_hwec_disable() local
    118  memset(&cq, 0, sizeof(cq));  in dsp_hwec_disable()
    119  cq.op = MISDN_CTRL_HFC_ECHOCAN_OFF;  in dsp_hwec_disable()
    120  if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {  in dsp_hwec_disable()
|
/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.c |
    125  struct ehea_cq *cq;  in ehea_create_cq() local
    132  cq = kzalloc(sizeof(*cq), GFP_KERNEL);  in ehea_create_cq()
    133  if (!cq)  in ehea_create_cq()
    136  cq->attr.max_nr_of_cqes = nr_of_cqe;  in ehea_create_cq()
    137  cq->attr.cq_token = cq_token;  in ehea_create_cq()
    138  cq->attr.eq_handle = eq_handle;  in ehea_create_cq()
    140  cq->adapter = adapter;  in ehea_create_cq()
    142  cq_handle_ref = &cq->fw_handle;  in ehea_create_cq()
    146  hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,  in ehea_create_cq()
    147  &cq->fw_handle, &cq->epas);  in ehea_create_cq()
    [all …]
|