Home
last modified time | relevance | path

Searched refs: cq_context (Results 1 – 25 of 44) sorted by relevance

12

/drivers/net/ethernet/mellanox/mlx4/
cq.c:173 struct mlx4_cq_context *cq_context; in mlx4_cq_modify() local
180 cq_context = mailbox->buf; in mlx4_cq_modify()
181 cq_context->cq_max_count = cpu_to_be16(count); in mlx4_cq_modify()
182 cq_context->cq_period = cpu_to_be16(period); in mlx4_cq_modify()
195 struct mlx4_cq_context *cq_context; in mlx4_cq_resize() local
203 cq_context = mailbox->buf; in mlx4_cq_resize()
204 cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24); in mlx4_cq_resize()
205 cq_context->log_page_size = mtt->page_shift - 12; in mlx4_cq_resize()
207 cq_context->mtt_base_addr_h = mtt_addr >> 32; in mlx4_cq_resize()
208 cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); in mlx4_cq_resize()
[all …]
/drivers/infiniband/hw/hns/
hns_roce_hw_v2_dfx.c:12 struct hns_roce_v2_cq_context *cq_context; in hns_roce_v2_query_cqc_info() local
20 cq_context = mailbox->buf; in hns_roce_v2_query_cqc_info()
29 memcpy(buffer, cq_context, sizeof(*cq_context)); in hns_roce_v2_query_cqc_info()
hns_roce_hw_v1.c:745 free_mr->mr_free_cq->ib_cq.cq_context = NULL; in hns_roce_v1_rsv_lp_qp()
2078 struct hns_roce_cq_context *cq_context = NULL; in hns_roce_v1_write_cqc() local
2087 cq_context = mb_buf; in hns_roce_v1_write_cqc()
2088 memset(cq_context, 0, sizeof(*cq_context)); in hns_roce_v1_write_cqc()
2096 roce_set_field(cq_context->cqc_byte_4, in hns_roce_v1_write_cqc()
2099 roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M, in hns_roce_v1_write_cqc()
2102 cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle); in hns_roce_v1_write_cqc()
2104 roce_set_field(cq_context->cqc_byte_12, in hns_roce_v1_write_cqc()
2108 roce_set_field(cq_context->cqc_byte_12, in hns_roce_v1_write_cqc()
2112 roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M, in hns_roce_v1_write_cqc()
[all …]
hns_roce_hw_v2.c:2556 struct hns_roce_v2_cq_context *cq_context; in hns_roce_v2_write_cqc() local
2558 cq_context = mb_buf; in hns_roce_v2_write_cqc()
2559 memset(cq_context, 0, sizeof(*cq_context)); in hns_roce_v2_write_cqc()
2561 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M, in hns_roce_v2_write_cqc()
2563 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M, in hns_roce_v2_write_cqc()
2565 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M, in hns_roce_v2_write_cqc()
2567 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M, in hns_roce_v2_write_cqc()
2570 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M, in hns_roce_v2_write_cqc()
2573 cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT); in hns_roce_v2_write_cqc()
2575 roce_set_field(cq_context->byte_16_hop_addr, in hns_roce_v2_write_cqc()
[all …]
hns_roce_cq.c:46 ibcq->comp_handler(ibcq, ibcq->cq_context); in hns_roce_ib_cq_comp()
72 ibcq->event_handler(&event, ibcq->cq_context); in hns_roce_ib_cq_event()
/drivers/infiniband/hw/mthca/
mthca_cq.c:230 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mthca_cq_completion()
256 cq->ibcq.event_handler(&event, cq->ibcq.cq_context); in mthca_cq_event()
773 struct mthca_cq_context *cq_context; in mthca_init_cq() local
809 cq_context = mailbox->buf; in mthca_init_cq()
822 memset(cq_context, 0, sizeof *cq_context); in mthca_init_cq()
823 cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK | in mthca_init_cq()
826 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); in mthca_init_cq()
828 cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index); in mthca_init_cq()
830 cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index); in mthca_init_cq()
831 cq_context->error_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); in mthca_init_cq()
[all …]
/drivers/infiniband/sw/rxe/
rxe_cq.c:81 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in rxe_send_complete()
145 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rxe_cq_post()
/drivers/infiniband/core/
uverbs_std_types_cq.c:42 struct ib_uverbs_event_queue *ev_queue = cq->cq_context; in uverbs_free_cq()
124 cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; in UVERBS_HANDLER()
uverbs.h:234 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
cq.c:200 cq->cq_context = private; in __ib_alloc_cq_user()
/drivers/infiniband/hw/cxgb3/
iwch_ev.c:100 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in post_qp_event()
178 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in iwch_ev_dispatch()
iwch_qp.c:748 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); in __flush_qp()
762 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); in __flush_qp()
784 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); in flush_qp()
790 schp->ibcq.cq_context); in flush_qp()
/drivers/infiniband/hw/cxgb4/
ev.c:115 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in post_qp_event()
235 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in c4iw_ev_handler()
qp.c:1011 schp->ibcq.cq_context); in complete_sq_drain_wr()
1062 rchp->ibcq.cq_context); in complete_rq_drain_wr()
1640 rchp->ibcq.cq_context); in __flush_qp()
1647 rchp->ibcq.cq_context); in __flush_qp()
1653 schp->ibcq.cq_context); in __flush_qp()
1677 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); in flush_qp()
1683 schp->ibcq.cq_context); in flush_qp()
/drivers/net/ethernet/mellanox/mlx5/core/
cq.c:93 int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); in mlx5_core_create_cq()
212 cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); in mlx5_core_modify_cq_moderation()
debugfs.c:352 ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context); in cq_read_field()
/drivers/infiniband/hw/mlx5/
cq.c:44 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx5_ib_cq_comp()
64 ibcq->event_handler(&event, ibcq->cq_context); in mlx5_ib_cq_event()
739 cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context); in create_cq_user()
856 cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context); in create_cq_kernel()
884 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in notify_soft_wc_handler()
946 cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context); in mlx5_ib_create_cq()
1280 cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); in mlx5_ib_resize_cq()
/drivers/net/ethernet/mellanox/mlx5/core/en/
health.c:49 cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context); in mlx5e_reporter_cq_diagnose()
/drivers/infiniband/sw/rdmavt/
cq.c:116 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
182 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
/drivers/infiniband/hw/i40iw/
i40iw_hw.c:177 i40iwcq->ibcq.cq_context); in i40iw_iwarp_ce_handler()
387 iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context); in i40iw_process_aeq()
/drivers/infiniband/hw/qedr/
qedr_roce_cm.c:93 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_tx_packet()
122 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_rx_packet()
main.c:491 (&cq->ibcq, cq->ibcq.cq_context); in qedr_irq_handler()
735 ibcq->event_handler(&event, ibcq->cq_context); in qedr_affiliated_event()
/drivers/nvme/target/
rdma.c:538 struct nvmet_rdma_queue *queue = cq->cq_context; in nvmet_rdma_send_done()
586 struct nvmet_rdma_queue *queue = cq->cq_context; in nvmet_rdma_read_data_done()
791 struct nvmet_rdma_queue *queue = cq->cq_context; in nvmet_rdma_recv_done()
/drivers/infiniband/hw/mlx4/
cq.c:46 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx4_ib_cq_comp()
65 ibcq->event_handler(&event, ibcq->cq_context); in mlx4_ib_cq_event()
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_main.c:355 ibcq->event_handler(&e, ibcq->cq_context); in pvrdma_cq_event()
521 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in pvrdma_intrx_handler()

12