Searched refs:cqc (Results 1 – 16 of 16) sorted by relevance

/drivers/net/ethernet/mellanox/mlx5/core/en/
params.c
439 void *cqc = param->cqc; in mlx5e_build_common_cq_param() local
441 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); in mlx5e_build_common_cq_param()
443 MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); in mlx5e_build_common_cq_param()
452 void *cqc = param->cqc; in mlx5e_build_rx_cq_param() local
465 MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); in mlx5e_build_rx_cq_param()
467 MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ? in mlx5e_build_rx_cq_param()
469 MLX5_SET(cqc, cqc, cqe_comp_en, 1); in mlx5e_build_rx_cq_param()
556 void *cqc = param->cqc; in mlx5e_build_tx_cq_param() local
558 MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size); in mlx5e_build_tx_cq_param()
598 void *cqc = param->cqc; in mlx5e_build_ico_cq_param() local
[all …]
health.c
42 void *cqc; in mlx5e_health_cq_diag_fmsg() local
49 cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context); in mlx5e_health_cq_diag_fmsg()
50 hw_status = MLX5_GET(cqc, cqc, status); in mlx5e_health_cq_diag_fmsg()
params.h
15 u32 cqc[MLX5_ST_SZ_DW(cqc)]; member
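
The params.c/params.h hits above show the mlx5e convention: a parameter struct carries an opaque cqc blob (u32 cqc[MLX5_ST_SZ_DW(cqc)]) and the build helpers write individual fields into it with MLX5_SET. A minimal sketch of that pattern, assuming the standard linux/mlx5 headers; my_cq_param and my_build_cq_param are illustrative names, not driver symbols.

    /* Sketch only: fill a cqc template the way mlx5e_build_*_cq_param() does. */
    #include <linux/mlx5/driver.h>
    #include <linux/mlx5/device.h>

    struct my_cq_param {                            /* cf. params.h line 15 */
            u32 cqc[MLX5_ST_SZ_DW(cqc)];            /* opaque cqc layout blob */
    };

    static void my_build_cq_param(struct mlx5_core_dev *mdev,
                                  struct my_cq_param *param, u8 log_cq_size)
    {
            void *cqc = param->cqc;

            /* MLX5_SET packs each named bit-field into the blob */
            MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
            MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_64);
            MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
    }
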
/drivers/infiniband/hw/mlx5/
cq.c
715 void *cqc; in create_cq_user() local
747 cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT, in create_cq_user()
776 cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context); in create_cq_user()
777 MLX5_SET(cqc, cqc, log_page_size, in create_cq_user()
779 MLX5_SET(cqc, cqc, page_offset, page_offset_quantized); in create_cq_user()
813 MLX5_SET(cqc, cqc, cqe_comp_en, 1); in create_cq_user()
814 MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format); in create_cq_user()
874 void *cqc; in create_cq_kernel() local
903 cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context); in create_cq_kernel()
904 MLX5_SET(cqc, cqc, log_page_size, in create_cq_kernel()
[all …]
devx.c
700 void *cqc; in devx_set_umem_valid() local
703 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); in devx_set_umem_valid()
704 MLX5_SET(cqc, cqc, dbr_umem_valid, 1); in devx_set_umem_valid()
1456 !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq)) in is_apu_cq()
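
The devx.c hits operate on a command buffer the caller already built: MLX5_ADDR_OF locates the cq_context inside create_cq_in, then MLX5_SET/MLX5_GET touch single cqc fields. A hedged sketch mirroring devx_set_umem_valid() and is_apu_cq(), with hypothetical function names and the same headers as the previous sketch.

    /* 'in' points at a caller-built create_cq_in command blob. */
    static void my_mark_cq_dbr_umem_valid(void *in)
    {
            void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

            MLX5_SET(cqc, cqc, dbr_umem_valid, 1);  /* as devx_set_umem_valid() does */
    }

    static bool my_is_apu_cq(void *in)
    {
            /* read a field straight out of the embedded cq_context */
            return MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq);
    }
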
/drivers/net/ethernet/mellanox/mlx5/core/
cq.c
92 int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), in mlx5_core_create_cq()
206 void *cqc; in mlx5_core_modify_cq_moderation() local
209 cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); in mlx5_core_modify_cq_moderation()
210 MLX5_SET(cqc, cqc, cq_period, cq_period); in mlx5_core_modify_cq_moderation()
211 MLX5_SET(cqc, cqc, cq_max_count, cq_max_count); in mlx5_core_modify_cq_moderation()
wq.c
160 void *cqc, struct mlx5_cqwq *wq, in mlx5_cqwq_create() argument
164 u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7; in mlx5_cqwq_create()
165 u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size); in mlx5_cqwq_create()
debugfs.c
347 param = 1 << MLX5_GET(cqc, ctx, log_cq_size); in cq_read_field()
350 param = MLX5_GET(cqc, ctx, log_page_size); in cq_read_field()
en_main.c
1549 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, in mlx5e_alloc_cq_common()
1607 void *cqc; in mlx5e_create_cq() local
1622 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); in mlx5e_create_cq()
1624 memcpy(cqc, param->cqc, sizeof(param->cqc)); in mlx5e_create_cq()
1629 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); in mlx5e_create_cq()
1630 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); in mlx5e_create_cq()
1631 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); in mlx5e_create_cq()
1632 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - in mlx5e_create_cq()
1634 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); in mlx5e_create_cq()
wq.h
90 void *cqc, struct mlx5_cqwq *wq,
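
Taken together, cq.c, wq.c and en_main.c above form the create path: the prebuilt cqc template is copied into create_cq_in, the page list and runtime fields (completion EQ, UAR index, page size, doorbell address) are filled in, and the command is issued with mlx5_core_create_cq(). A condensed sketch of that flow, assuming the CQ buffer and doorbell were already allocated into wq_ctrl (e.g. by mlx5_cqwq_create()) and that the EQ number was looked up separately; my_create_cq is an illustrative name.

    #include <linux/slab.h>
    #include <linux/mlx5/driver.h>
    #include <linux/mlx5/cq.h>
    #include "wq.h"                         /* driver-private: struct mlx5_wq_ctrl */

    static int my_create_cq(struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq,
                            struct mlx5_wq_ctrl *wq_ctrl, u32 *param_cqc, int eqn)
    {
            u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};
            void *in, *cqc;
            int inlen, err;

            /* command header plus one 64-bit physical address per buffer page */
            inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                    sizeof(u64) * wq_ctrl->buf.npages;
            in = kvzalloc(inlen, GFP_KERNEL);
            if (!in)
                    return -ENOMEM;

            cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
            memcpy(cqc, param_cqc, MLX5_ST_SZ_BYTES(cqc));  /* prebuilt template */

            mlx5_fill_page_frag_array(&wq_ctrl->buf,
                                      (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

            MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);  /* completion EQ to use */
            MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
            MLX5_SET(cqc, cqc, log_page_size,
                     wq_ctrl->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
            MLX5_SET64(cqc, cqc, dbr_addr, wq_ctrl->db.dma);

            err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
            kvfree(in);
            return err;
    }
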
/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_send.c
759 u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {}; in dr_create_cq()
765 void *cqc, *in; in dr_create_cq() local
775 MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe)); in dr_create_cq()
803 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); in dr_create_cq()
804 MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe)); in dr_create_cq()
805 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); in dr_create_cq()
806 MLX5_SET(cqc, cqc, uar_page, uar->index); in dr_create_cq()
807 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - in dr_create_cq()
809 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); in dr_create_cq()
/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c
415 u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0}; in mlx5_fpga_conn_create_cq()
420 void *cqc, *in; in mlx5_fpga_conn_create_cq() local
425 MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size)); in mlx5_fpga_conn_create_cq()
454 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); in mlx5_fpga_conn_create_cq()
455 MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); in mlx5_fpga_conn_create_cq()
456 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); in mlx5_fpga_conn_create_cq()
457 MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index); in mlx5_fpga_conn_create_cq()
458 MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift - in mlx5_fpga_conn_create_cq()
460 MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma); in mlx5_fpga_conn_create_cq()
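
Both dr_send.c and fpga/conn.c above use a throwaway temp_cqc: a cqc blob carrying only log_cq_size is handed to mlx5_cqwq_create() so it can size and allocate the CQ ring, and the real cqc is then built inside create_cq_in exactly as in the previous sketch. A sketch of that idiom, assuming the driver-private wq.h; my_alloc_cq_ring is an illustrative name and NUMA placement/error handling are omitted.

    #include <linux/log2.h>
    #include <linux/mlx5/driver.h>
    #include "wq.h"                         /* driver-private: mlx5_cqwq_create() */

    static int my_alloc_cq_ring(struct mlx5_core_dev *mdev, int ncqe,
                                struct mlx5_cqwq *wq, struct mlx5_wq_ctrl *wq_ctrl)
    {
            u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
            struct mlx5_wq_param wqp = {};

            /* per wq.c above, only cqe_sz and log_cq_size are read from the blob */
            MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));

            return mlx5_cqwq_create(mdev, &wqp, temp_cqc, wq, wq_ctrl);
    }
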
/drivers/crypto/hisilicon/
qm.c
81 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1) argument
1597 struct qm_cqc *cqc, *cqc_curr; in qm_cqc_dump() local
1611 cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); in qm_cqc_dump()
1612 if (IS_ERR(cqc)) in qm_cqc_dump()
1613 return PTR_ERR(cqc); in qm_cqc_dump()
1618 if (qm->cqc) { in qm_cqc_dump()
1619 cqc_curr = qm->cqc + qp_id; in qm_cqc_dump()
1621 ret = dump_show(qm, cqc_curr, sizeof(*cqc), in qm_cqc_dump()
1631 ret = dump_show(qm, cqc, sizeof(*cqc), "CQC"); in qm_cqc_dump()
1636 qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); in qm_cqc_dump()
[all …]
qm.h
236 struct qm_cqc *cqc; member
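
The HiSilicon qm.c/qm.h hits are an unrelated "cqc": struct qm_cqc is a little-endian hardware completion-queue context that the driver dumps through debugfs, and fields are extracted with le16_to_cpu plus shifts and masks, as in QM_CQ_TAIL_IDX() above. A stand-in sketch of that extraction; my_qm_cqc models only the fields used here, the real layout lives in qm.h.

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct my_qm_cqc {                      /* stand-in for struct qm_cqc */
            __le16 head;
            __le16 tail;
            __le16 w11;                     /* bit 6 is what QM_CQ_TAIL_IDX() reads */
    };

    static u16 my_qm_cq_tail_idx(const struct my_qm_cqc *cqc)
    {
            return (le16_to_cpu(cqc->w11) >> 6) & 0x1;
    }
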
/drivers/vdpa/mlx5/net/
mlx5_vnet.c
580 void *cqc; in cq_create() local
611 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); in cq_create()
612 MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in cq_create()
621 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); in cq_create()
622 MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent)); in cq_create()
623 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
624 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); in cq_create()
625 MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma); in cq_create()
/drivers/net/ethernet/mellanox/mlx4/
resource_tracker.c
3077 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) in cq_get_mtt_addr() argument
3079 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; in cq_get_mtt_addr()
3082 static int cq_get_mtt_size(struct mlx4_cq_context *cqc) in cq_get_mtt_size() argument
3084 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; in cq_get_mtt_size()
3085 int page_shift = (cqc->log_page_size & 0x3f) + 12; in cq_get_mtt_size()
3442 struct mlx4_cq_context *cqc = inbox->buf; in mlx4_SW2HW_CQ_wrapper() local
3443 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; in mlx4_SW2HW_CQ_wrapper()
3453 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); in mlx4_SW2HW_CQ_wrapper()
3531 struct mlx4_cq_context *cqc = inbox->buf; in handle_resize() local
3532 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; in handle_resize()
[all …]
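
The mlx4 resource tracker parses a big-endian struct mlx4_cq_context from the command mailbox rather than a macro-described mlx5 layout; the helpers above pull out the MTT base address and the CQ/page sizes with fixed shifts and masks. A sketch of that arithmetic with a stand-in struct; the field packing is only modelled as far as these helpers use it.

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct my_mlx4_cq_context {             /* stand-in for struct mlx4_cq_context */
            __be32 logsize_usrpage;         /* log_cq_size in bits 28:24 */
            u8     log_page_size;           /* low 6 bits, added to a 4 KiB (1 << 12) base */
            __be32 mtt_base_addr_l;         /* masked with 0xfffffff8 as in cq_get_mtt_addr() */
    };

    static void my_parse_cq_context(const struct my_mlx4_cq_context *cqc)
    {
            int mtt_addr    = be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
            int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
            int page_shift  = (cqc->log_page_size & 0x3f) + 12;

            /* e.g. log_cq_size = 10 gives 1024 CQEs; page_shift = 12 gives 4 KiB pages */
            (void)mtt_addr; (void)log_cq_size; (void)page_shift;
    }
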