/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/ |
D | cq.c |
    715  void *cqc;  in create_cq_user() local
    765  cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);  in create_cq_user()
    766  MLX5_SET(cqc, cqc, log_page_size,  in create_cq_user()
    801  MLX5_SET(cqc, cqc, cqe_comp_en, 1);  in create_cq_user()
    802  MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);  in create_cq_user()
    859  void *cqc;  in create_cq_kernel() local
    888  cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);  in create_cq_kernel()
    889  MLX5_SET(cqc, cqc, log_page_size,  in create_cq_kernel()
    931  void *cqc;  in mlx5_ib_create_cq() local
    977  cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);  in mlx5_ib_create_cq()
    [all …]
|
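The cq.c and devx.c hits above (and the dr_send.c, fpga/conn.c and mlx5_vnet.c ones further down) all follow the same idiom: point at the cq_context inside a CREATE_CQ command with MLX5_ADDR_OF, then program individual fields with MLX5_SET/MLX5_SET64. A minimal sketch of that idiom follows, using only field names that appear in the hits; the function name and parameters are illustrative, not taken from the driver.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustrative only: mirrors the MLX5_ADDR_OF/MLX5_SET idiom shown above. */
static void example_program_cqc(u32 *cqb, u8 page_shift, bool cqe_comp,
				u8 mini_cqe_format)
{
	void *cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);

	/* log_page_size is relative to the 4K adapter page size */
	MLX5_SET(cqc, cqc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	if (cqe_comp) {
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
	}
}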
D | devx.c |
    629  void *cqc;  in devx_set_umem_valid() local
    632  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in devx_set_umem_valid()
    633  MLX5_SET(cqc, cqc, dbr_umem_valid, 1);  in devx_set_umem_valid()
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/ |
D | cq.c |
     92  int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);  in mlx5_core_create_cq()
    205  void *cqc;  in mlx5_core_modify_cq_moderation() local
    208  cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);  in mlx5_core_modify_cq_moderation()
    209  MLX5_SET(cqc, cqc, cq_period, cq_period);  in mlx5_core_modify_cq_moderation()
    210  MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);  in mlx5_core_modify_cq_moderation()
|
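The mlx5_core_modify_cq_moderation() hits above show the MODIFY_CQ side: the period and max-count live in the same cq_context. Below is a hedged caller-side sketch; the wrapper function and the chosen values are illustrative, only the exported helper and its (dev, cq, period, count) signature come from the driver.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>

/* Illustrative: retune interrupt moderation on an already-created CQ. */
static int example_set_cq_moderation(struct mlx5_core_dev *mdev,
				     struct mlx5_core_cq *mcq)
{
	u16 cq_period = 8;	/* interpreted according to the CQ's period mode */
	u16 cq_max_count = 64;	/* coalesce up to this many CQEs per event */

	return mlx5_core_modify_cq_moderation(mdev, mcq, cq_period, cq_max_count);
}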
D | wq.c |
    165  void *cqc, struct mlx5_cqwq *wq,  in mlx5_cqwq_create() argument
    169  u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;  in mlx5_cqwq_create()
    170  u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);  in mlx5_cqwq_create()
|
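wq.c reads the same two cqc fields back in order to size the CQ ring: cqe_sz picks a 64- or 128-byte stride and log_cq_size gives the number of entries. A small sketch of that derivation; example_cq_bytes() is an illustrative name, not a driver function.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>

/* Illustrative: total CQ buffer size implied by a cq_context. */
static size_t example_cq_bytes(void *cqc)
{
	/* 64-byte CQEs => stride 2^6, otherwise 128-byte CQEs => 2^7 */
	u8 log_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
	u8 log_sz = MLX5_GET(cqc, cqc, log_cq_size);

	return (size_t)1 << (log_stride + log_sz);
}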
D | en_main.c |
    1565  err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,  in mlx5e_alloc_cq_common()
    1621  void *cqc;  in mlx5e_create_cq() local
    1636  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in mlx5e_create_cq()
    1638  memcpy(cqc, param->cqc, sizeof(param->cqc));  in mlx5e_create_cq()
    1643  MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);  in mlx5e_create_cq()
    1644  MLX5_SET(cqc, cqc, c_eqn, eqn);  in mlx5e_create_cq()
    1645  MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);  in mlx5e_create_cq()
    1646  MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -  in mlx5e_create_cq()
    1648  MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);  in mlx5e_create_cq()
    2239  void *cqc = param->cqc;  in mlx5e_build_common_cq_param() local
    [all …]
|
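en_main.c pairs with the params.h entry further down: the common CQ attributes are staged once in the param->cqc scratch array and memcpy()ed into each CREATE_CQ command, with the per-CQ fields (EQ, UAR, doorbell, page size) layered on afterwards. A sketch of that split, assuming a trimmed stand-in for mlx5e_cq_param; the example_* names are illustrative.

#include <linux/string.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

struct example_cq_param {			/* stand-in for mlx5e_cq_param */
	u32 cqc[MLX5_ST_SZ_DW(cqc)];		/* pre-built cq_context template */
};

/* Common fields: filled once, shared by every CQ built from this param. */
static void example_build_common(struct example_cq_param *p, u8 log_cq_size)
{
	void *cqc = p->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
}

/* Per-CQ fields: copy the template first, then the instance-specific bits. */
static void example_fill_create_cmd(struct mlx5_core_dev *mdev, u32 *in,
				    struct example_cq_param *p, u32 eqn,
				    u8 page_shift, u64 db_dma)
{
	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, p->cqc, sizeof(p->cqc));
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, db_dma);
}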
D | debugfs.c |
    347  param = 1 << MLX5_GET(cqc, ctx, log_cq_size);  in cq_read_field()
    350  param = MLX5_GET(cqc, ctx, log_page_size);  in cq_read_field()
|
D | wq.h | 90 void *cqc, struct mlx5_cqwq *wq,
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
D | dr_send.c |
    709  u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};  in dr_create_cq()
    715  void *cqc, *in;  in dr_create_cq() local
    725  MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));  in dr_create_cq()
    753  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in dr_create_cq()
    754  MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));  in dr_create_cq()
    755  MLX5_SET(cqc, cqc, c_eqn, eqn);  in dr_create_cq()
    756  MLX5_SET(cqc, cqc, uar_page, uar->index);  in dr_create_cq()
    757  MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -  in dr_create_cq()
    759  MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);  in dr_create_cq()
|
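dr_send.c (and fpga/conn.c below) use a two-stage trick: a throwaway temp_cqc carries just log_cq_size so mlx5_cqwq_create() can size the ring, and only then is the variable-length CREATE_CQ command allocated and its real cqc filled. A rough sketch under those assumptions; error paths, the pas[] page list and the final firmware command are elided, and "wq.h" stands for the driver-local header that declares the WQ helpers.

#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "wq.h"		/* driver-local: mlx5_cqwq_create(), mlx5_wq_destroy() */

static int example_size_then_create(struct mlx5_core_dev *mdev, int ncqe)
{
	u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
	struct mlx5_wq_param wqp = {};
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_cqwq wq;
	void *cqc, *in;
	int inlen, err;

	/* Stage 1: only log_cq_size matters for sizing the CQ buffer. */
	MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));
	err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &wq, &wq_ctrl);
	if (err)
		return err;

	/* Stage 2: the command length depends on how many pages stage 1 used. */
	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto out;
	}

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
	/* remaining fields and the CREATE_CQ command are issued here in the real drivers */

	kvfree(in);
out:
	mlx5_wq_destroy(&wq_ctrl);
	return err;
}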
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | health.c |
    43  void *cqc;  in mlx5e_health_cq_diag_fmsg() local
    50  cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);  in mlx5e_health_cq_diag_fmsg()
    51  hw_status = MLX5_GET(cqc, cqc, status);  in mlx5e_health_cq_diag_fmsg()
|
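health.c goes the other way: it issues QUERY_CQ and reads the hardware status field out of the cq_context in the reply. A sketch of that read, assuming the three-argument mlx5_core_query_cq() helper; example_cq_hw_status() is an illustrative wrapper, not the driver's function.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/mlx5_ifc.h>

static int example_cq_hw_status(struct mlx5_core_dev *mdev,
				struct mlx5_core_cq *mcq, u8 *status)
{
	u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
	void *cqc;
	int err;

	err = mlx5_core_query_cq(mdev, mcq, out);
	if (err)
		return err;

	cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
	*status = MLX5_GET(cqc, cqc, status);	/* non-zero means an error state */
	return 0;
}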
D | params.h | 15 u32 cqc[MLX5_ST_SZ_DW(cqc)]; member
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/fpga/ |
D | conn.c |
    415  u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};  in mlx5_fpga_conn_create_cq()
    420  void *cqc, *in;  in mlx5_fpga_conn_create_cq() local
    425  MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));  in mlx5_fpga_conn_create_cq()
    454  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in mlx5_fpga_conn_create_cq()
    455  MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));  in mlx5_fpga_conn_create_cq()
    456  MLX5_SET(cqc, cqc, c_eqn, eqn);  in mlx5_fpga_conn_create_cq()
    457  MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);  in mlx5_fpga_conn_create_cq()
    458  MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -  in mlx5_fpga_conn_create_cq()
    460  MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);  in mlx5_fpga_conn_create_cq()
|
/kernel/linux/linux-5.10/drivers/crypto/hisilicon/ |
D | qm.c |
      71  #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)  argument
    1189  struct qm_cqc *cqc, *cqc_curr;  in qm_cqc_dump() local
    1203  cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);  in qm_cqc_dump()
    1204  if (IS_ERR(cqc))  in qm_cqc_dump()
    1205  return PTR_ERR(cqc);  in qm_cqc_dump()
    1210  if (qm->cqc) {  in qm_cqc_dump()
    1211  cqc_curr = qm->cqc + qp_id;  in qm_cqc_dump()
    1213  ret = dump_show(qm, cqc_curr, sizeof(*cqc),  in qm_cqc_dump()
    1223  ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");  in qm_cqc_dump()
    1228  qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);  in qm_cqc_dump()
    [all …]
|
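The HiSilicon QM driver has its own, unrelated cqc: a plain little-endian struct qm_cqc kept per queue (see the qm.h member below) and decoded with bit masks such as QM_CQ_TAIL_IDX(). A heavily trimmed sketch of that decode; the one-field struct here is a stand-in, the real struct qm_cqc carries the full context.

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_qm_cqc {		/* trimmed stand-in for struct qm_cqc */
	__le16 w11;
};

/* Same extraction as QM_CQ_TAIL_IDX() in qm.c: bit 6 of the w11 word. */
#define EXAMPLE_QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)

static u16 example_qm_cq_tail_idx(const struct example_qm_cqc *cqc)
{
	return EXAMPLE_QM_CQ_TAIL_IDX(cqc);
}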
D | qm.h | 217 struct qm_cqc *cqc; member
|
/kernel/linux/linux-5.10/include/linux/mlx5/ |
D | cq.h |
    132  #define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
    133  #define MLX5_MAX_CQ_COUNT (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1)
|
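__mlx5_bit_sz() expands to the bit width of a cqc field in the firmware layout, so these macros are simply the largest values cq_period and cq_max_count can hold. A small illustrative check built on them; the function itself is hypothetical.

#include <linux/errno.h>
#include <linux/mlx5/cq.h>

/* Illustrative: reject moderation values that cannot fit in the cqc fields. */
static int example_check_moderation(u32 period_usecs, u32 max_count)
{
	if (period_usecs > MLX5_MAX_CQ_PERIOD || max_count > MLX5_MAX_CQ_COUNT)
		return -ERANGE;
	return 0;
}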
/kernel/linux/linux-5.10/drivers/vdpa/mlx5/net/ |
D | mlx5_vnet.c |
    516  void *cqc;  in cq_create() local
    547  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in cq_create()
    548  MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);  in cq_create()
    557  cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);  in cq_create()
    558  MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));  in cq_create()
    559  MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);  in cq_create()
    560  MLX5_SET(cqc, cqc, c_eqn, eqn);  in cq_create()
    561  MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);  in cq_create()
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/ |
D | resource_tracker.c |
    3076  static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)  in cq_get_mtt_addr() argument
    3078  return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;  in cq_get_mtt_addr()
    3081  static int cq_get_mtt_size(struct mlx4_cq_context *cqc)  in cq_get_mtt_size() argument
    3083  int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;  in cq_get_mtt_size()
    3084  int page_shift = (cqc->log_page_size & 0x3f) + 12;  in cq_get_mtt_size()
    3441  struct mlx4_cq_context *cqc = inbox->buf;  in mlx4_SW2HW_CQ_wrapper() local
    3442  int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;  in mlx4_SW2HW_CQ_wrapper()
    3452  err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);  in mlx4_SW2HW_CQ_wrapper()
    3530  struct mlx4_cq_context *cqc = inbox->buf;  in handle_resize() local
    3531  int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;  in handle_resize()
    [all …]
|
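In the mlx4 resource tracker, cqc is a big-endian struct mlx4_cq_context coming from the guest, and the two helpers above recover the MTT base and the number of MTT entries from it before check_mtt_range(). A hedged reconstruction of that arithmetic; it assumes 32-byte CQEs (the "+ 5") and a driver-local header for the context struct, and the single-page guard is an assumption rather than verbatim source.

#include <linux/types.h>
#include <asm/byteorder.h>
#include "mlx4.h"	/* assumed driver-local header for struct mlx4_cq_context */

static int example_cq_mtt_addr(struct mlx4_cq_context *cqc)
{
	/* low 3 bits are masked off, as in cq_get_mtt_addr() above */
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int example_cq_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;
	int log_buf_size = log_cq_size + 5;	/* assumes 32-byte CQEs */

	if (log_buf_size <= page_shift)		/* buffer fits in one page */
		return 1;
	return 1 << (log_buf_size - page_shift);
}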