/drivers/scsi/snic/

D | snic_res.c
    124  snic->cq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_CQ);  in snic_get_res_counts()
    125  SNIC_BUG_ON(snic->cq_count == 0);  in snic_get_res_counts()
    139  for (i = 0; i < snic->cq_count; i++)  in snic_free_vnic_res()
    171  snic->cq_count,  in snic_alloc_vnic_res()
    197  SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count);  in snic_alloc_vnic_res()
    199  for (i = snic->wq_count; i < snic->cq_count; i++) {  in snic_alloc_vnic_res()
    230  for (i = 0; i < snic->cq_count; i++) {  in snic_alloc_vnic_res()

D | snic_isr.c
    170  if (snic->wq_count < n || snic->cq_count < n + m)  in snic_set_intr_mode()
    177  snic->cq_count = n + m;  in snic_set_intr_mode()

D | snic.h
    293  unsigned int cq_count;  member

D | snic_main.c
    252  for (i = 0; i < snic->cq_count; i++)  in snic_cleanup()

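Read together, the snic sites pin down the driver's CQ layout: snic_get_res_counts() takes cq_count from the device's RES_TYPE_CQ resource, snic_alloc_vnic_res() asserts cq_count == 2 * wq_count and pairs the first wq_count CQs with the WQs and the rest with the second queue class, and snic_set_intr_mode() re-derives cq_count as n + m while negotiating vectors. A minimal standalone model of that layout invariant (plain C; the struct and values are illustrative, not the driver's):

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative stand-in for the driver's bookkeeping fields. */
    struct snic_model {
            unsigned int wq_count;  /* work queues */
            unsigned int cq_count;  /* completion queues */
    };

    static void check_cq_layout(const struct snic_model *snic)
    {
            unsigned int i;

            /* Mirrors SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count):
             * the CQ space splits into two equal halves. */
            assert(snic->cq_count == 2 * snic->wq_count);

            /* CQs [0, wq_count) complete the WQs... */
            for (i = 0; i < snic->wq_count; i++)
                    printf("cq[%u] -> wq[%u]\n", i, i);

            /* ...and CQs [wq_count, cq_count) serve the other queue class,
             * as in the snic_alloc_vnic_res() loop that starts at wq_count. */
            for (i = snic->wq_count; i < snic->cq_count; i++)
                    printf("cq[%u] -> second-class queue %u\n",
                           i, i - snic->wq_count);
    }

    int main(void)
    {
            struct snic_model snic = { .wq_count = 1, .cq_count = 2 };

            check_cq_layout(&snic);
            return 0;
    }
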
/drivers/scsi/fnic/

D | fnic_isr.c
    253  fnic->cq_count >= n + m + o) {  in fnic_set_intr_mode()
    262  fnic->cq_count = n + m + o;  in fnic_set_intr_mode()
    281  fnic->cq_count >= 3 &&  in fnic_set_intr_mode()
    288  fnic->cq_count = 3;  in fnic_set_intr_mode()
    309  fnic->cq_count >= 3 &&  in fnic_set_intr_mode()
    315  fnic->cq_count = 3;  in fnic_set_intr_mode()

D | fnic_res.c
    205  fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);  in fnic_get_res_counts()
    223  for (i = 0; i < fnic->cq_count; i++)  in fnic_free_vnic_resources()
    252  fnic->rq_count, fnic->cq_count, fnic->intr_count);  in fnic_alloc_vnic_resources()
    378  for (i = 0; i < fnic->cq_count; i++) {  in fnic_alloc_vnic_resources()

D | fnic.h
    236  unsigned int cq_count;  member

D | fnic_main.c
    524  for (i = 0; i < fnic->cq_count; i++)  in fnic_cleanup()

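fnic_set_intr_mode() shows the classic vNIC "require, else clamp" negotiation: MSI-X is kept only when the device exposes at least n + m + o completion queues, while both the MSI and INTx fallbacks (lines 281 and 309) run on a fixed three CQs. A standalone sketch of that decision, with invented counts (reading n, m and o as per-queue-class demands is inferred from the excerpt):

    #include <stdio.h>

    enum intr_mode { MODE_NONE, MODE_INTX, MODE_MSI, MODE_MSIX };

    /* n, m, o model the three per-queue-class CQ demands that fnic
     * derives from device resources; cq_avail is what the hardware offers. */
    static enum intr_mode pick_intr_mode(unsigned int cq_avail,
                                         unsigned int n, unsigned int m,
                                         unsigned int o, unsigned int *cq_count)
    {
            if (cq_avail >= n + m + o) {
                    *cq_count = n + m + o;  /* one CQ per queue: MSI-X */
                    return MODE_MSIX;
            }
            if (cq_avail >= 3) {
                    *cq_count = 3;          /* MSI and INTx share this floor */
                    return MODE_MSI;        /* INTx is tried after MSI */
            }
            *cq_count = 0;                  /* the real driver fails probe here */
            return MODE_NONE;
    }

    int main(void)
    {
            unsigned int cq_count;
            enum intr_mode mode = pick_intr_mode(4, 1, 1, 8, &cq_count);

            printf("mode=%d cq_count=%u\n", mode, cq_count);  /* MSI, 3 CQs */
            return 0;
    }
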
/drivers/net/ethernet/cisco/enic/

D | enic_res.c
    195  for (i = 0; i < enic->cq_count; i++)  in enic_free_vnic_resources()
    205  enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);  in enic_get_res_counts()
    212  enic->cq_count, enic->intr_count);  in enic_get_res_counts()
    269  for (i = 0; i < enic->cq_count; i++) {  in enic_init_vnic_resources()
    328  enic->cq_count, enic->intr_count,  in enic_alloc_vnic_resources()
    353  for (i = 0; i < enic->cq_count; i++) {  in enic_alloc_vnic_resources()

D | enic_main.c
    2011  for (i = 0; i < enic->cq_count; i++)  in enic_stop()
    2389  enic->cq_count >= n + m &&  in enic_set_intr_mode()
    2397  enic->cq_count = n + m;  in enic_set_intr_mode()
    2410  enic->cq_count >= 1 + m &&  in enic_set_intr_mode()
    2417  enic->cq_count = 1 + m;  in enic_set_intr_mode()
    2435  enic->cq_count >= 2 &&  in enic_set_intr_mode()
    2441  enic->cq_count = 2;  in enic_set_intr_mode()
    2460  enic->cq_count >= 2 &&  in enic_set_intr_mode()
    2465  enic->cq_count = 2;  in enic_set_intr_mode()

D | enic.h
    197  unsigned int cq_count;  member

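enic_set_intr_mode() extends the same idea into a four-rung ladder: full MSI-X wants one CQ per RQ and per WQ (n + m), a reduced MSI-X configuration keeps a single RQ (1 + m), and MSI and INTx each make do with two CQs. A standalone model of the ladder (n and m are hypothetical RQ/WQ counts):

    #include <stdio.h>

    enum intr_mode { MODE_NONE, MODE_INTX, MODE_MSI, MODE_MSIX };

    static enum intr_mode enic_pick_mode(unsigned int cq_avail,
                                         unsigned int n, unsigned int m,
                                         unsigned int *cq_count)
    {
            if (cq_avail >= n + m) {        /* one CQ per RQ and per WQ */
                    *cq_count = n + m;
                    return MODE_MSIX;
            }
            if (cq_avail >= 1 + m) {        /* MSI-X again, but a single RQ */
                    *cq_count = 1 + m;
                    return MODE_MSIX;
            }
            if (cq_avail >= 2) {            /* MSI, then INTx, both need 2 */
                    *cq_count = 2;
                    return MODE_MSI;
            }
            *cq_count = 0;
            return MODE_NONE;
    }

    int main(void)
    {
            unsigned int cq_count;

            /* 5 CQs can't cover 4 RQs + 4 WQs, but do cover 1 RQ + 4 WQs. */
            enum intr_mode mode = enic_pick_mode(5, 4, 4, &cq_count);

            printf("mode=%d cq_count=%u\n", mode, cq_count);  /* MSI-X, 5 CQs */
            return 0;
    }
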
/drivers/infiniband/hw/bnxt_re/

D | bnxt_re.h
    175  atomic_t cq_count;  member

D | ib_verbs.h
    100  u16 cq_count;  member

D | hw_counters.c
    128  stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->cq_count);  in bnxt_re_ib_get_hw_stats()

D | qplib_res.h
    250  u32 cq_count;  member

D | main.c
    166  ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);  in bnxt_re_limit_pf_res()
    190  vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;  in bnxt_re_limit_vf_res()
    782  atomic_set(&rdev->cq_count, 0);  in bnxt_re_dev_add()

D | qplib_sp.c
    185  req.number_of_cq = cpu_to_le32(ctx->cq_count);  in bnxt_qplib_set_func_resources()

D | qplib_res.c
    536  hwq_attr.depth = ctx->cq_count;  in bnxt_qplib_alloc_ctx()

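bnxt_re is a reminder that one identifier can name two unrelated things inside a single driver: the atomic_t cq_count in bnxt_re.h is a live count of active CQs, zeroed in bnxt_re_dev_add() and exported as a hardware counter, while the u32 cq_count in qplib_res.h is a provisioning budget that main.c clamps for the PF and carves up for VFs, then hands to firmware (qplib_sp.c:185) and uses to size context memory (qplib_res.c:536). A standalone check of the budget arithmetic (the cap, percentage and VF count are invented, and the /100 normalization is my assumption, since the excerpt doesn't show where the driver applies it):

    #include <stdio.h>

    #define BNXT_RE_MAX_CQ_COUNT    (64 * 1024)     /* assumed ceiling */

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned int max_cq = 100000;   /* firmware-reported limit (made up) */
            unsigned int vf_pct = 65;       /* share left for VFs (made up) */
            unsigned int num_vf = 8;
            unsigned int cq_count, max_cq_per_vf;

            /* main.c:166 - clamp the PF budget to the driver's ceiling. */
            cq_count = min_u32(BNXT_RE_MAX_CQ_COUNT, max_cq);

            /* main.c:190 - per-VF share; integer division rounds down,
             * so the remainder stays with the PF. */
            max_cq_per_vf = (cq_count * vf_pct / 100) / num_vf;

            printf("cq_count=%u max_cq_per_vf=%u\n", cq_count, max_cq_per_vf);
            return 0;
    }
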
/drivers/scsi/be2iscsi/

D | be.h
    88  u32 cq_count;  member

D | be_main.c
    2060  pbe_eq->cq_count += ret;  in be_iopoll()
    5218  pbe_eq->cq_count < aic->eq_prev) {  in beiscsi_eqd_update_work()
    5220  aic->eq_prev = pbe_eq->cq_count;  in beiscsi_eqd_update_work()
    5224  pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);  in beiscsi_eqd_update_work()
    5233  aic->eq_prev = pbe_eq->cq_count;  in beiscsi_eqd_update_work()

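be2iscsi uses cq_count as a running completion counter rather than a resource total: be_iopoll() adds the completions it just processed, and beiscsi_eqd_update_work() converts the delta since the last sample into completions per second to retune the adaptive EQ delay (the "aic" fields). A standalone model of the rate step (field names follow the driver; the sampling window is invented):

    #include <stdio.h>
    #include <stdint.h>

    struct eq_model {
            uint32_t cq_count;      /* completions processed so far (be_iopoll) */
            uint32_t eq_prev;       /* snapshot taken on the previous pass */
    };

    /* Completions per second over a delta_ms window, mirroring the
     * pps formula in beiscsi_eqd_update_work(). */
    static uint32_t eq_pps(struct eq_model *eq, uint32_t delta_ms)
    {
            uint32_t pps;

            /* Like the driver, re-baseline if the counter ran backwards. */
            if (eq->cq_count < eq->eq_prev) {
                    eq->eq_prev = eq->cq_count;
                    return 0;
            }

            pps = (uint32_t)(eq->cq_count - eq->eq_prev) * 1000 / delta_ms;
            eq->eq_prev = eq->cq_count;     /* baseline for the next pass */
            return pps;
    }

    int main(void)
    {
            struct eq_model eq = { 0, 0 };

            eq.cq_count += 4500;    /* completions reaped by polling */
            printf("pps=%u\n", eq_pps(&eq, 1500));  /* 4500*1000/1500 = 3000 */
            return 0;
    }
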
/drivers/net/ethernet/cavium/thunder/

D | nicvf_main.c
    1807  u8 cq_count, txq_count;  in nicvf_set_xdp_queues() local
    1819  cq_count = max(nic->rx_queues, txq_count);  in nicvf_set_xdp_queues()
    1820  if (cq_count > MAX_CMP_QUEUES_PER_QS) {  in nicvf_set_xdp_queues()
    1821  nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);  in nicvf_set_xdp_queues()

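Here cq_count is only a local in nicvf_set_xdp_queues(): attaching an XDP program grows the TX side, so completion-queue demand becomes max(rx_queues, txq_count), and anything beyond one Qset's CQ limit spills into secondary Qsets. A standalone model (MAX_CMP_QUEUES_PER_QS = 8 and the "one XDP TX queue per RX queue" rule are assumptions; the excerpt stops at the roundup):

    #include <stdio.h>

    #define MAX_CMP_QUEUES_PER_QS   8       /* per-Qset CQ limit; assumed value */

    static unsigned int max_u(unsigned int a, unsigned int b)
    {
            return a > b ? a : b;
    }

    /* Kernel-style roundup() to a multiple of y. */
    static unsigned int roundup_u(unsigned int x, unsigned int y)
    {
            return (x + y - 1) / y * y;
    }

    int main(void)
    {
            unsigned int rx_queues = 12, tx_queues = 4;
            /* Assumption: XDP pairs each RX queue with its own TX queue. */
            unsigned int txq_count = tx_queues + rx_queues;
            unsigned int cq_count = max_u(rx_queues, txq_count);

            if (cq_count > MAX_CMP_QUEUES_PER_QS) {
                    /* Round the demand up to whole Qsets; the driver then
                     * derives the secondary Qset count from this (not shown
                     * in the excerpt). */
                    unsigned int total = roundup_u(cq_count, MAX_CMP_QUEUES_PER_QS);

                    printf("cq_count=%u -> %u CQ slots across Qsets\n",
                           cq_count, total);
            } else {
                    printf("cq_count=%u fits the primary Qset\n", cq_count);
            }
            return 0;
    }
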
/drivers/infiniband/hw/mlx4/

D | cq.c
    93  int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)  in mlx4_ib_modify_cq() argument
    98  return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);  in mlx4_ib_modify_cq()

D | mlx4_ib.h
    765  int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/drivers/infiniband/hw/mlx5/

D | cq.c
    1107  int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)  in mlx5_ib_modify_cq() argument
    1120  cq_period, cq_count);  in mlx5_ib_modify_cq()

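In mlx4 and mlx5 the meaning shifts entirely: here cq_count is the u16 event-moderation threshold handed to the HCA through the kernel verbs modify-CQ path, i.e. raise at most one completion event per cq_count completions, with cq_period as the companion timeout. A plain-C simulation of those semantics (this only models the contract; the real coalescing happens in hardware):

    #include <stdio.h>
    #include <stdint.h>

    /* Toy model of CQ event moderation: fire an event once cq_count
     * completions accumulate or cq_period time units pass, whichever
     * comes first. */
    struct cq_moder {
            uint16_t cq_count;      /* completions per event */
            uint16_t cq_period;     /* max time units between events */
            uint32_t pending;       /* completions since the last event */
            uint32_t elapsed;       /* time units since the last event */
    };

    static int on_completion(struct cq_moder *m, uint32_t ticks)
    {
            m->pending++;
            m->elapsed += ticks;

            if (m->pending >= m->cq_count || m->elapsed >= m->cq_period) {
                    m->pending = 0;
                    m->elapsed = 0;
                    return 1;       /* raise a completion event (interrupt) */
            }
            return 0;               /* keep coalescing */
    }

    int main(void)
    {
            struct cq_moder m = { .cq_count = 4, .cq_period = 16 };
            int i, events = 0;

            for (i = 0; i < 10; i++)
                    events += on_completion(&m, 1);

            printf("10 completions -> %d events\n", events);  /* 2, not 10 */
            return 0;
    }
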
/drivers/net/ethernet/mellanox/mlxsw/

D | pci.c
    743  u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);  in mlxsw_pci_eq_tasklet() local
    783  for_each_set_bit(cqn, active_cqns, cq_count) {  in mlxsw_pci_eq_tasklet()

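Finally, in mlxsw cq_count merely bounds a bitmap walk: the EQ tasklet records which CQs were named by incoming event-queue entries in active_cqns, then services exactly those with for_each_set_bit(). A standalone model of the two phases, restricted to a single unsigned long word of bitmap for simplicity:

    #include <stdio.h>

    int main(void)
    {
            unsigned long active_cqns = 0;  /* bitmap: CQs with pending events */
            unsigned int cq_count = 16;     /* a la mlxsw_pci_cq_count() */
            unsigned int cqn;

            /* Phase 1 (EQE processing): mark the CQs that fired. */
            active_cqns |= 1UL << 3;
            active_cqns |= 1UL << 9;

            /* Phase 2: open-coded for_each_set_bit(cqn, active_cqns, cq_count),
             * visiting only the CQs that actually had events. */
            for (cqn = 0; cqn < cq_count; cqn++) {
                    if (!(active_cqns & (1UL << cqn)))
                            continue;
                    printf("servicing cq %u\n", cqn);
            }
            return 0;
    }
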