/drivers/infiniband/hw/mthca/ |
D | mthca_cq.c |
    221  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));  in mthca_cq_completion()
    241  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));  in mthca_cq_event()
    852  cq->cqn & (dev->limits.num_cqs - 1), cq);  in mthca_init_cq()
    929  cq->cqn & (dev->limits.num_cqs - 1));  in mthca_free_cq()
    960  dev->limits.num_cqs,  in mthca_init_cq_table()
    967  dev->limits.num_cqs);  in mthca_init_cq_table()
    976  mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);  in mthca_cleanup_cq_table()
|
D | mthca_profile.c | 188 dev->limits.num_cqs = profile[i].num; in mthca_make_profile()
|
D | mthca_dev.h | 154 int num_cqs; member
|
D | mthca_eq.c | 798 err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE, in mthca_init_eq_table()
|
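Note: the mthca hits above all reduce a CQN to a table index with cqn & (num_cqs - 1), which assumes num_cqs is sized to a power of two by the profile code. A minimal standalone sketch of that lookup; struct cq and the flat table are hypothetical stand-ins for the driver's mthca_array:

    #include <stdint.h>

    struct cq;  /* opaque stand-in for the driver's CQ object */

    /* Valid only when num_cqs is a power of two: num_cqs - 1 is then a
     * contiguous low-bit mask, so every CQN maps into the table. */
    static struct cq *cq_lookup(struct cq **table, uint32_t num_cqs,
                                uint32_t cqn)
    {
            return table[cqn & (num_cqs - 1)];
    }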
/drivers/scsi/elx/efct/ |
D | efct_hw_queues.c |
    193  u32 num_cqs, u32 entry_count)  in efct_hw_new_cq_set() argument
    203  for (i = 0; i < num_cqs; i++)  in efct_hw_new_cq_set()
    206  for (i = 0; i < num_cqs; i++) {  in efct_hw_new_cq_set()
    222  if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) {  in efct_hw_new_cq_set()
    227  for (i = 0; i < num_cqs; i++) {  in efct_hw_new_cq_set()
    236  for (i = 0; i < num_cqs; i++) {  in efct_hw_new_cq_set()
|
D | efct_hw.h | 741 u32 num_cqs, u32 entry_count);
|
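Note: efct_hw_new_cq_set() allocates a whole set of CQs and hands them to the SLI layer in one call. A rough sketch of that shape, with alloc_one_cq(), free_one_cq(), and fw_create_cq_set() as hypothetical helpers standing in for the driver's internals:

    #include <stdint.h>

    struct cq;
    struct cq *alloc_one_cq(uint32_t entries);
    void free_one_cq(struct cq *cq);
    int fw_create_cq_set(struct cq *cqs[], uint32_t n, uint32_t entries);

    /* Gather per-CQ state first, then create the whole set with a single
     * firmware request; on any failure, unwind everything allocated. */
    static int new_cq_set(struct cq *cqs[], uint32_t num_cqs, uint32_t entries)
    {
            uint32_t i;

            for (i = 0; i < num_cqs; i++) {
                    cqs[i] = alloc_one_cq(entries);
                    if (!cqs[i])
                            goto unwind;
            }
            if (fw_create_cq_set(cqs, num_cqs, entries))
                    goto unwind;
            return 0;
    unwind:
            while (i--)
                    free_one_cq(cqs[i]);
            return -1;
    }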
/drivers/net/ethernet/mellanox/mlx4/ |
D | cq.c |
    110  cqn & (dev->caps.num_cqs - 1));  in mlx4_cq_completion()
    132  cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));  in mlx4_cq_event()
    472  return mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,  in mlx4_init_cq_table()
    473  dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);  in mlx4_init_cq_table()
|
D | profile.c | 210 dev->caps.num_cqs = profile[i].num; in mlx4_make_profile()
|
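Note: mlx4 seeds its CQN allocator over the full num_cqs range and holds back the low reserved_cqs numbers for firmware (mlx4_bitmap_init(..., num_cqs, num_cqs - 1, reserved_cqs, 0)). A toy allocator with the same reserved-bottom shape; the fixed array and names are sketch-only:

    #include <stdint.h>
    #include <string.h>

    #define MAX_CQS 4096                    /* sketch-only capacity */
    static uint8_t cqn_used[MAX_CQS];

    static void cqn_table_init(uint32_t num_cqs, uint32_t reserved_cqs)
    {
            memset(cqn_used, 0, num_cqs);
            memset(cqn_used, 1, reserved_cqs);  /* firmware-owned CQNs */
    }

    static int cqn_alloc(uint32_t num_cqs)
    {
            for (uint32_t i = 0; i < num_cqs; i++) {
                    if (!cqn_used[i]) {
                            cqn_used[i] = 1;
                            return (int)i;
                    }
            }
            return -1;                          /* all CQNs in use */
    }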
/drivers/scsi/bfa/ |
D | bfa_core.c |
    980  WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);  in bfa_iocfc_send_cfg()
    981  bfa_trc(bfa, cfg->fwcfg.num_cqs);  in bfa_iocfc_send_cfg()
    992  cfg_info->num_cqs = cfg->fwcfg.num_cqs;  in bfa_iocfc_send_cfg()
    1001  for (i = 0; i < cfg->fwcfg.num_cqs; i++) {  in bfa_iocfc_send_cfg()
    1109  for (i = 0; i < cfg->fwcfg.num_cqs; i++) {  in bfa_iocfc_mem_claim()
    1125  for (i = 0; i < cfg->fwcfg.num_cqs; i++) {  in bfa_iocfc_mem_claim()
    1285  fwcfg->num_cqs = fwcfg->num_cqs;  in bfa_iocfc_cfgrsp()
    1490  for (q = 0; q < cfg->fwcfg.num_cqs; q++) {  in bfa_iocfc_meminfo()
    1498  for (q = 0; q < cfg->fwcfg.num_cqs; q++)  in bfa_iocfc_meminfo()
    1979  cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;  in bfa_cfg_get_default()
|
D | bfi_ms.h | 36 u8 num_cqs; /* Number of CQs to be used */ member
|
D | bfa_defs_svc.h | 46 u8 num_cqs; member
|
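Note: bfa validates the requested CQ count against the firmware interface cap (BFI_IOC_MAX_CQS) before copying it into the config message sent to the IOC. A standalone rendering of that guard; the constant's value here is an assumption:

    #include <assert.h>
    #include <stdint.h>

    #define BFI_IOC_MAX_CQS 4       /* assumed value for the sketch */

    struct cfg_msg { uint8_t num_cqs; };

    static void send_cfg(struct cfg_msg *msg, uint8_t requested_cqs)
    {
            /* the driver uses WARN_ON(); assert() is the userspace analogue */
            assert(requested_cqs <= BFI_IOC_MAX_CQS);
            msg->num_cqs = requested_cqs;
    }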
/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_cq.c |
    129  if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))  in pvrdma_create_cq()
    221  atomic_dec(&dev->num_cqs);  in pvrdma_create_cq()
    266  atomic_dec(&dev->num_cqs);  in pvrdma_destroy_cq()
|
D | pvrdma.h | 242 atomic_t num_cqs; member
|
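Note: here num_cqs is not a capacity but a live counter: atomic_add_unless(&dev->num_cqs, 1, max_cq) claims a CQ slot only while the count is below the device cap, with no lock around the check-and-increment. A C11 rendering of the same idea:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* The CAS loop makes the bound check and the increment one atomic
     * step, mirroring atomic_add_unless(). */
    static bool cq_slot_get(atomic_int *num_cqs, int max_cq)
    {
            int cur = atomic_load(num_cqs);

            while (cur < max_cq) {
                    if (atomic_compare_exchange_weak(num_cqs, &cur, cur + 1))
                            return true;
            }
            return false;               /* cap reached: fail CQ creation */
    }

    static void cq_slot_put(atomic_int *num_cqs)
    {
            atomic_fetch_sub(num_cqs, 1);   /* mirrors atomic_dec() */
    }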
/drivers/infiniband/hw/hns/ |
D | hns_roce_cq.c |
    455  cqn & (hr_dev->caps.num_cqs - 1));  in hns_roce_cq_completion()
    476  cqn & (hr_dev->caps.num_cqs - 1));  in hns_roce_cq_event()
    522  cq_table->bank[i].max = hr_dev->caps.num_cqs /  in hns_roce_init_cq_table()
|
D | hns_roce_main.c |
    185  props->max_cq = hr_dev->caps.num_cqs;  in hns_roce_query_device()
    720  hr_dev->caps.num_cqs);  in hns_roce_init_hem()
|
D | hns_roce_device.h | 744 u32 num_cqs; member
|
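Note: besides the usual power-of-two mask (lines 455/476), hns_roce splits the CQN space into banks, giving each bank an equal num_cqs share (line 522). A sketch of that split, assuming a bank count of 4 and low-bit bank selection; both details are assumptions standing in for the driver's constants:

    #include <stdint.h>

    #define CQ_BANK_NUM 4U              /* assumed bank count */

    static uint32_t bank_capacity(uint32_t num_cqs)
    {
            return num_cqs / CQ_BANK_NUM;   /* equal share per bank */
    }

    static uint32_t cqn_to_bank(uint32_t cqn)
    {
            return cqn & (CQ_BANK_NUM - 1); /* low bits pick the bank */
    }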
/drivers/net/ethernet/qlogic/qed/ |
D | qed_fcoe.c |
    120  if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {  in qed_sp_fcoe_func_start()
    123  fcoe_pf_params->num_cqs,  in qed_sp_fcoe_func_start()
    166  p_data->q_params.num_queues = fcoe_pf_params->num_cqs;  in qed_sp_fcoe_func_start()
    171  for (i = 0; i < fcoe_pf_params->num_cqs; i++) {  in qed_sp_fcoe_func_start()
    740  info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);  in qed_fill_fcoe_dev_info()
|
D | qed_nvmetcp.c | 154 info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ); in qed_fill_nvmetcp_dev_info()
|
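Note: qed refuses a function-start request that asks for more CQs than the per-function feature budget (feat_num[QED_FCOE_CQ]); the same budget is what qed_fill_*_dev_info() reports upward. The minimal form of that guard, with program_cq() as a hypothetical per-queue setup step:

    #include <stdint.h>

    void program_cq(uint32_t idx);      /* hypothetical per-CQ setup */

    static int func_start(uint32_t num_cqs, uint32_t feat_cqs)
    {
            if (num_cqs > feat_cqs)
                    return -1;          /* more CQs requested than budgeted */
            for (uint32_t i = 0; i < num_cqs; i++)
                    program_cq(i);
            return 0;
    }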
/drivers/net/ethernet/mellanox/mlxsw/ |
D | pci.c |
    1047  u8 num_cqs;  in mlxsw_pci_aqs_init() local
    1063  num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);  in mlxsw_pci_aqs_init()
    1069  if (num_sdqs + num_rdqs > num_cqs ||  in mlxsw_pci_aqs_init()
    1071  num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {  in mlxsw_pci_aqs_init()
    1096  num_cqs);  in mlxsw_pci_aqs_init()
|
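Note: mlxsw_pci_aqs_init() reads the queue maxima from the QUERY_AQ_CAP mailbox and bails out if the geometry is unusable, in particular if send plus receive DQs outnumber the CQs that would service them. A standalone version of the check; the two limit values are assumptions for the sketch:

    #include <stdint.h>

    #define PCI_CQS_MAX   96    /* assumed, stands in for MLXSW_PCI_CQS_MAX */
    #define PCI_EQS_COUNT 2     /* assumed, stands in for MLXSW_PCI_EQS_COUNT */

    static int aq_caps_ok(uint8_t num_sdqs, uint8_t num_rdqs,
                          uint8_t num_cqs, uint8_t num_eqs)
    {
            /* every DQ needs a CQ; CQ/EQ counts must fit the driver layout */
            if (num_sdqs + num_rdqs > num_cqs ||
                num_cqs > PCI_CQS_MAX || num_eqs != PCI_EQS_COUNT)
                    return -1;
            return 0;
    }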
/drivers/scsi/qedi/ |
D | qedi.h | 55 #define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
|
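Note: the qedi MIN_NUM_CPUS_MSIX() macro (qedf has an identical one further down) sizes the MSI-X vector pool as the smaller of the device's CQ count and the number of CPUs available to take interrupts. As a plain function:

    #include <stdint.h>

    static uint32_t msix_vectors(uint32_t num_cqs, uint32_t num_cpus)
    {
            /* no point in more vectors than CQs, or than CPUs to run them */
            return num_cqs < num_cpus ? num_cqs : num_cpus;
    }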
/drivers/net/ethernet/broadcom/ |
D | cnic.h | 297 int num_cqs; member
|
D | cnic.c |
    1477  cp->num_cqs = req1->num_cqs;  in cnic_bnx2x_iscsi_init1()
    1807  for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {  in cnic_setup_bnx2x_ctx()
    1830  ictx->ustorm_st_context.num_cqs = cp->num_cqs;  in cnic_setup_bnx2x_ctx()
    1854  ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;  in cnic_setup_bnx2x_ctx()
    1855  for (i = 0; i < cp->num_cqs; i++) {  in cnic_setup_bnx2x_ctx()
|
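Note: cnic builds the cstorm CQ-processing enable map as (1 << num_cqs) - 1, one bit per CQ. That expression is only well-defined while num_cqs is below the width of the type; sketch:

    #include <stdint.h>

    /* num_cqs = 3 -> 0b111: bits 0..num_cqs-1 enable per-CQ processing.
     * Caller must keep num_cqs < 32 for a 32-bit map. */
    static uint32_t cq_enable_map(uint32_t num_cqs)
    {
            return (1U << num_cqs) - 1;
    }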
/drivers/scsi/elx/libefc_sli/ |
D | sli4.c |
    817  struct sli4_queue *qs[], u32 num_cqs,  in sli_cmd_cq_set_create() argument
    847  (SZ_DMAADDR * num_pages_cq * num_cqs),  in sli_cmd_cq_set_create()
    863  SZ_DMAADDR * num_pages_cq * num_cqs);  in sli_cmd_cq_set_create()
    898  req->num_cq_req = cpu_to_le16(num_cqs);  in sli_cmd_cq_set_create()
    901  for (i = 0; i < num_cqs; i++) {  in sli_cmd_cq_set_create()
    918  u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[])  in sli_cq_alloc_set() argument
    926  for (i = 0; i < num_cqs; i++) {  in sli_cq_alloc_set()
    932  if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma))  in sli_cq_alloc_set()
    951  if (le16_to_cpu(res->num_q_allocated) != num_cqs) {  in sli_cq_alloc_set()
    956  for (i = 0; i < num_cqs; i++) {  in sli_cq_alloc_set()
    [all …]
|
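Note: after the single CQ_SET_CREATE mailbox command, sli_cq_alloc_set() compares the firmware's num_q_allocated against the requested num_cqs and treats any shortfall as failure (line 951). The shape of that post-check, with a placeholder response struct:

    #include <stdint.h>

    struct cq_set_rsp { uint16_t num_q_allocated; };  /* placeholder layout */

    static int cq_set_rsp_ok(const struct cq_set_rsp *res, uint32_t num_cqs)
    {
            if (res->num_q_allocated != num_cqs)
                    return -1;          /* partial allocation == failure */
            return 0;
    }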
/drivers/scsi/qedf/ |
D | qedf.h | 588 #define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
|
/drivers/scsi/bnx2i/ |
D | 57xx_iscsi_hsi.h |
    555  u8 num_cqs;  member
    557  u8 num_cqs;
|