
Searched refs:nr_queues (Results 1 – 25 of 31) sorted by relevance


/drivers/crypto/cavium/cpt/
cptvf_main.c
42 if (cptvf->nr_queues) { in init_worker_threads()
44 cptvf->nr_queues); in init_worker_threads()
47 for (i = 0; i < cptvf->nr_queues; i++) { in init_worker_threads()
69 if (cptvf->nr_queues) { in cleanup_worker_threads()
71 cptvf->nr_queues); in cleanup_worker_threads()
74 for (i = 0; i < cptvf->nr_queues; i++) in cleanup_worker_threads()
100 pqinfo->nr_queues = 0; in free_pending_queues()
104 u32 nr_queues) in alloc_pending_queues() argument
110 pqinfo->nr_queues = nr_queues; in alloc_pending_queues()
136 static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues) in init_pending_queues() argument
[all …]
cptvf.h
85 u32 nr_queues; /* Number of queues supported */ member
91 for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \
110 u32 nr_queues; member
cptvf_reqmanager.c
233 if (unlikely(qno >= cptvf->nr_queues)) { in send_cpt_command()
235 qno, cptvf->nr_queues); in send_cpt_command()
545 if (unlikely(qno > cptvf->nr_queues)) { in vq_post_process()
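The reqmanager hits above guard every dispatch with a bounds check against nr_queues (note the >= form in send_cpt_command(), since valid indices run 0..nr_queues-1). A minimal runnable sketch of that pattern; the struct and function names here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct vf_dev {
	uint32_t nr_queues;	/* number of queues supported */
};

/* Return 0 if qno indexes a valid queue, -1 otherwise. */
static int check_queue_bounds(const struct vf_dev *vf, uint32_t qno)
{
	if (qno >= vf->nr_queues) {	/* valid indices: 0 .. nr_queues - 1 */
		fprintf(stderr, "invalid queue %u (device has %u)\n",
			(unsigned)qno, (unsigned)vf->nr_queues);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct vf_dev vf = { .nr_queues = 4 };

	printf("qno 3 -> %d\n", check_queue_bounds(&vf, 3));	/* accepted */
	printf("qno 4 -> %d\n", check_queue_bounds(&vf, 4));	/* rejected */
	return 0;
}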
/drivers/crypto/cavium/nitrox/
nitrox_sriov.c
58 int nr_queues = 0; in vf_mode_to_nr_queues() local
62 nr_queues = MAX_PF_QUEUES; in vf_mode_to_nr_queues()
65 nr_queues = 8; in vf_mode_to_nr_queues()
68 nr_queues = 4; in vf_mode_to_nr_queues()
71 nr_queues = 2; in vf_mode_to_nr_queues()
74 nr_queues = 1; in vf_mode_to_nr_queues()
78 return nr_queues; in vf_mode_to_nr_queues()
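The vf_mode_to_nr_queues() hits show a straight mode-to-count mapping: the PF keeps MAX_PF_QUEUES, and the per-VF queue count halves as more VFs share the device. A stand-alone sketch of that switch; the enum names and the MAX_PF_QUEUES value are assumptions for illustration, only the returned counts come from the hits:

#include <stdio.h>

#define MAX_PF_QUEUES 64	/* assumed value, for illustration only */

/* Hypothetical mode names; only the per-mode counts come from the hits. */
enum vf_mode { MODE_PF, MODE_VF16, MODE_VF32, MODE_VF64, MODE_VF128 };

static int vf_mode_to_nr_queues(enum vf_mode mode)
{
	int nr_queues = 0;

	switch (mode) {
	case MODE_PF:		/* PF keeps all queues */
		nr_queues = MAX_PF_QUEUES;
		break;
	case MODE_VF16:
		nr_queues = 8;
		break;
	case MODE_VF32:
		nr_queues = 4;
		break;
	case MODE_VF64:
		nr_queues = 2;
		break;
	case MODE_VF128:	/* most VFs -> one queue each */
		nr_queues = 1;
		break;
	}
	return nr_queues;
}

int main(void)
{
	printf("PF: %d queues, 128-VF mode: %d queue\n",
	       vf_mode_to_nr_queues(MODE_PF),
	       vf_mode_to_nr_queues(MODE_VF128));
	return 0;
}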
nitrox_lib.c
91 for (i = 0; i < ndev->nr_queues; i++) { in nitrox_free_aqm_queues()
102 for (i = 0; i < ndev->nr_queues; i++) { in nitrox_alloc_aqm_queues()
142 for (i = 0; i < ndev->nr_queues; i++) { in nitrox_free_pktin_queues()
155 ndev->pkt_inq = kcalloc_node(ndev->nr_queues, in nitrox_alloc_pktin_queues()
161 for (i = 0; i < ndev->nr_queues; i++) { in nitrox_alloc_pktin_queues()
nitrox_dev.h
160 int nr_queues; member
250 u16 nr_queues; member
nitrox_mbx.c
68 vfdev->nr_queues = vfdev->msg.data; in pf2vf_send_response()
76 vfdev->nr_queues = 0; in pf2vf_send_response()
nitrox_hal.c
124 for (i = 0; i < ndev->nr_queues; i++) { in nitrox_config_pkt_input_rings()
240 for (i = 0; i < ndev->nr_queues; i++) in nitrox_config_pkt_solicit_ports()
356 for (ring = 0; ring < ndev->nr_queues; ring++) { in nitrox_config_aqm_rings()
nitrox_main.c
473 ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus()); in nitrox_probe()
nitrox_isr.c
334 if (qvec->ring >= ndev->nr_queues) in nitrox_register_interrupts()
nitrox_reqmgr.c
425 qno = smp_processor_id() % ndev->nr_queues; in nitrox_process_se_request()
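Two of the nitrox hits combine into the usual submission-spreading scheme: nitrox_probe() caps nr_queues at min(MAX_PF_QUEUES, num_online_cpus()), and nitrox_process_se_request() picks a queue by CPU id modulo that count. A small userspace sketch of the same arithmetic; the MAX_PF_QUEUES value and CPU count are stand-ins:

#include <stdio.h>

#define MAX_PF_QUEUES 64	/* assumed cap, as in the nitrox_main.c hit */

int main(void)
{
	unsigned int online_cpus = 96;	/* stand-in for num_online_cpus() */
	unsigned int nr_queues = online_cpus < MAX_PF_QUEUES ?
				 online_cpus : MAX_PF_QUEUES;
	unsigned int cpu;

	/* Fold each submitting CPU onto a queue by simple modulo, as in
	 * "qno = smp_processor_id() % ndev->nr_queues"; print around the
	 * boundary to show the wrap. */
	for (cpu = 62; cpu < 67; cpu++)
		printf("cpu %u -> queue %u\n", cpu, cpu % nr_queues);
	return 0;
}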
/drivers/ufs/core/
ufs-mcq.c
172 hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues; in ufshcd_mcq_config_nr_queues()
173 rem -= hba->nr_queues[HCTX_TYPE_DEFAULT]; in ufshcd_mcq_config_nr_queues()
179 hba->nr_queues[HCTX_TYPE_POLL] = poll_queues; in ufshcd_mcq_config_nr_queues()
180 rem -= hba->nr_queues[HCTX_TYPE_POLL]; in ufshcd_mcq_config_nr_queues()
184 hba->nr_queues[HCTX_TYPE_READ] = read_queues; in ufshcd_mcq_config_nr_queues()
185 rem -= hba->nr_queues[HCTX_TYPE_READ]; in ufshcd_mcq_config_nr_queues()
188 if (!hba->nr_queues[HCTX_TYPE_DEFAULT]) in ufshcd_mcq_config_nr_queues()
189 hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues, in ufshcd_mcq_config_nr_queues()
193 host->nr_hw_queues += hba->nr_queues[i]; in ufshcd_mcq_config_nr_queues()
387 if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]) in ufshcd_mcq_make_queues_operational()
ufshcd.c
2850 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1; in ufshcd_map_queues()
2851 hba->nr_queues[HCTX_TYPE_READ] = 0; in ufshcd_map_queues()
2852 hba->nr_queues[HCTX_TYPE_POLL] = 1; in ufshcd_map_queues()
2859 map->nr_queues = hba->nr_queues[i]; in ufshcd_map_queues()
2860 if (!map->nr_queues) in ufshcd_map_queues()
2867 queue_offset += map->nr_queues; in ufshcd_map_queues()
6926 unsigned int nr_queues; in ufshcd_handle_mcq_cq_events() local
6935 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; in ufshcd_handle_mcq_cq_events()
6936 for_each_set_bit(i, &outstanding_cqs, nr_queues) { in ufshcd_handle_mcq_cq_events()
8766 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT], in ufshcd_config_mcq()
[all …]
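The ufshcd_map_queues() hits illustrate the common blk-mq map layout: each HCTX type takes nr_queues[i] hardware queues, empty types are skipped, and queue_offset accumulates so the types occupy consecutive index ranges. A runnable sketch of that accumulation using the single-queue values from the hits above; the struct and enum definitions are simplified stand-ins for the blk-mq ones:

#include <stdio.h>

/* Simplified stand-ins for the blk-mq types used by ufshcd_map_queues(). */
enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

struct queue_map {
	unsigned int nr_queues;
	unsigned int queue_offset;
};

int main(void)
{
	/* Single-queue layout from the ufshcd.c hits: one default queue,
	 * no read queues, one poll queue. */
	unsigned int nr_queues[HCTX_MAX_TYPES] = { 1, 0, 1 };
	struct queue_map map[HCTX_MAX_TYPES] = { { 0, 0 } };
	unsigned int i, queue_offset = 0;

	for (i = 0; i < HCTX_MAX_TYPES; i++) {
		map[i].nr_queues = nr_queues[i];
		if (!map[i].nr_queues)
			continue;	/* empty types consume no offset */
		map[i].queue_offset = queue_offset;
		queue_offset += map[i].nr_queues;
	}

	for (i = 0; i < HCTX_MAX_TYPES; i++)
		printf("type %u: %u queue(s) at offset %u\n",
		       i, map[i].nr_queues, map[i].queue_offset);
	return 0;
}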
/drivers/block/rnbd/
rnbd-clt.c
1171 set->map[HCTX_TYPE_DEFAULT].nr_queues = num_online_cpus(); in rnbd_rdma_map_queues()
1173 set->map[HCTX_TYPE_READ].nr_queues = num_online_cpus(); in rnbd_rdma_map_queues()
1180 set->map[HCTX_TYPE_POLL].nr_queues = sess->nr_poll_queues; in rnbd_rdma_map_queues()
1182 set->map[HCTX_TYPE_READ].nr_queues; in rnbd_rdma_map_queues()
1186 set->map[HCTX_TYPE_DEFAULT].nr_queues, in rnbd_rdma_map_queues()
1187 set->map[HCTX_TYPE_READ].nr_queues, in rnbd_rdma_map_queues()
1188 set->map[HCTX_TYPE_POLL].nr_queues); in rnbd_rdma_map_queues()
1192 set->map[HCTX_TYPE_DEFAULT].nr_queues, in rnbd_rdma_map_queues()
1193 set->map[HCTX_TYPE_READ].nr_queues); in rnbd_rdma_map_queues()
/drivers/block/null_blk/
main.c
1508 if (nullb->nr_queues != 1) in nullb_to_queue()
1509 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); in nullb_to_queue()
1580 map->nr_queues = submit_queues; in null_map_queues()
1583 map->nr_queues = 0; in null_map_queues()
1586 map->nr_queues = poll_queues; in null_map_queues()
1590 qoff += map->nr_queues; in null_map_queues()
1718 for (i = 0; i < nullb->nr_queues; i++) in cleanup_queues()
1729 nullb->nr_queues--; in null_exit_hctx()
1755 nullb->nr_queues++; in null_init_hctx()
1886 nullb->nr_queues++; in init_driver_queues()
null_blk.h
136 unsigned int nr_queues; member
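nullb_to_queue() (main.c line 1509 above) maps a CPU to a queue by ceiling-dividing the CPU id space into nr_queues contiguous blocks. A runnable sketch of just that arithmetic, with made-up counts:

#include <stdio.h>

int main(void)
{
	unsigned int nr_cpu_ids = 8, nr_queues = 3, cpu;

	/* Ceiling division gives the per-queue CPU block size; CPUs map to
	 * queues in contiguous blocks, as in nullb_to_queue():
	 *   index = cpu / ((nr_cpu_ids + nr_queues - 1) / nr_queues) */
	unsigned int block = (nr_cpu_ids + nr_queues - 1) / nr_queues;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		printf("cpu %u -> queue %u\n", cpu, cpu / block);
	return 0;
}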
/drivers/nvme/host/
rdma.c
906 int ret, nr_queues; in nvme_rdma_configure_io_queues() local
923 nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count); in nvme_rdma_configure_io_queues()
924 ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues); in nvme_rdma_configure_io_queues()
950 ret = nvme_rdma_start_io_queues(ctrl, nr_queues, in nvme_rdma_configure_io_queues()
2154 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_rdma_map_queues()
2157 set->map[HCTX_TYPE_READ].nr_queues = in nvme_rdma_map_queues()
2163 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_rdma_map_queues()
2166 set->map[HCTX_TYPE_READ].nr_queues = in nvme_rdma_map_queues()
2177 set->map[HCTX_TYPE_POLL].nr_queues = in nvme_rdma_map_queues()
tcp.c
1867 int ret, nr_queues; in nvme_tcp_configure_io_queues() local
1887 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count); in nvme_tcp_configure_io_queues()
1888 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues); in nvme_tcp_configure_io_queues()
1914 ret = nvme_tcp_start_io_queues(ctrl, nr_queues, in nvme_tcp_configure_io_queues()
2438 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_tcp_map_queues()
2441 set->map[HCTX_TYPE_READ].nr_queues = in nvme_tcp_map_queues()
2447 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_tcp_map_queues()
2450 set->map[HCTX_TYPE_READ].nr_queues = in nvme_tcp_map_queues()
2459 set->map[HCTX_TYPE_POLL].nr_queues = in nvme_tcp_map_queues()
pci.c
449 map->nr_queues = dev->io_queues[i]; in nvme_pci_map_queues()
450 if (!map->nr_queues) { in nvme_pci_map_queues()
464 qoff += map->nr_queues; in nvme_pci_map_queues()
465 offset += map->nr_queues; in nvme_pci_map_queues()
2506 int nr_queues = dev->online_queues - 1, sent = 0; in __nvme_disable_io_queues() local
2511 while (nr_queues > 0) { in __nvme_disable_io_queues()
2512 if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) in __nvme_disable_io_queues()
2514 nr_queues--; in __nvme_disable_io_queues()
2518 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; in __nvme_disable_io_queues()
2526 if (nr_queues) in __nvme_disable_io_queues()
multipath.c
518 ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) in nvme_mpath_alloc_disk()
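The rdma.c and tcp.c configure_io_queues hits both compute nr_queues = min(nr_hw_queues + 1, queue_count) and then start queues from index 1: slot 0 holds the admin queue, so the "+ 1" converts an I/O-queue count into an exclusive upper bound over queue indices. A sketch of that bookkeeping with stand-in numbers:

#include <stdio.h>

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int nr_hw_queues = 4;	/* I/O hardware queues in the tag set */
	int queue_count = 6;	/* all queues, incl. the admin queue at 0 */
	int i;

	/* "+ 1" accounts for the admin queue occupying slot 0; I/O queues
	 * are then started from index 1 up to (but not incl.) nr_queues. */
	int nr_queues = min_int(nr_hw_queues + 1, queue_count);

	for (i = 1; i < nr_queues; i++)
		printf("start I/O queue %d\n", i);
	return 0;
}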
/drivers/s390/cio/
qdio_setup.c
100 static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) in __qdio_allocate_qs() argument
105 for (i = 0; i < nr_queues; i++) { in __qdio_allocate_qs()
/drivers/char/
virtio_console.c
1846 u32 i, j, nr_ports, nr_queues; in init_vqs() local
1850 nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; in init_vqs()
1852 vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL); in init_vqs()
1853 io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *), in init_vqs()
1855 io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL); in init_vqs()
1893 err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, in init_vqs()
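The init_vqs() hits show how virtio_console sizes its virtqueue set: every port needs an RX/TX pair, and multiport mode adds one extra pair for the control channel, hence (nr_ports + 1) * 2, or just 2 without multiport. A runnable restatement of that formula:

#include <stdbool.h>
#include <stdio.h>

/* Every port needs an RX/TX virtqueue pair; multiport mode adds one more
 * pair for the control channel, hence (nr_ports + 1) * 2. Without
 * multiport there is exactly one pair. */
static unsigned int console_nr_queues(bool multiport, unsigned int nr_ports)
{
	return multiport ? (nr_ports + 1) * 2 : 2;
}

int main(void)
{
	printf("no multiport:       %u vqs\n", console_nr_queues(false, 1));
	printf("multiport, 3 ports: %u vqs\n", console_nr_queues(true, 3));
	return 0;
}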
/drivers/block/
ublk_drv.c
1377 int nr_queues = ub->dev_info.nr_hw_queues; in ublk_deinit_queues() local
1383 for (i = 0; i < nr_queues; i++) in ublk_deinit_queues()
1390 int nr_queues = ub->dev_info.nr_hw_queues; in ublk_init_queues() local
1396 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL); in ublk_init_queues()
1400 for (i = 0; i < nr_queues; i++) { in ublk_init_queues()
virtio_blk.c
821 map->nr_queues = vblk->io_queues[i]; in virtblk_map_queues()
823 qoff += map->nr_queues; in virtblk_map_queues()
825 if (map->nr_queues == 0) in virtblk_map_queues()
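ublk_init_queues() above makes one contiguous, zeroed allocation for all queues (kcalloc(nr_queues, ubq_size, GFP_KERNEL)) and then initializes each in a loop. A hedged userspace sketch of that shape; the mock_queue struct is an illustrative stand-in, since the real ubq_size is computed dynamically:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for ublk's per-queue struct; the real queue size
 * (ubq_size) is computed from the per-I/O slot count. */
struct mock_queue {
	int id;
};

int main(void)
{
	unsigned int nr_queues = 4;	/* ub->dev_info.nr_hw_queues in ublk */
	struct mock_queue *queues;
	unsigned int i;

	/* One contiguous zeroed allocation for every queue, mirroring
	 * "ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL)". */
	queues = calloc(nr_queues, sizeof(*queues));
	if (!queues)
		return 1;

	for (i = 0; i < nr_queues; i++)	/* per-queue init loop */
		queues[i].id = (int)i;

	printf("initialized %u queues\n", nr_queues);
	free(queues);
	return 0;
}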
/drivers/scsi/mpi3mr/
mpi3mr_os.c
3821 map->nr_queues = 0; in mpi3mr_map_queues()
3824 map->nr_queues = mrioc->default_qcount; in mpi3mr_map_queues()
3826 map->nr_queues = mrioc->active_poll_qcount; in mpi3mr_map_queues()
3828 if (!map->nr_queues) { in mpi3mr_map_queues()
3843 qoff += map->nr_queues; in mpi3mr_map_queues()
3844 offset += map->nr_queues; in mpi3mr_map_queues()
