Searched refs:num_queues (Results 1 – 25 of 97) sorted by relevance


/kernel/linux/linux-5.10/drivers/net/xen-netback/
interface.c
186 unsigned int num_queues; in xenvif_select_queue() local
190 num_queues = READ_ONCE(vif->num_queues); in xenvif_select_queue()
191 if (num_queues < 1) in xenvif_select_queue()
212 unsigned int num_queues; in xenvif_start_xmit() local
222 num_queues = READ_ONCE(vif->num_queues); in xenvif_start_xmit()
223 if (num_queues < 1) in xenvif_start_xmit()
228 if (index >= num_queues) { in xenvif_start_xmit()
231 index %= num_queues; in xenvif_start_xmit()
275 unsigned int num_queues; in xenvif_get_stats() local
283 num_queues = READ_ONCE(vif->num_queues); in xenvif_get_stats()
[all …]
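The interface.c hits follow a common transmit-path guard: read the queue count once, treat zero queues as "not ready", and fold an out-of-range queue index back with a modulo. A minimal userspace sketch of that clamp (structure and names are illustrative, not the driver's):

#include <stdio.h>

/* Illustrative stand-in for the driver's state; not the real structure. */
struct fake_vif {
	unsigned int num_queues;
};

/* Pick a usable queue index, mirroring the clamp seen in xenvif_start_xmit(). */
static int select_queue(const struct fake_vif *vif, unsigned int index)
{
	unsigned int num_queues = vif->num_queues; /* READ_ONCE() in the kernel */

	if (num_queues < 1)
		return -1;               /* no queues set up yet */
	if (index >= num_queues)
		index %= num_queues;     /* fold an out-of-range index back into range */
	return (int)index;
}

int main(void)
{
	struct fake_vif vif = { .num_queues = 4 };

	printf("index 9 -> queue %d\n", select_queue(&vif, 9));  /* 9 % 4 == 1 */
	vif.num_queues = 0;
	printf("no queues -> %d\n", select_queue(&vif, 0));      /* -1 */
	return 0;
}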
xenbus.c
177 for (i = 0; i < vif->num_queues; ++i) { in xenvif_debugfs_addif()
255 unsigned int num_queues = vif->num_queues; in backend_disconnect() local
267 vif->num_queues = 0; in backend_disconnect()
270 for (queue_index = 0; queue_index < num_queues; ++queue_index) in backend_disconnect()
533 for (queue_index = 0; queue_index < vif->num_queues; queue_index++) { in xen_net_rate_changed()
772 be->vif->num_queues = requested_num_queues; in connect()
790 be->vif->num_queues = queue_index; in connect()
806 be->vif->num_queues = queue_index; in connect()
837 if (be->vif->num_queues > 0) in connect()
839 for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) in connect()
[all …]
/kernel/linux/linux-5.10/drivers/net/ethernet/google/gve/
gve_ethtool.c
87 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_get_strings()
95 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in gve_get_strings()
126 (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) + in gve_get_sset_count()
127 (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS); in gve_get_sset_count()
157 rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues, in gve_get_ethtool_stats()
161 tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues, in gve_get_ethtool_stats()
169 ring < priv->rx_cfg.num_queues; ring++) { in gve_get_ethtool_stats()
192 ring < priv->tx_cfg.num_queues; ring++) { in gve_get_ethtool_stats()
230 base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues + in gve_get_ethtool_stats()
231 GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; in gve_get_ethtool_stats()
[all …]
gve_main.c
40 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { in gve_get_stats()
53 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { in gve_get_stats()
127 priv->tx_cfg.num_queues; in gve_alloc_stats_report()
129 priv->rx_cfg.num_queues; in gve_alloc_stats_report()
246 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) in gve_alloc_notify_blocks()
247 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_alloc_notify_blocks()
248 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) in gve_alloc_notify_blocks()
249 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_alloc_notify_blocks()
470 err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues); in gve_create_rings()
473 priv->tx_cfg.num_queues); in gve_create_rings()
[all …]
gve_adminq.c
344 int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues) in gve_adminq_create_tx_queues() argument
349 for (i = 0; i < num_queues; i++) { in gve_adminq_create_tx_queues()
384 int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues) in gve_adminq_create_rx_queues() argument
389 for (i = 0; i < num_queues; i++) { in gve_adminq_create_rx_queues()
416 int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues) in gve_adminq_destroy_tx_queues() argument
421 for (i = 0; i < num_queues; i++) { in gve_adminq_destroy_tx_queues()
448 int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues) in gve_adminq_destroy_rx_queues() argument
453 for (i = 0; i < num_queues; i++) { in gve_adminq_destroy_rx_queues()
gve_adminq.h
257 int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
259 int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
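The gve admin-queue hits share one loop shape: a create or destroy command is issued for every queue index below num_queues. A simplified sketch of that loop, with a placeholder command function rather than the driver's admin-queue API:

#include <stdio.h>

/* Placeholder for a per-queue admin command; not the gve admin-queue API. */
static int issue_create_cmd(unsigned int queue_id)
{
	printf("create queue %u\n", queue_id);
	return 0; /* 0 on success, negative on failure */
}

/* Loop shape used by gve_adminq_create_tx_queues(): one command per queue. */
static int create_queues(unsigned int num_queues)
{
	unsigned int i;
	int err;

	for (i = 0; i < num_queues; i++) {
		err = issue_create_cmd(i);
		if (err)
			return err; /* stop at the first failing queue */
	}
	return 0;
}

int main(void)
{
	return create_queues(4);
}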
/kernel/linux/linux-5.10/drivers/crypto/marvell/octeontx/
otx_cptvf_main.c
38 if (cptvf->num_queues) { in init_worker_threads()
40 cptvf->num_queues); in init_worker_threads()
43 for (i = 0; i < cptvf->num_queues; i++) { in init_worker_threads()
63 if (cptvf->num_queues) { in cleanup_worker_threads()
65 cptvf->num_queues); in cleanup_worker_threads()
68 for (i = 0; i < cptvf->num_queues; i++) in cleanup_worker_threads()
90 pqinfo->num_queues = 0; in free_pending_queues()
94 u32 num_queues) in alloc_pending_queues() argument
101 pqinfo->num_queues = num_queues; in alloc_pending_queues()
128 u32 num_queues) in init_pending_queues() argument
[all …]
otx_cptvf.h
52 u32 num_queues; /* Number of queues supported */ member
57 for (i = 0, q = &qinfo->queue[i]; i < qinfo->num_queues; i++, \
87 u32 num_queues; member
/kernel/linux/linux-5.10/arch/mips/cavium-octeon/executive/
cvmx-pko.c
71 const int num_queues = 1; in __cvmx_pko_iport_config() local
76 for (queue = 0; queue < num_queues; queue++) { in __cvmx_pko_iport_config()
85 config.s.tail = (queue == (num_queues - 1)); in __cvmx_pko_iport_config()
101 num_queues, queue); in __cvmx_pko_iport_config()
341 uint64_t num_queues, in cvmx_pko_config_port() argument
361 if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) { in cvmx_pko_config_port()
364 (unsigned long long)(base_queue + num_queues)); in cvmx_pko_config_port()
374 for (queue = 0; queue < num_queues; queue++) { in cvmx_pko_config_port()
388 && queue == num_queues - 1) in cvmx_pko_config_port()
431 cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues, in cvmx_pko_config_port()
[all …]
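The cvmx-pko hits configure a chain of queues per port and flag only the last one as the tail (config.s.tail). A small sketch of that last-iteration test, with the hardware config structure omitted:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	const int num_queues = 3;  /* queues attached to one port */
	int queue;

	for (queue = 0; queue < num_queues; queue++) {
		/* Only the last queue in the chain carries the tail flag,
		 * mirroring config.s.tail in __cvmx_pko_iport_config(). */
		bool tail = (queue == num_queues - 1);

		printf("queue %d: tail=%d\n", queue, tail);
	}
	return 0;
}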
/kernel/linux/linux-5.10/drivers/staging/wfx/
queue.c
235 int i, j, num_queues = 0; in wfx_tx_queues_get_skb() local
244 WARN_ON(num_queues >= ARRAY_SIZE(queues)); in wfx_tx_queues_get_skb()
245 queues[num_queues] = &wvif->tx_queue[i]; in wfx_tx_queues_get_skb()
246 for (j = num_queues; j > 0; j--) in wfx_tx_queues_get_skb()
250 num_queues++; in wfx_tx_queues_get_skb()
258 for (i = 0; i < num_queues; i++) { in wfx_tx_queues_get_skb()
278 for (i = 0; i < num_queues; i++) { in wfx_tx_queues_get_skb()
/kernel/linux/linux-5.10/drivers/net/
xen-netfront.c
364 unsigned int num_queues = dev->real_num_tx_queues; in xennet_open() local
371 for (i = 0; i < num_queues; ++i) { in xennet_open()
589 unsigned int num_queues = dev->real_num_tx_queues; in xennet_select_queue() local
594 if (num_queues == 1) { in xennet_select_queue()
598 queue_idx = hash % num_queues; in xennet_select_queue()
649 unsigned int num_queues = dev->real_num_tx_queues; in xennet_xdp_xmit() local
661 queue = &np->queues[smp_processor_id() % num_queues]; in xennet_xdp_xmit()
724 unsigned int num_queues = dev->real_num_tx_queues; in xennet_start_xmit() local
729 if (num_queues < 1) in xennet_start_xmit()
873 unsigned int num_queues = dev->real_num_tx_queues; in xennet_close() local
[all …]
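xen-netfront selects a transmit queue the way most multiqueue drivers do: a single queue needs no hashing, otherwise the flow hash is reduced modulo num_queues. A tiny sketch, assuming a stand-in hash value rather than the kernel's skb hash:

#include <stdio.h>

/* Queue selection shape from xennet_select_queue(): one queue means no
 * hashing is needed; otherwise the flow hash is reduced with a modulo. */
static unsigned int pick_queue(unsigned int hash, unsigned int num_queues)
{
	if (num_queues == 1)
		return 0;
	return hash % num_queues;
}

int main(void)
{
	printf("%u\n", pick_queue(0xdeadbeefu, 4));
	printf("%u\n", pick_queue(0xdeadbeefu, 1));
	return 0;
}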
/kernel/linux/linux-5.10/drivers/soc/ti/
knav_qmss_acc.c
33 for (queue = 0; queue < range->num_queues; queue++) { in __knav_acc_notify()
150 queue >= range_base + range->num_queues) { in knav_acc_int_handler()
154 range_base + range->num_queues); in knav_acc_int_handler()
313 queue_mask = BIT(range->num_queues) - 1; in knav_acc_setup_cmd()
379 for (queue = 0; queue < range->num_queues; queue++) { in knav_acc_init_range()
439 channels = range->num_queues; in knav_acc_free_range()
523 channels = range->num_queues; in knav_init_acc_range()
533 if (range->num_queues > 32) { in knav_init_acc_range()
knav_qmss.h
140 unsigned num_queues; member
297 unsigned num_queues; member
332 unsigned num_queues; member
knav_qmss_queue.c
208 kdev->base_id + kdev->num_queues > id) { in knav_queue_find_by_id()
473 kdev->base_id + kdev->num_queues - 1); in knav_queue_debug_show()
1229 range->num_queues = temp[1]; in knav_setup_queue_range()
1263 range->num_irqs = min(range->num_irqs, range->num_queues); in knav_setup_queue_range()
1283 end = min(qmgr->start_queue + qmgr->num_queues, in knav_setup_queue_range()
1284 range->queue_base + range->num_queues); in knav_setup_queue_range()
1297 range->queue_base + range->num_queues - 1, in knav_setup_queue_range()
1302 kdev->num_queues_in_use += range->num_queues; in knav_setup_queue_range()
1410 qmgr->num_queues = temp[1]; in knav_queue_init_qmgrs()
1418 qmgr->start_queue, qmgr->num_queues); in knav_queue_init_qmgrs()
[all …]
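The knav hits build a per-range bitmask as BIT(num_queues) - 1 and reject ranges wider than 32 queues. A sketch of that mask construction; the 64-bit intermediate here is only to keep the shift defined for the 32-queue case:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int num_queues = 8;

	/* knav_init_acc_range() refuses ranges wider than 32 queues. */
	if (num_queues > 32) {
		fprintf(stderr, "range too wide for a 32-bit mask\n");
		return 1;
	}
	/* knav_acc_setup_cmd(): one bit per queue in the range. */
	uint32_t queue_mask = (uint32_t)((1ULL << num_queues) - 1);

	printf("mask for %u queues: 0x%08x\n", num_queues, (unsigned int)queue_mask);
	return 0;
}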
/kernel/linux/linux-5.10/drivers/scsi/
virtio_scsi.c
77 u32 num_queues; member
206 num_vqs = vscsi->num_queues; in virtscsi_poll_requests()
798 num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; in virtscsi_init()
850 u32 num_queues; in virtscsi_probe() local
859 num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; in virtscsi_probe()
860 num_queues = min_t(unsigned int, nr_cpu_ids, num_queues); in virtscsi_probe()
865 struct_size(vscsi, req_vqs, num_queues)); in virtscsi_probe()
873 vscsi->num_queues = num_queues; in virtscsi_probe()
893 shost->nr_hw_queues = num_queues; in virtscsi_probe()
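virtscsi_probe() falls back to one queue when the device reports none, clamps the count to the number of CPUs, and sizes the adapter structure for one request virtqueue per queue. A userspace sketch of that shape, with illustrative types in place of the virtio structures:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the per-adapter state: one request-queue slot
 * per hardware queue, kept as a flexible array member as in struct
 * virtio_scsi. Types and names are illustrative. */
struct fake_vscsi {
	unsigned int num_queues;
	int req_vqs[];           /* one entry per queue */
};

int main(void)
{
	unsigned int configured = 0;   /* what the device reports; 0 here */
	unsigned int online_cpus = 8;  /* stand-in for nr_cpu_ids */

	/* virtscsi_probe(): fall back to 1 queue, never exceed the CPU count. */
	unsigned int num_queues = configured ? configured : 1;
	if (num_queues > online_cpus)
		num_queues = online_cpus;

	struct fake_vscsi *vscsi =
		malloc(sizeof(*vscsi) + num_queues * sizeof(vscsi->req_vqs[0]));
	if (!vscsi)
		return 1;
	vscsi->num_queues = num_queues;

	printf("allocated %u request queues\n", vscsi->num_queues);
	free(vscsi);
	return 0;
}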
/kernel/linux/linux-5.10/drivers/net/ethernet/qlogic/qede/
qede.h
189 #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
190 #define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
202 u16 num_queues; member
205 #define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
206 #define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
208 #define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
607 #define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
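qede wraps queue iteration in a macro that hard-codes the enclosing edev variable. A sketch of the same idiom with a stand-in device struct:

#include <stdio.h>

/* Illustrative device struct; qede keeps num_queues in struct qede_dev. */
struct fake_edev {
	unsigned int num_queues;
};

/* Same shape as qede's helper: iterate every queue index of the device.
 * Like the original it relies on an 'edev' variable in the caller's scope. */
#define for_each_queue(i) for ((i) = 0; (i) < edev->num_queues; (i)++)

int main(void)
{
	struct fake_edev dev = { .num_queues = 3 };
	struct fake_edev *edev = &dev;
	unsigned int i;

	for_each_queue(i)
		printf("queue %u\n", i);
	return 0;
}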
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdkfd/
kfd_pm4_headers.h
88 uint32_t num_queues:10; member
141 uint32_t num_queues:10; member
kfd_pm4_headers_vi.h
191 uint32_t num_queues:10; member
240 uint32_t num_queues:3; member
361 uint32_t num_queues:3; member
kfd_pm4_headers_ai.h
184 uint32_t num_queues:10; member
285 uint32_t num_queues:3; member
408 uint32_t num_queues:3; member
kfd_packet_manager_vi.c
59 packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; in pm_map_process_vi()
152 packet->bitfields2.num_queues = 1; in pm_map_queues_vi()
239 packet->bitfields2.num_queues = 1; in pm_unmap_queues_vi()
kfd_packet_manager_v9.c
49 packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; in pm_map_process_v9()
153 packet->bitfields2.num_queues = 1; in pm_map_queues_v9()
259 packet->bitfields2.num_queues = 1; in pm_unmap_queues_v9()
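The amdkfd headers pack num_queues into PM4 packet dwords as a 10-bit (or 3-bit) bitfield, and the map-process packets report zero queues for debug processes. A sketch with an assumed, not actual, field layout:

#include <stdio.h>
#include <stdint.h>

/* A 10-bit field as in the map-process packet can describe 0..1023 queues.
 * This struct only mimics that packing; it is not the real PM4 packet. */
struct fake_map_process {
	uint32_t other_bits:16;
	uint32_t num_queues:10;
	uint32_t reserved:6;
};

int main(void)
{
	struct fake_map_process pkt = { 0 };
	unsigned int queue_count = 12;
	int is_debug = 0;

	/* pm_map_process_vi()/pm_map_process_v9(): debug processes report zero queues. */
	pkt.num_queues = is_debug ? 0 : queue_count;

	printf("packet dword is %zu bytes, num_queues=%u (max %u)\n",
	       sizeof(pkt), (unsigned int)pkt.num_queues, (1u << 10) - 1);
	return 0;
}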
/kernel/linux/linux-5.10/drivers/net/ethernet/cavium/liquidio/
cn23xx_vf_device.c
50 static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues) in cn23xx_vf_reset_io_queues() argument
57 for (q_no = 0; q_no < num_queues; q_no++) { in cn23xx_vf_reset_io_queues()
67 for (q_no = 0; q_no < num_queues; q_no++) { in cn23xx_vf_reset_io_queues()
361 u32 num_queues = oct->num_iqs; in cn23xx_disable_vf_io_queues() local
366 if (num_queues < oct->num_oqs) in cn23xx_disable_vf_io_queues()
367 num_queues = oct->num_oqs; in cn23xx_disable_vf_io_queues()
369 cn23xx_vf_reset_io_queues(oct, num_queues); in cn23xx_disable_vf_io_queues()
/kernel/linux/linux-5.10/drivers/scsi/qla2xxx/
qla_tmpl.h
134 uint32_t num_queues; member
195 uint32_t num_queues; member
/kernel/linux/linux-5.10/drivers/net/ethernet/cadence/
macb_main.c
442 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_buffers()
604 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_down()
660 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_up()
1502 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_hresp_error_task()
1520 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_hresp_error_task()
1733 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
2140 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_free_rx_buffers()
2184 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
2208 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_alloc_rx_buffers()
2244 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
[all …]
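macb walks its queues with an index and a pointer advanced together. A minimal sketch of that loop over a stand-in device struct:

#include <stdio.h>

/* Minimal stand-ins for the driver's per-queue state and device struct. */
struct fake_queue {
	int id;
};

struct fake_bp {
	unsigned int num_queues;
	struct fake_queue queues[4];
};

int main(void)
{
	struct fake_bp bp = { .num_queues = 3,
			      .queues = { { 0 }, { 1 }, { 2 }, { 3 } } };
	struct fake_queue *queue;
	unsigned int q;

	/* macb advances the index and the queue pointer in lockstep. */
	for (q = 0, queue = bp.queues; q < bp.num_queues; ++q, ++queue)
		printf("queue %u (id %d)\n", q, queue->id);

	return 0;
}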
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/include/nvfw/
sec2.h
20 u8 num_queues; member
