/drivers/net/ethernet/google/gve/ |
D | gve_main.c |
    164   priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,    in gve_alloc_notify_blocks()
    166   priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,    in gve_alloc_notify_blocks()
    170   vecs_enabled, priv->tx_cfg.max_queues,    in gve_alloc_notify_blocks()
    171   priv->rx_cfg.max_queues);    in gve_alloc_notify_blocks()
    172   if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)    in gve_alloc_notify_blocks()
    173   priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;    in gve_alloc_notify_blocks()
    174   if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)    in gve_alloc_notify_blocks()
    175   priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;    in gve_alloc_notify_blocks()
    953   priv->tx_cfg.max_queues =    in gve_init_priv()
    954   min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);    in gve_init_priv()
    [all …]
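For orientation, the gve_main.c hits above cap max_queues at half of the available notification blocks and then pull num_queues down under the new ceiling. Below is a minimal userspace sketch of that clamping pattern; the struct, field names, and numbers are illustrative stand-ins, not the driver's actual definitions.

    /*
     * Illustrative sketch (not the driver's code) of the clamping the gve
     * hits show: max_queues is bounded by the notification blocks that were
     * actually granted, split between TX and RX, and the active queue count
     * is then pulled under that ceiling.
     */
    #include <stdio.h>

    struct queue_cfg {
        int num_queues;   /* currently configured queues */
        int max_queues;   /* upper bound advertised to ethtool */
    };

    static int min_int(int a, int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        struct queue_cfg tx_cfg = { .num_queues = 16, .max_queues = 32 };
        struct queue_cfg rx_cfg = { .num_queues = 16, .max_queues = 32 };
        int num_ntfy_blks = 8;   /* pretend only 8 notify blocks were granted */

        /* TX and RX each get at most half of the notify blocks. */
        tx_cfg.max_queues = min_int(tx_cfg.max_queues, num_ntfy_blks / 2);
        rx_cfg.max_queues = min_int(rx_cfg.max_queues, num_ntfy_blks / 2);

        /* The active queue count must fit under the new ceiling. */
        if (tx_cfg.num_queues > tx_cfg.max_queues)
            tx_cfg.num_queues = tx_cfg.max_queues;
        if (rx_cfg.num_queues > rx_cfg.max_queues)
            rx_cfg.num_queues = rx_cfg.max_queues;

        printf("tx: %d/%d, rx: %d/%d\n",
               tx_cfg.num_queues, tx_cfg.max_queues,
               rx_cfg.num_queues, rx_cfg.max_queues);
        return 0;
    }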
|
D | gve_ethtool.c |
    169   cmd->max_rx = priv->rx_cfg.max_queues;    in gve_get_channels()
    170   cmd->max_tx = priv->tx_cfg.max_queues;    in gve_get_channels()
|
D | gve.h | 154 u16 max_queues; member
|
/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_vf.c |
    40    for (i = 0; i < hw->mac.max_queues; i++) {    in fm10k_stop_hw_vf()
    126   hw->mac.max_queues = i;    in fm10k_init_hw_vf()
    142   hw->mac.max_queues = 0;    in fm10k_init_hw_vf()
    454   fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);    in fm10k_update_hw_stats_vf()
    468   fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);    in fm10k_rebind_hw_stats_vf()
|
D | fm10k_ethtool.c |
    170   for (i = 0; i < interface->hw.mac.max_queues; i++) {    in fm10k_get_stat_strings()
    207   stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;    in fm10k_get_sset_count()
    282   for (i = 0; i < interface->hw.mac.max_queues; i++) {    in fm10k_get_ethtool_stats()
    411   if (i < hw->mac.max_queues)    in fm10k_get_regs()
    1103  unsigned int max_combined = interface->hw.mac.max_queues;    in fm10k_max_channels()
|
D | fm10k_pf.c |
    174   hw->mac.max_queues = FM10K_MAX_QUEUES_PF;    in fm10k_init_hw_pf()
    1487  fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);    in fm10k_update_hw_stats_pf()
    1512  fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);    in fm10k_rebind_hw_stats_pf()
|
D | fm10k_common.c | 241 return fm10k_disable_queues_generic(hw, hw->mac.max_queues); in fm10k_stop_hw_generic()
|
D | fm10k_type.h | 546 u16 max_queues; member
|
D | fm10k_main.c |
    1507  rss_i = interface->hw.mac.max_queues / pcs;    in fm10k_set_qos_queues()
    1540  rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);    in fm10k_set_rss_queues()
|
D | fm10k_pci.c | 613 for (i = 0; i < hw->mac.max_queues; i++) { in fm10k_update_stats()
|
/drivers/net/ethernet/cavium/thunder/ |
D | nicvf_ethtool.c |
    710   channel->max_rx = nic->max_queues;    in nicvf_get_channels()
    711   channel->max_tx = nic->max_queues;    in nicvf_get_channels()
    728   if (channel->rx_count > nic->max_queues)    in nicvf_set_channels()
    730   if (channel->tx_count > nic->max_queues)    in nicvf_set_channels()
    734   ((channel->tx_count + channel->rx_count) > nic->max_queues)) {    in nicvf_set_channels()
    737   nic->max_queues);    in nicvf_set_channels()
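The nicvf_ethtool.c hits reject a channel request whose RX or TX count exceeds max_queues, and in some configurations also apply a combined tx_count + rx_count limit (the enabling condition is not visible in the truncated hits, so it is modeled here with a simple flag). A standalone sketch of that style of validation, with invented names:

    /* Sketch of validating an ethtool-style channel request against a
     * device queue limit; names are illustrative, not nicvf's. */
    #include <stdio.h>

    struct channel_req {
        unsigned int rx_count;
        unsigned int tx_count;
    };

    static int validate_channels(const struct channel_req *req,
                                 unsigned int max_queues, int sum_limited)
    {
        if (req->rx_count > max_queues || req->tx_count > max_queues)
            return -1;   /* either side alone exceeds the limit */

        /* When TX and RX rings share the same hardware budget, the sum
         * of both must also fit under max_queues. */
        if (sum_limited && (req->rx_count + req->tx_count) > max_queues)
            return -1;

        return 0;
    }

    int main(void)
    {
        struct channel_req req = { .rx_count = 6, .tx_count = 6 };
        unsigned int max_queues = 8;

        printf("independent rings: %s\n",
               validate_channels(&req, max_queues, 0) ? "rejected" : "ok");
        printf("shared budget:     %s\n",
               validate_channels(&req, max_queues, 1) ? "rejected" : "ok");
        return 0;
    }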
|
D | nic.h | 310 u8 max_queues; member
|
D | nicvf_main.c |
    1862  if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {    in nicvf_xdp_setup()
    1865  nic->max_queues);    in nicvf_xdp_setup()
    2180  nic->max_queues = qcount;    in nicvf_probe()
    2185  nic->max_queues *= 2;    in nicvf_probe()
|
/drivers/net/ethernet/samsung/sxgbe/ |
D | sxgbe_common.h |
    194   #define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \    argument
    195   for (queue_num = 0; queue_num < max_queues; queue_num++)
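SXGBE_FOR_EACH_QUEUE is a bounded-loop helper macro. A self-contained example of how such a macro is typically used; the queue count and loop body are made up for demonstration:

    /* Standalone demo of the SXGBE_FOR_EACH_QUEUE-style iteration macro. */
    #include <stdio.h>

    #define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \
        for (queue_num = 0; queue_num < max_queues; queue_num++)

    int main(void)
    {
        int max_queues = 4;   /* illustrative queue count */
        int queue_num;

        /* Visit every queue index from 0 to max_queues - 1. */
        SXGBE_FOR_EACH_QUEUE(max_queues, queue_num)
            printf("servicing queue %d\n", queue_num);

        return 0;
    }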
|
/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_process_queue_manager.c |
    203   unsigned int max_queues = 127; /* HWS limit */    in pqm_create_queue()    local
    221   max_queues = dev->device_info->max_no_of_hqd/2;    in pqm_create_queue()
    223   if (pdd->qpd.queue_count >= max_queues)    in pqm_create_queue()
|
/drivers/net/ethernet/amazon/ena/ |
D | ena_netdev.c |
    3159  struct ena_admin_queue_feature_desc *max_queues =    in ena_calc_io_queue_num()    local
    3160  &get_feat_ctx->max_queues;    in ena_calc_io_queue_num()
    3161  io_tx_sq_num = max_queues->max_sq_num;    in ena_calc_io_queue_num()
    3162  io_tx_cq_num = max_queues->max_cq_num;    in ena_calc_io_queue_num()
    3384  struct ena_admin_queue_feature_desc *max_queues =    in ena_calc_queue_size()    local
    3385  &ctx->get_feat_ctx->max_queues;    in ena_calc_queue_size()
    3386  max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,    in ena_calc_queue_size()
    3387  max_queues->max_sq_depth);    in ena_calc_queue_size()
    3388  max_tx_queue_size = max_queues->max_cq_depth;    in ena_calc_queue_size()
    3395  max_queues->max_sq_depth);    in ena_calc_queue_size()
    [all …]
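The ena_netdev.c hits derive usable queue depths by taking the minimum of the device-reported completion-queue and submission-queue depths. A rough standalone sketch of that derivation with invented values; any further adjustments the driver makes beyond these truncated hits are omitted.

    /* Sketch: a queue can only be as deep as the shallower of its
     * submission queue (SQ) and completion queue (CQ). Values invented. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t min_u32(uint32_t a, uint32_t b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        /* Pretend these came from the device's feature descriptor. */
        uint32_t max_sq_depth = 1024;
        uint32_t max_cq_depth = 512;

        uint32_t max_rx_queue_size = min_u32(max_cq_depth, max_sq_depth);
        uint32_t max_tx_queue_size = min_u32(max_cq_depth, max_sq_depth);

        printf("rx depth: %u, tx depth: %u\n",
               max_rx_queue_size, max_tx_queue_size);
        return 0;
    }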
|
D | ena_com.h | 354 struct ena_admin_queue_feature_desc max_queues; member
|
D | ena_com.c | 1924 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, in ena_com_get_dev_attr_feat()
|
/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_qp.c |
    74    u16 num_queues, u16 max_queues)    in hinic_qp_prepare_header()    argument
    76    u16 max_sqs = max_queues;    in hinic_qp_prepare_header()
    77    u16 max_rqs = max_queues;    in hinic_qp_prepare_header()
|
D | hinic_hw_qp.h | 120 u16 num_queues, u16 max_queues);
|
/drivers/net/ |
D | xen-netfront.c |
    62    module_param_named(max_queues, xennet_max_queues, uint, 0644);
    63    MODULE_PARM_DESC(max_queues,
    1806  unsigned int max_queues = 0;    in talk_to_netback()    local
    1813  max_queues = xenbus_read_unsigned(info->xbdev->otherend,    in talk_to_netback()
    1815  num_queues = min(max_queues, xennet_max_queues);    in talk_to_netback()
|
/drivers/block/xen-blkback/ |
D | blkback.c |
    103   module_param_named(max_queues, xenblk_max_queues, uint, 0644);
    104   MODULE_PARM_DESC(max_queues,
|
/drivers/net/xen-netback/ |
D | netback.c |
    72    module_param_named(max_queues, xenvif_max_queues, uint, 0644);
    73    MODULE_PARM_DESC(max_queues,
|
/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_ethtool.c |
    3016  int max_queues = min_t(int, adapter->num_rx_queues,    in ixgbe_set_rxfh()    local
    3021  (max_queues < 2))    in ixgbe_set_rxfh()
    3022  max_queues = 2;    in ixgbe_set_rxfh()
    3026  if (indir[i] >= max_queues)    in ixgbe_set_rxfh()
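The ixgbe_set_rxfh hits bound max_queues by the active RX queue count (raising it to at least 2 under a condition the truncated snippet does not fully show) and then reject any RSS indirection-table entry that indexes past it. A minimal sketch of the table-validation step; names and sizes are illustrative, not ixgbe's.

    /* Sketch of validating an RSS indirection table: every entry must
     * index a valid RX queue. Names and limits are illustrative. */
    #include <stdio.h>

    static int check_indir_table(const unsigned int *indir, int len,
                                 unsigned int max_queues)
    {
        int i;

        for (i = 0; i < len; i++) {
            if (indir[i] >= max_queues)
                return -1;   /* entry points past the last queue */
        }
        return 0;
    }

    int main(void)
    {
        unsigned int indir[8] = { 0, 1, 2, 3, 0, 1, 2, 7 };
        unsigned int max_queues = 4;   /* valid queue indices: 0..3 */

        printf("%s\n", check_indir_table(indir, 8, max_queues) ?
               "indirection table rejected" : "indirection table ok");
        return 0;
    }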
|
/drivers/block/ |
D | xen-blkfront.c |
    140   module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
    141   MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
|