/drivers/net/ethernet/intel/fm10k/

fm10k_debugfs.c
    116  struct fm10k_q_vector *q_vector = ring->q_vector;  (in fm10k_dbg_desc_open(), local)
    120  if (ring < q_vector->rx.ring)  (in fm10k_dbg_desc_open())
    150  void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)  (in fm10k_dbg_q_vector_init(), argument)
    152  struct fm10k_intfc *interface = q_vector->interface;  (in fm10k_dbg_q_vector_init())
    160  snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);  (in fm10k_dbg_q_vector_init())
    162  q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);  (in fm10k_dbg_q_vector_init())
    165  for (i = 0; i < q_vector->tx.count; i++) {  (in fm10k_dbg_q_vector_init())
    166  struct fm10k_ring *ring = &q_vector->tx.ring[i];  (in fm10k_dbg_q_vector_init())
    171  q_vector->dbg_q_vector, ring,  (in fm10k_dbg_q_vector_init())
    176  for (i = 0; i < q_vector->rx.count; i++) {  (in fm10k_dbg_q_vector_init())
    [all …]
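The fm10k_dbg_q_vector_init() hits above show the per-vector debugfs layout: one directory per interrupt vector, named from the vector index, with per-ring entries created underneath. A minimal sketch of that shape follows; the demo_* types are simplified stand-ins, not the driver's real struct layouts.

    #include <linux/debugfs.h>
    #include <linux/kernel.h>

    struct demo_ring {
            struct dentry *dbg_ring;        /* per-ring debugfs node */
    };

    struct demo_q_vector {
            struct dentry *dbg_dir;         /* per-vector debugfs dir */
            u16 v_idx;                      /* vector index */
            struct { struct demo_ring *ring; u16 count; } tx, rx;
    };

    static void demo_dbg_q_vector_init(struct demo_q_vector *qv,
                                       struct dentry *parent)
    {
            char name[16];
            u16 i;

            /* one directory per vector, e.g. "q_vector.003" */
            snprintf(name, sizeof(name), "q_vector.%03d", qv->v_idx);
            qv->dbg_dir = debugfs_create_dir(name, parent);

            /* the real driver creates a descriptor-dump file per ring here */
            for (i = 0; i < qv->tx.count; i++)
                    ;       /* debugfs_create_file(...) for tx.ring[i] */
            for (i = 0; i < qv->rx.count; i++)
                    ;       /* debugfs_create_file(...) for rx.ring[i] */
    }
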
fm10k_main.c
    322   skb = napi_alloc_skb(&rx_ring->q_vector->napi,  (in fm10k_fetch_rx_buffer())
    559   static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,  (in fm10k_receive_skb(), argument)
    562   napi_gro_receive(&q_vector->napi, skb);  (in fm10k_receive_skb())
    565   static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,  (in fm10k_clean_rx_irq(), argument)
    615   fm10k_receive_skb(q_vector, skb);  (in fm10k_clean_rx_irq())
    631   q_vector->rx.total_packets += total_packets;  (in fm10k_clean_rx_irq())
    632   q_vector->rx.total_bytes += total_bytes;  (in fm10k_clean_rx_irq())
    1125  struct fm10k_intfc *interface = ring->q_vector->interface;  (in fm10k_get_tx_pending())
    1189  static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,  (in fm10k_clean_tx_irq(), argument)
    1192  struct fm10k_intfc *interface = q_vector->interface;  (in fm10k_clean_tx_irq())
    [all …]
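These fm10k_main.c lines trace the Rx service path shared by every driver in this listing: allocate the skb from the vector's NAPI context, push completed frames through GRO, and fold the byte/packet totals into the vector's Rx container once per poll. A hedged reconstruction of that shape, with descriptor handling elided; it assumes a demo_q_vector carrying a napi_struct plus rx.total_packets/rx.total_bytes counters, and the 256-byte header length is an arbitrary placeholder.

    static int demo_clean_rx_irq(struct demo_q_vector *q_vector,
                                 struct demo_ring *rx_ring, int budget)
    {
            unsigned int total_bytes = 0, total_packets = 0;

            while (total_packets < budget) {
                    struct sk_buff *skb;

                    /* allocate in the NAPI context that owns this ring */
                    skb = napi_alloc_skb(&q_vector->napi, 256);
                    if (!skb)
                            break;

                    /* ... pull the frame out of the Rx descriptor ring ... */

                    total_bytes += skb->len;
                    total_packets++;

                    /* completed frame goes to GRO via the vector's NAPI */
                    napi_gro_receive(&q_vector->napi, skb);
            }

            /* one update per poll; the ITR estimator consumes these */
            q_vector->rx.total_packets += total_packets;
            q_vector->rx.total_bytes += total_bytes;

            return total_packets;
    }
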
fm10k_pci.c
    720   struct fm10k_q_vector *qv = interface->q_vector[i];  (in fm10k_check_hang_subtask())
    900   if (ring->q_vector) {  (in fm10k_configure_tx_ring())
    901   txint = ring->q_vector->v_idx + NON_Q_VECTORS;  (in fm10k_configure_tx_ring())
    913   ring->q_vector)  (in fm10k_configure_tx_ring())
    915   &ring->q_vector->affinity_mask,  (in fm10k_configure_tx_ring())
    1039  if (ring->q_vector) {  (in fm10k_configure_rx_ring())
    1040  rxint = ring->q_vector->v_idx + NON_Q_VECTORS;  (in fm10k_configure_rx_ring())
    1174  struct fm10k_q_vector *q_vector;  (in fm10k_napi_enable_all(), local)
    1178  q_vector = interface->q_vector[q_idx];  (in fm10k_napi_enable_all())
    1179  napi_enable(&q_vector->napi);  (in fm10k_napi_enable_all())
    [all …]

fm10k.h
    104  struct fm10k_q_vector *q_vector; /* backpointer to host q_vector */  (member)
    343  struct fm10k_q_vector *q_vector[MAX_Q_VECTORS];  (member)
    548  void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector);
    549  void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector);
    555  static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {}  (in fm10k_dbg_q_vector_init(), argument)
    556  static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {}  (in fm10k_dbg_q_vector_exit(), argument)
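The fm10k.h lines condense the ownership model used across this listing: each ring holds a backpointer to its host q_vector, the interface owns a fixed-size array of vector pointers, and the debugfs hooks collapse to empty inline stubs when debugfs is compiled out (the CONFIG_DEBUG_FS guard is the usual idiom; the listing only shows the two variants). An outline trimmed to the fields visible above:

    struct fm10k_ring {
            struct fm10k_q_vector *q_vector;  /* backpointer to host q_vector */
            /* ... */
    };

    struct fm10k_intfc {
            struct fm10k_q_vector *q_vector[MAX_Q_VECTORS];
            /* ... */
    };

    #ifdef CONFIG_DEBUG_FS
    void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector);
    void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector);
    #else
    /* empty stubs keep callers free of #ifdef clutter */
    static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {}
    static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {}
    #endif
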
/drivers/net/ethernet/intel/igc/

igc_main.c
    60    static void igc_write_itr(struct igc_q_vector *q_vector);
    61    static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
    1307  skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);  (in igc_construct_skb())
    1557  static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)  (in igc_clean_rx_irq(), argument)
    1560  struct igc_ring *rx_ring = q_vector->rx.ring;  (in igc_clean_rx_irq())
    1623  napi_gro_receive(&q_vector->napi, skb);  (in igc_clean_rx_irq())
    1639  q_vector->rx.total_packets += total_packets;  (in igc_clean_rx_irq())
    1640  q_vector->rx.total_bytes += total_bytes;  (in igc_clean_rx_irq())
    1701  static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)  (in igc_clean_tx_irq(), argument)
    1703  struct igc_adapter *adapter = q_vector->adapter;  (in igc_clean_tx_irq())
    [all …]
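Note the split visible in the igc prototypes: igc_clean_tx_irq() returns bool (was the Tx ring fully serviced?) while igc_clean_rx_irq() returns how many packets it consumed against the budget. The NAPI poll routine combines the two to decide whether to keep polling or complete and re-arm the interrupt. A hedged sketch of that contract; demo_poll(), the demo_clean_*_irq() helpers, and demo_enable_irq() are illustrative names, not igc's:

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            struct demo_q_vector *q_vector =
                    container_of(napi, struct demo_q_vector, napi);
            bool clean_complete = true;
            int work_done = 0;

            if (q_vector->tx.ring)
                    clean_complete = demo_clean_tx_irq(q_vector, budget);

            if (q_vector->rx.ring) {
                    int cleaned = demo_clean_rx_irq(q_vector, budget);

                    work_done += cleaned;
                    if (cleaned >= budget)
                            clean_complete = false;
            }

            /* either direction still busy: ask to be polled again */
            if (!clean_complete)
                    return budget;

            /* all work done: leave polling and re-arm this vector's IRQ */
            if (napi_complete_done(napi, work_done))
                    demo_enable_irq(q_vector);

            return min(work_done, budget - 1);
    }
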
/drivers/net/ethernet/intel/iavf/

iavf_txrx.c
    166  iavf_force_wb(vsi, tx_ring->q_vector);  (in iavf_detect_recover_hung())
    285  tx_ring->q_vector->tx.total_bytes += total_bytes;  (in iavf_clean_tx_irq())
    286  tx_ring->q_vector->tx.total_packets += total_packets;  (in iavf_clean_tx_irq())
    333  struct iavf_q_vector *q_vector)  (in iavf_enable_wb_on_itr(), argument)
    335  u16 flags = q_vector->tx.ring[0].flags;  (in iavf_enable_wb_on_itr())
    341  if (q_vector->arm_wb_state)  (in iavf_enable_wb_on_itr())
    348  IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);  (in iavf_enable_wb_on_itr())
    349  q_vector->arm_wb_state = true;  (in iavf_enable_wb_on_itr())
    358  void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)  (in iavf_force_wb(), argument)
    367  IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),  (in iavf_force_wb())
    [all …]

iavf_main.c
    285  struct iavf_q_vector *q_vector = data;  (in iavf_msix_clean_rings(), local)
    287  if (!q_vector->tx.ring && !q_vector->rx.ring)  (in iavf_msix_clean_rings())
    290  napi_schedule_irqoff(&q_vector->napi);  (in iavf_msix_clean_rings())
    304  struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];  (in iavf_map_vector_to_rxq(), local)
    308  rx_ring->q_vector = q_vector;  (in iavf_map_vector_to_rxq())
    309  rx_ring->next = q_vector->rx.ring;  (in iavf_map_vector_to_rxq())
    311  q_vector->rx.ring = rx_ring;  (in iavf_map_vector_to_rxq())
    312  q_vector->rx.count++;  (in iavf_map_vector_to_rxq())
    313  q_vector->rx.next_update = jiffies + 1;  (in iavf_map_vector_to_rxq())
    314  q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);  (in iavf_map_vector_to_rxq())
    [all …]
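iavf_map_vector_to_rxq() is the setup-time mapping step: give the ring its backpointer, push it onto the head of the vector's singly linked Rx ring list, and prime the container's ITR state so the first interrupt re-evaluates it. A close reconstruction of those lines; DEMO_ITR_TO_REG() stands in for the driver's ITR_TO_REG() usec-to-register conversion, and the demo_* types are assumed:

    static void demo_map_vector_to_rxq(struct demo_adapter *adapter,
                                       int v_idx, int r_idx)
    {
            struct demo_q_vector *q_vector = &adapter->q_vectors[v_idx];
            struct demo_ring *rx_ring = &adapter->rx_rings[r_idx];

            rx_ring->q_vector = q_vector;           /* ring -> vector backpointer */
            rx_ring->next = q_vector->rx.ring;      /* push onto the list head */
            q_vector->rx.ring = rx_ring;
            q_vector->rx.count++;

            /* make the ITR logic run on the first interrupt after mapping */
            q_vector->rx.next_update = jiffies + 1;
            q_vector->rx.target_itr = DEMO_ITR_TO_REG(rx_ring->itr_setting);
    }
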
iavf_ethtool.c
    717  struct iavf_q_vector *q_vector;  (in iavf_set_itr_per_queue(), local)
    730  q_vector = rx_ring->q_vector;  (in iavf_set_itr_per_queue())
    731  q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);  (in iavf_set_itr_per_queue())
    733  q_vector = tx_ring->q_vector;  (in iavf_set_itr_per_queue())
    734  q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);  (in iavf_set_itr_per_queue())

/drivers/net/ethernet/intel/ixgbe/

ixgbe_lib.c
    835  struct ixgbe_q_vector *q_vector;  (in ixgbe_alloc_q_vector(), local)
    856  q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),  (in ixgbe_alloc_q_vector())
    858  if (!q_vector)  (in ixgbe_alloc_q_vector())
    859  q_vector = kzalloc(struct_size(q_vector, ring, ring_count),  (in ixgbe_alloc_q_vector())
    861  if (!q_vector)  (in ixgbe_alloc_q_vector())
    866  cpumask_set_cpu(cpu, &q_vector->affinity_mask);  (in ixgbe_alloc_q_vector())
    867  q_vector->numa_node = node;  (in ixgbe_alloc_q_vector())
    871  q_vector->cpu = -1;  (in ixgbe_alloc_q_vector())
    875  netif_napi_add(adapter->netdev, &q_vector->napi,  (in ixgbe_alloc_q_vector())
    879  adapter->q_vector[v_idx] = q_vector;  (in ixgbe_alloc_q_vector())
    [all …]
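ixgbe_alloc_q_vector() shows the allocation side: the vector and all of its rings live in a single NUMA-local allocation sized with struct_size() over a flexible array member, falling back to an ordinary kzalloc() if the preferred node is out of memory. A hedged sketch; the 4-argument netif_napi_add() matches the kernel vintage of this listing (newer kernels drop the weight parameter), and demo_poll() is the poll routine sketched earlier:

    #include <linux/netdevice.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_ring {
            struct demo_ring *next;
            /* ... */
    };

    struct demo_q_vector {
            struct demo_adapter *adapter;
            struct napi_struct napi;
            cpumask_t affinity_mask;
            int numa_node;
            int cpu;                        /* last CPU seen by DCA, -1 = none */
            struct demo_ring ring[];        /* rings allocated inline */
    };

    static int demo_alloc_q_vector(struct demo_adapter *adapter, int v_idx,
                                   int ring_count, int node, int cpu)
    {
            struct demo_q_vector *q_vector;

            /* prefer the local NUMA node, then fall back to any node */
            q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
                                    GFP_KERNEL, node);
            if (!q_vector)
                    q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
                                       GFP_KERNEL);
            if (!q_vector)
                    return -ENOMEM;

            cpumask_set_cpu(cpu, &q_vector->affinity_mask);
            q_vector->numa_node = node;
            q_vector->cpu = -1;

            netif_napi_add(adapter->netdev, &q_vector->napi, demo_poll, 64);

            q_vector->adapter = adapter;
            adapter->q_vector[v_idx] = q_vector;
            return 0;
    }
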
ixgbe_xsk.c
    403  skb = __napi_alloc_skb(&rx_ring->q_vector->napi,  (in ixgbe_construct_skb_zc())
    427  int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,  (in ixgbe_clean_rx_irq_zc(), argument)
    432  struct ixgbe_adapter *adapter = q_vector->adapter;  (in ixgbe_clean_rx_irq_zc())
    526  ixgbe_rx_skb(q_vector, skb);  (in ixgbe_clean_rx_irq_zc())
    546  q_vector->rx.total_packets += total_rx_packets;  (in ixgbe_clean_rx_irq_zc())
    547  q_vector->rx.total_bytes += total_rx_bytes;  (in ixgbe_clean_rx_irq_zc())
    640  bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,  (in ixgbe_clean_xdp_tx_irq(), argument)
    686  q_vector->tx.total_bytes += total_bytes;  (in ixgbe_clean_xdp_tx_irq())
    687  q_vector->tx.total_packets += total_packets;  (in ixgbe_clean_xdp_tx_irq())
    695  return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);  (in ixgbe_clean_xdp_tx_irq())
    [all …]

ixgbe_txrx_common.h
    23  void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
    39  int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
    43  bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,

ixgbe_main.c
    1112  static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,  (in ixgbe_clean_tx_irq(), argument)
    1115  struct ixgbe_adapter *adapter = q_vector->adapter;  (in ixgbe_clean_tx_irq())
    1119  unsigned int budget = q_vector->tx.work_limit;  (in ixgbe_clean_tx_irq())
    1211  q_vector->tx.total_bytes += total_bytes;  (in ixgbe_clean_tx_irq())
    1212  q_vector->tx.total_packets += total_packets;  (in ixgbe_clean_tx_irq())
    1343  static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)  (in ixgbe_update_dca(), argument)
    1345  struct ixgbe_adapter *adapter = q_vector->adapter;  (in ixgbe_update_dca())
    1349  if (q_vector->cpu == cpu)  (in ixgbe_update_dca())
    1352  ixgbe_for_each_ring(ring, q_vector->tx)  (in ixgbe_update_dca())
    1355  ixgbe_for_each_ring(ring, q_vector->rx)  (in ixgbe_update_dca())
    [all …]
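ixgbe_update_dca() walks every ring attached to the vector and bails out early when the vector is already steered at the current CPU, so DCA reprogramming only happens on migration. A hedged sketch; the list-walk macro mirrors the driver's ixgbe_for_each_ring(), while demo_update_tx_dca() and demo_update_rx_dca() are hypothetical per-ring reprogramming helpers:

    #define demo_for_each_ring(pos, head) \
            for (pos = (head).ring; pos; pos = pos->next)

    static void demo_update_dca(struct demo_q_vector *q_vector)
    {
            struct demo_ring *ring;
            int cpu = get_cpu();

            /* vector already targeted at this CPU: nothing to reprogram */
            if (q_vector->cpu == cpu)
                    goto out;

            demo_for_each_ring(ring, q_vector->tx)
                    demo_update_tx_dca(q_vector->adapter, ring, cpu);

            demo_for_each_ring(ring, q_vector->rx)
                    demo_update_rx_dca(q_vector->adapter, ring, cpu);

            q_vector->cpu = cpu;
    out:
            put_cpu();
    }
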
/drivers/net/ethernet/intel/ice/

ice_txrx.c
    195   tx_ring->q_vector->tx.total_bytes += total_bytes;  (in ice_clean_tx_irq())
    196   tx_ring->q_vector->tx.total_pkts += total_pkts;  (in ice_clean_tx_irq())
    706   skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,  (in ice_construct_skb())
    990   napi_gro_receive(&rx_ring->q_vector->napi, skb);  (in ice_receive_skb())
    1107  rx_ring->q_vector->rx.total_pkts += total_rx_pkts;  (in ice_clean_rx_irq())
    1108  rx_ring->q_vector->rx.total_bytes += total_rx_bytes;  (in ice_clean_rx_irq())
    1193  ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)  (in ice_update_itr(), argument)
    1208  if (q_vector->itr_countdown) {  (in ice_update_itr())
    1213  container_is_rx = (&q_vector->rx == rc);  (in ice_update_itr())
    1229  prefetch(q_vector->vsi->port_info);  (in ice_update_itr())
    [all …]

ice_lib.c
    521   struct ice_q_vector *q_vector = (struct ice_q_vector *)data;  (in ice_msix_clean_rings(), local)
    523   if (!q_vector->tx.ring && !q_vector->rx.ring)  (in ice_msix_clean_rings())
    526   napi_schedule(&q_vector->napi);  (in ice_msix_clean_rings())
    1107  struct ice_q_vector *q_vector;  (in ice_free_q_vector(), local)
    1116  q_vector = vsi->q_vectors[v_idx];  (in ice_free_q_vector())
    1118  ice_for_each_ring(ring, q_vector->tx)  (in ice_free_q_vector())
    1119  ring->q_vector = NULL;  (in ice_free_q_vector())
    1120  ice_for_each_ring(ring, q_vector->rx)  (in ice_free_q_vector())
    1121  ring->q_vector = NULL;  (in ice_free_q_vector())
    1125  netif_napi_del(&q_vector->napi);  (in ice_free_q_vector())
    [all …]
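ice_free_q_vector() is the teardown mirror of the mapping code above: every ring still attached to the vector has its backpointer severed before the NAPI instance is deleted, so nothing can chase a stale q_vector pointer afterwards. A hedged sketch reusing the ring-list walk macro from the ixgbe sketch; how the vector memory itself is released is elided:

    static void demo_free_q_vector(struct demo_vsi *vsi, int v_idx)
    {
            struct demo_q_vector *q_vector = vsi->q_vectors[v_idx];
            struct demo_ring *ring;

            /* sever ring -> vector backpointers first */
            demo_for_each_ring(ring, q_vector->tx)
                    ring->q_vector = NULL;
            demo_for_each_ring(ring, q_vector->rx)
                    ring->q_vector = NULL;

            netif_napi_del(&q_vector->napi);
            vsi->q_vectors[v_idx] = NULL;
            /* vector memory is freed by whichever allocator created it */
    }
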
ice_main.c
    107   ice_trigger_sw_intr(hw, tx_ring->q_vector);  (in ice_check_for_hang_subtask())
    1565  struct ice_q_vector *q_vector =  (in ice_irq_affinity_notify(), local)
    1568  cpumask_copy(&q_vector->affinity_mask, mask);  (in ice_irq_affinity_notify())
    1613  struct ice_q_vector *q_vector = vsi->q_vectors[vector];  (in ice_vsi_req_irq_msix(), local)
    1617  if (q_vector->tx.ring && q_vector->rx.ring) {  (in ice_vsi_req_irq_msix())
    1618  snprintf(q_vector->name, sizeof(q_vector->name) - 1,  (in ice_vsi_req_irq_msix())
    1621  } else if (q_vector->rx.ring) {  (in ice_vsi_req_irq_msix())
    1622  snprintf(q_vector->name, sizeof(q_vector->name) - 1,  (in ice_vsi_req_irq_msix())
    1624  } else if (q_vector->tx.ring) {  (in ice_vsi_req_irq_msix())
    1625  snprintf(q_vector->name, sizeof(q_vector->name) - 1,  (in ice_vsi_req_irq_msix())
    [all …]
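The snprintf() chain in ice_vsi_req_irq_msix() derives the IRQ name from what the vector actually services, so /proc/interrupts reads TxRx for a shared vector and rx or tx for a dedicated one. A hedged sketch of that naming; the exact format string is an assumption reconstructed from the visible branches:

    static void demo_name_vector(struct demo_q_vector *q_vector,
                                 const char *basename,
                                 unsigned int *rx_idx, unsigned int *tx_idx)
    {
            if (q_vector->tx.ring && q_vector->rx.ring)
                    snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                             "%s-TxRx-%u", basename, (*rx_idx)++);
            else if (q_vector->rx.ring)
                    snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                             "%s-rx-%u", basename, (*rx_idx)++);
            else if (q_vector->tx.ring)
                    snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                             "%s-tx-%u", basename, (*tx_idx)++);
            else
                    q_vector->name[0] = '\0';  /* no rings: skip the IRQ request */
    }
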
ice_virtchnl_pf.c
    757   int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)  (in ice_calc_vf_reg_idx(), argument)
    761   if (!vf || !q_vector)  (in ice_calc_vf_reg_idx())
    768   q_vector->v_idx + 1;  (in ice_calc_vf_reg_idx())
    2159  struct ice_q_vector *q_vector;  (in ice_vc_cfg_irq_map_msg(), local)
    2180  q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];  (in ice_vc_cfg_irq_map_msg())
    2181  if (!q_vector) {  (in ice_vc_cfg_irq_map_msg())
    2188  q_vector->num_ring_rx = 0;  (in ice_vc_cfg_irq_map_msg())
    2194  q_vector->num_ring_rx++;  (in ice_vc_cfg_irq_map_msg())
    2195  q_vector->rx.itr_idx = map->rxitr_idx;  (in ice_vc_cfg_irq_map_msg())
    2196  vsi->rx_rings[vsi_q_id]->q_vector = q_vector;  (in ice_vc_cfg_irq_map_msg())
    [all …]

ice_virtchnl_pf.h
    121  int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
    192  struct ice_q_vector __always_unused *q_vector)  (in ice_calc_vf_reg_idx(), argument)

ice.h
    391  struct ice_q_vector *q_vector)  (in ice_irq_dynamic_ena(), argument)
    393  u32 vector = (vsi && q_vector) ? q_vector->reg_idx :  (in ice_irq_dynamic_ena())

/drivers/net/ethernet/intel/ixgbevf/

ixgbevf_main.c
    112   static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
    266   static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,  (in ixgbevf_clean_tx_irq(), argument)
    269   struct ixgbevf_adapter *adapter = q_vector->adapter;  (in ixgbevf_clean_tx_irq())
    365   q_vector->tx.total_bytes += total_bytes;  (in ixgbevf_clean_tx_irq())
    366   q_vector->tx.total_packets += total_packets;  (in ixgbevf_clean_tx_irq())
    431   static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,  (in ixgbevf_rx_skb(), argument)
    434   napi_gro_receive(&q_vector->napi, skb);  (in ixgbevf_rx_skb())
    892   skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);  (in ixgbevf_construct_skb())
    1115  static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,  (in ixgbevf_clean_rx_irq(), argument)
    1120  struct ixgbevf_adapter *adapter = q_vector->adapter;  (in ixgbevf_clean_rx_irq())
    [all …]

ethtool.c
    803  if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)  (in ixgbevf_get_coalesce())
    819  struct ixgbevf_q_vector *q_vector;  (in ixgbevf_set_coalesce(), local)
    824  if (adapter->q_vector[0]->tx.count &&  (in ixgbevf_set_coalesce())
    825  adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)  (in ixgbevf_set_coalesce())
    855  q_vector = adapter->q_vector[i];  (in ixgbevf_set_coalesce())
    856  if (q_vector->tx.count && !q_vector->rx.count)  (in ixgbevf_set_coalesce())
    858  q_vector->itr = tx_itr_param;  (in ixgbevf_set_coalesce())
    861  q_vector->itr = rx_itr_param;  (in ixgbevf_set_coalesce())
    862  ixgbevf_write_eitr(q_vector);  (in ixgbevf_set_coalesce())
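ixgbevf_set_coalesce() fans a single ethtool setting out to every vector: Tx-only vectors take the Tx ITR, while Rx-only and shared vectors take the Rx ITR, and each result is pushed to hardware through ixgbevf_write_eitr() (declared in ixgbevf.h below). A hedged sketch with demo_write_eitr() standing in for that register write:

    static void demo_apply_coalesce(struct demo_adapter *adapter,
                                    u32 tx_itr_param, u32 rx_itr_param)
    {
            int i;

            for (i = 0; i < adapter->num_q_vectors; i++) {
                    struct demo_q_vector *q_vector = adapter->q_vector[i];

                    if (q_vector->tx.count && !q_vector->rx.count)
                            q_vector->itr = tx_itr_param;   /* Tx-only vector */
                    else
                            q_vector->itr = rx_itr_param;   /* Rx-only or shared */

                    demo_write_eitr(q_vector);      /* push to the EITR register */
            }
    }
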
ixgbevf.h
    92   struct ixgbevf_q_vector *q_vector; /* backpointer to q_vector */  (member)
    324  struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];  (member)
    460  extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);

/drivers/net/ethernet/intel/i40e/

i40e_txrx.c
    752  i40e_force_wb(vsi, tx_ring->q_vector);  (in i40e_detect_recover_hung())
    906  struct i40e_q_vector *q_vector)  (in i40e_enable_wb_on_itr(), argument)
    908  u16 flags = q_vector->tx.ring[0].flags;  (in i40e_enable_wb_on_itr())
    914  if (q_vector->arm_wb_state)  (in i40e_enable_wb_on_itr())
    922  I40E_PFINT_DYN_CTLN(q_vector->reg_idx),  (in i40e_enable_wb_on_itr())
    930  q_vector->arm_wb_state = true;  (in i40e_enable_wb_on_itr())
    939  void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)  (in i40e_force_wb(), argument)
    949  I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);  (in i40e_force_wb())
    961  static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,  (in i40e_container_is_rx(), argument)
    964  return &q_vector->rx == rc;  (in i40e_container_is_rx())
    [all …]
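i40e_enable_wb_on_itr() and i40e_force_wb() manage Tx descriptor write-back while interrupts are throttled: the opt-in flag is read from the vector's first Tx ring, and arm_wb_state debounces the write so the dynamic interrupt-control register is touched at most once per interval. A loose sketch of the control flow only; the DEMO_* flag and register names plus demo_wr32() are placeholders, and the actual I40E_PFINT_DYN_CTLN bit encodings are deliberately elided:

    static void demo_enable_wb_on_itr(struct demo_vsi *vsi,
                                      struct demo_q_vector *q_vector)
    {
            u16 flags = q_vector->tx.ring[0].flags;

            /* ring did not opt in to write-back on ITR */
            if (!(flags & DEMO_TXR_FLAGS_WB_ON_ITR))
                    return;

            /* already armed for this interval: skip the register write */
            if (q_vector->arm_wb_state)
                    return;

            /* placeholder for the WB_ON_ITR bits in the DYN_CTLN register */
            demo_wr32(vsi->hw, DEMO_DYN_CTLN(q_vector->reg_idx),
                      DEMO_WB_ON_ITR_ENABLE);
            q_vector->arm_wb_state = true;
    }
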
i40e_main.c
    360   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +  (in i40e_tx_timeout())
    3093  if (!ring->q_vector || !ring->netdev || ring->ch)  (in i40e_config_xps_tx_ring())
    3100  cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);  (in i40e_config_xps_tx_ring())
    3531  struct i40e_q_vector *q_vector = vsi->q_vectors[i];  (in i40e_vsi_configure_msix(), local)
    3533  q_vector->rx.next_update = jiffies + 1;  (in i40e_vsi_configure_msix())
    3534  q_vector->rx.target_itr =  (in i40e_vsi_configure_msix())
    3537  q_vector->rx.target_itr >> 1);  (in i40e_vsi_configure_msix())
    3538  q_vector->rx.current_itr = q_vector->rx.target_itr;  (in i40e_vsi_configure_msix())
    3540  q_vector->tx.next_update = jiffies + 1;  (in i40e_vsi_configure_msix())
    3541  q_vector->tx.target_itr =  (in i40e_vsi_configure_msix())
    [all …]

i40e_txrx_common.h
    55  tx_ring->q_vector->tx.total_bytes += total_bytes;  (in i40e_update_tx_stats())
    56  tx_ring->q_vector->tx.total_packets += total_packets;  (in i40e_update_tx_stats())
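Keeping i40e_update_tx_stats() as a small inline in i40e_txrx_common.h lets the regular Tx clean path and the XDP/zero-copy path fold their totals into the same per-vector container without duplicating code. A hedged reconstruction; the u64_stats syncp guard around the ring counters is the usual idiom and an assumption here, since the listing shows only the q_vector lines:

    #include <linux/u64_stats_sync.h>

    static inline void demo_update_tx_stats(struct demo_ring *tx_ring,
                                            unsigned int total_packets,
                                            unsigned int total_bytes)
    {
            u64_stats_update_begin(&tx_ring->syncp);
            tx_ring->stats.bytes += total_bytes;
            tx_ring->stats.packets += total_packets;
            u64_stats_update_end(&tx_ring->syncp);

            /* feed the ITR estimator on the owning vector */
            tx_ring->q_vector->tx.total_bytes += total_bytes;
            tx_ring->q_vector->tx.total_packets += total_packets;
    }
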
/drivers/net/ethernet/intel/igb/

igb_main.c
    790  static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)  (in igb_assign_vector(), argument)
    792  struct igb_adapter *adapter = q_vector->adapter;  (in igb_assign_vector())
    798  if (q_vector->rx.ring)  (in igb_assign_vector())
    799  rx_queue = q_vector->rx.ring->reg_idx;  (in igb_assign_vector())
    800  if (q_vector->tx.ring)  (in igb_assign_vector())
    801  tx_queue = q_vector->tx.ring->reg_idx;  (in igb_assign_vector())
    817  q_vector->eims_value = msixbm;  (in igb_assign_vector())
    833  q_vector->eims_value = BIT(msix_vector);  (in igb_assign_vector())
    854  q_vector->eims_value = BIT(msix_vector);  (in igb_assign_vector())
    862  adapter->eims_enable_mask |= q_vector->eims_value;  (in igb_assign_vector())
    [all …]
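igb_assign_vector() explains the cached eims_value: 82575-class hardware exposes a shared bitmask register with fixed bit positions per Rx/Tx queue, while later MACs dedicate one bit per MSI-X vector; either way the per-vector value is OR'd into an adapter-wide enable mask so all vectors can be masked or unmasked in one register write. A loose sketch; demo_82575_msixbm() is a hypothetical helper hiding the legacy bit layout, and BIT() is the real kernel macro:

    static void demo_assign_vector(struct demo_q_vector *q_vector, int msix_vector)
    {
            struct demo_adapter *adapter = q_vector->adapter;

            switch (adapter->mac_type) {
            case DEMO_MAC_82575:
                    /* legacy: fixed EICR bits per Rx/Tx queue (layout elided) */
                    q_vector->eims_value = demo_82575_msixbm(q_vector);
                    break;
            default:
                    /* modern MACs: one EIMS bit per MSI-X vector */
                    q_vector->eims_value = BIT(msix_vector);
                    break;
            }

            /* adapter-wide mask used to enable/disable all vectors at once */
            adapter->eims_enable_mask |= q_vector->eims_value;
    }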