Searched refs:q_vector (Results 1 – 25 of 46) sorted by relevance

/drivers/net/ethernet/intel/ice/
ice_base.c:104 struct ice_q_vector *q_vector; in ice_vsi_alloc_q_vector() local
107 q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector), in ice_vsi_alloc_q_vector()
109 if (!q_vector) in ice_vsi_alloc_q_vector()
112 q_vector->vsi = vsi; in ice_vsi_alloc_q_vector()
113 q_vector->v_idx = v_idx; in ice_vsi_alloc_q_vector()
114 q_vector->tx.itr_setting = ICE_DFLT_TX_ITR; in ice_vsi_alloc_q_vector()
115 q_vector->rx.itr_setting = ICE_DFLT_RX_ITR; in ice_vsi_alloc_q_vector()
120 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); in ice_vsi_alloc_q_vector()
127 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, in ice_vsi_alloc_q_vector()
132 vsi->q_vectors[v_idx] = q_vector; in ice_vsi_alloc_q_vector()
[all …]
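
The ice_base.c hits above show the allocation pattern these drivers share: each interrupt vector gets a zero-initialized q_vector that carries a back-pointer to its VSI, its vector index, default ITR (interrupt throttle rate) settings, a CPU affinity hint, and a NAPI context registered via netif_napi_add(). A minimal userspace sketch of that shape; every name below is illustrative, not the driver's API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified analogue of struct ice_q_vector. */
struct q_vector {
        void *vsi;             /* back-pointer to the owning VSI */
        unsigned short v_idx;  /* vector index within the VSI */
        unsigned short tx_itr; /* default TX throttle, like ICE_DFLT_TX_ITR */
        unsigned short rx_itr; /* default RX throttle, like ICE_DFLT_RX_ITR */
};

/* Mirrors the allocate-then-wire-up flow of ice_vsi_alloc_q_vector(). */
static struct q_vector *alloc_q_vector(void *vsi, unsigned short v_idx)
{
        struct q_vector *qv = calloc(1, sizeof(*qv)); /* like devm_kzalloc() */

        if (!qv)
                return NULL;

        qv->vsi = vsi;
        qv->v_idx = v_idx;
        qv->tx_itr = 84; /* illustrative default */
        qv->rx_itr = 84; /* illustrative default */
        /* the real driver also sets CPU affinity and registers NAPI here */
        return qv;
}

int main(void)
{
        struct q_vector *qv = alloc_q_vector(NULL, 0);

        if (qv)
                printf("q_vector %u ready\n", qv->v_idx);
        free(qv);
        return 0;
}
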
ice_xsk.c:53 ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector, in ice_qvec_toggle_napi() argument
56 if (!vsi->netdev || !q_vector) in ice_qvec_toggle_napi()
60 napi_enable(&q_vector->napi); in ice_qvec_toggle_napi()
62 napi_disable(&q_vector->napi); in ice_qvec_toggle_napi()
73 struct ice_q_vector *q_vector) in ice_qvec_dis_irq() argument
89 if (q_vector) { in ice_qvec_dis_irq()
90 u16 v_idx = q_vector->v_idx; in ice_qvec_dis_irq()
92 wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0); in ice_qvec_dis_irq()
104 ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector) in ice_qvec_cfg_msix() argument
106 u16 reg_idx = q_vector->reg_idx; in ice_qvec_cfg_msix()
[all …]
ice_txrx.c:977 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, in ice_construct_skb()
1333 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) in ice_update_itr() argument
1348 if (q_vector->itr_countdown) { in ice_update_itr()
1353 container_is_rx = (&q_vector->rx == rc); in ice_update_itr()
1369 prefetch(q_vector->vsi->port_info); in ice_update_itr()
1381 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { in ice_update_itr()
1392 (q_vector->rx.target_itr & ICE_ITR_MASK) == in ice_update_itr()
1420 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); in ice_update_itr()
1454 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, in ice_update_itr()
1502 static void ice_update_ena_itr(struct ice_q_vector *q_vector) in ice_update_ena_itr() argument
[all …]
ice_lib.c:368 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; in ice_msix_clean_ctrl_vsi() local
370 if (!q_vector->tx.ring) in ice_msix_clean_ctrl_vsi()
374 ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET); in ice_msix_clean_ctrl_vsi()
375 ice_clean_ctrl_tx_irq(q_vector->tx.ring); in ice_msix_clean_ctrl_vsi()
387 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; in ice_msix_clean_rings() local
389 if (!q_vector->tx.ring && !q_vector->rx.ring) in ice_msix_clean_rings()
392 napi_schedule(&q_vector->napi); in ice_msix_clean_rings()
1213 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_clear_rings() local
1215 if (q_vector) { in ice_vsi_clear_rings()
1216 q_vector->tx.ring = NULL; in ice_vsi_clear_rings()
[all …]
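
ice_msix_clean_rings() above is the canonical MSI-X fast path: the hard-IRQ handler does no packet work itself; it only checks that the vector has rings attached and schedules the NAPI poll. A hedged userspace sketch of that control flow, with the kernel types replaced by stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED }; /* stand-in for irqreturn_t */
struct napi { bool scheduled; };
struct q_vector {
        void *tx_ring;
        void *rx_ring;
        struct napi napi;
};

/* Mirrors ice_msix_clean_rings(): defer all real work to the NAPI poll. */
static enum irqreturn msix_clean_rings(void *data)
{
        struct q_vector *qv = data;

        if (!qv->tx_ring && !qv->rx_ring)
                return IRQ_HANDLED; /* nothing mapped to this vector */

        qv->napi.scheduled = true;  /* like napi_schedule() */
        return IRQ_HANDLED;
}

int main(void)
{
        struct q_vector qv = { .rx_ring = (void *)1 };

        msix_clean_rings(&qv);
        printf("napi scheduled: %d\n", qv.napi.scheduled);
        return 0;
}
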
ice_base.h:20 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
25 void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
ice_main.c:105 ice_trigger_sw_intr(hw, tx_ring->q_vector); in ice_check_for_hang_subtask()
2160 struct ice_q_vector *q_vector = in ice_irq_affinity_notify() local
2163 cpumask_copy(&q_vector->affinity_mask, mask); in ice_irq_affinity_notify()
2210 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; in ice_vsi_req_irq_msix() local
2214 if (q_vector->tx.ring && q_vector->rx.ring) { in ice_vsi_req_irq_msix()
2215 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ice_vsi_req_irq_msix()
2218 } else if (q_vector->rx.ring) { in ice_vsi_req_irq_msix()
2219 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ice_vsi_req_irq_msix()
2221 } else if (q_vector->tx.ring) { in ice_vsi_req_irq_msix()
2222 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ice_vsi_req_irq_msix()
[all …]
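
ice_vsi_req_irq_msix() names each vector after the ring types it services; that name is what later shows up in /proc/interrupts. A small sketch of the naming branch, with the format strings following the excerpt and everything else illustrative:

#include <stdio.h>

struct q_vector {
        void *tx_ring, *rx_ring;
        char name[32];
};

/* Pick a human-readable IRQ name the way the excerpt does. */
static void name_vector(struct q_vector *qv, const char *dev, int idx)
{
        if (qv->tx_ring && qv->rx_ring)
                snprintf(qv->name, sizeof(qv->name) - 1, "%s-TxRx-%d", dev, idx);
        else if (qv->rx_ring)
                snprintf(qv->name, sizeof(qv->name) - 1, "%s-rx-%d", dev, idx);
        else if (qv->tx_ring)
                snprintf(qv->name, sizeof(qv->name) - 1, "%s-tx-%d", dev, idx);
}

int main(void)
{
        struct q_vector qv = { .tx_ring = (void *)1, .rx_ring = (void *)1 };

        name_vector(&qv, "eth0", 3);
        printf("%s\n", qv.name); /* eth0-TxRx-3 */
        return 0;
}
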
ice_virtchnl_pf.c:832 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) in ice_calc_vf_reg_idx() argument
836 if (!vf || !q_vector) in ice_calc_vf_reg_idx()
843 q_vector->v_idx + 1; in ice_calc_vf_reg_idx()
2803 struct ice_q_vector *q_vector) in ice_cfg_interrupt() argument
2808 q_vector->num_ring_rx = 0; in ice_cfg_interrupt()
2809 q_vector->num_ring_tx = 0; in ice_cfg_interrupt()
2818 q_vector->num_ring_rx++; in ice_cfg_interrupt()
2819 q_vector->rx.itr_idx = map->rxitr_idx; in ice_cfg_interrupt()
2820 vsi->rx_rings[vsi_q_id]->q_vector = q_vector; in ice_cfg_interrupt()
2822 q_vector->rx.itr_idx); in ice_cfg_interrupt()
[all …]
/drivers/net/ethernet/intel/fm10k/
fm10k_debugfs.c:116 struct fm10k_q_vector *q_vector = ring->q_vector; in fm10k_dbg_desc_open() local
120 if (ring < q_vector->rx.ring) in fm10k_dbg_desc_open()
150 void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) in fm10k_dbg_q_vector_init() argument
152 struct fm10k_intfc *interface = q_vector->interface; in fm10k_dbg_q_vector_init()
160 snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx); in fm10k_dbg_q_vector_init()
162 q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc); in fm10k_dbg_q_vector_init()
165 for (i = 0; i < q_vector->tx.count; i++) { in fm10k_dbg_q_vector_init()
166 struct fm10k_ring *ring = &q_vector->tx.ring[i]; in fm10k_dbg_q_vector_init()
171 q_vector->dbg_q_vector, ring, in fm10k_dbg_q_vector_init()
176 for (i = 0; i < q_vector->rx.count; i++) { in fm10k_dbg_q_vector_init()
[all …]
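
fm10k_dbg_q_vector_init() creates one debugfs directory per vector, named q_vector.NNN, then one file per TX and RX ring underneath it. A sketch of the naming and iteration only; the debugfs calls are replaced by a print stub:

#include <stdio.h>

struct q_vector { int v_idx, tx_count, rx_count; };

/* Stub standing in for debugfs_create_dir()/debugfs_create_file(). */
static void create_entry(const char *name)
{
        printf("debugfs entry: %s\n", name);
}

static void dbg_q_vector_init(struct q_vector *qv)
{
        char name[32];
        int i;

        snprintf(name, sizeof(name), "q_vector.%03d", qv->v_idx);
        create_entry(name); /* the per-vector directory */

        for (i = 0; i < qv->tx_count; i++)
                create_entry("tx_ring"); /* one file per TX ring */
        for (i = 0; i < qv->rx_count; i++)
                create_entry("rx_ring"); /* one file per RX ring */
}

int main(void)
{
        struct q_vector qv = { .v_idx = 7, .tx_count = 1, .rx_count = 1 };

        dbg_q_vector_init(&qv);
        return 0;
}
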
fm10k_main.c:324 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in fm10k_fetch_rx_buffer()
561 static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, in fm10k_receive_skb() argument
564 napi_gro_receive(&q_vector->napi, skb); in fm10k_receive_skb()
567 static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, in fm10k_clean_rx_irq() argument
617 fm10k_receive_skb(q_vector, skb); in fm10k_clean_rx_irq()
633 q_vector->rx.total_packets += total_packets; in fm10k_clean_rx_irq()
634 q_vector->rx.total_bytes += total_bytes; in fm10k_clean_rx_irq()
1120 struct fm10k_intfc *interface = ring->q_vector->interface; in fm10k_get_tx_pending()
1184 static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, in fm10k_clean_tx_irq() argument
1187 struct fm10k_intfc *interface = q_vector->interface; in fm10k_clean_tx_irq()
[all …]
fm10k_pci.c:721 struct fm10k_q_vector *qv = interface->q_vector[i]; in fm10k_check_hang_subtask()
901 if (ring->q_vector) { in fm10k_configure_tx_ring()
902 txint = ring->q_vector->v_idx + NON_Q_VECTORS; in fm10k_configure_tx_ring()
914 ring->q_vector) in fm10k_configure_tx_ring()
916 &ring->q_vector->affinity_mask, in fm10k_configure_tx_ring()
1040 if (ring->q_vector) { in fm10k_configure_rx_ring()
1041 rxint = ring->q_vector->v_idx + NON_Q_VECTORS; in fm10k_configure_rx_ring()
1175 struct fm10k_q_vector *q_vector; in fm10k_napi_enable_all() local
1179 q_vector = interface->q_vector[q_idx]; in fm10k_napi_enable_all()
1180 napi_enable(&q_vector->napi); in fm10k_napi_enable_all()
[all …]
fm10k.h:104 struct fm10k_q_vector *q_vector;/* backpointer to host q_vector */ member
337 struct fm10k_q_vector *q_vector[MAX_Q_VECTORS]; member
544 void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector);
545 void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector);
551 static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {} in fm10k_dbg_q_vector_init() argument
552 static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {} in fm10k_dbg_q_vector_exit() argument
/drivers/net/ethernet/intel/iavf/
iavf_txrx.c:169 iavf_force_wb(vsi, tx_ring->q_vector); in iavf_detect_recover_hung()
288 tx_ring->q_vector->tx.total_bytes += total_bytes; in iavf_clean_tx_irq()
289 tx_ring->q_vector->tx.total_packets += total_packets; in iavf_clean_tx_irq()
336 struct iavf_q_vector *q_vector) in iavf_enable_wb_on_itr() argument
338 u16 flags = q_vector->tx.ring[0].flags; in iavf_enable_wb_on_itr()
344 if (q_vector->arm_wb_state) in iavf_enable_wb_on_itr()
351 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val); in iavf_enable_wb_on_itr()
352 q_vector->arm_wb_state = true; in iavf_enable_wb_on_itr()
361 void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector) in iavf_force_wb() argument
370 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), in iavf_force_wb()
[all …]
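
iavf_enable_wb_on_itr() uses a latch: the vector records that descriptor write-back is already armed (arm_wb_state), so the register write happens at most once per interrupt cycle until the state is cleared. A sketch of that latch with the register write stubbed out; the value written is illustrative, not the hardware bitmask:

#include <stdbool.h>
#include <stdio.h>

struct q_vector {
        bool arm_wb_state; /* already armed this cycle? */
        int reg_idx;
};

static void write_reg(int reg, unsigned int val) /* stub for wr32() */
{
        printf("reg[%d] <- 0x%x\n", reg, val);
}

/* Arm write-back once; repeat calls are no-ops until the latch clears. */
static void enable_wb_on_itr(struct q_vector *qv)
{
        if (qv->arm_wb_state)
                return;
        write_reg(qv->reg_idx, 0x1); /* illustrative value */
        qv->arm_wb_state = true;
}

int main(void)
{
        struct q_vector qv = { .reg_idx = 5 };

        enable_wb_on_itr(&qv);
        enable_wb_on_itr(&qv); /* second call does nothing */
        return 0;
}
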
iavf_main.c:296 struct iavf_q_vector *q_vector = data; in iavf_msix_clean_rings() local
298 if (!q_vector->tx.ring && !q_vector->rx.ring) in iavf_msix_clean_rings()
301 napi_schedule_irqoff(&q_vector->napi); in iavf_msix_clean_rings()
315 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; in iavf_map_vector_to_rxq() local
319 rx_ring->q_vector = q_vector; in iavf_map_vector_to_rxq()
320 rx_ring->next = q_vector->rx.ring; in iavf_map_vector_to_rxq()
322 q_vector->rx.ring = rx_ring; in iavf_map_vector_to_rxq()
323 q_vector->rx.count++; in iavf_map_vector_to_rxq()
324 q_vector->rx.next_update = jiffies + 1; in iavf_map_vector_to_rxq()
325 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); in iavf_map_vector_to_rxq()
[all …]
iavf_ethtool.c:748 struct iavf_q_vector *q_vector; in iavf_set_itr_per_queue() local
780 q_vector = rx_ring->q_vector; in iavf_set_itr_per_queue()
781 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); in iavf_set_itr_per_queue()
783 q_vector = tx_ring->q_vector; in iavf_set_itr_per_queue()
784 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); in iavf_set_itr_per_queue()
/drivers/net/ethernet/intel/ixgbe/
ixgbe_lib.c:836 struct ixgbe_q_vector *q_vector; in ixgbe_alloc_q_vector() local
854 q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count), in ixgbe_alloc_q_vector()
856 if (!q_vector) in ixgbe_alloc_q_vector()
857 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), in ixgbe_alloc_q_vector()
859 if (!q_vector) in ixgbe_alloc_q_vector()
864 cpumask_set_cpu(cpu, &q_vector->affinity_mask); in ixgbe_alloc_q_vector()
865 q_vector->numa_node = node; in ixgbe_alloc_q_vector()
869 q_vector->cpu = -1; in ixgbe_alloc_q_vector()
873 netif_napi_add(adapter->netdev, &q_vector->napi, in ixgbe_alloc_q_vector()
877 adapter->q_vector[v_idx] = q_vector; in ixgbe_alloc_q_vector()
[all …]
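
ixgbe_alloc_q_vector() first attempts a node-local allocation (kzalloc_node()) and falls back to a plain kzalloc(), so the vector still comes up when its NUMA node has no free memory. A sketch of that two-step fallback; alloc_on_node() is a hypothetical stand-in, since userspace has no per-node allocator:

#include <stdio.h>
#include <stdlib.h>

struct q_vector { int numa_node; };

/* Hypothetical stand-in for kzalloc_node(); may fail for a given node. */
static void *alloc_on_node(size_t size, int node)
{
        (void)size;
        (void)node;
        return NULL; /* simulate node-local exhaustion */
}

static struct q_vector *alloc_q_vector(int node)
{
        struct q_vector *qv = alloc_on_node(sizeof(*qv), node);

        if (!qv)
                qv = calloc(1, sizeof(*qv)); /* fallback, like plain kzalloc() */
        if (!qv)
                return NULL;
        qv->numa_node = node;
        return qv;
}

int main(void)
{
        struct q_vector *qv = alloc_q_vector(0);

        printf("allocated for node %d\n", qv ? qv->numa_node : -1);
        free(qv);
        return 0;
}
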
ixgbe_xsk.c:214 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, in ixgbe_construct_skb_zc()
239 int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, in ixgbe_clean_rx_irq_zc() argument
244 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_rx_irq_zc()
333 ixgbe_rx_skb(q_vector, skb); in ixgbe_clean_rx_irq_zc()
353 q_vector->rx.total_packets += total_rx_packets; in ixgbe_clean_rx_irq_zc()
354 q_vector->rx.total_bytes += total_rx_bytes; in ixgbe_clean_rx_irq_zc()
448 bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, in ixgbe_clean_xdp_tx_irq() argument
494 q_vector->tx.total_bytes += total_bytes; in ixgbe_clean_xdp_tx_irq()
495 q_vector->tx.total_packets += total_packets; in ixgbe_clean_xdp_tx_irq()
503 return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); in ixgbe_clean_xdp_tx_irq()
[all …]
ixgbe_txrx_common.h:23 void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
40 int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
44 bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
ixgbe_main.c:1110 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, in ixgbe_clean_tx_irq() argument
1113 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_tx_irq()
1117 unsigned int budget = q_vector->tx.work_limit; in ixgbe_clean_tx_irq()
1209 q_vector->tx.total_bytes += total_bytes; in ixgbe_clean_tx_irq()
1210 q_vector->tx.total_packets += total_packets; in ixgbe_clean_tx_irq()
1341 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) in ixgbe_update_dca() argument
1343 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_update_dca()
1347 if (q_vector->cpu == cpu) in ixgbe_update_dca()
1350 ixgbe_for_each_ring(ring, q_vector->tx) in ixgbe_update_dca()
1353 ixgbe_for_each_ring(ring, q_vector->rx) in ixgbe_update_dca()
[all …]
/drivers/net/ethernet/intel/igc/
igc_main.c:1733 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); in igc_construct_skb()
1738 igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); in igc_construct_skb()
2034 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) in igc_clean_rx_irq() argument
2037 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_clean_rx_irq()
2100 napi_gro_receive(&q_vector->napi, skb); in igc_clean_rx_irq()
2116 q_vector->rx.total_packets += total_packets; in igc_clean_rx_irq()
2117 q_vector->rx.total_bytes += total_bytes; in igc_clean_rx_irq()
2132 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) in igc_clean_tx_irq() argument
2134 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_tx_irq()
2136 unsigned int budget = q_vector->tx.work_limit; in igc_clean_tx_irq()
[all …]
/drivers/net/ethernet/intel/ixgbevf/
ixgbevf_main.c:109 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
264 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, in ixgbevf_clean_tx_irq() argument
267 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_clean_tx_irq()
363 q_vector->tx.total_bytes += total_bytes; in ixgbevf_clean_tx_irq()
364 q_vector->tx.total_packets += total_packets; in ixgbevf_clean_tx_irq()
429 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, in ixgbevf_rx_skb() argument
432 napi_gro_receive(&q_vector->napi, skb); in ixgbevf_rx_skb()
888 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); in ixgbevf_construct_skb()
1123 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, in ixgbevf_clean_rx_irq() argument
1128 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_clean_rx_irq()
[all …]
ethtool.c:801 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) in ixgbevf_get_coalesce()
817 struct ixgbevf_q_vector *q_vector; in ixgbevf_set_coalesce() local
822 if (adapter->q_vector[0]->tx.count && in ixgbevf_set_coalesce()
823 adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs) in ixgbevf_set_coalesce()
853 q_vector = adapter->q_vector[i]; in ixgbevf_set_coalesce()
854 if (q_vector->tx.count && !q_vector->rx.count) in ixgbevf_set_coalesce()
856 q_vector->itr = tx_itr_param; in ixgbevf_set_coalesce()
859 q_vector->itr = rx_itr_param; in ixgbevf_set_coalesce()
860 ixgbevf_write_eitr(q_vector); in ixgbevf_set_coalesce()
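
The ixgbevf_set_coalesce() excerpt applies one rule per vector: a vector with only TX rings takes the TX coalescing value, and any RX-capable vector takes the RX value, so RX wins on combined vectors. A compact sketch of that decision; the hardware write done by ixgbevf_write_eitr() is omitted:

#include <stdio.h>

struct q_vector {
        int tx_count, rx_count;
        unsigned int itr;
};

/* TX-only vectors take the TX parameter; everything else takes RX. */
static void set_coalesce(struct q_vector *qv, unsigned int tx_itr,
                         unsigned int rx_itr)
{
        if (qv->tx_count && !qv->rx_count)
                qv->itr = tx_itr;
        else
                qv->itr = rx_itr;
        /* the driver then pushes qv->itr to hardware */
}

int main(void)
{
        struct q_vector combined = { .tx_count = 1, .rx_count = 1 };
        struct q_vector tx_only = { .tx_count = 1 };

        set_coalesce(&combined, 100, 50);
        set_coalesce(&tx_only, 100, 50);
        printf("combined=%u tx_only=%u\n", combined.itr, tx_only.itr);
        return 0;
}
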
/drivers/net/ethernet/intel/i40e/
i40e_txrx.c:753 i40e_force_wb(vsi, tx_ring->q_vector); in i40e_detect_recover_hung()
907 struct i40e_q_vector *q_vector) in i40e_enable_wb_on_itr() argument
909 u16 flags = q_vector->tx.ring[0].flags; in i40e_enable_wb_on_itr()
915 if (q_vector->arm_wb_state) in i40e_enable_wb_on_itr()
923 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), in i40e_enable_wb_on_itr()
931 q_vector->arm_wb_state = true; in i40e_enable_wb_on_itr()
940 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) in i40e_force_wb() argument
950 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); in i40e_force_wb()
962 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, in i40e_container_is_rx() argument
965 return &q_vector->rx == rc; in i40e_container_is_rx()
[all …]
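
i40e_container_is_rx() above is a one-line idiom worth noting: given a pointer to a ring container, it distinguishes RX from TX purely by comparing addresses against the embedded member. A sketch of the same trick:

#include <stdbool.h>
#include <stdio.h>

struct ring_container { unsigned int total_packets; };
struct q_vector {
        struct ring_container rx;
        struct ring_container tx;
};

/* Identify the container by its address, exactly as the excerpt does. */
static bool container_is_rx(struct q_vector *qv, struct ring_container *rc)
{
        return &qv->rx == rc;
}

int main(void)
{
        struct q_vector qv = { 0 };

        printf("rx? %d\n", container_is_rx(&qv, &qv.rx)); /* 1 */
        printf("rx? %d\n", container_is_rx(&qv, &qv.tx)); /* 0 */
        return 0;
}
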
i40e_txrx_common.h:51 tx_ring->q_vector->tx.total_bytes += total_bytes; in i40e_update_tx_stats()
52 tx_ring->q_vector->tx.total_packets += total_packets; in i40e_update_tx_stats()
i40e_main.c:365 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + in i40e_tx_timeout()
3248 if (!ring->q_vector || !ring->netdev || ring->ch) in i40e_config_xps_tx_ring()
3255 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); in i40e_config_xps_tx_ring()
3688 struct i40e_q_vector *q_vector = vsi->q_vectors[i]; in i40e_vsi_configure_msix() local
3690 q_vector->rx.next_update = jiffies + 1; in i40e_vsi_configure_msix()
3691 q_vector->rx.target_itr = in i40e_vsi_configure_msix()
3694 q_vector->rx.target_itr >> 1); in i40e_vsi_configure_msix()
3695 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_vsi_configure_msix()
3697 q_vector->tx.next_update = jiffies + 1; in i40e_vsi_configure_msix()
3698 q_vector->tx.target_itr = in i40e_vsi_configure_msix()
[all …]
/drivers/net/ethernet/intel/igb/
igb_main.c:781 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) in igb_assign_vector() argument
783 struct igb_adapter *adapter = q_vector->adapter; in igb_assign_vector()
789 if (q_vector->rx.ring) in igb_assign_vector()
790 rx_queue = q_vector->rx.ring->reg_idx; in igb_assign_vector()
791 if (q_vector->tx.ring) in igb_assign_vector()
792 tx_queue = q_vector->tx.ring->reg_idx; in igb_assign_vector()
808 q_vector->eims_value = msixbm; in igb_assign_vector()
824 q_vector->eims_value = BIT(msix_vector); in igb_assign_vector()
845 q_vector->eims_value = BIT(msix_vector); in igb_assign_vector()
853 adapter->eims_enable_mask |= q_vector->eims_value; in igb_assign_vector()
[all …]
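
igb_assign_vector() records which interrupt-enable bit belongs to each vector (a queue bitmap on the oldest MACs, BIT(msix_vector) on newer ones) and ORs it into the adapter-wide eims_enable_mask. A sketch of the mask bookkeeping for the BIT(msix_vector) case:

#include <stdio.h>

#define BIT(n) (1u << (n))

struct q_vector { unsigned int eims_value; };

/* Accumulate each vector's interrupt-enable bit into the adapter mask. */
static void assign_vector(struct q_vector *qv, int msix_vector,
                          unsigned int *eims_enable_mask)
{
        qv->eims_value = BIT(msix_vector);
        *eims_enable_mask |= qv->eims_value;
}

int main(void)
{
        struct q_vector qv0, qv1;
        unsigned int mask = 0;

        assign_vector(&qv0, 0, &mask);
        assign_vector(&qv1, 3, &mask);
        printf("eims mask: 0x%x\n", mask); /* 0x9 */
        return 0;
}
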
