Lines matching refs: vsi. Identifier cross-reference hits for vsi in the i40e AF_XDP (XSK) zero-copy code; each entry reads "<source line> <matched code> in <enclosing function>", with a trailing note on whether the hit is a function argument or a local.
19 static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem) in i40e_xsk_umem_dma_map() argument
21 struct i40e_pf *pf = vsi->back; in i40e_xsk_umem_dma_map()
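The two hits at lines 19 and 21 show only the signature and the PF back-pointer; the body is not quoted. A plausible reconstruction, assuming the xdp_umem-era i40e driver (roughly v5.3 to v5.5), DMA-maps every UMEM page bidirectionally and unwinds on failure; everything past line 21 below is inferred, not quoted:

/* Sketch: lines other than 19/21 are reconstructed, not quoted. */
static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
        struct i40e_pf *pf = vsi->back;
        struct device *dev = &pf->pdev->dev;
        unsigned int i, j;
        dma_addr_t dma;

        for (i = 0; i < umem->npgs; i++) {
                /* Map each UMEM page for both RX and TX zero-copy use. */
                dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
                if (dma_mapping_error(dev, dma))
                        goto out_unmap;

                umem->pages[i].dma = dma;
        }

        return 0;

out_unmap:
        /* Unwind whatever was mapped before the failure. */
        for (j = 0; j < i; j++) {
                dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
                umem->pages[j].dma = 0;
        }

        return -1;
}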
53 static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem) in i40e_xsk_umem_dma_unmap() argument
55 struct i40e_pf *pf = vsi->back; in i40e_xsk_umem_dma_unmap()
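The unmap side mirrors the map loop. Again only the signature and the pf lookup are quoted; the loop below is a reconstruction under the same era assumption:

static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
        struct i40e_pf *pf = vsi->back;
        struct device *dev = &pf->pdev->dev;
        unsigned int i;

        for (i = 0; i < umem->npgs; i++) {
                /* Release the per-page DMA mapping set up at bind time. */
                dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
                umem->pages[i].dma = 0;
        }
}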
77 static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, in i40e_xsk_umem_enable() argument
80 struct net_device *netdev = vsi->netdev; in i40e_xsk_umem_enable()
85 if (vsi->type != I40E_VSI_MAIN) in i40e_xsk_umem_enable()
88 if (qid >= vsi->num_queue_pairs) in i40e_xsk_umem_enable()
95 reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count); in i40e_xsk_umem_enable()
101 err = i40e_xsk_umem_dma_map(vsi, umem); in i40e_xsk_umem_enable()
105 set_bit(qid, vsi->af_xdp_zc_qps); in i40e_xsk_umem_enable()
107 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); in i40e_xsk_umem_enable()
110 err = i40e_queue_pair_disable(vsi, qid); in i40e_xsk_umem_enable()
114 err = i40e_queue_pair_enable(vsi, qid); in i40e_xsk_umem_enable()
119 err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); in i40e_xsk_umem_enable()
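The matches for i40e_xsk_umem_enable() trace the whole enable flow: reject anything but the main VSI and out-of-range queue ids, prepare a fill-queue reuse stash sized to the RX ring, DMA-map the UMEM, mark the queue in the af_xdp_zc_qps bitmap, and, if the netdev is up with XDP enabled, bounce the queue pair and kick NAPI via i40e_xsk_wakeup(). Stitched together below; the error returns and the reuse-queue swap (xsk_reuseq_swap()/xsk_reuseq_free(), kernel helpers of that era) are reconstructed assumptions:

static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
                                u16 qid)
{
        struct net_device *netdev = vsi->netdev;
        struct xdp_umem_fq_reuse *reuseq;
        bool if_running;
        int err;

        if (vsi->type != I40E_VSI_MAIN)
                return -EINVAL;

        if (qid >= vsi->num_queue_pairs)
                return -EINVAL;

        /* Size the fill-queue reuse stash to the RX ring length. */
        reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
        if (!reuseq)
                return -ENOMEM;

        xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

        err = i40e_xsk_umem_dma_map(vsi, umem);
        if (err)
                return err;

        set_bit(qid, vsi->af_xdp_zc_qps);

        if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

        if (if_running) {
                /* Restart the queue pair so it picks up the bound UMEM. */
                err = i40e_queue_pair_disable(vsi, qid);
                if (err)
                        return err;

                err = i40e_queue_pair_enable(vsi, qid);
                if (err)
                        return err;

                /* Kick-start NAPI so RX processing begins immediately. */
                err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
                if (err)
                        return err;
        }

        return 0;
}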
134 static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) in i40e_xsk_umem_disable() argument
136 struct net_device *netdev = vsi->netdev; in i40e_xsk_umem_disable()
145 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); in i40e_xsk_umem_disable()
148 err = i40e_queue_pair_disable(vsi, qid); in i40e_xsk_umem_disable()
153 clear_bit(qid, vsi->af_xdp_zc_qps); in i40e_xsk_umem_disable()
154 i40e_xsk_umem_dma_unmap(vsi, umem); in i40e_xsk_umem_disable()
157 err = i40e_queue_pair_enable(vsi, qid); in i40e_xsk_umem_disable()
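Disable is the inverse: look up the UMEM bound to the queue, quiesce the queue pair if the interface is running, clear the bitmap bit, unmap the pages, then bring the pair back up. The xdp_get_umem_from_qid() lookup and the return codes are reconstructed, not quoted:

static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
        struct net_device *netdev = vsi->netdev;
        struct xdp_umem *umem;
        bool if_running;
        int err;

        umem = xdp_get_umem_from_qid(netdev, qid);
        if (!umem)
                return -EINVAL;

        if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

        if (if_running) {
                err = i40e_queue_pair_disable(vsi, qid);
                if (err)
                        return err;
        }

        clear_bit(qid, vsi->af_xdp_zc_qps);
        i40e_xsk_umem_dma_unmap(vsi, umem);

        if (if_running) {
                err = i40e_queue_pair_enable(vsi, qid);
                if (err)
                        return err;
        }

        return 0;
}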
175 int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, in i40e_xsk_umem_setup() argument
178 return umem ? i40e_xsk_umem_enable(vsi, umem, qid) : in i40e_xsk_umem_setup()
179 i40e_xsk_umem_disable(vsi, qid); in i40e_xsk_umem_setup()
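Lines 175 to 179 quote this dispatcher nearly whole; reassembled it reads:

int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
                        u16 qid)
{
        /* A non-NULL umem binds zero-copy to the queue pair; NULL unbinds. */
        return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
                      i40e_xsk_umem_disable(vsi, qid);
}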
214 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp_zc()
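The lone hit at line 214 is the XDP_TX case of the zero-copy RX verdict switch in i40e_run_xdp_zc(): each RX queue forwards onto the XDP TX ring with the same index. A sketch of the surrounding verdict handling; only the xdp_ring lookup is quoted, act/result/err/xdp/xdp_prog are locals of the enclosing function, and the other arms are reconstructed from the era's driver:

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                /* Line 214: pair the RX queue with its XDP TX ring. */
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
                result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
                result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                /* fall through */
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                /* fall through: drop the frame */
        case XDP_DROP:
                result = I40E_XDP_CONSUMED;
                break;
        }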
719 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, in i40e_clean_xdp_tx_irq() argument
722 unsigned int ntc, total_bytes = 0, budget = vsi->work_limit; in i40e_clean_xdp_tx_irq()
766 i40e_arm_wb(tx_ring, vsi, budget); in i40e_clean_xdp_tx_irq()
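The three hits at 719, 722 and 766 outline the zero-copy TX completion path: the cleaning budget comes from vsi->work_limit, up to that many frames are completed from next_to_clean, and i40e_arm_wb() is armed at the end. A condensed reconstruction; the head-pointer math, the cleaning loop and the helpers i40e_get_head(), i40e_clean_xdp_tx_buffer(), xsk_umem_complete_tx(), i40e_update_tx_stats() and i40e_xmit_zc() are assumed from the era's driver, not quoted:

bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
                           struct i40e_ring *tx_ring, int napi_budget)
{
        unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
        u32 i, completed_frames, frames_ready, xsk_frames = 0;
        u32 head_idx = i40e_get_head(tx_ring);
        bool work_done = true, xmit_done;
        struct i40e_tx_buffer *tx_bi;

        if (head_idx < tx_ring->next_to_clean)
                head_idx += tx_ring->count;
        frames_ready = head_idx - tx_ring->next_to_clean;

        if (frames_ready == 0)
                goto out_xmit;

        completed_frames = min_t(u32, frames_ready, budget);
        if (frames_ready > budget)
                work_done = false;

        ntc = tx_ring->next_to_clean;
        for (i = 0; i < completed_frames; i++) {
                tx_bi = &tx_ring->tx_bi[ntc];

                if (tx_bi->xdpf) /* XDP_REDIRECTed frame: free it */
                        i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
                else             /* AF_XDP TX frame: complete to user space */
                        xsk_frames++;

                tx_bi->xdpf = NULL;
                total_bytes += tx_bi->bytecount;

                if (++ntc >= tx_ring->count)
                        ntc = 0;
        }

        tx_ring->next_to_clean = ntc;

        if (xsk_frames)
                xsk_umem_complete_tx(tx_ring->xsk_umem, xsk_frames);

        i40e_arm_wb(tx_ring, vsi, budget);
        i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);

out_xmit:
        /* Push any frames queued on the AF_XDP TX ring. */
        xmit_done = i40e_xmit_zc(tx_ring, budget);

        return work_done && xmit_done;
}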
789 struct i40e_vsi *vsi = np->vsi; in i40e_xsk_wakeup() local
790 struct i40e_pf *pf = vsi->back; in i40e_xsk_wakeup()
796 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_xsk_wakeup()
799 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_xsk_wakeup()
802 if (queue_id >= vsi->num_queue_pairs) in i40e_xsk_wakeup()
805 if (!vsi->xdp_rings[queue_id]->xsk_umem) in i40e_xsk_wakeup()
808 ring = vsi->xdp_rings[queue_id]; in i40e_xsk_wakeup()
817 i40e_force_wb(vsi, ring->q_vector); in i40e_xsk_wakeup()
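The ndo_xsk_wakeup hook is mostly visible in the matches: guard checks for a down VSI, XDP disabled, an out-of-range queue, or a queue without a UMEM, then a forced write-back to get NAPI scheduled. The PF pointer fetched at line 790 is presumably for a config-busy check; the error codes and the napi_if_scheduled_mark_missed() optimization are reconstructed assumptions:

int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_ring *ring;

        if (test_bit(__I40E_CONFIG_BUSY, pf->state))
                return -ENETDOWN;

        if (test_bit(__I40E_VSI_DOWN, vsi->state))
                return -ENETDOWN;

        if (!i40e_enabled_xdp_vsi(vsi))
                return -ENXIO;

        if (queue_id >= vsi->num_queue_pairs)
                return -ENXIO;

        if (!vsi->xdp_rings[queue_id]->xsk_umem)
                return -ENXIO;

        ring = vsi->xdp_rings[queue_id];

        /* If NAPI is already running, mark a miss so it reruns; otherwise
         * trigger an interrupt via a forced write-back so NAPI is scheduled
         * from interrupt context and IRQ affinity is honored.
         */
        if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
                i40e_force_wb(vsi, ring->q_vector);

        return 0;
}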
873 bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi) in i40e_xsk_any_rx_ring_enabled() argument
875 struct net_device *netdev = vsi->netdev; in i40e_xsk_any_rx_ring_enabled()
878 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_xsk_any_rx_ring_enabled()
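The last helper reports whether any queue pair on the VSI still has a UMEM bound, which matters when tearing XDP down. Only the loop body is unquoted; xdp_get_umem_from_qid() is the assumed per-queue check:

bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
        struct net_device *netdev = vsi->netdev;
        int i;

        for (i = 0; i < vsi->num_queue_pairs; i++) {
                /* Any queue with a UMEM bound means zero-copy is in use. */
                if (xdp_get_umem_from_qid(netdev, i))
                        return true;
        }

        return false;
}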