Lines matching refs: tx_ring (each entry: source line number, matching code fragment, enclosing function)

21 static void i40e_fdir(struct i40e_ring *tx_ring,  in i40e_fdir()  argument
25 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
30 i = tx_ring->next_to_use; in i40e_fdir()
31 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_fdir()
34 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
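The i40e_fdir() lines above show the basic producer-index idiom: take the slot at next_to_use, fill its descriptor, then bump the index and wrap it back to 0 at the ring size. Below is a minimal userspace sketch of that idiom, not the driver code; the struct and field names are illustrative stand-ins for the i40e definitions.

    #include <stdio.h>

    struct demo_ring {
        unsigned int count;        /* number of slots in the ring */
        unsigned int next_to_use;  /* next free slot */
    };

    /* Claim one slot and advance next_to_use, wrapping at ring->count. */
    static unsigned int demo_ring_get_slot(struct demo_ring *ring)
    {
        unsigned int i = ring->next_to_use;

        ring->next_to_use = (i + 1 < ring->count) ? i + 1 : 0;
        return i;
    }

    int main(void)
    {
        struct demo_ring ring = { .count = 4, .next_to_use = 2 };

        for (int n = 0; n < 6; n++)
            printf("slot %u\n", demo_ring_get_slot(&ring));
        return 0;
    }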
94 struct i40e_ring *tx_ring; in i40e_program_fdir_filter() local
106 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
107 dev = tx_ring->dev; in i40e_program_fdir_filter()
110 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { in i40e_program_fdir_filter()
122 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
123 first = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
124 i40e_fdir(tx_ring, fdir_data, add); in i40e_program_fdir_filter()
127 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
128 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_program_fdir_filter()
129 tx_buf = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
131 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
156 writel(tx_ring->next_to_use, tx_ring->tail); in i40e_program_fdir_filter()
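i40e_program_fdir_filter() gates on I40E_DESC_UNUSED(tx_ring) before building descriptors and only then rings the tail doorbell. The sketch below reconstructs how such an "unused descriptors" count can be derived from the producer and consumer indices of a circular ring; it is a generic reconstruction under that assumption, not the literal I40E_DESC_UNUSED macro.

    #include <assert.h>

    struct demo_ring {
        unsigned int count;
        unsigned int next_to_use;    /* producer index */
        unsigned int next_to_clean;  /* consumer index */
    };

    /* Free slots, keeping one slot as a gap so "full" and "empty" stay distinguishable. */
    static unsigned int demo_desc_unused(const struct demo_ring *r)
    {
        unsigned int used = (r->next_to_use >= r->next_to_clean)
                ? r->next_to_use - r->next_to_clean
                : r->count + r->next_to_use - r->next_to_clean;

        return r->count - used - 1;
    }

    int main(void)
    {
        struct demo_ring r = { .count = 8, .next_to_use = 6, .next_to_clean = 2 };

        assert(demo_desc_unused(&r) == 3); /* 8 - (6 - 2) - 1 */
        return 0;
    }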
633 void i40e_clean_tx_ring(struct i40e_ring *tx_ring) in i40e_clean_tx_ring() argument
638 if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) { in i40e_clean_tx_ring()
639 i40e_xsk_clean_tx_ring(tx_ring); in i40e_clean_tx_ring()
642 if (!tx_ring->tx_bi) in i40e_clean_tx_ring()
646 for (i = 0; i < tx_ring->count; i++) in i40e_clean_tx_ring()
647 i40e_unmap_and_free_tx_resource(tx_ring, in i40e_clean_tx_ring()
648 &tx_ring->tx_bi[i]); in i40e_clean_tx_ring()
651 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_clean_tx_ring()
652 memset(tx_ring->tx_bi, 0, bi_size); in i40e_clean_tx_ring()
655 memset(tx_ring->desc, 0, tx_ring->size); in i40e_clean_tx_ring()
657 tx_ring->next_to_use = 0; in i40e_clean_tx_ring()
658 tx_ring->next_to_clean = 0; in i40e_clean_tx_ring()
660 if (!tx_ring->netdev) in i40e_clean_tx_ring()
664 netdev_tx_reset_queue(txring_txq(tx_ring)); in i40e_clean_tx_ring()
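The i40e_clean_tx_ring() lines follow a common teardown shape: release whatever each slot still owns, zero the bookkeeping arrays and descriptor memory, and reset both indices. A simplified userspace sketch of that shape follows; names are illustrative, and the real function additionally handles XDP/xsk rings and resets the netdev byte-queue counters.

    #include <stdlib.h>
    #include <string.h>

    struct demo_tx_buffer {
        void *data;   /* stand-in for the skb / DMA mapping owned by this slot */
    };

    struct demo_ring {
        unsigned int count;
        unsigned int next_to_use;
        unsigned int next_to_clean;
        struct demo_tx_buffer *tx_bi;  /* per-slot buffer info */
        void *desc;                    /* descriptor memory */
        size_t size;                   /* size of desc in bytes */
    };

    void demo_clean_tx_ring(struct demo_ring *ring)
    {
        if (!ring->tx_bi)
            return;

        /* Free whatever each slot still owns. */
        for (unsigned int i = 0; i < ring->count; i++) {
            free(ring->tx_bi[i].data);
            ring->tx_bi[i].data = NULL;
        }

        memset(ring->tx_bi, 0, sizeof(*ring->tx_bi) * ring->count);
        memset(ring->desc, 0, ring->size);

        ring->next_to_use = 0;
        ring->next_to_clean = 0;
    }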
673 void i40e_free_tx_resources(struct i40e_ring *tx_ring) in i40e_free_tx_resources() argument
675 i40e_clean_tx_ring(tx_ring); in i40e_free_tx_resources()
676 kfree(tx_ring->tx_bi); in i40e_free_tx_resources()
677 tx_ring->tx_bi = NULL; in i40e_free_tx_resources()
679 if (tx_ring->desc) { in i40e_free_tx_resources()
680 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40e_free_tx_resources()
681 tx_ring->desc, tx_ring->dma); in i40e_free_tx_resources()
682 tx_ring->desc = NULL; in i40e_free_tx_resources()
722 struct i40e_ring *tx_ring = NULL; in i40e_detect_recover_hung() local
741 tx_ring = vsi->tx_rings[i]; in i40e_detect_recover_hung()
742 if (tx_ring && tx_ring->desc) { in i40e_detect_recover_hung()
750 packets = tx_ring->stats.packets & INT_MAX; in i40e_detect_recover_hung()
751 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { in i40e_detect_recover_hung()
752 i40e_force_wb(vsi, tx_ring->q_vector); in i40e_detect_recover_hung()
760 tx_ring->tx_stats.prev_pkt_ctr = in i40e_detect_recover_hung()
761 i40e_get_tx_pending(tx_ring, true) ? packets : -1; in i40e_detect_recover_hung()
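i40e_detect_recover_hung() compares the ring's completed-packet counter against the value snapshotted on the previous watchdog pass; if it has not moved while work is still pending, the queue is treated as stuck and a descriptor write-back is forced (i40e_force_wb in the listing). A hedged sketch of that comparison logic, with illustrative names:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct demo_ring {
        long long packets;   /* total packets completed on this ring */
        int prev_pkt_ctr;    /* snapshot from the previous pass, -1 = idle */
    };

    void demo_detect_hung(struct demo_ring *ring, bool pending_work)
    {
        int packets = (int)(ring->packets & INT_MAX);

        /* The -1 idle sentinel can never equal a masked counter, so an idle
         * ring is never reported as hung. */
        if (ring->prev_pkt_ctr == packets)
            printf("queue looks hung: force a descriptor write-back\n");

        /* Remember the counter only while work is outstanding, else mark idle. */
        ring->prev_pkt_ctr = pending_work ? packets : -1;
    }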
775 struct i40e_ring *tx_ring, int napi_budget) in i40e_clean_tx_irq() argument
777 int i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
784 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
785 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
786 i -= tx_ring->count; in i40e_clean_tx_irq()
788 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
800 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in i40e_clean_tx_irq()
813 if (ring_is_xdp(tx_ring)) in i40e_clean_tx_irq()
819 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
831 tx_ring, tx_desc, tx_buf); in i40e_clean_tx_irq()
837 i -= tx_ring->count; in i40e_clean_tx_irq()
838 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
839 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
844 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
857 i -= tx_ring->count; in i40e_clean_tx_irq()
858 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
859 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
868 i += tx_ring->count; in i40e_clean_tx_irq()
869 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
870 i40e_update_tx_stats(tx_ring, total_packets, total_bytes); in i40e_clean_tx_irq()
871 i40e_arm_wb(tx_ring, vsi, budget); in i40e_clean_tx_irq()
873 if (ring_is_xdp(tx_ring)) in i40e_clean_tx_irq()
877 netdev_tx_completed_queue(txring_txq(tx_ring), in i40e_clean_tx_irq()
881 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
882 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
887 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
888 tx_ring->queue_index) && in i40e_clean_tx_irq()
890 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
891 tx_ring->queue_index); in i40e_clean_tx_irq()
892 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
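The i40e_clean_tx_irq() lines show a distinctive index trick: the running index is biased by subtracting the ring size up front ("i -= tx_ring->count"), so the wrap test inside the hot loop is a cheap "did it hit zero" check, and the bias is removed once at the end when next_to_clean is written back. The sketch below models just that trick in userspace; the unmap/free work and the queue-wake threshold are elided.

    #include <stdio.h>

    struct demo_ring {
        int count;
        int next_to_clean;
    };

    /* Pretend to complete 'done' descriptors and advance next_to_clean. */
    void demo_clean_irq(struct demo_ring *ring, int done)
    {
        int i = ring->next_to_clean;

        i -= ring->count;                  /* bias: i is now in [-count, 0) */

        while (done--) {
            /* ... unmap / free the buffer at this slot ... */
            i++;
            if (!i)                        /* reached the end of the ring */
                i -= ring->count;          /* wrap back to the first slot */
        }

        i += ring->count;                  /* remove the bias */
        ring->next_to_clean = i;
    }

    int main(void)
    {
        struct demo_ring ring = { .count = 8, .next_to_clean = 6 };

        demo_clean_irq(&ring, 5);
        printf("next_to_clean = %d\n", ring.next_to_clean); /* 3 */
        return 0;
    }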
1296 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40e_setup_tx_descriptors() argument
1298 struct device *dev = tx_ring->dev; in i40e_setup_tx_descriptors()
1305 WARN_ON(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1306 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_setup_tx_descriptors()
1307 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_tx_descriptors()
1308 if (!tx_ring->tx_bi) in i40e_setup_tx_descriptors()
1311 u64_stats_init(&tx_ring->syncp); in i40e_setup_tx_descriptors()
1314 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40e_setup_tx_descriptors()
1318 tx_ring->size += sizeof(u32); in i40e_setup_tx_descriptors()
1319 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40e_setup_tx_descriptors()
1320 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40e_setup_tx_descriptors()
1321 &tx_ring->dma, GFP_KERNEL); in i40e_setup_tx_descriptors()
1322 if (!tx_ring->desc) { in i40e_setup_tx_descriptors()
1324 tx_ring->size); in i40e_setup_tx_descriptors()
1328 tx_ring->next_to_use = 0; in i40e_setup_tx_descriptors()
1329 tx_ring->next_to_clean = 0; in i40e_setup_tx_descriptors()
1330 tx_ring->tx_stats.prev_pkt_ctr = -1; in i40e_setup_tx_descriptors()
1334 kfree(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1335 tx_ring->tx_bi = NULL; in i40e_setup_tx_descriptors()
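i40e_setup_tx_descriptors() sizes two allocations: a software buffer array of count entries, and a descriptor area of count * descriptor size plus a trailing u32 for the head write-back word, rounded up to a 4 KiB boundary before the coherent DMA allocation. The userspace sketch below reproduces only that sizing math with calloc/aligned_alloc standing in for kzalloc/dma_alloc_coherent; struct layouts are illustrative, not the real i40e descriptors.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_ALIGN(x, a)   (((x) + (a) - 1) & ~((size_t)(a) - 1))

    struct demo_tx_desc   { uint64_t buffer_addr; uint64_t cmd_type_offset_bsz; };
    struct demo_tx_buffer { void *skb; uint64_t dma; uint32_t len; };

    int main(void)
    {
        unsigned int count = 512;

        size_t bi_size = sizeof(struct demo_tx_buffer) * count;
        size_t desc_size = count * sizeof(struct demo_tx_desc)
                         + sizeof(uint32_t);            /* head write-back word */
        desc_size = DEMO_ALIGN(desc_size, 4096);

        struct demo_tx_buffer *tx_bi = calloc(1, bi_size);
        void *desc = aligned_alloc(4096, desc_size);    /* stand-in for DMA memory */

        printf("tx_bi: %zu bytes, descriptors: %zu bytes\n", bi_size, desc_size);

        free(desc);
        free(tx_bi);
        return 0;
    }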
2667 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_atr() argument
2671 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_atr()
2691 if (!tx_ring->atr_sample_rate) in i40e_atr()
2737 tx_ring->atr_count++; in i40e_atr()
2743 (tx_ring->atr_count < tx_ring->atr_sample_rate)) in i40e_atr()
2746 tx_ring->atr_count = 0; in i40e_atr()
2749 i = tx_ring->next_to_use; in i40e_atr()
2750 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_atr()
2753 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_atr()
2755 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & in i40e_atr()
2763 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; in i40e_atr()
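i40e_atr() reuses the same FDIR descriptor path as i40e_fdir() above, but gates it behind a sampling counter: only every atr_sample_rate-th eligible packet programs a filter, and the counter resets when a sample is taken. A small sketch of that gate, with illustrative names:

    #include <stdbool.h>

    struct demo_ring {
        unsigned int atr_count;
        unsigned int atr_sample_rate;   /* 0 disables sampling entirely */
    };

    /* Returns true when this packet should program a flow-director filter. */
    bool demo_atr_should_sample(struct demo_ring *ring)
    {
        if (!ring->atr_sample_rate)
            return false;

        ring->atr_count++;
        if (ring->atr_count < ring->atr_sample_rate)
            return false;

        ring->atr_count = 0;
        return true;
    }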
2813 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags() argument
2820 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
2849 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) in i40e_tx_prepare_vlan_flags()
2995 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tsyn() argument
3010 pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_tsyn()
3041 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
3203 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
3208 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
3215 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
3218 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
3234 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40e_maybe_stop_tx() argument
3236 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3241 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40e_maybe_stop_tx()
3245 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3246 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
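__i40e_maybe_stop_tx() stops the subqueue first and only then re-reads the free-descriptor count; if a completion freed space in the meantime, the queue is immediately restarted and restart_queue is bumped. The recheck is only safe because the stop and the re-read are ordered by a memory barrier in the full function. Below is a simplified userspace model of that stop-then-recheck pattern with illustrative names and C11 atomics standing in for the kernel primitives.

    #include <stdatomic.h>

    struct demo_queue {
        atomic_bool stopped;
        atomic_uint unused_descs;
        unsigned int restart_count;
    };

    /* Returns 0 if the caller may keep queueing, -1 (busy) otherwise. */
    int demo_maybe_stop_tx(struct demo_queue *q, unsigned int needed)
    {
        atomic_store(&q->stopped, true);

        /* Full barrier so the stop is visible before we re-read the free count. */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load(&q->unused_descs) < needed)
            return -1;

        /* A completion freed space while we were stopping: undo the stop. */
        atomic_store(&q->stopped, false);
        q->restart_count++;
        return 0;
    }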
3346 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map() argument
3355 u16 i = tx_ring->next_to_use; in i40e_tx_map()
3368 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
3370 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_tx_map()
3376 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
3396 if (i == tx_ring->count) { in i40e_tx_map()
3397 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
3418 if (i == tx_ring->count) { in i40e_tx_map()
3419 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
3426 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
3429 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3432 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in i40e_tx_map()
3435 if (i == tx_ring->count) in i40e_tx_map()
3438 tx_ring->next_to_use = i; in i40e_tx_map()
3440 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40e_tx_map()
3448 desc_count |= ++tx_ring->packet_stride; in i40e_tx_map()
3453 tx_ring->packet_stride = 0; in i40e_tx_map()
3473 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { in i40e_tx_map()
3474 writel(i, tx_ring->tail); in i40e_tx_map()
3480 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
3484 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3485 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40e_tx_map()
3489 i = tx_ring->count; in i40e_tx_map()
3493 tx_ring->next_to_use = i; in i40e_tx_map()
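At the end of i40e_tx_map(), the tail register is written only when the queue just stopped or the stack indicates no further packets are pending (netdev_xmit_more), so several skbs can be posted with a single MMIO doorbell write. The sketch below models only that deferral decision in userspace; the DMA mapping of the head and fragments, and the unwind path on mapping failure, are omitted.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_ring {
        unsigned int next_to_use;
        volatile uint32_t *tail;   /* stand-in for the MMIO tail doorbell */
    };

    void demo_post_packet(struct demo_ring *ring, unsigned int last_used,
                          bool more_packets_coming)
    {
        ring->next_to_use = last_used;

        /* Defer the expensive doorbell write while the stack has more to send. */
        if (!more_packets_coming)
            *ring->tail = ring->next_to_use;
    }

    int main(void)
    {
        uint32_t fake_tail = 0;
        struct demo_ring ring = { .next_to_use = 0, .tail = &fake_tail };

        demo_post_packet(&ring, 5, true);   /* batched: no doorbell yet */
        demo_post_packet(&ring, 9, false);  /* flush: doorbell written once */
        printf("tail register = %u\n", fake_tail); /* 9 */
        return 0;
    }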
3559 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
3575 i40e_trace(xmit_frame_ring, skb, tx_ring); in i40e_xmit_frame_ring()
3584 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
3593 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40e_xmit_frame_ring()
3594 tx_ring->tx_stats.tx_busy++; in i40e_xmit_frame_ring()
3599 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
3605 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
3626 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
3630 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); in i40e_xmit_frame_ring()
3638 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
3645 i40e_atr(tx_ring, skb, tx_flags); in i40e_xmit_frame_ring()
3647 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
3654 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); in i40e_xmit_frame_ring()
3659 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_xmit_frame_ring()
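Taken together, the i40e_xmit_frame_ring() lines imply an ordering: reserve descriptor headroom up front (the "count + 4 + 1" gate leaves room for a context descriptor plus a small gap), then VLAN flags, offload setup, an optional timestamp and context descriptor, the ATR sample, and finally the DMA mapping that posts the descriptors. The skeleton below is a hedged summary of that ordering, not the i40e function; the names are placeholders.

    enum demo_tx_status { DEMO_TX_OK, DEMO_TX_BUSY, DEMO_TX_DROP };

    struct demo_ring { unsigned int unused_descs; };

    enum demo_tx_status demo_xmit_frame_ring(struct demo_ring *ring,
                                             unsigned int data_descs)
    {
        /* Reserve the data descriptors plus headroom for a context descriptor
         * and a small gap; back off with TX_BUSY while the ring is too full. */
        if (ring->unused_descs < data_descs + 4 + 1)
            return DEMO_TX_BUSY;

        /* Then, in order: record the first buffer slot for error unwinding,
         * resolve VLAN flags, set up TSO/checksum (and timestamping if
         * requested), emit a context descriptor when an offload needs one,
         * feed the ATR sampler, and DMA-map the head and fragments
         * (see the sketches above). */
        return DEMO_TX_OK;
    }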
3680 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; in i40e_lan_xmit_frame() local
3688 return i40e_xmit_frame_ring(skb, tx_ring); in i40e_lan_xmit_frame()