Lines Matching refs:tx_ring

50 static void i40e_fdir(struct i40e_ring *tx_ring,  in i40e_fdir()  argument
54 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
59 i = tx_ring->next_to_use; in i40e_fdir()
60 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_fdir()
63 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
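The matches at lines 59-63 show i40e_fdir() claiming the next Flow Director descriptor slot: it reads next_to_use, takes that descriptor, then advances the index with a wrap back to zero. Below is a minimal sketch of that claim-and-wrap idiom; demo_ring and ring_claim_slot() are illustrative stand-ins, not driver types.

#include <linux/types.h>

/* Simplified model of the claim-and-wrap idiom at lines 59-63. */
struct demo_ring {
        u16 next_to_use;        /* next free descriptor slot */
        u16 count;              /* total descriptors in the ring */
};

static u16 ring_claim_slot(struct demo_ring *ring)
{
        u16 slot = ring->next_to_use;
        u16 next = slot + 1;

        /* wrap back to slot 0 after the last descriptor is handed out,
         * mirroring "(i < tx_ring->count) ? i : 0" above */
        ring->next_to_use = (next < ring->count) ? next : 0;
        return slot;
}

The same idiom reappears in i40e_atr() (line 2137) and i40e_create_tx_ctx() (line 2595).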
120 struct i40e_ring *tx_ring; in i40e_program_fdir_filter() local
136 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
137 dev = tx_ring->dev; in i40e_program_fdir_filter()
141 if (I40E_DESC_UNUSED(tx_ring) > 1) in i40e_program_fdir_filter()
147 if (!(I40E_DESC_UNUSED(tx_ring) > 1)) in i40e_program_fdir_filter()
156 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
157 first = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
158 i40e_fdir(tx_ring, fdir_data, add); in i40e_program_fdir_filter()
161 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
162 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_program_fdir_filter()
163 tx_buf = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
165 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
190 writel(tx_ring->next_to_use, tx_ring->tail); in i40e_program_fdir_filter()
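In i40e_program_fdir_filter() the filter is written through the VSI's first TX ring (line 136). The checks at lines 141 and 147 require at least two unused descriptors before proceeding (the programming descriptor plus its data descriptor), and line 190 posts the updated next_to_use to the tail register so the hardware fetches the freshly written descriptors. Below is a sketch of the unused-slot arithmetic that I40E_DESC_UNUSED() is assumed to implement; demo_ring and ring_unused() are simplified stand-ins, not the driver's macro.

#include <linux/types.h>

/* Assumed shape of the computation behind I40E_DESC_UNUSED() (lines 141/147).
 * One slot is always kept empty so that next_to_use == next_to_clean means
 * "ring empty" rather than "ring full". */
struct demo_ring {
        u16 next_to_use;
        u16 next_to_clean;
        u16 count;
};

static u16 ring_unused(const struct demo_ring *ring)
{
        u16 ntc = ring->next_to_clean;
        u16 ntu = ring->next_to_use;

        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

With this definition, "I40E_DESC_UNUSED(tx_ring) > 1" reads simply as "at least two descriptors free".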
575 void i40e_clean_tx_ring(struct i40e_ring *tx_ring) in i40e_clean_tx_ring() argument
581 if (!tx_ring->tx_bi) in i40e_clean_tx_ring()
585 for (i = 0; i < tx_ring->count; i++) in i40e_clean_tx_ring()
586 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in i40e_clean_tx_ring()
588 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_clean_tx_ring()
589 memset(tx_ring->tx_bi, 0, bi_size); in i40e_clean_tx_ring()
592 memset(tx_ring->desc, 0, tx_ring->size); in i40e_clean_tx_ring()
594 tx_ring->next_to_use = 0; in i40e_clean_tx_ring()
595 tx_ring->next_to_clean = 0; in i40e_clean_tx_ring()
597 if (!tx_ring->netdev) in i40e_clean_tx_ring()
601 netdev_tx_reset_queue(txring_txq(tx_ring)); in i40e_clean_tx_ring()
610 void i40e_free_tx_resources(struct i40e_ring *tx_ring) in i40e_free_tx_resources() argument
612 i40e_clean_tx_ring(tx_ring); in i40e_free_tx_resources()
613 kfree(tx_ring->tx_bi); in i40e_free_tx_resources()
614 tx_ring->tx_bi = NULL; in i40e_free_tx_resources()
616 if (tx_ring->desc) { in i40e_free_tx_resources()
617 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40e_free_tx_resources()
618 tx_ring->desc, tx_ring->dma); in i40e_free_tx_resources()
619 tx_ring->desc = NULL; in i40e_free_tx_resources()
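Lines 575-601 (i40e_clean_tx_ring()) release every outstanding buffer, zero the buffer-info array and the descriptor memory, reset both ring indices, and reset the byte-queue-limit state with netdev_tx_reset_queue(); lines 610-619 (i40e_free_tx_resources()) then free the tx_bi array and return the descriptor memory with dma_free_coherent(). A condensed kernel-style sketch of that teardown order follows, with simplified stand-in types (demo_ring is not the driver's i40e_ring).

#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Stand-in types: the driver's i40e_tx_buffer also tracks the DMA address
 * and mapped length of each buffer. */
struct demo_tx_buffer {
        void *skb;
};

struct demo_ring {
        struct device *dev;
        struct demo_tx_buffer *tx_bi;
        void *desc;             /* DMA-coherent descriptor memory */
        dma_addr_t dma;
        unsigned int size;
        u16 count;
        u16 next_to_use;
        u16 next_to_clean;
};

static void demo_free_tx_resources(struct demo_ring *ring)
{
        int i;

        /* i40e_clean_tx_ring(): release every outstanding buffer, then zero
         * the bookkeeping and reset both indices */
        if (ring->tx_bi) {
                for (i = 0; i < ring->count; i++) {
                        /* the driver calls i40e_unmap_and_free_tx_resource()
                         * here to unmap DMA and free the attached skb */
                }
                memset(ring->tx_bi, 0, sizeof(*ring->tx_bi) * ring->count);
                if (ring->desc)
                        memset(ring->desc, 0, ring->size);
                ring->next_to_use = 0;
                ring->next_to_clean = 0;
        }

        /* i40e_free_tx_resources(): free the software array, then return the
         * descriptor memory to the DMA API */
        kfree(ring->tx_bi);
        ring->tx_bi = NULL;
        if (ring->desc) {
                dma_free_coherent(ring->dev, ring->size,
                                  ring->desc, ring->dma);
                ring->desc = NULL;
        }
}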
659 struct i40e_ring *tx_ring, int napi_budget) in i40e_clean_tx_irq() argument
661 u16 i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
668 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
669 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
670 i -= tx_ring->count; in i40e_clean_tx_irq()
672 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
699 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
715 i -= tx_ring->count; in i40e_clean_tx_irq()
716 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
717 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
722 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
735 i -= tx_ring->count; in i40e_clean_tx_irq()
736 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
737 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
746 i += tx_ring->count; in i40e_clean_tx_irq()
747 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
748 u64_stats_update_begin(&tx_ring->syncp); in i40e_clean_tx_irq()
749 tx_ring->stats.bytes += total_bytes; in i40e_clean_tx_irq()
750 tx_ring->stats.packets += total_packets; in i40e_clean_tx_irq()
751 u64_stats_update_end(&tx_ring->syncp); in i40e_clean_tx_irq()
752 tx_ring->q_vector->tx.total_bytes += total_bytes; in i40e_clean_tx_irq()
753 tx_ring->q_vector->tx.total_packets += total_packets; in i40e_clean_tx_irq()
755 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { in i40e_clean_tx_irq()
761 unsigned int j = i40e_get_tx_pending(tx_ring, false); in i40e_clean_tx_irq()
766 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) in i40e_clean_tx_irq()
767 tx_ring->arm_wb = true; in i40e_clean_tx_irq()
771 netdev_tx_completed_queue(txring_txq(tx_ring), in i40e_clean_tx_irq()
775 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
776 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
781 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
782 tx_ring->queue_index) && in i40e_clean_tx_irq()
784 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
785 tx_ring->queue_index); in i40e_clean_tx_irq()
786 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
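The completion path in i40e_clean_tx_irq() reads the hardware head from the write-back area via i40e_get_head() (line 672) and walks the ring with an index biased by -count (lines 670, 715, 735, 746), so the wrap test is simply "!i" instead of a compare against count on every step. Afterwards it publishes the byte/packet totals under u64_stats_update_begin()/end() (lines 748-751), feeds the interrupt-moderation accounting (lines 752-753), reports completions to the stack with netdev_tx_completed_queue() (line 771), and wakes the subqueue once at least TX_WAKE_THRESHOLD descriptors are free again (lines 775-786). Below is a small model of the biased-index walk, with illustrative names only.

#include <linux/types.h>

/* Model of the biased-index loop: the running index starts at
 * next_to_clean - count, so reaching the end of the ring shows up as i == 0. */
static u16 demo_advance_next_to_clean(u16 next_to_clean, u16 count,
                                      unsigned int completed)
{
        u16 i = next_to_clean;

        i -= count;                     /* bias the index by -count */
        while (completed--) {
                /* ...unmap and free one completed descriptor here... */
                i++;
                if (!i)                 /* walked off the end of the ring */
                        i -= count;     /* re-bias; the driver also resets its
                                         * tx_buf/tx_desc pointers to slot 0 */
        }
        i += count;                     /* undo the bias */
        return i;                       /* new next_to_clean */
}

For example, with count = 512, next_to_clean = 510 and 4 completed descriptors, the function returns 2, the same as (510 + 4) % 512.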
991 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40e_setup_tx_descriptors() argument
993 struct device *dev = tx_ring->dev; in i40e_setup_tx_descriptors()
1000 WARN_ON(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1001 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_setup_tx_descriptors()
1002 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_tx_descriptors()
1003 if (!tx_ring->tx_bi) in i40e_setup_tx_descriptors()
1007 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40e_setup_tx_descriptors()
1011 tx_ring->size += sizeof(u32); in i40e_setup_tx_descriptors()
1012 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40e_setup_tx_descriptors()
1013 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40e_setup_tx_descriptors()
1014 &tx_ring->dma, GFP_KERNEL); in i40e_setup_tx_descriptors()
1015 if (!tx_ring->desc) { in i40e_setup_tx_descriptors()
1017 tx_ring->size); in i40e_setup_tx_descriptors()
1021 tx_ring->next_to_use = 0; in i40e_setup_tx_descriptors()
1022 tx_ring->next_to_clean = 0; in i40e_setup_tx_descriptors()
1026 kfree(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1027 tx_ring->tx_bi = NULL; in i40e_setup_tx_descriptors()
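i40e_setup_tx_descriptors() allocates the two halves of a TX ring: the software buffer-info array (kzalloc, lines 1001-1003) and the DMA-coherent descriptor area (lines 1007-1017), where line 1011 adds room for one extra u32 (used by the driver for head write-back) and line 1012 rounds the whole area up to a 4 KiB boundary; on failure the buffer array is released again (lines 1026-1027). A condensed kernel-style sketch of that order follows; the types are stand-ins, and demo_tx_desc only approximates the 16-byte hardware descriptor.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

/* Stand-in types only. */
struct demo_tx_desc {
        __le64 buffer_addr;
        __le64 cmd_type_offset_bsz;
};

struct demo_tx_buffer {
        void *skb;
};

struct demo_ring {
        struct device *dev;
        struct demo_tx_buffer *tx_bi;
        void *desc;
        dma_addr_t dma;
        unsigned int size;
        u16 count;
        u16 next_to_use;
        u16 next_to_clean;
};

static int demo_setup_tx_descriptors(struct demo_ring *ring)
{
        /* software-side bookkeeping: one entry per hardware descriptor */
        ring->tx_bi = kzalloc(sizeof(*ring->tx_bi) * ring->count, GFP_KERNEL);
        if (!ring->tx_bi)
                return -ENOMEM;

        /* hardware descriptors plus one u32 for head write-back, rounded up
         * to a 4 KiB boundary (lines 1007-1012) */
        ring->size = ring->count * sizeof(struct demo_tx_desc) + sizeof(u32);
        ring->size = ALIGN(ring->size, 4096);
        ring->desc = dma_alloc_coherent(ring->dev, ring->size,
                                        &ring->dma, GFP_KERNEL);
        if (!ring->desc) {
                kfree(ring->tx_bi);
                ring->tx_bi = NULL;
                return -ENOMEM;
        }

        ring->next_to_use = 0;
        ring->next_to_clean = 0;
        return 0;
}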
2056 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_atr() argument
2060 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_atr()
2080 if (!tx_ring->atr_sample_rate) in i40e_atr()
2121 tx_ring->atr_count++; in i40e_atr()
2127 (tx_ring->atr_count < tx_ring->atr_sample_rate)) in i40e_atr()
2130 tx_ring->atr_count = 0; in i40e_atr()
2133 i = tx_ring->next_to_use; in i40e_atr()
2134 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_atr()
2137 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_atr()
2139 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & in i40e_atr()
2147 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; in i40e_atr()
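i40e_atr() drives the Application Targeted Routing side-band filters from the transmit hot path: line 2080 bails out when sampling is disabled on the ring, lines 2121-2130 count packets and only fall through once atr_count reaches atr_sample_rate, and lines 2133-2147 then claim a Flow Director descriptor (the same claim-and-wrap idiom as in i40e_fdir()) keyed to the transmit queue index and the VSI id. Below is a minimal sketch of the rate gate; the real function additionally forces a sample on TCP connection-state packets, which is omitted here, and the names are illustrative only.

#include <linux/types.h>

/* Simplified model of the sample-rate gate at lines 2080-2130. */
struct demo_atr_state {
        u16 atr_count;
        u16 atr_sample_rate;    /* 0 disables sampling */
};

static bool demo_atr_should_sample(struct demo_atr_state *st)
{
        if (!st->atr_sample_rate)
                return false;           /* ATR disabled on this ring */

        if (++st->atr_count < st->atr_sample_rate)
                return false;           /* not this packet */

        st->atr_count = 0;
        return true;                    /* program a filter for this flow */
}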
2199 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags() argument
2203 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags()
2211 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
2240 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) in i40e_tx_prepare_vlan_flags()
2374 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tsyn() argument
2389 pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_tsyn()
2418 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
2580 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
2585 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
2592 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
2595 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
2611 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40e_maybe_stop_tx() argument
2613 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
2618 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40e_maybe_stop_tx()
2622 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
2623 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
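__i40e_maybe_stop_tx() is the slow path taken when the ring looks too full for the next packet: it stops the subqueue first, then re-checks the free-descriptor count, and restarts the queue (counted in restart_queue, line 2623) if the completion path freed space in the meantime. Stopping before the re-check closes the race with i40e_clean_tx_irq() running on another CPU: either this path sees the newly freed descriptors, or the cleanup path sees the stopped queue and wakes it (lines 775-786). A kernel-style sketch of the pattern follows; demo_ring and ring_unused() are stand-ins for i40e_ring and I40E_DESC_UNUSED(), and the barrier placement follows the usual stop/wake convention rather than the exact driver source.

#include <linux/netdevice.h>

/* Sketch of the stop/re-check/restart pattern at lines 2611-2623. */
struct demo_ring {
        u16 next_to_use;
        u16 next_to_clean;
        u16 count;
};

static u16 ring_unused(const struct demo_ring *ring)
{
        u16 ntc = ring->next_to_clean;
        u16 ntu = ring->next_to_use;

        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

static int demo_maybe_stop_tx(struct net_device *netdev, u16 queue_index,
                              struct demo_ring *ring, int needed)
{
        netif_stop_subqueue(netdev, queue_index);

        /* make the stopped state visible before re-reading the ring indices;
         * pairs with the completion path that frees descriptors and wakes us */
        smp_mb();

        if (ring_unused(ring) < needed)
                return -EBUSY;          /* still full: stay stopped */

        /* the cleaner freed space between the caller's check and our stop */
        netif_start_subqueue(netdev, queue_index);
        return 0;
}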
2722 inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map() argument
2726 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map()
2736 u16 i = tx_ring->next_to_use; in i40e_tx_map()
2761 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
2763 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_tx_map()
2769 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
2789 if (i == tx_ring->count) { in i40e_tx_map()
2790 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
2811 if (i == tx_ring->count) { in i40e_tx_map()
2812 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
2819 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
2822 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
2829 if (i == tx_ring->count) in i40e_tx_map()
2832 tx_ring->next_to_use = i; in i40e_tx_map()
2834 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in i40e_tx_map()
2835 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40e_tx_map()
2859 !netif_xmit_stopped(txring_txq(tx_ring))) { in i40e_tx_map()
2860 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; in i40e_tx_map()
2863 !netif_xmit_stopped(txring_txq(tx_ring)) && in i40e_tx_map()
2864 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && in i40e_tx_map()
2865 (tx_ring->packet_stride < WB_STRIDE) && in i40e_tx_map()
2867 tx_ring->packet_stride++; in i40e_tx_map()
2869 tx_ring->packet_stride = 0; in i40e_tx_map()
2870 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; in i40e_tx_map()
2874 tx_ring->packet_stride = 0; in i40e_tx_map()
2892 writel(i, tx_ring->tail); in i40e_tx_map()
2897 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
2901 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
2902 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40e_tx_map()
2906 i = tx_ring->count; in i40e_tx_map()
2910 tx_ring->next_to_use = i; in i40e_tx_map()
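i40e_tx_map() maps the skb head with dma_map_single() (line 2761) and each fragment with skb_frag_dma_map() (line 2819), filling data descriptors and wrapping the index back to slot 0 at the end of the ring (lines 2789-2831); it then accounts the bytes with netdev_tx_sent_queue() (line 2834), reserves headroom via i40e_maybe_stop_tx() (line 2835), and finally rings the doorbell by writing the new index to the tail register (line 2892; the full source issues a write memory barrier first so the descriptor contents are visible to the device). If any mapping fails, lines 2897-2910 walk backwards from the failing slot, releasing everything already mapped for this packet, and rewind next_to_use. Below is a sketch of that backward unwind; demo_ring and release_slot() are hypothetical stand-ins for i40e_ring and i40e_unmap_and_free_tx_resource().

#include <linux/types.h>

struct demo_ring {
        u16 next_to_use;
        u16 count;
};

/* stand-in: the driver unmaps the DMA address and frees the skb that
 * tx_bi[slot] still references */
static void release_slot(struct demo_ring *ring, u16 slot)
{
}

/* Backward unwind after a DMA mapping error (lines 2897-2910): free every
 * slot used for this packet, wrapping from slot 0 back to the last slot,
 * then rewind next_to_use to the packet's first slot. */
static void demo_unwind_tx_map(struct demo_ring *ring, u16 failed_slot,
                               u16 first_slot)
{
        u16 i = failed_slot;

        for (;;) {
                release_slot(ring, i);
                if (i == first_slot)
                        break;
                if (i == 0)
                        i = ring->count;        /* wrap backwards */
                i--;
        }

        ring->next_to_use = i;
}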
2921 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
2942 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
2951 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40e_xmit_frame_ring()
2952 tx_ring->tx_stats.tx_busy++; in i40e_xmit_frame_ring()
2957 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
2964 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
2981 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
2985 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); in i40e_xmit_frame_ring()
2995 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
3002 i40e_atr(tx_ring, skb, tx_flags); in i40e_xmit_frame_ring()
3004 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
3025 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; in i40e_lan_xmit_frame() local
3033 return i40e_xmit_frame_ring(skb, tx_ring); in i40e_lan_xmit_frame()
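The entry points tie the pieces together: i40e_lan_xmit_frame() selects the TX ring that matches the skb's queue mapping (line 3025) and calls i40e_xmit_frame_ring(), which counts the descriptors the packet needs (recording any forced linearization in tx_linearize, line 2942), reserves those descriptors plus a small cushion with i40e_maybe_stop_tx() (the "+ 4 + 1" at line 2951), prepares VLAN flags, checksum/TSO/timestamp offloads and the optional context descriptor, feeds i40e_atr(), and hands everything to i40e_tx_map(). The ring selection itself is trivial, as the miniature sketch below shows; demo_vsi and demo_pick_tx_ring() are illustrative names only.

#include <linux/skbuff.h>

struct demo_ring;                       /* stands in for struct i40e_ring */

struct demo_vsi {
        struct demo_ring **tx_rings;    /* one TX ring per netdev queue */
};

/* Mirror of line 3025: the stack already chose the queue, so transmit just
 * indexes the VSI's ring array with skb->queue_mapping. */
static struct demo_ring *demo_pick_tx_ring(const struct demo_vsi *vsi,
                                           const struct sk_buff *skb)
{
        return vsi->tx_rings[skb_get_queue_mapping(skb)];
}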