Lines Matching refs:tx_ring

80 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)  in i40evf_clean_tx_ring()  argument
86 if (!tx_ring->tx_bi) in i40evf_clean_tx_ring()
90 for (i = 0; i < tx_ring->count; i++) in i40evf_clean_tx_ring()
91 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in i40evf_clean_tx_ring()
93 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40evf_clean_tx_ring()
94 memset(tx_ring->tx_bi, 0, bi_size); in i40evf_clean_tx_ring()
97 memset(tx_ring->desc, 0, tx_ring->size); in i40evf_clean_tx_ring()
99 tx_ring->next_to_use = 0; in i40evf_clean_tx_ring()
100 tx_ring->next_to_clean = 0; in i40evf_clean_tx_ring()
102 if (!tx_ring->netdev) in i40evf_clean_tx_ring()
106 netdev_tx_reset_queue(txring_txq(tx_ring)); in i40evf_clean_tx_ring()
115 void i40evf_free_tx_resources(struct i40e_ring *tx_ring) in i40evf_free_tx_resources() argument
117 i40evf_clean_tx_ring(tx_ring); in i40evf_free_tx_resources()
118 kfree(tx_ring->tx_bi); in i40evf_free_tx_resources()
119 tx_ring->tx_bi = NULL; in i40evf_free_tx_resources()
121 if (tx_ring->desc) { in i40evf_free_tx_resources()
122 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40evf_free_tx_resources()
123 tx_ring->desc, tx_ring->dma); in i40evf_free_tx_resources()
124 tx_ring->desc = NULL; in i40evf_free_tx_resources()
164 struct i40e_ring *tx_ring, int napi_budget) in i40e_clean_tx_irq() argument
166 u16 i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
173 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
174 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
175 i -= tx_ring->count; in i40e_clean_tx_irq()
177 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
204 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
220 i -= tx_ring->count; in i40e_clean_tx_irq()
221 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
222 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
227 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
240 i -= tx_ring->count; in i40e_clean_tx_irq()
241 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
242 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
251 i += tx_ring->count; in i40e_clean_tx_irq()
252 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
253 u64_stats_update_begin(&tx_ring->syncp); in i40e_clean_tx_irq()
254 tx_ring->stats.bytes += total_bytes; in i40e_clean_tx_irq()
255 tx_ring->stats.packets += total_packets; in i40e_clean_tx_irq()
256 u64_stats_update_end(&tx_ring->syncp); in i40e_clean_tx_irq()
257 tx_ring->q_vector->tx.total_bytes += total_bytes; in i40e_clean_tx_irq()
258 tx_ring->q_vector->tx.total_packets += total_packets; in i40e_clean_tx_irq()
260 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { in i40e_clean_tx_irq()
266 unsigned int j = i40evf_get_tx_pending(tx_ring, false); in i40e_clean_tx_irq()
271 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) in i40e_clean_tx_irq()
272 tx_ring->arm_wb = true; in i40e_clean_tx_irq()
276 netdev_tx_completed_queue(txring_txq(tx_ring), in i40e_clean_tx_irq()
280 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
281 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
286 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
287 tx_ring->queue_index) && in i40e_clean_tx_irq()
289 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
290 tx_ring->queue_index); in i40e_clean_tx_irq()
291 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
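
The i40e_clean_tx_irq() matches above show a recurring idiom: the walk index i is biased by -tx_ring->count right after it is loaded, so the wrap test inside the loop collapses to if (!i), and the real next_to_clean is only recovered at the end by adding count back. A minimal user-space model of that idiom is sketched below; the ring size, start index and step count are made-up values, and nothing in it is driver code.

    /* Standalone model of the offset-by-count index idiom used in
     * i40e_clean_tx_irq() above.  Ring size, start index and step count
     * are made-up values; nothing here is driver code.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define RING_COUNT 8u

    int main(void)
    {
        uint16_t next_to_clean = 5;        /* hypothetical starting slot */
        uint16_t i = next_to_clean;
        int step;

        i -= RING_COUNT;                   /* bias i; as a u16 it wraps "negative" */

        for (step = 0; step < 6; step++) {
            /* slot currently being cleaned: un-bias just for the printout */
            printf("cleaning slot %u\n", (unsigned)(uint16_t)(i + RING_COUNT));

            i++;
            if (!i) {                      /* i hit 0: walked off the ring end */
                i -= RING_COUNT;           /* re-bias; real index is 0 again */
                /* the driver also resets tx_buf and tx_desc to slot 0 here */
            }
        }

        i += RING_COUNT;                   /* un-bias before publishing the index */
        next_to_clean = i;
        printf("next_to_clean = %u\n", (unsigned)next_to_clean);   /* prints 3 */
        return 0;
    }
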
450 int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40evf_setup_tx_descriptors() argument
452 struct device *dev = tx_ring->dev; in i40evf_setup_tx_descriptors()
459 WARN_ON(tx_ring->tx_bi); in i40evf_setup_tx_descriptors()
460 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40evf_setup_tx_descriptors()
461 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40evf_setup_tx_descriptors()
462 if (!tx_ring->tx_bi) in i40evf_setup_tx_descriptors()
466 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40evf_setup_tx_descriptors()
470 tx_ring->size += sizeof(u32); in i40evf_setup_tx_descriptors()
471 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40evf_setup_tx_descriptors()
472 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40evf_setup_tx_descriptors()
473 &tx_ring->dma, GFP_KERNEL); in i40evf_setup_tx_descriptors()
474 if (!tx_ring->desc) { in i40evf_setup_tx_descriptors()
476 tx_ring->size); in i40evf_setup_tx_descriptors()
480 tx_ring->next_to_use = 0; in i40evf_setup_tx_descriptors()
481 tx_ring->next_to_clean = 0; in i40evf_setup_tx_descriptors()
485 kfree(tx_ring->tx_bi); in i40evf_setup_tx_descriptors()
486 tx_ring->tx_bi = NULL; in i40evf_setup_tx_descriptors()
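
The i40evf_setup_tx_descriptors() matches above size two allocations: a zeroed array of per-slot software buffers (tx_bi), and a DMA-coherent descriptor ring of count hardware descriptors plus a trailing u32 for head write-back, rounded up to a 4 KiB boundary. The small calculation below models that sizing; the 16-byte hardware descriptor size is an assumption (the i40e Tx descriptor is two 64-bit words), and the rest is plain arithmetic, not driver code.

    /* Standalone model of the ring-size arithmetic in
     * i40evf_setup_tx_descriptors() above.  HW_TX_DESC_SIZE is assumed
     * (two 64-bit words per Tx descriptor); nothing here is driver code.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define HW_TX_DESC_SIZE 16u    /* assumed: 2 x __le64 per descriptor */
    #define ALIGN_UP(x, a)  (((x) + ((size_t)(a) - 1)) & ~((size_t)(a) - 1))

    static size_t tx_ring_bytes(unsigned int count)
    {
        size_t size = (size_t)count * HW_TX_DESC_SIZE;

        size += sizeof(uint32_t);          /* room for the head write-back word */
        return ALIGN_UP(size, 4096u);      /* round up to a 4 KiB boundary */
    }

    int main(void)
    {
        unsigned int count = 512;          /* hypothetical ring length */

        /* 512 * 16 = 8192, + 4 = 8196, aligned up -> 12288 */
        printf("descriptor area: %zu bytes\n", tx_ring_bytes(count));
        return 0;
    }
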
1495 struct i40e_ring *tx_ring, in i40evf_tx_prepare_vlan_flags() argument
1502 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40evf_tx_prepare_vlan_flags()
1643 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
1805 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
1810 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
1817 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
1820 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
1920 int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40evf_maybe_stop_tx() argument
1922 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40evf_maybe_stop_tx()
1927 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40evf_maybe_stop_tx()
1931 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40evf_maybe_stop_tx()
1932 ++tx_ring->tx_stats.restart_queue; in __i40evf_maybe_stop_tx()
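
Both the wake test in i40e_clean_tx_irq() and __i40evf_maybe_stop_tx() above hinge on I40E_DESC_UNUSED(), the number of free slots between next_to_use and next_to_clean minus one reserved slot. The sketch below models that accounting and the stop / barrier / re-check / restart sequence in plain user-space C; the formula follows the Intel driver family's macro, the structs are made up, and the memory barrier is only hinted at in a comment.

    /* Standalone model of the free-slot accounting behind
     * I40E_DESC_UNUSED() and __i40evf_maybe_stop_tx() above.
     * The struct and queue stubs are illustrative only.
     */
    #include <stdio.h>
    #include <stdbool.h>

    struct ring {
        unsigned int count;
        unsigned int next_to_use;      /* producer index */
        unsigned int next_to_clean;    /* consumer index */
        bool stopped;
    };

    /* Free descriptors; one slot is always kept unused so that
     * next_to_use == next_to_clean unambiguously means "empty".
     */
    static unsigned int desc_unused(const struct ring *r)
    {
        return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
               r->next_to_clean - r->next_to_use - 1;
    }

    static int maybe_stop_tx(struct ring *r, unsigned int needed)
    {
        if (desc_unused(r) >= needed)
            return 0;

        r->stopped = true;             /* netif_stop_subqueue() in the driver */
        /* the driver issues a memory barrier here, then re-checks, because
         * the cleanup path may have freed slots after the first test
         */
        if (desc_unused(r) < needed)
            return -1;                 /* -EBUSY: queue stays stopped */

        r->stopped = false;            /* netif_start_subqueue() + restart_queue++ */
        return 0;
    }

    int main(void)
    {
        struct ring r = { .count = 8, .next_to_use = 6, .next_to_clean = 2 };

        printf("unused = %u\n", desc_unused(&r));     /* 8 + 2 - 6 - 1 = 3 */
        printf("stop?  %d\n", maybe_stop_tx(&r, 5));  /* not enough room: -1 */
        return 0;
    }
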
1946 static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40evf_tx_map() argument
1955 u16 i = tx_ring->next_to_use; in i40evf_tx_map()
1980 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40evf_tx_map()
1982 tx_desc = I40E_TX_DESC(tx_ring, i); in i40evf_tx_map()
1988 if (dma_mapping_error(tx_ring->dev, dma)) in i40evf_tx_map()
2008 if (i == tx_ring->count) { in i40evf_tx_map()
2009 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40evf_tx_map()
2030 if (i == tx_ring->count) { in i40evf_tx_map()
2031 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40evf_tx_map()
2038 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40evf_tx_map()
2041 tx_bi = &tx_ring->tx_bi[i]; in i40evf_tx_map()
2048 if (i == tx_ring->count) in i40evf_tx_map()
2051 tx_ring->next_to_use = i; in i40evf_tx_map()
2053 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in i40evf_tx_map()
2054 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40evf_tx_map()
2078 !netif_xmit_stopped(txring_txq(tx_ring))) { in i40evf_tx_map()
2079 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; in i40evf_tx_map()
2082 !netif_xmit_stopped(txring_txq(tx_ring)) && in i40evf_tx_map()
2083 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && in i40evf_tx_map()
2084 (tx_ring->packet_stride < WB_STRIDE) && in i40evf_tx_map()
2086 tx_ring->packet_stride++; in i40evf_tx_map()
2088 tx_ring->packet_stride = 0; in i40evf_tx_map()
2089 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; in i40evf_tx_map()
2093 tx_ring->packet_stride = 0; in i40evf_tx_map()
2111 writel(i, tx_ring->tail); in i40evf_tx_map()
2116 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40evf_tx_map()
2120 tx_bi = &tx_ring->tx_bi[i]; in i40evf_tx_map()
2121 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40evf_tx_map()
2125 i = tx_ring->count; in i40evf_tx_map()
2129 tx_ring->next_to_use = i; in i40evf_tx_map()
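
The dma_error path at the end of i40evf_tx_map() above walks backwards from the failing slot, releasing every buffer already set up for the frame, stops once it reaches the frame's first buffer, and rewinds next_to_use to that slot. The standalone model below mirrors that unwind; the ring contents and the release step are placeholders, not driver code.

    /* Standalone model of the dma_error unwind in i40evf_tx_map() above:
     * walk backwards from the failing slot to the frame's first buffer,
     * releasing each one, then rewind next_to_use.  Illustrative only.
     */
    #include <stdio.h>

    #define RING_COUNT 8u

    struct tx_buffer { int in_use; };

    struct ring {
        struct tx_buffer bi[RING_COUNT];
        unsigned int next_to_use;
    };

    static void unmap_and_free(struct ring *r, struct tx_buffer *b)
    {
        b->in_use = 0;                     /* stands in for dma_unmap_*() + free */
        printf("released slot %ld\n", (long)(b - r->bi));
    }

    static void unwind(struct ring *r, unsigned int i, struct tx_buffer *first)
    {
        for (;;) {
            struct tx_buffer *b = &r->bi[i];

            unmap_and_free(r, b);
            if (b == first)                /* reached the frame's first buffer */
                break;
            if (i == 0)
                i = RING_COUNT;            /* wrap backwards past slot 0 */
            i--;
        }
        r->next_to_use = i;                /* the frame's slots are reusable again */
    }

    int main(void)
    {
        struct ring r = { .next_to_use = 6 };
        struct tx_buffer *first = &r.bi[6];

        /* pretend slots 6, 7 and 0 were mapped before slot 1 failed */
        unwind(&r, 1, first);
        printf("next_to_use = %u\n", r.next_to_use);   /* back to 6 */
        return 0;
    }
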
2140 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
2160 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
2169 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40e_xmit_frame_ring()
2170 tx_ring->tx_stats.tx_busy++; in i40e_xmit_frame_ring()
2175 if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
2182 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
2199 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
2208 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
2211 i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
2231 struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; in i40evf_xmit_frame() local
2243 return i40e_xmit_frame_ring(skb, tx_ring); in i40evf_xmit_frame()
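
The last two matches show the transmit entry path: i40evf_xmit_frame() picks adapter->tx_rings[skb->queue_mapping] and calls i40e_xmit_frame_ring(), which refuses the frame (bumping tx_busy) unless there is room for the frame's descriptors plus a small reserve, the "+ 4 + 1" presumably covering a descriptor gap and one context descriptor. The sketch below models only that admission check, with made-up structures and numbers.

    /* Standalone model of the admission check in i40e_xmit_frame_ring()
     * above.  The struct names, the reserve of 4 + 1 descriptors and the
     * fake frames are illustrative only.
     */
    #include <stdio.h>

    #define NUM_QUEUES  4
    #define TX_RESERVE  (4 + 1)    /* assumed: descriptor gap + context descriptor */

    struct ring {
        unsigned int unused;       /* stands in for I40E_DESC_UNUSED(tx_ring) */
        unsigned long tx_busy;     /* tx_stats.tx_busy in the driver */
    };

    struct adapter {
        struct ring tx_rings[NUM_QUEUES];
    };

    /* Returns 0 if the frame may be queued, 1 for "busy, try again later". */
    static int xmit_frame(struct adapter *a, unsigned int queue_mapping,
                          unsigned int frame_descs)
    {
        struct ring *tx_ring = &a->tx_rings[queue_mapping];

        if (tx_ring->unused < frame_descs + TX_RESERVE) {
            tx_ring->tx_busy++;
            return 1;              /* NETDEV_TX_BUSY in the driver */
        }

        /* ... VLAN flags, checksum/TSO setup, the context descriptor and
         * i40evf_tx_map() would follow here in the real driver ...
         */
        return 0;
    }

    int main(void)
    {
        struct adapter a = { .tx_rings[2] = { .unused = 9 } };

        printf("frame of 3 descs on queue 2: %s\n",
               xmit_frame(&a, 2, 3) ? "busy" : "queued");
        printf("frame of 8 descs on queue 2: %s\n",
               xmit_frame(&a, 2, 8) ? "busy" : "queued");
        return 0;
    }
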