
Lines Matching refs:tx_ring

79 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)  in i40evf_clean_tx_ring()  argument
85 if (!tx_ring->tx_bi) in i40evf_clean_tx_ring()
89 for (i = 0; i < tx_ring->count; i++) in i40evf_clean_tx_ring()
90 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in i40evf_clean_tx_ring()
92 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40evf_clean_tx_ring()
93 memset(tx_ring->tx_bi, 0, bi_size); in i40evf_clean_tx_ring()
96 memset(tx_ring->desc, 0, tx_ring->size); in i40evf_clean_tx_ring()
98 tx_ring->next_to_use = 0; in i40evf_clean_tx_ring()
99 tx_ring->next_to_clean = 0; in i40evf_clean_tx_ring()
101 if (!tx_ring->netdev) in i40evf_clean_tx_ring()
105 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in i40evf_clean_tx_ring()
106 tx_ring->queue_index)); in i40evf_clean_tx_ring()
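The listing above only shows the statements in i40evf_clean_tx_ring() that reference tx_ring; the control flow between them is not part of the match output. As a rough guide to how the fragments fit together, the sketch below reconstructs the likely shape of the function (the local variable declarations, early returns, and comments are assumptions, not taken from the listing):

        void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
        {
                unsigned long bi_size;
                u16 i;

                /* nothing to clean if the buffer-info array was never allocated */
                if (!tx_ring->tx_bi)
                        return;

                /* unmap DMA and free any skb still attached to each buffer */
                for (i = 0; i < tx_ring->count; i++)
                        i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

                /* zero the software buffer-info array and the descriptor ring */
                bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
                memset(tx_ring->tx_bi, 0, bi_size);
                memset(tx_ring->desc, 0, tx_ring->size);

                tx_ring->next_to_use = 0;
                tx_ring->next_to_clean = 0;

                if (!tx_ring->netdev)
                        return;

                /* reset the byte-queue-limit accounting for this Tx queue */
                netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                          tx_ring->queue_index));
        }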
115 void i40evf_free_tx_resources(struct i40e_ring *tx_ring) in i40evf_free_tx_resources() argument
117 i40evf_clean_tx_ring(tx_ring); in i40evf_free_tx_resources()
118 kfree(tx_ring->tx_bi); in i40evf_free_tx_resources()
119 tx_ring->tx_bi = NULL; in i40evf_free_tx_resources()
121 if (tx_ring->desc) { in i40evf_free_tx_resources()
122 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40evf_free_tx_resources()
123 tx_ring->desc, tx_ring->dma); in i40evf_free_tx_resources()
124 tx_ring->desc = NULL; in i40evf_free_tx_resources()
147 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) in i40e_check_tx_hang() argument
149 u32 tx_pending = i40e_get_tx_pending(tx_ring); in i40e_check_tx_hang()
152 clear_check_for_tx_hang(tx_ring); in i40e_check_tx_hang()
165 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && in i40e_check_tx_hang()
169 &tx_ring->state); in i40e_check_tx_hang()
170 } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || in i40e_check_tx_hang()
174 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; in i40e_check_tx_hang()
175 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); in i40e_check_tx_hang()
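The i40e_check_tx_hang() fragments show a two-strike scheme: a hang is only reported when the ring still has pending work and the completed-packet counter has not moved since the previous check. A plausible reconstruction, with the return-value handling filled in as an assumption based on that pattern:

        static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
        {
                u32 tx_pending = i40e_get_tx_pending(tx_ring);
                bool ret = false;

                clear_check_for_tx_hang(tx_ring);

                if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
                    tx_pending) {
                        /* no progress since the last check: arm the detector and
                         * report a hang only if it was already armed (second strike)
                         */
                        ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                               &tx_ring->state);
                } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
                           !tx_pending) {
                        /* progress was made (or nothing is pending): record the new
                         * completion count and disarm the detector
                         */
                        tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
                        clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
                }

                return ret;
        }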
188 static inline u32 i40e_get_head(struct i40e_ring *tx_ring) in i40e_get_head() argument
190 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; in i40e_get_head()
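Only the pointer arithmetic of i40e_get_head() appears in the listing: the head write-back slot lives immediately after the last descriptor, which is why i40evf_setup_tx_descriptors() further down pads the ring size by sizeof(u32). Presumably the function simply dereferences that slot, roughly:

        static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
        {
                /* the head write-back location sits just past the last descriptor */
                void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

                return le32_to_cpu(*(volatile __le32 *)head);
        }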
202 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) in i40e_clean_tx_irq() argument
204 u16 i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
211 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
212 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
213 i -= tx_ring->count; in i40e_clean_tx_irq()
215 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
242 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
258 i -= tx_ring->count; in i40e_clean_tx_irq()
259 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
260 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
265 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
278 i -= tx_ring->count; in i40e_clean_tx_irq()
279 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
280 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
287 i += tx_ring->count; in i40e_clean_tx_irq()
288 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
289 u64_stats_update_begin(&tx_ring->syncp); in i40e_clean_tx_irq()
290 tx_ring->stats.bytes += total_bytes; in i40e_clean_tx_irq()
291 tx_ring->stats.packets += total_packets; in i40e_clean_tx_irq()
292 u64_stats_update_end(&tx_ring->syncp); in i40e_clean_tx_irq()
293 tx_ring->q_vector->tx.total_bytes += total_bytes; in i40e_clean_tx_irq()
294 tx_ring->q_vector->tx.total_packets += total_packets; in i40e_clean_tx_irq()
296 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { in i40e_clean_tx_irq()
298 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" in i40e_clean_tx_irq()
303 tx_ring->vsi->seid, in i40e_clean_tx_irq()
304 tx_ring->queue_index, in i40e_clean_tx_irq()
305 tx_ring->next_to_use, i); in i40e_clean_tx_irq()
306 dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" in i40e_clean_tx_irq()
309 tx_ring->tx_bi[i].time_stamp, jiffies); in i40e_clean_tx_irq()
311 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in i40e_clean_tx_irq()
313 dev_info(tx_ring->dev, in i40e_clean_tx_irq()
315 tx_ring->queue_index); in i40e_clean_tx_irq()
317 tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); in i40e_clean_tx_irq()
323 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, in i40e_clean_tx_irq()
324 tx_ring->queue_index), in i40e_clean_tx_irq()
328 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
329 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
334 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
335 tx_ring->queue_index) && in i40e_clean_tx_irq()
336 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { in i40e_clean_tx_irq()
337 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
338 tx_ring->queue_index); in i40e_clean_tx_irq()
339 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
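i40e_clean_tx_irq() is the Tx completion path. The matched lines outline its flow: walk the ring from next_to_clean up to the hardware head, unmap each buffer (dma_unmap_single() for the skb head, dma_unmap_page() for fragments), accumulate byte/packet totals under u64_stats_update_begin()/end(), feed those totals to netdev_tx_completed_queue(), and finally either report a Tx hang or wake the subqueue once enough descriptors are free. A heavily simplified skeleton of the cleanup loop, with the per-packet work reduced to comments and the negative-index trick the real code uses (i -= tx_ring->count) replaced by a plain wrap for readability:

        u16 i = tx_ring->next_to_clean;
        struct i40e_tx_buffer *tx_buf = &tx_ring->tx_bi[i];
        struct i40e_tx_desc *tx_desc = I40E_TX_DESC(tx_ring, i);
        struct i40e_tx_desc *tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
        unsigned int total_bytes = 0, total_packets = 0;

        /* walk the ring until we catch up with the hardware head pointer */
        while (tx_desc != tx_head && budget) {
                /* per completed packet the full code frees the skb, adds its byte
                 * and segment counts to total_bytes / total_packets, then
                 * dma_unmap_single()s the head buffer and dma_unmap_page()s each
                 * fragment buffer
                 */
                budget--;

                /* advance buffer and descriptor, wrapping at the end of the ring */
                if (++i == tx_ring->count) {
                        i = 0;
                        tx_buf = tx_ring->tx_bi;
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                } else {
                        tx_buf++;
                        tx_desc++;
                }
        }

        tx_ring->next_to_clean = i;

        /* publish totals to the ring stats and the interrupt vector accounting */
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;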
445 int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40evf_setup_tx_descriptors() argument
447 struct device *dev = tx_ring->dev; in i40evf_setup_tx_descriptors()
453 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40evf_setup_tx_descriptors()
454 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40evf_setup_tx_descriptors()
455 if (!tx_ring->tx_bi) in i40evf_setup_tx_descriptors()
459 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40evf_setup_tx_descriptors()
463 tx_ring->size += sizeof(u32); in i40evf_setup_tx_descriptors()
464 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40evf_setup_tx_descriptors()
465 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40evf_setup_tx_descriptors()
466 &tx_ring->dma, GFP_KERNEL); in i40evf_setup_tx_descriptors()
467 if (!tx_ring->desc) { in i40evf_setup_tx_descriptors()
469 tx_ring->size); in i40evf_setup_tx_descriptors()
473 tx_ring->next_to_use = 0; in i40evf_setup_tx_descriptors()
474 tx_ring->next_to_clean = 0; in i40evf_setup_tx_descriptors()
478 kfree(tx_ring->tx_bi); in i40evf_setup_tx_descriptors()
479 tx_ring->tx_bi = NULL; in i40evf_setup_tx_descriptors()
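The setup path mirrors the free path above: a kzalloc()'d array of i40e_tx_buffer for software state, plus one dma_alloc_coherent() block for the descriptors, padded by sizeof(u32) for the head write-back slot and rounded up to a 4 KiB boundary. Joining the fragments gives roughly the following (the error message text and return codes are assumptions):

        int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
        {
                struct device *dev = tx_ring->dev;
                int bi_size;

                if (!dev)
                        return -ENOMEM;

                /* software ring: one i40e_tx_buffer per descriptor */
                bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
                tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
                if (!tx_ring->tx_bi)
                        goto err;

                /* hardware ring: descriptors plus one u32 for head write-back,
                 * rounded up to a 4 KiB multiple
                 */
                tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
                tx_ring->size += sizeof(u32);
                tx_ring->size = ALIGN(tx_ring->size, 4096);
                tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                                   &tx_ring->dma, GFP_KERNEL);
                if (!tx_ring->desc) {
                        dev_info(dev, "Unable to allocate Tx descriptor ring, size=%d\n",
                                 tx_ring->size);
                        goto err;
                }

                tx_ring->next_to_use = 0;
                tx_ring->next_to_clean = 0;
                return 0;

        err:
                kfree(tx_ring->tx_bi);
                tx_ring->tx_bi = NULL;
                return -ENOMEM;
        }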
1118 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags() argument
1155 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tso() argument
1216 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
1323 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
1328 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
1335 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
1338 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
1357 static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map() argument
1366 u16 i = tx_ring->next_to_use; in i40e_tx_map()
1388 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
1390 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_tx_map()
1394 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
1410 if (i == tx_ring->count) { in i40e_tx_map()
1411 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
1429 if (i == tx_ring->count) { in i40e_tx_map()
1430 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
1437 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
1440 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
1448 (first <= &tx_ring->tx_bi[i]) && in i40e_tx_map()
1449 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { in i40e_tx_map()
1461 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, in i40e_tx_map()
1462 tx_ring->queue_index), in i40e_tx_map()
1479 if (i == tx_ring->count) in i40e_tx_map()
1482 tx_ring->next_to_use = i; in i40e_tx_map()
1485 writel(i, tx_ring->tail); in i40e_tx_map()
1490 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
1494 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
1495 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40e_tx_map()
1499 i = tx_ring->count; in i40e_tx_map()
1503 tx_ring->next_to_use = i; in i40e_tx_map()
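On the success path of i40e_tx_map() the driver records the new next_to_use and hits the doorbell; on the dma_error path it walks back through tx_bi[] with i40e_unmap_and_free_tx_resource() before resetting next_to_use. One detail worth spelling out for the success path is ordering: the descriptor writes must be visible before the tail register write. A minimal sketch of that tail-bump step (the wmb() placement is an assumption about how the full code orders things):

        /* i is the index one past the last descriptor we filled in */
        tx_ring->next_to_use = i;

        /* make all descriptor writes globally visible before telling the
         * hardware there is new work to fetch
         */
        wmb();

        /* notify the hardware by writing the new tail value */
        writel(i, tx_ring->tail);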
1513 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40e_maybe_stop_tx() argument
1515 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
1520 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40e_maybe_stop_tx()
1524 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
1525 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
1536 static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in i40e_maybe_stop_tx() argument
1538 if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) in i40e_maybe_stop_tx()
1540 return __i40e_maybe_stop_tx(tx_ring, size); in i40e_maybe_stop_tx()
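__i40e_maybe_stop_tx() and i40e_maybe_stop_tx() implement the usual stop/recheck/restart pattern: the fast path returns immediately when enough descriptors are free, otherwise the slow path stops the subqueue, issues a memory barrier, and re-checks in case the completion path freed descriptors in the meantime. Reconstructed from the fragments, with the smp_mb() and the return values filled in as assumptions based on that standard pattern:

        static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
        {
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

                /* order the stop above against the re-check below; pairs with
                 * the wake logic in the Tx completion path
                 */
                smp_mb();

                /* still not enough room: stay stopped and report busy */
                if (likely(I40E_DESC_UNUSED(tx_ring) < size))
                        return -EBUSY;

                /* a completion freed descriptors meanwhile: restart the queue */
                netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
                ++tx_ring->tx_stats.restart_queue;
                return 0;
        }

        static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
        {
                if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
                        return 0;
                return __i40e_maybe_stop_tx(tx_ring, size);
        }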
1553 struct i40e_ring *tx_ring) in i40e_xmit_descriptor_count() argument
1568 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40e_xmit_descriptor_count()
1569 tx_ring->tx_stats.tx_busy++; in i40e_xmit_descriptor_count()
1583 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
1594 if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) in i40e_xmit_frame_ring()
1598 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
1605 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
1613 tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, in i40e_xmit_frame_ring()
1631 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
1634 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
1637 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
1640 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40e_xmit_frame_ring()
1659 struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; in i40evf_xmit_frame() local
1671 return i40e_xmit_frame_ring(skb, tx_ring); in i40evf_xmit_frame()
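i40evf_xmit_frame() is the ndo_start_xmit entry point: it picks the ring that matches the skb's queue mapping and hands off to i40e_xmit_frame_ring(), which runs the descriptor-count check, VLAN/TSO/checksum offload preparation, context descriptor creation, and i40e_tx_map() seen above. A minimal sketch of the dispatch (the netdev_priv() retrieval and the short-frame padding the real driver performs are assumptions here):

        static netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb,
                                             struct net_device *netdev)
        {
                struct i40evf_adapter *adapter = netdev_priv(netdev);
                struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];

                /* frames shorter than the hardware minimum would need padding
                 * before this point (omitted in this sketch)
                 */
                return i40e_xmit_frame_ring(skb, tx_ring);
        }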