Lines matching refs:tx_ring in the Intel fm10k Ethernet driver
753 static int fm10k_tso(struct fm10k_ring *tx_ring, in fm10k_tso() argument
786 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tso()
793 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; in fm10k_tso()
795 netdev_err(tx_ring->netdev, in fm10k_tso()
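
The fm10k_tso() hits cover the context-descriptor fill (786) and the failure path (793-795): when the hardware cannot segment a tunnel encapsulation, the driver strips NETIF_F_GSO_UDP_TUNNEL from the netdev and logs it. A minimal sketch of that failure path built around the two visible statements (the err_vxlan label and the -1 return are reconstructions, not verbatim driver code):

err_vxlan:
	/* hardware cannot segment this encapsulation: fall back to
	 * software GSO for UDP tunnels and report the offending frame
	 */
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	netdev_err(tx_ring->netdev,
		   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
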
800 static void fm10k_tx_csum(struct fm10k_ring *tx_ring, in fm10k_tx_csum() argument
822 dev_warn(tx_ring->dev, in fm10k_tx_csum()
824 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
865 dev_warn(tx_ring->dev, in fm10k_tx_csum()
870 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
876 tx_ring->tx_stats.csum_good++; in fm10k_tx_csum()
880 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tx_csum()
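
fm10k_tx_csum() shows the usual offload accounting: frames the hardware cannot checksum get a dev_warn() plus a csum_err bump (822-824, 865-870), accepted frames bump csum_good before the context descriptor is filled (876-880). A sketch of one rejection branch, assuming the surrounding protocol parsing (l4_hdr, the message text, and the no_csum label are illustrative assumptions):

	default:
		/* L4 protocol the hardware cannot checksum: warn (rate
		 * limited), count the miss, and skip the offload
		 */
		if (unlikely(net_ratelimit()))
			dev_warn(tx_ring->dev,
				 "partial checksum, but l4 proto=%x!\n",
				 l4_hdr);
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
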
902 static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, in fm10k_tx_desc_push() argument
916 return i == tx_ring->count; in fm10k_tx_desc_push()
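
fm10k_tx_desc_push() writes a single descriptor and, per the hit at 916, tells the caller whether the incremented index just wrapped past the end of the ring; the call sites at 982-997 and 1013 use that to reset tx_desc and i back to slot 0. A sketch of that contract (the descriptor field names buffer_addr/buflen/flags and the flag handling are assumptions):

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size,
			       u8 desc_flags)
{
	/* record buffer address, length and flags in the descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->buflen = cpu_to_le16(size);
	tx_desc->flags = desc_flags;

	/* report when the next index wraps back to slot 0 */
	return ++i == tx_ring->count;
}
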
919 static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) in __fm10k_maybe_stop_tx() argument
921 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __fm10k_maybe_stop_tx()
927 if (likely(fm10k_desc_unused(tx_ring) < size)) in __fm10k_maybe_stop_tx()
931 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __fm10k_maybe_stop_tx()
932 ++tx_ring->tx_stats.restart_queue; in __fm10k_maybe_stop_tx()
936 static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) in fm10k_maybe_stop_tx() argument
938 if (likely(fm10k_desc_unused(tx_ring) >= size)) in fm10k_maybe_stop_tx()
940 return __fm10k_maybe_stop_tx(tx_ring, size); in fm10k_maybe_stop_tx()
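
The two helpers at 919-940 are the standard stop/re-check/restart dance for a full Tx ring: the inline fast path returns immediately while descriptors are plentiful, and the slow path stops the subqueue, re-checks after a memory barrier, and restarts if the cleanup side freed room in the meantime. Reconstructed from the hits (the smp_mb() and the -EBUSY/0 return values are assumptions inferred from how the visible lines fit together):

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* order the stop against re-reading the ring indexes so a
	 * concurrent clean cannot be missed
	 */
	smp_mb();

	/* still no room: stay stopped and report busy */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* a reprieve: the cleaner freed descriptors while we stopped */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	/* fast path: plenty of descriptors, nothing to do */
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;

	return __fm10k_maybe_stop_tx(tx_ring, size);
}
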
943 static void fm10k_tx_map(struct fm10k_ring *tx_ring, in fm10k_tx_map() argument
954 u16 i = tx_ring->next_to_use; in fm10k_tx_map()
957 tx_desc = FM10K_TX_DESC(tx_ring, i); in fm10k_tx_map()
968 dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); in fm10k_tx_map()
974 if (dma_mapping_error(tx_ring->dev, dma)) in fm10k_tx_map()
982 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma, in fm10k_tx_map()
984 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_tx_map()
995 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, in fm10k_tx_map()
997 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_tx_map()
1004 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in fm10k_tx_map()
1007 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_tx_map()
1013 if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags)) in fm10k_tx_map()
1017 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in fm10k_tx_map()
1034 tx_ring->next_to_use = i; in fm10k_tx_map()
1037 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); in fm10k_tx_map()
1040 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { in fm10k_tx_map()
1041 writel(i, tx_ring->tail); in fm10k_tx_map()
1046 dev_err(tx_ring->dev, "TX DMA map failed\n"); in fm10k_tx_map()
1050 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_tx_map()
1051 fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); in fm10k_tx_map()
1055 i = tx_ring->count; in fm10k_tx_map()
1059 tx_ring->next_to_use = i; in fm10k_tx_map()
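
The tail of fm10k_tx_map() ties the hits at 1017-1041 together: report the frame to byte-queue-limits, advance next_to_use, make sure DESC_NEEDED descriptors remain (stopping the queue otherwise), and only ring the hardware doorbell when no further frames are batched behind this one or the queue just stopped. A sketch of that closing sequence, assuming the fm10k_tx_map() context (the wmb() placement is an assumption):

	/* account the frame with byte-queue-limits before hardware sees it */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* make all descriptor writes visible before publishing next_to_use */
	wmb();

	tx_ring->next_to_use = i;

	/* stop the subqueue now if another worst-case frame would not fit */
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write the tail doorbell only when the stack has nothing more
	 * batched (xmit_more) or the queue was just stopped
	 */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);
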
1063 struct fm10k_ring *tx_ring) in fm10k_xmit_frame_ring() argument
1082 if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { in fm10k_xmit_frame_ring()
1083 tx_ring->tx_stats.tx_busy++; in fm10k_xmit_frame_ring()
1088 first = &tx_ring->tx_buffer[tx_ring->next_to_use]; in fm10k_xmit_frame_ring()
1096 tso = fm10k_tso(tx_ring, first); in fm10k_xmit_frame_ring()
1100 fm10k_tx_csum(tx_ring, first); in fm10k_xmit_frame_ring()
1102 fm10k_tx_map(tx_ring, first); in fm10k_xmit_frame_ring()
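
fm10k_xmit_frame_ring() orders the pieces above: reserve descriptors up front (count + 3 covers the data descriptors plus a context descriptor and the gap that keeps tail from touching head), record the first buffer, try TSO, fall back to plain checksum offload when the frame is not GSO, then map. A sketch of that control flow built from the hits (the out_drop handling and the NETDEV_TX_* returns are assumptions):

	/* make sure the ring can take the worst-case descriptor count */
	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record where this packet starts in the ring */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;

	/* TSO first; a negative return means the frame cannot be sent */
	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
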
1140 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) in fm10k_check_tx_hang() argument
1142 u32 tx_done = fm10k_get_tx_completed(tx_ring); in fm10k_check_tx_hang()
1143 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in fm10k_check_tx_hang()
1144 u32 tx_pending = fm10k_get_tx_pending(tx_ring, true); in fm10k_check_tx_hang()
1146 clear_check_for_tx_hang(tx_ring); in fm10k_check_tx_hang()
1158 tx_ring->tx_stats.tx_done_old = tx_done; in fm10k_check_tx_hang()
1160 clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state); in fm10k_check_tx_hang()
1166 return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state); in fm10k_check_tx_hang()
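
fm10k_check_tx_hang() is a two-strike detector: if no new completions have landed since the previous check while work is still pending, arm the hang bit (and report a hang if it was already armed); any forward progress records the new completion count and disarms it. Reconstructed from the hits at 1142-1166 (the exact comparison is an assumption inferred from the variables the hits read):

	u32 tx_done = fm10k_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);

	clear_check_for_tx_hang(tx_ring);

	/* no completions since the last check but descriptors still
	 * pending: first pass arms the detector, second pass reports
	 */
	if (tx_done_old == tx_done && tx_pending)
		return test_and_set_bit(__FM10K_HANG_CHECK_ARMED,
					tx_ring->state);

	/* progress was made: remember it and disarm the detector */
	tx_ring->tx_stats.tx_done_old = tx_done;
	clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);

	return false;
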
1190 struct fm10k_ring *tx_ring, int napi_budget) in fm10k_clean_tx_irq() argument
1197 unsigned int i = tx_ring->next_to_clean; in fm10k_clean_tx_irq()
1202 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_clean_tx_irq()
1203 tx_desc = FM10K_TX_DESC(tx_ring, i); in fm10k_clean_tx_irq()
1204 i -= tx_ring->count; in fm10k_clean_tx_irq()
1231 dma_unmap_single(tx_ring->dev, in fm10k_clean_tx_irq()
1246 i -= tx_ring->count; in fm10k_clean_tx_irq()
1247 tx_buffer = tx_ring->tx_buffer; in fm10k_clean_tx_irq()
1248 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_clean_tx_irq()
1253 dma_unmap_page(tx_ring->dev, in fm10k_clean_tx_irq()
1266 i -= tx_ring->count; in fm10k_clean_tx_irq()
1267 tx_buffer = tx_ring->tx_buffer; in fm10k_clean_tx_irq()
1268 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_clean_tx_irq()
1278 i += tx_ring->count; in fm10k_clean_tx_irq()
1279 tx_ring->next_to_clean = i; in fm10k_clean_tx_irq()
1280 u64_stats_update_begin(&tx_ring->syncp); in fm10k_clean_tx_irq()
1281 tx_ring->stats.bytes += total_bytes; in fm10k_clean_tx_irq()
1282 tx_ring->stats.packets += total_packets; in fm10k_clean_tx_irq()
1283 u64_stats_update_end(&tx_ring->syncp); in fm10k_clean_tx_irq()
1287 if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { in fm10k_clean_tx_irq()
1291 netif_err(interface, drv, tx_ring->netdev, in fm10k_clean_tx_irq()
1297 tx_ring->queue_index, in fm10k_clean_tx_irq()
1298 fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), in fm10k_clean_tx_irq()
1299 fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), in fm10k_clean_tx_irq()
1300 tx_ring->next_to_use, i); in fm10k_clean_tx_irq()
1302 netif_stop_subqueue(tx_ring->netdev, in fm10k_clean_tx_irq()
1303 tx_ring->queue_index); in fm10k_clean_tx_irq()
1305 netif_info(interface, probe, tx_ring->netdev, in fm10k_clean_tx_irq()
1308 tx_ring->queue_index); in fm10k_clean_tx_irq()
1317 netdev_tx_completed_queue(txring_txq(tx_ring), in fm10k_clean_tx_irq()
1321 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in fm10k_clean_tx_irq()
1322 (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in fm10k_clean_tx_irq()
1327 if (__netif_subqueue_stopped(tx_ring->netdev, in fm10k_clean_tx_irq()
1328 tx_ring->queue_index) && in fm10k_clean_tx_irq()
1330 netif_wake_subqueue(tx_ring->netdev, in fm10k_clean_tx_irq()
1331 tx_ring->queue_index); in fm10k_clean_tx_irq()
1332 ++tx_ring->tx_stats.restart_queue; in fm10k_clean_tx_irq()
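
The end of fm10k_clean_tx_irq() does three things the hits point at: publish the per-ring byte/packet counters under the u64_stats seqcount (1280-1283), run the hang detector and, if it fires, dump ring state and stop the subqueue (1287-1303), otherwise complete the byte-queue-limits accounting and wake the subqueue once enough descriptors are free again (1317-1332). A sketch of the completion/wake portion, assuming the fm10k_clean_tx_irq() context (the __FM10K_DOWN test is reconstructed from context and is not among the hits):

	/* return the cleaned bytes/packets to byte-queue-limits */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* pairs with the barrier in __fm10k_maybe_stop_tx() so a
		 * queue stopped there sees the descriptors just freed
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__FM10K_DOWN, interface->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}
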
1647 interface->tx_ring[txr_idx] = ring; in fm10k_alloc_q_vector()
1709 interface->tx_ring[ring->queue_index] = NULL; in fm10k_free_q_vector()
1888 interface->tx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
1889 interface->tx_ring[offset + i]->qos_pc = pc; in fm10k_cache_ring_qos()
1913 interface->tx_ring[i]->reg_idx = i; in fm10k_cache_ring_rss()
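
The remaining hits are bookkeeping: each allocated q_vector publishes its Tx ring in interface->tx_ring[] (1647) and the slot is cleared on free (1709), while the cache_ring helpers decide which hardware register index each ring drives; with RSS only, 1913 shows the identity mapping. A minimal sketch of the RSS case (the loop bound num_tx_queues is an assumption):

	/* RSS-only layout: software Tx queue i drives hardware ring i */
	for (i = 0; i < interface->num_tx_queues; i++)
		interface->tx_ring[i]->reg_idx = i;
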