
Lines Matching refs:tx_ring

Cross-reference listing for the symbol tx_ring in the Linux ixgbe driver (drivers/net/ethernet/intel/ixgbe/ixgbe_main.c). The leading number on each line is the line number in that source file; the trailing annotation names the enclosing function, with "local"/"argument" marking lines where tx_ring is declared.

592 struct ixgbe_ring *tx_ring; in ixgbe_dump() local
634 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
635 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in ixgbe_dump()
637 n, tx_ring->next_to_use, tx_ring->next_to_clean, in ixgbe_dump()
686 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
688 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in ixgbe_dump()
695 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in ixgbe_dump()
696 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_dump()
697 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_dump()
709 if (i == tx_ring->next_to_use && in ixgbe_dump()
710 i == tx_ring->next_to_clean) in ixgbe_dump()
712 else if (i == tx_ring->next_to_use) in ixgbe_dump()
714 else if (i == tx_ring->next_to_clean) in ixgbe_dump()
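ixgbe_dump() (lines 592-714 above) walks every descriptor of each TX ring and tags the slots that next_to_use (NTU) and next_to_clean (NTC) point at, testing the both-equal case first (lines 709-714). A minimal userspace sketch of just that tagging logic, with a hypothetical struct ring standing in for the driver's struct ixgbe_ring:

    #include <stdio.h>

    /* Hypothetical stand-in for struct ixgbe_ring's index fields. */
    struct ring {
        unsigned int count;          /* number of descriptors */
        unsigned int next_to_use;    /* NTU: next slot software fills */
        unsigned int next_to_clean;  /* NTC: next slot software reclaims */
    };

    static void dump_ring(const struct ring *r)
    {
        for (unsigned int i = 0; i < r->count; i++) {
            const char *mark = "";

            /* Same priority order as the driver: NTC==NTU first. */
            if (i == r->next_to_use && i == r->next_to_clean)
                mark = " NTC/U";
            else if (i == r->next_to_use)
                mark = " NTU";
            else if (i == r->next_to_clean)
                mark = " NTC";
            printf("desc[%03u]%s\n", i, mark);
        }
    }

    int main(void)
    {
        struct ring r = { .count = 8, .next_to_use = 5, .next_to_clean = 2 };
        dump_ring(&r);
        return 0;
    }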
992 &adapter->tx_ring[i]->state); in ixgbe_update_xoff_rx_lfc()
1031 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_xoff_received() local
1033 tc = tx_ring->dcb_tc; in ixgbe_update_xoff_received()
1035 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_update_xoff_received()
1066 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) in ixgbe_check_tx_hang() argument
1068 u32 tx_done = ixgbe_get_tx_completed(tx_ring); in ixgbe_check_tx_hang()
1069 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbe_check_tx_hang()
1070 u32 tx_pending = ixgbe_get_tx_pending(tx_ring); in ixgbe_check_tx_hang()
1072 clear_check_for_tx_hang(tx_ring); in ixgbe_check_tx_hang()
1089 &tx_ring->state); in ixgbe_check_tx_hang()
1091 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbe_check_tx_hang()
1093 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_check_tx_hang()
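ixgbe_check_tx_hang() (lines 1066-1093) is a two-strike heuristic: a hang is reported only when the completion count has not advanced since the previous check (tx_done == tx_done_old) while descriptors are still pending, and the __IXGBE_HANG_CHECK_ARMED bit was already set by an earlier stalled check. A compact model of that state machine, with a plain bool standing in for the driver's atomic ring-state bit:

    #include <stdbool.h>
    #include <stdio.h>

    struct hang_state {
        unsigned int tx_done_old;  /* completions seen at the last check */
        bool armed;                /* models __IXGBE_HANG_CHECK_ARMED */
    };

    /* Reports a hang only on the second consecutive stalled check, like
     * the test_and_set_bit() in ixgbe_check_tx_hang(). */
    static bool check_tx_hang(struct hang_state *s,
                              unsigned int tx_done, unsigned int tx_pending)
    {
        if (s->tx_done_old == tx_done && tx_pending) {
            bool was_armed = s->armed;  /* test_and_set_bit() analogue */
            s->armed = true;
            return was_armed;           /* second strike -> hang */
        }
        /* Progress was made (or the ring is idle): disarm and resync. */
        s->tx_done_old = tx_done;
        s->armed = false;
        return false;
    }

    int main(void)
    {
        struct hang_state s = { 0, false };
        printf("%d\n", check_tx_hang(&s, 10, 4));  /* progress:     0 */
        printf("%d\n", check_tx_hang(&s, 10, 4));  /* first strike: 0 */
        printf("%d\n", check_tx_hang(&s, 10, 4));  /* second:       1 */
        return 0;
    }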
1150 struct ixgbe_ring *tx_ring, int napi_budget) in ixgbe_clean_tx_irq() argument
1157 unsigned int i = tx_ring->next_to_clean; in ixgbe_clean_tx_irq()
1162 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_irq()
1163 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_clean_tx_irq()
1164 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1191 dma_unmap_single(tx_ring->dev, in ixgbe_clean_tx_irq()
1206 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1207 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1208 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_clean_tx_irq()
1213 dma_unmap_page(tx_ring->dev, in ixgbe_clean_tx_irq()
1226 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1227 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1228 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_clean_tx_irq()
1238 i += tx_ring->count; in ixgbe_clean_tx_irq()
1239 tx_ring->next_to_clean = i; in ixgbe_clean_tx_irq()
1240 u64_stats_update_begin(&tx_ring->syncp); in ixgbe_clean_tx_irq()
1241 tx_ring->stats.bytes += total_bytes; in ixgbe_clean_tx_irq()
1242 tx_ring->stats.packets += total_packets; in ixgbe_clean_tx_irq()
1243 u64_stats_update_end(&tx_ring->syncp); in ixgbe_clean_tx_irq()
1247 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { in ixgbe_clean_tx_irq()
1258 tx_ring->queue_index, in ixgbe_clean_tx_irq()
1259 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), in ixgbe_clean_tx_irq()
1260 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), in ixgbe_clean_tx_irq()
1261 tx_ring->next_to_use, i, in ixgbe_clean_tx_irq()
1262 tx_ring->tx_buffer_info[i].time_stamp, jiffies); in ixgbe_clean_tx_irq()
1264 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1268 adapter->tx_timeout_count + 1, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1277 netdev_tx_completed_queue(txring_txq(tx_ring), in ixgbe_clean_tx_irq()
1281 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in ixgbe_clean_tx_irq()
1282 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in ixgbe_clean_tx_irq()
1287 if (__netif_subqueue_stopped(tx_ring->netdev, in ixgbe_clean_tx_irq()
1288 tx_ring->queue_index) in ixgbe_clean_tx_irq()
1290 netif_wake_subqueue(tx_ring->netdev, in ixgbe_clean_tx_irq()
1291 tx_ring->queue_index); in ixgbe_clean_tx_irq()
1292 ++tx_ring->tx_stats.restart_queue; in ixgbe_clean_tx_irq()
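The cleanup loop in ixgbe_clean_tx_irq() (lines 1150-1292) uses a biased-index trick: next_to_clean is offset by -count up front (line 1164), so the hot loop's wrap test is a cheap check against zero (lines 1206 and 1226), and the bias is removed again before storing the new next_to_clean (lines 1238-1239). A standalone sketch of the index arithmetic, assuming a fixed ring size:

    #include <stdio.h>

    #define RING_COUNT 8

    /* Clean 'budget' slots starting at next_to_clean using the driver's
     * biased index: i is offset by -RING_COUNT so the wrap test is a
     * cheap "did i reach zero" instead of "did i reach count". */
    static unsigned int clean_ring(unsigned int next_to_clean,
                                   unsigned int budget)
    {
        int i = (int)next_to_clean - RING_COUNT;  /* bias: always negative */

        while (budget--) {
            printf("cleaning slot %d\n", i + RING_COUNT);
            i++;
            if (!i)                /* wrapped past the last descriptor */
                i -= RING_COUNT;   /* back to slot 0, still biased */
        }
        return i + RING_COUNT;     /* un-bias: the new next_to_clean */
    }

    int main(void)
    {
        unsigned int ntc = clean_ring(6, 4);  /* cleans slots 6, 7, 0, 1 */
        printf("next_to_clean = %u\n", ntc);  /* prints 2 */
        return 0;
    }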
1301 struct ixgbe_ring *tx_ring, in ixgbe_update_tx_dca() argument
1309 txctrl = dca3_get_tag(tx_ring->dev, cpu); in ixgbe_update_tx_dca()
1313 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); in ixgbe_update_tx_dca()
1317 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); in ixgbe_update_tx_dca()
2789 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_msix_other()
3302 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbe_configure_tx()
4952 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; in ixgbe_fwd_ring_down()
4953 adapter->tx_ring[txbase + i]->netdev = adapter->netdev; in ixgbe_fwd_ring_down()
4990 adapter->tx_ring[txbase + i]->netdev = vdev; in ixgbe_fwd_ring_up()
4991 adapter->tx_ring[txbase + i]->l2_accel_priv = accel; in ixgbe_fwd_ring_up()
5382 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) in ixgbe_clean_tx_ring() argument
5389 if (!tx_ring->tx_buffer_info) in ixgbe_clean_tx_ring()
5393 for (i = 0; i < tx_ring->count; i++) { in ixgbe_clean_tx_ring()
5394 tx_buffer_info = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_ring()
5395 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); in ixgbe_clean_tx_ring()
5398 netdev_tx_reset_queue(txring_txq(tx_ring)); in ixgbe_clean_tx_ring()
5400 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; in ixgbe_clean_tx_ring()
5401 memset(tx_ring->tx_buffer_info, 0, size); in ixgbe_clean_tx_ring()
5404 memset(tx_ring->desc, 0, tx_ring->size); in ixgbe_clean_tx_ring()
5406 tx_ring->next_to_use = 0; in ixgbe_clean_tx_ring()
5407 tx_ring->next_to_clean = 0; in ixgbe_clean_tx_ring()
5431 ixgbe_clean_tx_ring(adapter->tx_ring[i]); in ixgbe_clean_all_tx_rings()
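ixgbe_clean_tx_ring() (lines 5382-5407) releases every in-flight buffer before memsetting anything: the tx_buffer_info entries still hold the DMA handles needed for unmapping, so the order matters. Afterwards both the metadata array and the descriptor memory are zeroed and the indices rewound. A trivial sketch of that ordering, with placeholder types:

    #include <stdio.h>
    #include <string.h>

    #define COUNT 4

    struct slot { void *skb; };  /* stand-in for struct ixgbe_tx_buffer */

    struct ring {
        struct slot info[COUNT];
        unsigned char desc[COUNT * 16];  /* stand-in descriptor memory */
        unsigned int next_to_use, next_to_clean;
    };

    static void clean_tx_ring(struct ring *r)
    {
        /* Release buffers first: info[] still holds the handles needed
         * to unmap them (ixgbe_unmap_and_free_tx_resource() analogue). */
        for (unsigned int i = 0; i < COUNT; i++)
            r->info[i].skb = NULL;

        memset(r->info, 0, sizeof(r->info));
        memset(r->desc, 0, sizeof(r->desc));
        r->next_to_use = 0;
        r->next_to_clean = 0;
    }

    int main(void)
    {
        struct ring r = { .next_to_use = 3, .next_to_clean = 1 };
        clean_tx_ring(&r);
        printf("ntu=%u ntc=%u\n", r.next_to_use, r.next_to_clean);
        return 0;
    }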
5519 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbe_down()
5796 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) in ixgbe_setup_tx_resources() argument
5798 struct device *dev = tx_ring->dev; in ixgbe_setup_tx_resources()
5803 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; in ixgbe_setup_tx_resources()
5805 if (tx_ring->q_vector) in ixgbe_setup_tx_resources()
5806 ring_node = tx_ring->q_vector->numa_node; in ixgbe_setup_tx_resources()
5808 tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); in ixgbe_setup_tx_resources()
5809 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
5810 tx_ring->tx_buffer_info = vzalloc(size); in ixgbe_setup_tx_resources()
5811 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
5814 u64_stats_init(&tx_ring->syncp); in ixgbe_setup_tx_resources()
5817 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbe_setup_tx_resources()
5818 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbe_setup_tx_resources()
5821 tx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_tx_resources()
5822 tx_ring->size, in ixgbe_setup_tx_resources()
5823 &tx_ring->dma, in ixgbe_setup_tx_resources()
5826 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
5827 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in ixgbe_setup_tx_resources()
5828 &tx_ring->dma, GFP_KERNEL); in ixgbe_setup_tx_resources()
5829 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
5832 tx_ring->next_to_use = 0; in ixgbe_setup_tx_resources()
5833 tx_ring->next_to_clean = 0; in ixgbe_setup_tx_resources()
5837 vfree(tx_ring->tx_buffer_info); in ixgbe_setup_tx_resources()
5838 tx_ring->tx_buffer_info = NULL; in ixgbe_setup_tx_resources()
5858 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
5870 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
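ixgbe_setup_tx_resources() (lines 5796-5838) allocates the per-slot metadata NUMA-locally when it can: it first tries vzalloc_node() on the q_vector's node and falls back to a plain vzalloc() if node-local memory is exhausted (lines 5808-5810); the same preferred-then-anywhere pattern repeats for the coherent descriptor memory, whose size is first rounded up to a 4 KiB multiple (line 5818). A userspace sketch of the fallback pattern, with a hypothetical zalloc_node() standing in for the kernel allocator:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical NUMA-aware zeroing allocator that may fail; stands in
     * for vzalloc_node().  It always fails here, to show the fallback. */
    static void *zalloc_node(size_t size, int node)
    {
        (void)size;
        (void)node;
        return NULL;  /* pretend node-local memory is exhausted */
    }

    /* Preferred-then-anywhere allocation, as in ixgbe_setup_tx_resources(). */
    static void *alloc_buffer_info(size_t size, int preferred_node)
    {
        void *p = zalloc_node(size, preferred_node);

        if (!p)        /* fall back to any node (plain vzalloc()) */
            p = calloc(1, size);
        return p;
    }

    int main(void)
    {
        void *info = alloc_buffer_info(512 * sizeof(long), 1);
        printf("%s\n", info ? "allocated via fallback" : "ENOMEM");
        free(info);
        return 0;
    }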
5968 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) in ixgbe_free_tx_resources() argument
5970 ixgbe_clean_tx_ring(tx_ring); in ixgbe_free_tx_resources()
5972 vfree(tx_ring->tx_buffer_info); in ixgbe_free_tx_resources()
5973 tx_ring->tx_buffer_info = NULL; in ixgbe_free_tx_resources()
5976 if (!tx_ring->desc) in ixgbe_free_tx_resources()
5979 dma_free_coherent(tx_ring->dev, tx_ring->size, in ixgbe_free_tx_resources()
5980 tx_ring->desc, tx_ring->dma); in ixgbe_free_tx_resources()
5982 tx_ring->desc = NULL; in ixgbe_free_tx_resources()
5996 if (adapter->tx_ring[i]->desc) in ixgbe_free_all_tx_resources()
5997 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_free_all_tx_resources()
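ixgbe_free_tx_resources() (lines 5968-5982) tears down in the mirror order of setup: clean the ring first (while the metadata is still live), free the metadata array, and only call dma_free_coherent() if the descriptor memory was actually allocated (the !tx_ring->desc early-out at line 5976). A sketch of that teardown shape, using free() as a stand-in for both vfree() and dma_free_coherent():

    #include <stdio.h>
    #include <stdlib.h>

    struct ring {
        void *buffer_info;  /* per-slot metadata (vzalloc'd in the driver) */
        void *desc;         /* descriptor memory (dma_alloc_coherent'd) */
    };

    /* Mirror-order teardown as in ixgbe_free_tx_resources(); ring cleaning
     * would come first, while buffer_info still holds DMA handles. */
    static void free_tx_resources(struct ring *r)
    {
        free(r->buffer_info);  /* vfree() analogue */
        r->buffer_info = NULL;

        if (!r->desc)
            return;            /* nothing left to free */
        free(r->desc);         /* dma_free_coherent() analogue */
        r->desc = NULL;
    }

    int main(void)
    {
        struct ring r = { malloc(64), malloc(4096) };
        free_tx_resources(&r);
        printf("desc=%p info=%p\n", r.desc, r.buffer_info);
        return 0;
    }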
6415 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_stats() local
6416 restart_queue += tx_ring->tx_stats.restart_queue; in ixgbe_update_stats()
6417 tx_busy += tx_ring->tx_stats.tx_busy; in ixgbe_update_stats()
6418 bytes += tx_ring->stats.bytes; in ixgbe_update_stats()
6419 packets += tx_ring->stats.packets; in ixgbe_update_stats()
6613 &(adapter->tx_ring[i]->state)); in ixgbe_fdir_reinit_subtask()
6646 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbe_check_hang_subtask()
6864 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_ring_tx_pending() local
6866 if (tx_ring->next_to_use != tx_ring->next_to_clean) in ixgbe_ring_tx_pending()
7243 static int ixgbe_tso(struct ixgbe_ring *tx_ring, in ixgbe_tso() argument
7323 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, in ixgbe_tso()
7338 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, in ixgbe_tx_csum() argument
7383 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); in ixgbe_tx_csum()
7442 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) in __ixgbe_maybe_stop_tx() argument
7444 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbe_maybe_stop_tx()
7455 if (likely(ixgbe_desc_unused(tx_ring) < size)) in __ixgbe_maybe_stop_tx()
7459 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbe_maybe_stop_tx()
7460 ++tx_ring->tx_stats.restart_queue; in __ixgbe_maybe_stop_tx()
7464 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) in ixgbe_maybe_stop_tx() argument
7466 if (likely(ixgbe_desc_unused(tx_ring) >= size)) in ixgbe_maybe_stop_tx()
7469 return __ixgbe_maybe_stop_tx(tx_ring, size); in ixgbe_maybe_stop_tx()
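__ixgbe_maybe_stop_tx() (lines 7442-7460) shows the standard stop/re-check pattern for closing the race with the cleanup path: stop the subqueue first, issue a full memory barrier, then re-read the free-descriptor count; if the cleaner made room in the meantime, restart the queue and bump restart_queue. ixgbe_maybe_stop_tx() (lines 7464-7469) is the barrier-free fast path. A userspace model using C11 atomics in place of smp_mb() and the netif queue flags:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the ring and netdev subqueue state. */
    struct ring {
        atomic_uint unused;   /* free descriptors, bumped by the cleaner */
        atomic_bool stopped;  /* netif subqueue stopped flag */
    };

    /* Slow path: stop first, full barrier, then re-check, mirroring
     * __ixgbe_maybe_stop_tx().  Stopping before the re-check closes the
     * race where the cleaner frees descriptors between check and stop. */
    static int maybe_stop_slow(struct ring *r, unsigned int need)
    {
        atomic_store(&r->stopped, true);
        atomic_thread_fence(memory_order_seq_cst);  /* smp_mb() analogue */

        if (atomic_load(&r->unused) < need)
            return -1;                   /* really full: -EBUSY */

        /* A reprieve: the cleaner made room while we were stopping. */
        atomic_store(&r->stopped, false);
        return 0;
    }

    /* Fast path (ixgbe_maybe_stop_tx()): common case needs no barrier. */
    static int maybe_stop(struct ring *r, unsigned int need)
    {
        if (atomic_load(&r->unused) >= need)
            return 0;
        return maybe_stop_slow(r, need);
    }

    int main(void)
    {
        struct ring r;
        atomic_init(&r.unused, 2);
        atomic_init(&r.stopped, false);
        printf("%d\n", maybe_stop(&r, 8));  /* -1: queue stays stopped */
        return 0;
    }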
7475 static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, in ixgbe_tx_map() argument
7487 u16 i = tx_ring->next_to_use; in ixgbe_tx_map()
7489 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_tx_map()
7507 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbe_tx_map()
7512 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbe_tx_map()
7527 if (i == tx_ring->count) { in ixgbe_tx_map()
7528 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_tx_map()
7546 if (i == tx_ring->count) { in ixgbe_tx_map()
7547 tx_desc = IXGBE_TX_DESC(tx_ring, 0); in ixgbe_tx_map()
7559 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbe_tx_map()
7562 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
7569 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in ixgbe_tx_map()
7588 if (i == tx_ring->count) in ixgbe_tx_map()
7591 tx_ring->next_to_use = i; in ixgbe_tx_map()
7593 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); in ixgbe_tx_map()
7595 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { in ixgbe_tx_map()
7596 writel(i, tx_ring->tail); in ixgbe_tx_map()
7606 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbe_tx_map()
7610 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
7611 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); in ixgbe_tx_map()
7615 i = tx_ring->count; in ixgbe_tx_map()
7619 tx_ring->next_to_use = i; in ixgbe_tx_map()
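ixgbe_tx_map() (lines 7475-7619) defers the doorbell: the tail register write (line 7596, writel) is expensive MMIO, so it is skipped while the stack signals more packets are coming (skb->xmit_more, the API later replaced by netdev_xmit_more()) and the queue is still running. A sketch of just that gating, with a plain variable modeling the MMIO tail register:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Plain variable modeling the MMIO tail (doorbell) register. */
    static volatile uint32_t tail_reg;

    /* Doorbell gating as at the end of ixgbe_tx_map(): skip the MMIO
     * write while more packets are promised and the queue is running. */
    static void tx_map_finish(uint32_t next_to_use, bool queue_stopped,
                              bool xmit_more)
    {
        if (queue_stopped || !xmit_more) {
            tail_reg = next_to_use;  /* writel(i, tx_ring->tail) analogue */
            printf("doorbell: tail = %u\n", (unsigned)next_to_use);
        } else {
            printf("batched: no MMIO for slot %u\n", (unsigned)next_to_use);
        }
    }

    int main(void)
    {
        tx_map_finish(3, false, true);   /* mid-batch: defer the doorbell */
        tx_map_finish(5, false, false);  /* end of batch: ring it */
        return 0;
    }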
7799 struct ixgbe_ring *tx_ring) in ixgbe_xmit_frame_ring() argument
7819 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { in ixgbe_xmit_frame_ring()
7820 tx_ring->tx_stats.tx_busy++; in ixgbe_xmit_frame_ring()
7825 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbe_xmit_frame_ring()
7898 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { in ixgbe_xmit_frame_ring()
7899 tso = ixgbe_fso(tx_ring, first, &hdr_len); in ixgbe_xmit_frame_ring()
7907 tso = ixgbe_tso(tx_ring, first, &hdr_len); in ixgbe_xmit_frame_ring()
7911 ixgbe_tx_csum(tx_ring, first); in ixgbe_xmit_frame_ring()
7914 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) in ixgbe_xmit_frame_ring()
7915 ixgbe_atr(tx_ring, first); in ixgbe_xmit_frame_ring()
7920 ixgbe_tx_map(tx_ring, first, hdr_len); in ixgbe_xmit_frame_ring()
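ixgbe_xmit_frame_ring() (lines 7799-7920) budgets descriptors up front: each buffer needs one descriptor per IXGBE_MAX_DATA_PER_TXD (16 KiB) chunk, and the check at line 7819 reserves count + 3 extras (a two-descriptor gap so tail never touches head, plus one for a possible context descriptor); if the ring cannot cover that, tx_busy is bumped and the packet is deferred. The arithmetic, as a small standalone program:

    #include <stdio.h>

    #define MAX_DATA_PER_TXD (1u << 14)  /* IXGBE_MAX_DATA_PER_TXD: 16 KiB */

    /* Descriptors needed for one buffer of 'len' bytes: each data
     * descriptor carries at most MAX_DATA_PER_TXD bytes. */
    static unsigned int txd_use_count(unsigned int len)
    {
        return (len + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
    }

    int main(void)
    {
        /* e.g. a 9000-byte linear area plus two 20 KiB fragments */
        unsigned int count = txd_use_count(9000)
                           + txd_use_count(20480)
                           + txd_use_count(20480);

        /* Reserve count + 3: a 2-descriptor gap so tail never touches
         * head, plus 1 for a possible context descriptor. */
        printf("data descs: %u, reserved: %u\n", count, count + 3);
        return 0;
    }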
7936 struct ixgbe_ring *tx_ring; in __ixgbe_xmit_frame() local
7945 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; in __ixgbe_xmit_frame()
7947 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); in __ixgbe_xmit_frame()
8111 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); in ixgbe_get_stats64()
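The writer side at lines 1240-1243 brackets the byte/packet counters with u64_stats_update_begin()/end(); the reader in ixgbe_get_stats64() (line 8111) takes an ACCESS_ONCE (today: READ_ONCE) snapshot of the ring pointer and retries its reads until the sequence is stable, so 64-bit counters stay consistent even on 32-bit machines. A simplified seqcount model of that writer/reader protocol (memory ordering simplified; the real u64_stats_sync compiles away entirely on 64-bit):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Minimal seqcount modeling u64_stats_sync: the writer bumps the
     * sequence to odd before updating and to even after; readers retry
     * if they saw an odd value or the sequence changed across reads. */
    struct stats {
        atomic_uint seq;
        uint64_t bytes, packets;
    };

    static void writer_update(struct stats *s, uint64_t b, uint64_t p)
    {
        atomic_fetch_add(&s->seq, 1);  /* -> odd: update in progress */
        s->bytes += b;
        s->packets += p;
        atomic_fetch_add(&s->seq, 1);  /* -> even: update complete */
    }

    static void reader_snapshot(struct stats *s, uint64_t *b, uint64_t *p)
    {
        unsigned int start;

        do {
            start = atomic_load(&s->seq);
            *b = s->bytes;
            *p = s->packets;
        } while ((start & 1u) || start != atomic_load(&s->seq));
    }

    int main(void)
    {
        struct stats s = { 0 };
        uint64_t b, p;

        writer_update(&s, 1500, 1);
        reader_snapshot(&s, &b, &p);
        printf("bytes=%llu packets=%llu\n",
               (unsigned long long)b, (unsigned long long)p);
        return 0;
    }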