Lines matching refs: tx_ring
372 struct igb_ring *tx_ring; in igb_dump() local
407 tx_ring = adapter->tx_ring[n]; in igb_dump()
408 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in igb_dump()
410 n, tx_ring->next_to_use, tx_ring->next_to_clean, in igb_dump()
435 tx_ring = adapter->tx_ring[n]; in igb_dump()
437 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in igb_dump()
441 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in igb_dump()
444 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_dump()
445 buffer_info = &tx_ring->tx_buffer_info[i]; in igb_dump()
447 if (i == tx_ring->next_to_use && in igb_dump()
448 i == tx_ring->next_to_clean) in igb_dump()
450 else if (i == tx_ring->next_to_use) in igb_dump()
452 else if (i == tx_ring->next_to_clean) in igb_dump()
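The igb_dump() hits above walk every descriptor in a Tx ring and flag the slots where next_to_use (NTU, the producer index) and next_to_clean (NTC, the consumer index) currently sit. A minimal, illustrative model of that marking logic, using a hypothetical ring_state struct rather than the driver's own types:

    /* Illustrative only: stripped-down model of the NTU/NTC markers that
     * igb_dump() prints while iterating the Tx ring. */
    struct ring_state {
        unsigned int count;          /* number of descriptors in the ring */
        unsigned int next_to_use;    /* producer index (NTU)              */
        unsigned int next_to_clean;  /* consumer index (NTC)              */
    };

    static const char *ring_slot_marker(const struct ring_state *r, unsigned int i)
    {
        if (i == r->next_to_use && i == r->next_to_clean)
            return " NTC/U";  /* ring is empty: both indices meet here */
        else if (i == r->next_to_use)
            return " NTU";    /* next descriptor the driver will fill  */
        else if (i == r->next_to_clean)
            return " NTC";    /* next descriptor completion will reap  */
        return "";
    }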
744 adapter->tx_ring[j]->reg_idx = rbase_offset + j; in igb_cache_ring_register()
1040 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igb_reset_q_vector()
1275 adapter->tx_ring[txr_idx] = ring; in igb_alloc_q_vector()
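Taken together, these three single-line hits show the lifecycle of the per-adapter adapter->tx_ring[] lookup table: igb_alloc_q_vector() installs a ring pointer at its queue index, igb_reset_q_vector() clears that slot again, and igb_cache_ring_register() records which hardware register block (reg_idx) each software queue drives. A hedged sketch of that install/clear pattern with hypothetical types:

    /* Hypothetical model of the adapter->tx_ring[] bookkeeping shown above. */
    #define MAX_TX_QUEUES 8

    struct txq {
        unsigned int queue_index;  /* software queue number          */
        unsigned int reg_idx;      /* hardware register block to use */
    };

    struct nic {
        struct txq *tx_ring[MAX_TX_QUEUES]; /* queue index -> ring lookup */
    };

    static void nic_install_txq(struct nic *n, struct txq *q, unsigned int idx,
                                unsigned int rbase_offset)
    {
        q->queue_index = idx;
        q->reg_idx = rbase_offset + idx; /* igb_cache_ring_register() analogue */
        n->tx_ring[idx] = q;             /* igb_alloc_q_vector() analogue      */
    }

    static void nic_remove_txq(struct nic *n, unsigned int idx)
    {
        n->tx_ring[idx] = NULL;          /* igb_reset_q_vector() analogue      */
    }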
3287 int igb_setup_tx_resources(struct igb_ring *tx_ring) in igb_setup_tx_resources() argument
3289 struct device *dev = tx_ring->dev; in igb_setup_tx_resources()
3292 size = sizeof(struct igb_tx_buffer) * tx_ring->count; in igb_setup_tx_resources()
3294 tx_ring->tx_buffer_info = vzalloc(size); in igb_setup_tx_resources()
3295 if (!tx_ring->tx_buffer_info) in igb_setup_tx_resources()
3299 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); in igb_setup_tx_resources()
3300 tx_ring->size = ALIGN(tx_ring->size, 4096); in igb_setup_tx_resources()
3302 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igb_setup_tx_resources()
3303 &tx_ring->dma, GFP_KERNEL); in igb_setup_tx_resources()
3304 if (!tx_ring->desc) in igb_setup_tx_resources()
3307 tx_ring->next_to_use = 0; in igb_setup_tx_resources()
3308 tx_ring->next_to_clean = 0; in igb_setup_tx_resources()
3313 vfree(tx_ring->tx_buffer_info); in igb_setup_tx_resources()
3314 tx_ring->tx_buffer_info = NULL; in igb_setup_tx_resources()
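igb_setup_tx_resources() shows the canonical two-part allocation: a vzalloc()'d software array of igb_tx_buffer entries for per-descriptor bookkeeping, plus a 4096-byte-aligned DMA-coherent block for the hardware descriptor ring, with the buffer array freed again if the descriptor allocation fails (the vfree at 3313-3314 above is that unwind). A hedged sketch of the same pattern; the txr_sketch/tx_buf_sketch/tx_desc_sketch names are illustrative stand-ins, not the driver's types, and later sketches in this listing reuse them:

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/vmalloc.h>
    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    struct tx_desc_sketch { u64 buffer_addr; u64 cmd_type_len; }; /* stand-in for the adv Tx descriptor */
    struct tx_buf_sketch  { struct sk_buff *skb; };               /* stand-in per-descriptor bookkeeping */

    struct txr_sketch {
        struct device *dev;                   /* device used for DMA mappings     */
        struct net_device *netdev;            /* owning netdev (for subqueue ops) */
        u16 queue_index;
        u16 count;                            /* number of descriptors            */
        struct tx_buf_sketch *tx_buffer_info; /* one entry per descriptor         */
        void *desc;                           /* DMA-coherent descriptor ring     */
        dma_addr_t dma;
        unsigned int size;
        u16 next_to_use, next_to_clean;
    };

    static int txr_setup(struct txr_sketch *r)
    {
        r->tx_buffer_info = vzalloc(sizeof(*r->tx_buffer_info) * r->count);
        if (!r->tx_buffer_info)
            return -ENOMEM;

        /* Round the descriptor block up to a 4K multiple, like the driver's ALIGN(). */
        r->size = ALIGN(r->count * sizeof(struct tx_desc_sketch), 4096);
        r->desc = dma_alloc_coherent(r->dev, r->size, &r->dma, GFP_KERNEL);
        if (!r->desc) {
            vfree(r->tx_buffer_info);   /* unwind the first allocation */
            r->tx_buffer_info = NULL;
            return -ENOMEM;
        }

        r->next_to_use = 0;
        r->next_to_clean = 0;
        return 0;
    }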
3332 err = igb_setup_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
3337 igb_free_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
3420 igb_configure_tx_ring(adapter, adapter->tx_ring[i]); in igb_configure_tx()
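igb_setup_all_tx_resources() runs the per-ring setup for every queue and, on failure, frees only the rings that were already set up; once every ring has resources, igb_configure_tx() programs each one into the hardware. A short sketch of that allocate-all-or-roll-back loop, reusing txr_setup() from the sketch above and txr_free() sketched further down:

    /* Hedged sketch of the "set up every queue, roll back on failure" loop. */
    static int setup_all_tx(struct txr_sketch **rings, int num)
    {
        int i, err = 0;

        for (i = 0; i < num; i++) {
            err = txr_setup(rings[i]);
            if (!err)
                continue;
            /* Roll back only the rings that were already set up. */
            while (--i >= 0)
                txr_free(rings[i]);   /* sketched below, after the free hits */
            break;
        }
        return err;
    }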
3791 void igb_free_tx_resources(struct igb_ring *tx_ring) in igb_free_tx_resources() argument
3793 igb_clean_tx_ring(tx_ring); in igb_free_tx_resources()
3795 vfree(tx_ring->tx_buffer_info); in igb_free_tx_resources()
3796 tx_ring->tx_buffer_info = NULL; in igb_free_tx_resources()
3799 if (!tx_ring->desc) in igb_free_tx_resources()
3802 dma_free_coherent(tx_ring->dev, tx_ring->size, in igb_free_tx_resources()
3803 tx_ring->desc, tx_ring->dma); in igb_free_tx_resources()
3805 tx_ring->desc = NULL; in igb_free_tx_resources()
3819 if (adapter->tx_ring[i]) in igb_free_all_tx_resources()
3820 igb_free_tx_resources(adapter->tx_ring[i]); in igb_free_all_tx_resources()
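The free path mirrors setup in reverse: reclaim any buffers still queued on the ring, release the software bookkeeping array, and only then hand the DMA-coherent descriptor block back (skipping that step if the block was never allocated). igb_free_all_tx_resources() simply applies this per ring, guarding against slots that were never populated. A hedged sketch of the teardown order, continuing the txr_sketch types from above:

    static void txr_free(struct txr_sketch *r)
    {
        txr_clean(r);   /* unmap/free queued buffers; sketched after the clean hits below */

        vfree(r->tx_buffer_info);
        r->tx_buffer_info = NULL;

        if (!r->desc)   /* descriptor ring may never have been allocated */
            return;

        dma_free_coherent(r->dev, r->size, r->desc, r->dma);
        r->desc = NULL;
    }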
3849 static void igb_clean_tx_ring(struct igb_ring *tx_ring) in igb_clean_tx_ring() argument
3855 if (!tx_ring->tx_buffer_info) in igb_clean_tx_ring()
3859 for (i = 0; i < tx_ring->count; i++) { in igb_clean_tx_ring()
3860 buffer_info = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_ring()
3861 igb_unmap_and_free_tx_resource(tx_ring, buffer_info); in igb_clean_tx_ring()
3864 netdev_tx_reset_queue(txring_txq(tx_ring)); in igb_clean_tx_ring()
3866 size = sizeof(struct igb_tx_buffer) * tx_ring->count; in igb_clean_tx_ring()
3867 memset(tx_ring->tx_buffer_info, 0, size); in igb_clean_tx_ring()
3870 memset(tx_ring->desc, 0, tx_ring->size); in igb_clean_tx_ring()
3872 tx_ring->next_to_use = 0; in igb_clean_tx_ring()
3873 tx_ring->next_to_clean = 0; in igb_clean_tx_ring()
3885 if (adapter->tx_ring[i]) in igb_clean_all_tx_rings()
3886 igb_clean_tx_ring(adapter->tx_ring[i]); in igb_clean_all_tx_rings()
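igb_clean_tx_ring() drops every buffer still attached to the ring, resets the byte-queue-limit accounting for the queue (the netdev_tx_reset_queue() hit at 3864), and zeroes both the bookkeeping array and the descriptor memory so the ring starts out empty again. A hedged sketch of that pattern, again building on the txr_sketch types; the driver also unmaps DMA per buffer, which this sketch only notes in a comment:

    static void txr_clean(struct txr_sketch *r)
    {
        u16 i;

        if (!r->tx_buffer_info)
            return;   /* nothing was ever allocated */

        for (i = 0; i < r->count; i++) {
            /* The driver unmaps the DMA address and frees the skb here;
             * this sketch only drops the skb reference. */
            if (r->tx_buffer_info[i].skb) {
                dev_kfree_skb_any(r->tx_buffer_info[i].skb);
                r->tx_buffer_info[i].skb = NULL;
            }
        }

        /* The driver resets BQL here via netdev_tx_reset_queue(txring_txq(ring)). */

        memset(r->tx_buffer_info, 0, sizeof(*r->tx_buffer_info) * r->count);
        memset(r->desc, 0, r->size);   /* hardware sees an all-zero ring */

        r->next_to_use = 0;
        r->next_to_clean = 0;
    }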
4621 struct igb_ring *tx_ring = adapter->tx_ring[i]; in igb_watchdog_task() local
4628 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { in igb_watchdog_task()
4637 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_watchdog_task()
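The watchdog arms hang detection only on rings that still have work outstanding: igb_desc_unused() + 1 < count means at least one descriptor is in flight, and the TX_DETECT_HANG flag tells the next igb_clean_tx_irq() pass (see the 674x hits below) to check whether the hardware head pointer has stalled. A plain-C model of the occupancy test; the helper names are illustrative:

    #include <stdbool.h>

    /* Free-slot count behind the igb_desc_unused() calls in this listing:
     * one slot is always kept free so the producer never meets the consumer. */
    static inline unsigned int desc_unused(unsigned int count,
                                           unsigned int next_to_use,
                                           unsigned int next_to_clean)
    {
        if (next_to_clean > next_to_use)
            return next_to_clean - next_to_use - 1;
        return count + next_to_clean - next_to_use - 1;
    }

    /* Work is outstanding when fewer than count - 1 descriptors are free. */
    static inline bool ring_has_pending_tx(unsigned int count,
                                           unsigned int ntu, unsigned int ntc)
    {
        return desc_unused(count, ntu, ntc) + 1 < count;
    }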
4878 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, in igb_tx_ctxtdesc() argument
4882 u16 i = tx_ring->next_to_use; in igb_tx_ctxtdesc()
4884 context_desc = IGB_TX_CTXTDESC(tx_ring, i); in igb_tx_ctxtdesc()
4887 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igb_tx_ctxtdesc()
4893 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_ctxtdesc()
4894 mss_l4len_idx |= tx_ring->reg_idx << 4; in igb_tx_ctxtdesc()
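igb_tx_ctxtdesc() consumes one slot at next_to_use for an offload context descriptor and advances the producer index with wrap-around; on rings flagged TX_CTX_IDX the ring's register index is also shifted into the descriptor's IDX field (reg_idx << 4). A small model of the advance-with-wrap step:

    /* One descriptor consumed: bump the producer index and wrap it back to
     * slot 0 when it runs off the end of the ring. */
    static unsigned int ring_advance_ntu(unsigned int next_to_use,
                                         unsigned int count)
    {
        unsigned int i = next_to_use + 1;

        return (i < count) ? i : 0;
    }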
4902 static int igb_tso(struct igb_ring *tx_ring, in igb_tso() argument
4982 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); in igb_tso()
4996 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) in igb_tx_csum() argument
5037 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); in igb_tx_csum()
5070 static void igb_tx_olinfo_status(struct igb_ring *tx_ring, in igb_tx_olinfo_status() argument
5077 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_olinfo_status()
5078 olinfo_status |= tx_ring->reg_idx << 4; in igb_tx_olinfo_status()
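Both offload paths funnel into the same context-descriptor helper: igb_tso() emits one with segmentation fields populated, igb_tx_csum() emits one for checksum offload only, and igb_tx_olinfo_status() repeats the reg_idx << 4 trick in the data descriptor's status word so the hardware pairs data descriptors with the right context slot. An illustrative model of that conditional fold-in; the flag bit and helper name are hypothetical:

    #define SKETCH_FLAG_TX_CTX_IDX  0x1UL

    static unsigned int fold_ctx_index(unsigned int olinfo_status,
                                       unsigned long ring_flags,
                                       unsigned int reg_idx)
    {
        if (ring_flags & SKETCH_FLAG_TX_CTX_IDX)
            olinfo_status |= reg_idx << 4;   /* IDX field begins at bit 4 */
        return olinfo_status;
    }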
5093 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) in __igb_maybe_stop_tx() argument
5095 struct net_device *netdev = tx_ring->netdev; in __igb_maybe_stop_tx()
5097 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
5108 if (igb_desc_unused(tx_ring) < size) in __igb_maybe_stop_tx()
5112 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
5114 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
5115 tx_ring->tx_stats.restart_queue2++; in __igb_maybe_stop_tx()
5116 u64_stats_update_end(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
5121 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) in igb_maybe_stop_tx() argument
5123 if (igb_desc_unused(tx_ring) >= size) in igb_maybe_stop_tx()
5125 return __igb_maybe_stop_tx(tx_ring, size); in igb_maybe_stop_tx()
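The two-level stop helper is the usual lockless flow-control shape: the inline fast path only compares free descriptors against the worst-case need, while the slow path stops the subqueue first, re-checks after a memory barrier so a racing completion cannot be missed, and wakes the queue again (counting a restart) if space appeared in the meantime. A hedged sketch of that shape, assuming <linux/netdevice.h>, the txr_sketch type from the allocation sketch above, and a txr_desc_unused() wrapper over the desc_unused() arithmetic modeled earlier:

    static int txr_maybe_stop(struct txr_sketch *r, u16 needed)
    {
        if (likely(txr_desc_unused(r) >= needed))
            return 0;   /* fast path: plenty of room */

        netif_stop_subqueue(r->netdev, r->queue_index);

        /* Make the stopped state visible before re-reading the indices;
         * the driver pairs this with a barrier on the completion side. */
        smp_mb();

        if (txr_desc_unused(r) < needed)
            return -EBUSY;   /* genuinely full, stay stopped */

        /* A completion freed space between the check and the stop. */
        netif_wake_subqueue(r->netdev, r->queue_index);
        return 0;
    }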
5128 static void igb_tx_map(struct igb_ring *tx_ring, in igb_tx_map() argument
5140 u16 i = tx_ring->next_to_use; in igb_tx_map()
5142 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_tx_map()
5144 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igb_tx_map()
5149 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igb_tx_map()
5154 if (dma_mapping_error(tx_ring->dev, dma)) in igb_tx_map()
5169 if (i == tx_ring->count) { in igb_tx_map()
5170 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_tx_map()
5188 if (i == tx_ring->count) { in igb_tx_map()
5189 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_tx_map()
5197 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igb_tx_map()
5200 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
5207 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igb_tx_map()
5225 if (i == tx_ring->count) in igb_tx_map()
5228 tx_ring->next_to_use = i; in igb_tx_map()
5231 igb_maybe_stop_tx(tx_ring, DESC_NEEDED); in igb_tx_map()
5233 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { in igb_tx_map()
5234 writel(i, tx_ring->tail); in igb_tx_map()
5244 dev_err(tx_ring->dev, "TX DMA map failed\n"); in igb_tx_map()
5248 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
5249 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); in igb_tx_map()
5253 i = tx_ring->count; in igb_tx_map()
5257 tx_ring->next_to_use = i; in igb_tx_map()
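igb_tx_map() is the producer hot path: map the linear part of the skb, walk the fragments mapping each one, wrap the descriptor pointer back to slot 0 whenever the index reaches count, account the bytes with BQL (netdev_tx_sent_queue), publish next_to_use, and only then write the tail register; if any DMA mapping fails, it walks backwards from the failure point, unmapping and freeing everything down to the first buffer of the frame. A compact model of just the two index movements (names are illustrative):

    /* Forward with wrap while filling descriptors. */
    static unsigned int map_step_forward(unsigned int i, unsigned int count)
    {
        i++;
        return (i == count) ? 0 : i;   /* wrap to slot 0 past the end */
    }

    /* Backward with wrap while unwinding after a DMA mapping error. */
    static unsigned int map_step_backward(unsigned int i, unsigned int count)
    {
        if (i == 0)
            i = count;   /* wrap back to the top of the ring */
        return i - 1;
    }

The error path applies map_step_backward() repeatedly, unmapping each buffer, until it reaches the buffer recorded as the start of the frame, then republishes next_to_use at that point.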
5261 struct igb_ring *tx_ring) in igb_xmit_frame_ring() argument
5280 if (igb_maybe_stop_tx(tx_ring, count + 3)) { in igb_xmit_frame_ring()
5286 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igb_xmit_frame_ring()
5292 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); in igb_xmit_frame_ring()
5317 tso = igb_tso(tx_ring, first, &hdr_len); in igb_xmit_frame_ring()
5321 igb_tx_csum(tx_ring, first); in igb_xmit_frame_ring()
5323 igb_tx_map(tx_ring, first, hdr_len); in igb_xmit_frame_ring()
5328 igb_unmap_and_free_tx_resource(tx_ring, first); in igb_xmit_frame_ring()
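The transmit entry point reserves worst-case space up front: the counted data descriptors plus slack for a context descriptor and a head/tail gap (the count + 3 at 5280), records the first buffer slot so errors can be unwound to a single point, then tries TSO, falls back to checksum offload, and finally maps and rings the doorbell. A hedged outline of that ordering; do_tso/do_tx_csum/do_tx_map/unmap_and_free are illustrative stand-ins for the igb helpers, and NETDEV_TX_* come from <linux/netdevice.h>:

    static netdev_tx_t xmit_frame_sketch(struct txr_sketch *r, struct sk_buff *skb,
                                         u16 desc_count)
    {
        struct tx_buf_sketch *first;
        int tso;

        /* Reserve data descriptors plus slack for a context descriptor. */
        if (txr_maybe_stop(r, desc_count + 3))
            return NETDEV_TX_BUSY;

        first = &r->tx_buffer_info[r->next_to_use];
        first->skb = skb;

        tso = do_tso(r, first);      /* > 0: TSO context descriptor written */
        if (tso < 0)
            goto out_drop;
        else if (!tso)
            do_tx_csum(r, first);    /* no TSO: maybe a checksum context    */

        do_tx_map(r, first);         /* map, fill descriptors, bump tail    */
        return NETDEV_TX_OK;

    out_drop:
        /* Unwind everything attached to the first buffer slot. */
        unmap_and_free(r, first);
        return NETDEV_TX_OK;
    }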
5341 return adapter->tx_ring[r_idx]; in igb_tx_queue_mapping()
5510 struct igb_ring *ring = adapter->tx_ring[i]; in igb_update_stats()
5795 struct igb_ring *tx_ring, in igb_update_tx_dca() argument
5799 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); in igb_update_tx_dca()
5812 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); in igb_update_tx_dca()
6643 struct igb_ring *tx_ring = q_vector->tx.ring; in igb_clean_tx_irq() local
6648 unsigned int i = tx_ring->next_to_clean; in igb_clean_tx_irq()
6653 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_irq()
6654 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_clean_tx_irq()
6655 i -= tx_ring->count; in igb_clean_tx_irq()
6682 dma_unmap_single(tx_ring->dev, in igb_clean_tx_irq()
6697 i -= tx_ring->count; in igb_clean_tx_irq()
6698 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
6699 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_clean_tx_irq()
6704 dma_unmap_page(tx_ring->dev, in igb_clean_tx_irq()
6717 i -= tx_ring->count; in igb_clean_tx_irq()
6718 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
6719 tx_desc = IGB_TX_DESC(tx_ring, 0); in igb_clean_tx_irq()
6729 netdev_tx_completed_queue(txring_txq(tx_ring), in igb_clean_tx_irq()
6731 i += tx_ring->count; in igb_clean_tx_irq()
6732 tx_ring->next_to_clean = i; in igb_clean_tx_irq()
6733 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
6734 tx_ring->tx_stats.bytes += total_bytes; in igb_clean_tx_irq()
6735 tx_ring->tx_stats.packets += total_packets; in igb_clean_tx_irq()
6736 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
6740 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igb_clean_tx_irq()
6746 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_clean_tx_irq()
6753 dev_err(tx_ring->dev, in igb_clean_tx_irq()
6765 tx_ring->queue_index, in igb_clean_tx_irq()
6766 rd32(E1000_TDH(tx_ring->reg_idx)), in igb_clean_tx_irq()
6767 readl(tx_ring->tail), in igb_clean_tx_irq()
6768 tx_ring->next_to_use, in igb_clean_tx_irq()
6769 tx_ring->next_to_clean, in igb_clean_tx_irq()
6774 netif_stop_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
6775 tx_ring->queue_index); in igb_clean_tx_irq()
6784 netif_carrier_ok(tx_ring->netdev) && in igb_clean_tx_irq()
6785 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { in igb_clean_tx_irq()
6790 if (__netif_subqueue_stopped(tx_ring->netdev, in igb_clean_tx_irq()
6791 tx_ring->queue_index) && in igb_clean_tx_irq()
6793 netif_wake_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
6794 tx_ring->queue_index); in igb_clean_tx_irq()
6796 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
6797 tx_ring->tx_stats.restart_queue++; in igb_clean_tx_irq()
6798 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
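igb_clean_tx_irq() uses an index trick worth calling out: it biases the loop index by subtracting count up front (the i -= tx_ring->count at 6655), so hitting the end of the ring is simply the index reaching zero and the wrap costs one subtraction instead of a compare against count for every descriptor; the i += count at 6731 converts it back into a normal next_to_clean. The function then reports completed work to BQL, updates the ring counters under the u64_stats seqcount, runs the hang check armed by the watchdog (reading TDH for the ring's reg_idx and stopping the subqueue if the head has stalled), and wakes the subqueue once enough descriptors are free again. A compact model of the biased-index walk; the name and signature are illustrative:

    /* The index runs from (next_to_clean - count) up toward 0, so the wrap
     * test is simply "!i"; adding count back at the end un-biases it. */
    static int clean_walk_sketch(int next_to_clean, int count, int completed)
    {
        int i = next_to_clean - count;   /* bias: always negative or zero */

        while (completed--) {
            i++;            /* consume one descriptor          */
            if (!i)         /* hit the end of the ring...      */
                i -= count; /* ...wrap by re-biasing           */
        }

        return i + count;   /* un-bias: the new next_to_clean  */
    }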