Lines Matching refs:rx_ring
576 struct ixgbe_ring *rx_ring; in ixgbe_dump() local
715 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
717 n, rx_ring->next_to_use, rx_ring->next_to_clean); in ixgbe_dump()
772 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
774 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in ixgbe_dump()
785 for (i = 0; i < rx_ring->count; i++) { in ixgbe_dump()
788 if (i == rx_ring->next_to_use) in ixgbe_dump()
790 else if (i == rx_ring->next_to_clean) in ixgbe_dump()
795 rx_buffer_info = &rx_ring->rx_buffer_info[i]; in ixgbe_dump()
796 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_dump()
821 ixgbe_rx_bufsz(rx_ring), true); in ixgbe_dump()
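The ixgbe_dump() hits above (lines 576-821) come from the debug routine that prints every RX queue: it records next_to_use and next_to_clean per ring, then walks all rx_ring->count slots, tagging the two cursor positions before dumping each rx_buffer_info entry and descriptor. A toy standalone walk, with made-up index values and output format (not the driver's), shows the tagging logic:

#include <stdio.h>

int main(void)
{
	unsigned int count = 8, next_to_use = 5, next_to_clean = 2;

	for (unsigned int i = 0; i < count; i++) {
		const char *mark = "";

		if (i == next_to_use)
			mark = " <-- NTU";	/* next slot handed to hardware */
		else if (i == next_to_clean)
			mark = " <-- NTC";	/* next slot reaped by software */

		/* the real dump also prints rx_buffer_info[i] and the descriptor words */
		printf("R [0x%03x]%s\n", i, mark);
	}
	return 0;
}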
1312 struct ixgbe_ring *rx_ring, in ixgbe_update_rx_dca() argument
1317 u8 reg_idx = rx_ring->reg_idx; in ixgbe_update_rx_dca()
1320 rxctrl = dca3_get_tag(rx_ring->dev, cpu); in ixgbe_update_rx_dca()
1525 static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) in ixgbe_rx_offset() argument
1527 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0; in ixgbe_rx_offset()
1530 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, in ixgbe_alloc_mapped_page() argument
1541 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1543 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1548 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbe_alloc_mapped_page()
1549 ixgbe_rx_pg_size(rx_ring), in ixgbe_alloc_mapped_page()
1557 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbe_alloc_mapped_page()
1558 __free_pages(page, ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1560 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1566 bi->page_offset = ixgbe_rx_offset(rx_ring); in ixgbe_alloc_mapped_page()
1569 rx_ring->rx_stats.alloc_rx_page++; in ixgbe_alloc_mapped_page()
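The ixgbe_alloc_mapped_page() lines (1541-1569) follow the standard allocate, map, verify sequence: allocate a page of the ring's page order, DMA-map it for receive, and if dma_mapping_error() reports a failure, free the page and count alloc_rx_page_failed. A kernel-style sketch of just that sequence, assuming a generic device and order; the helper name is made up, and the driver's DMA attribute mask and statistics bookkeeping are left out:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>	/* dev_alloc_pages() */

/* Illustrative helper, not driver code: returns the mapped page or NULL. */
static struct page *rx_alloc_mapped_page(struct device *dev, unsigned int order,
					 dma_addr_t *dma_out)
{
	struct page *page = dev_alloc_pages(order);
	dma_addr_t dma;

	if (!page)
		return NULL;		/* caller counts alloc_rx_page_failed */

	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE << order,
				 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(dev, dma)) {
		__free_pages(page, order);
		return NULL;		/* mapping failed: give the page back */
	}

	*dma_out = dma;
	return page;
}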
1579 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) in ixgbe_alloc_rx_buffers() argument
1583 u16 i = rx_ring->next_to_use; in ixgbe_alloc_rx_buffers()
1590 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_alloc_rx_buffers()
1591 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_alloc_rx_buffers()
1592 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1594 bufsz = ixgbe_rx_bufsz(rx_ring); in ixgbe_alloc_rx_buffers()
1597 if (!ixgbe_alloc_mapped_page(rx_ring, bi)) in ixgbe_alloc_rx_buffers()
1601 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbe_alloc_rx_buffers()
1615 rx_desc = IXGBE_RX_DESC(rx_ring, 0); in ixgbe_alloc_rx_buffers()
1616 bi = rx_ring->rx_buffer_info; in ixgbe_alloc_rx_buffers()
1617 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1626 i += rx_ring->count; in ixgbe_alloc_rx_buffers()
1628 if (rx_ring->next_to_use != i) { in ixgbe_alloc_rx_buffers()
1629 rx_ring->next_to_use = i; in ixgbe_alloc_rx_buffers()
1632 rx_ring->next_to_alloc = i; in ixgbe_alloc_rx_buffers()
1640 writel(i, rx_ring->tail); in ixgbe_alloc_rx_buffers()
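ixgbe_alloc_rx_buffers() (lines 1583-1640) keeps its loop counter biased by -count: i starts at next_to_use, has rx_ring->count subtracted up front so the wrap test is a plain "i == 0", and gets the count added back before next_to_use, next_to_alloc and the tail register are updated. A self-contained userspace model of that index arithmetic, with an invented struct and function name, shows the bias trick producing the expected final index:

#include <stdio.h>

struct ring {
	unsigned short count;		/* number of descriptors          */
	unsigned short next_to_use;	/* first slot to give to hardware */
	unsigned short next_to_alloc;	/* last slot with a live page     */
};

static void ring_refill(struct ring *r, unsigned short cleaned_count)
{
	unsigned short i = r->next_to_use;

	if (!cleaned_count)
		return;

	i -= r->count;			/* bias: i now "negative" in u16 terms */

	do {
		/* ...allocate and map one page for this slot (lines 1597-1601)... */
		i++;
		if (!i)			/* walked past the last slot */
			i -= r->count;	/* rewind to slot 0, still biased */
		cleaned_count--;
	} while (cleaned_count);

	i += r->count;			/* undo the bias */

	if (r->next_to_use != i) {
		r->next_to_use = i;
		r->next_to_alloc = i;
		/* the driver then does writel(i, rx_ring->tail) */
	}
}

int main(void)
{
	struct ring r = { .count = 512, .next_to_use = 500, .next_to_alloc = 500 };

	ring_refill(&r, 20);
	printf("next_to_use = %u\n", (unsigned int)r.next_to_use);	/* prints 8 */
	return 0;
}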
1655 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, in ixgbe_update_rsc_stats() argument
1662 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; in ixgbe_update_rsc_stats()
1663 rx_ring->rx_stats.rsc_flush++; in ixgbe_update_rsc_stats()
1665 ixgbe_set_rsc_gso_size(rx_ring, skb); in ixgbe_update_rsc_stats()
1681 void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, in ixgbe_process_skb_fields() argument
1685 struct net_device *dev = rx_ring->netdev; in ixgbe_process_skb_fields()
1686 u32 flags = rx_ring->q_vector->adapter->flags; in ixgbe_process_skb_fields()
1688 ixgbe_update_rsc_stats(rx_ring, skb); in ixgbe_process_skb_fields()
1690 ixgbe_rx_hash(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1692 ixgbe_rx_checksum(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1695 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1704 ixgbe_ipsec_rx(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1708 skb_record_rx_queue(skb, rx_ring->queue_index); in ixgbe_process_skb_fields()
1733 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, in ixgbe_is_non_eop() argument
1737 u32 ntc = rx_ring->next_to_clean + 1; in ixgbe_is_non_eop()
1740 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbe_is_non_eop()
1741 rx_ring->next_to_clean = ntc; in ixgbe_is_non_eop()
1743 prefetch(IXGBE_RX_DESC(rx_ring, ntc)); in ixgbe_is_non_eop()
1746 if (ring_is_rsc_enabled(rx_ring)) { in ixgbe_is_non_eop()
1768 rx_ring->rx_buffer_info[ntc].skb = skb; in ixgbe_is_non_eop()
1769 rx_ring->rx_stats.non_eop_descs++; in ixgbe_is_non_eop()
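ixgbe_is_non_eop() (lines 1737-1769) advances next_to_clean with a compare-and-reset rather than a modulo, prefetches the next descriptor, and, when the end-of-packet bit is not yet set, parks the partially built skb in rx_buffer_info[ntc].skb and bumps non_eop_descs. The same wrap idiom reappears for next_to_alloc in ixgbe_reuse_rx_page() (line 1933). A trivial standalone version of the index step (the function name is invented):

#include <stdio.h>

/* Advance a ring cursor by one slot, wrapping at 'count' without a modulo. */
static unsigned short ring_advance(unsigned short idx, unsigned short count)
{
	idx++;
	return (idx < count) ? idx : 0;
}

int main(void)
{
	printf("%u\n", (unsigned int)ring_advance(510, 512));	/* 511 */
	printf("%u\n", (unsigned int)ring_advance(511, 512));	/* 0   */
	return 0;
}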
1786 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, in ixgbe_pull_tail() argument
1826 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, in ixgbe_dma_sync_frag() argument
1829 if (ring_uses_build_skb(rx_ring)) { in ixgbe_dma_sync_frag()
1832 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1840 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1849 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
1850 ixgbe_rx_pg_size(rx_ring), in ixgbe_dma_sync_frag()
1878 bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, in ixgbe_cleanup_headers() argument
1882 struct net_device *netdev = rx_ring->netdev; in ixgbe_cleanup_headers()
1901 ixgbe_pull_tail(rx_ring, skb); in ixgbe_cleanup_headers()
1905 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) in ixgbe_cleanup_headers()
1923 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, in ixgbe_reuse_rx_page() argument
1927 u16 nta = rx_ring->next_to_alloc; in ixgbe_reuse_rx_page()
1929 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbe_reuse_rx_page()
1933 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbe_reuse_rx_page()
2002 static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, in ixgbe_add_rx_frag() argument
2008 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_add_rx_frag()
2010 unsigned int truesize = ring_uses_build_skb(rx_ring) ? in ixgbe_add_rx_frag()
2023 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, in ixgbe_get_rx_buffer() argument
2030 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_get_rx_buffer()
2043 ixgbe_dma_sync_frag(rx_ring, *skb); in ixgbe_get_rx_buffer()
2047 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_get_rx_buffer()
2058 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, in ixgbe_put_rx_buffer() argument
2064 ixgbe_reuse_rx_page(rx_ring, rx_buffer); in ixgbe_put_rx_buffer()
2071 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_put_rx_buffer()
2072 ixgbe_rx_pg_size(rx_ring), in ixgbe_put_rx_buffer()
2085 static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, in ixgbe_construct_skb() argument
2092 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_construct_skb()
2121 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); in ixgbe_construct_skb()
2146 static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, in ixgbe_build_skb() argument
2153 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_build_skb()
2197 struct ixgbe_ring *rx_ring, in ixgbe_run_xdp() argument
2206 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbe_run_xdp()
2236 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp()
2247 static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, in ixgbe_rx_buffer_flip() argument
2252 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_rx_buffer_flip()
2256 unsigned int truesize = ring_uses_build_skb(rx_ring) ? in ixgbe_rx_buffer_flip()
2278 struct ixgbe_ring *rx_ring, in ixgbe_clean_rx_irq() argument
2287 u16 cleaned_count = ixgbe_desc_unused(rx_ring); in ixgbe_clean_rx_irq()
2291 xdp.rxq = &rx_ring->xdp_rxq; in ixgbe_clean_rx_irq()
2301 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); in ixgbe_clean_rx_irq()
2305 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbe_clean_rx_irq()
2316 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); in ixgbe_clean_rx_irq()
2324 ixgbe_rx_offset(rx_ring); in ixgbe_clean_rx_irq()
2327 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); in ixgbe_clean_rx_irq()
2335 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); in ixgbe_clean_rx_irq()
2342 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); in ixgbe_clean_rx_irq()
2343 } else if (ring_uses_build_skb(rx_ring)) { in ixgbe_clean_rx_irq()
2344 skb = ixgbe_build_skb(rx_ring, rx_buffer, in ixgbe_clean_rx_irq()
2347 skb = ixgbe_construct_skb(rx_ring, rx_buffer, in ixgbe_clean_rx_irq()
2353 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbe_clean_rx_irq()
2358 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); in ixgbe_clean_rx_irq()
2362 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2366 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2373 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); in ixgbe_clean_rx_irq()
2377 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { in ixgbe_clean_rx_irq()
2382 mss = rx_ring->netdev->mtu - in ixgbe_clean_rx_irq()
2419 u64_stats_update_begin(&rx_ring->syncp); in ixgbe_clean_rx_irq()
2420 rx_ring->stats.packets += total_rx_packets; in ixgbe_clean_rx_irq()
2421 rx_ring->stats.bytes += total_rx_bytes; in ixgbe_clean_rx_irq()
2422 u64_stats_update_end(&rx_ring->syncp); in ixgbe_clean_rx_irq()
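The end of ixgbe_clean_rx_irq() (lines 2419-2422) brackets the per-ring packet and byte counters with u64_stats_update_begin()/u64_stats_update_end(), so the 64-bit totals stay consistent for readers such as the ndo_get_stats64 path that picks the ring up via READ_ONCE() at line 8924 (the seqcount itself is initialised at line 11034). A kernel-style sketch of that writer/reader pairing, with invented structure and function names; the _irq fetch helpers match the kernel generation this listing appears to come from, newer kernels drop the suffix:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* writer side: NAPI poll context, once per poll */
static void ring_stats_add(struct ring_stats *s, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += pkts;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}

/* reader side: retries if it raced with a writer */
static void ring_stats_read(struct ring_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}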
3698 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3701 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3708 struct ixgbe_ring *rx_ring) in ixgbe_configure_srrctl() argument
3712 u8 reg_idx = rx_ring->reg_idx; in ixgbe_configure_srrctl()
3728 if (rx_ring->xsk_umem) { in ixgbe_configure_srrctl()
3729 u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr - in ixgbe_configure_srrctl()
3744 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { in ixgbe_configure_srrctl()
4267 struct ixgbe_ring *rx_ring; in ixgbe_set_rx_buffer_len() local
4301 rx_ring = adapter->rx_ring[i]; in ixgbe_set_rx_buffer_len()
4303 clear_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
4304 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4305 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4308 set_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
4310 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) in ixgbe_set_rx_buffer_len()
4311 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4316 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4320 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4324 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4411 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); in ixgbe_configure_rx()
4519 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_disable()
4557 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_enable()
5261 adapter->rx_ring[action]->reg_idx); in ixgbe_fdir_filter_restore()
5271 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) in ixgbe_clean_rx_ring() argument
5273 u16 i = rx_ring->next_to_clean; in ixgbe_clean_rx_ring()
5274 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbe_clean_rx_ring()
5276 if (rx_ring->xsk_umem) { in ixgbe_clean_rx_ring()
5277 ixgbe_xsk_clean_rx_ring(rx_ring); in ixgbe_clean_rx_ring()
5282 while (i != rx_ring->next_to_alloc) { in ixgbe_clean_rx_ring()
5286 dma_unmap_page_attrs(rx_ring->dev, in ixgbe_clean_rx_ring()
5288 ixgbe_rx_pg_size(rx_ring), in ixgbe_clean_rx_ring()
5297 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_clean_rx_ring()
5300 ixgbe_rx_bufsz(rx_ring), in ixgbe_clean_rx_ring()
5304 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_clean_rx_ring()
5305 ixgbe_rx_pg_size(rx_ring), in ixgbe_clean_rx_ring()
5313 if (i == rx_ring->count) { in ixgbe_clean_rx_ring()
5315 rx_buffer = rx_ring->rx_buffer_info; in ixgbe_clean_rx_ring()
5320 rx_ring->next_to_alloc = 0; in ixgbe_clean_rx_ring()
5321 rx_ring->next_to_clean = 0; in ixgbe_clean_rx_ring()
5322 rx_ring->next_to_use = 0; in ixgbe_clean_rx_ring()
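ixgbe_clean_rx_ring() (lines 5273-5322) releases buffers by walking from next_to_clean up to, but not including, next_to_alloc, wrapping at rx_ring->count, syncing each page for the CPU and unmapping it, and finally zeroing all three ring cursors. A toy standalone version of that walk, with made-up ring values:

#include <stdio.h>

int main(void)
{
	unsigned short count = 8, next_to_clean = 6, next_to_alloc = 3;
	unsigned short i = next_to_clean;

	/* release every slot that currently owns a page */
	while (i != next_to_alloc) {
		/* driver: dma_sync_single_range_for_cpu(), then dma_unmap_page_attrs() */
		printf("release slot %u\n", (unsigned int)i);
		i++;
		if (i == count)
			i = 0;
	}

	/* the driver then clears next_to_alloc, next_to_clean and next_to_use (lines 5320-5322) */
	return 0;
}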
5347 adapter->rx_ring[baseq + i]->netdev = vdev; in ixgbe_fwd_ring_up()
5366 adapter->rx_ring[baseq + i]->netdev = NULL; in ixgbe_fwd_ring_up()
5738 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
5781 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
6035 ixgbe_clean_rx_ring(adapter->rx_ring[i]); in ixgbe_clean_all_rx_rings()
6520 struct ixgbe_ring *rx_ring) in ixgbe_setup_rx_resources() argument
6522 struct device *dev = rx_ring->dev; in ixgbe_setup_rx_resources()
6527 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; in ixgbe_setup_rx_resources()
6529 if (rx_ring->q_vector) in ixgbe_setup_rx_resources()
6530 ring_node = rx_ring->q_vector->numa_node; in ixgbe_setup_rx_resources()
6532 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); in ixgbe_setup_rx_resources()
6533 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
6534 rx_ring->rx_buffer_info = vmalloc(size); in ixgbe_setup_rx_resources()
6535 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
6539 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbe_setup_rx_resources()
6540 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbe_setup_rx_resources()
6543 rx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_rx_resources()
6544 rx_ring->size, in ixgbe_setup_rx_resources()
6545 &rx_ring->dma, in ixgbe_setup_rx_resources()
6548 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
6549 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in ixgbe_setup_rx_resources()
6550 &rx_ring->dma, GFP_KERNEL); in ixgbe_setup_rx_resources()
6551 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
6554 rx_ring->next_to_clean = 0; in ixgbe_setup_rx_resources()
6555 rx_ring->next_to_use = 0; in ixgbe_setup_rx_resources()
6558 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbe_setup_rx_resources()
6559 rx_ring->queue_index) < 0) in ixgbe_setup_rx_resources()
6562 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbe_setup_rx_resources()
6566 vfree(rx_ring->rx_buffer_info); in ixgbe_setup_rx_resources()
6567 rx_ring->rx_buffer_info = NULL; in ixgbe_setup_rx_resources()
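ixgbe_setup_rx_resources() (lines 6522-6567) makes two allocations per ring: the rx_buffer_info array via vmalloc_node() on the q_vector's NUMA node, falling back to plain vmalloc(), and the descriptor ring via dma_alloc_coherent(), sized as count * sizeof(union ixgbe_adv_rx_desc) and rounded up to 4096 bytes; the xdp_rxq is then registered, and the error path frees rx_buffer_info again. A standalone check of the sizing arithmetic, assuming the 16-byte advanced RX descriptor; the ALIGN_UP macro is a local stand-in for the kernel's ALIGN():

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define RX_DESC_SIZE	16UL	/* sizeof(union ixgbe_adv_rx_desc) */

int main(void)
{
	unsigned long counts[] = { 72, 512, 4096 };

	for (int i = 0; i < 3; i++) {
		unsigned long size = ALIGN_UP(counts[i] * RX_DESC_SIZE, 4096);

		printf("count=%4lu -> descriptor ring %6lu bytes\n", counts[i], size);
	}
	return 0;
}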
6587 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
6603 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
6654 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) in ixgbe_free_rx_resources() argument
6656 ixgbe_clean_rx_ring(rx_ring); in ixgbe_free_rx_resources()
6658 rx_ring->xdp_prog = NULL; in ixgbe_free_rx_resources()
6659 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbe_free_rx_resources()
6660 vfree(rx_ring->rx_buffer_info); in ixgbe_free_rx_resources()
6661 rx_ring->rx_buffer_info = NULL; in ixgbe_free_rx_resources()
6664 if (!rx_ring->desc) in ixgbe_free_rx_resources()
6667 dma_free_coherent(rx_ring->dev, rx_ring->size, in ixgbe_free_rx_resources()
6668 rx_ring->desc, rx_ring->dma); in ixgbe_free_rx_resources()
6670 rx_ring->desc = NULL; in ixgbe_free_rx_resources()
6688 if (adapter->rx_ring[i]->desc) in ixgbe_free_all_rx_resources()
6689 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_free_all_rx_resources()
6709 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_change_mtu()
7041 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; in ixgbe_update_stats()
7042 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; in ixgbe_update_stats()
7049 struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; in ixgbe_update_stats() local
7050 non_eop_descs += rx_ring->rx_stats.non_eop_descs; in ixgbe_update_stats()
7051 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbe_update_stats()
7052 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbe_update_stats()
7053 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbe_update_stats()
7054 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbe_update_stats()
7055 bytes += rx_ring->stats.bytes; in ixgbe_update_stats()
7056 packets += rx_ring->stats.packets; in ixgbe_update_stats()
8924 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); in ixgbe_get_stats64()
9251 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; in get_macvlan_queue()
10160 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; in ixgbe_fwd_del()
10234 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_xdp_setup()
10264 (void)xchg(&adapter->rx_ring[i]->xdp_prog, in ixgbe_xdp_setup()
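ixgbe_xdp_setup() (line 10264) publishes a new BPF program on each RX ring with xchg(), while the per-packet path in ixgbe_run_xdp() (line 2206) takes a single snapshot with READ_ONCE() and, on verdicts it cannot handle, calls trace_xdp_exception() (line 2236). A kernel-style sketch of that publish/consume pairing, with invented names (demo_ring, demo_swap_prog, demo_run_prog) rather than the driver's:

#include <linux/atomic.h>
#include <linux/filter.h>
#include <net/xdp.h>

struct demo_ring {
	struct bpf_prog *xdp_prog;
};

/* control path: install 'new' on one ring, return whatever was there before */
static struct bpf_prog *demo_swap_prog(struct demo_ring *ring,
				       struct bpf_prog *new)
{
	return xchg(&ring->xdp_prog, new);
}

/* hot path: take one stable snapshot of the pointer for this packet */
static u32 demo_run_prog(struct demo_ring *ring, struct xdp_buff *xdp)
{
	struct bpf_prog *prog = READ_ONCE(ring->xdp_prog);

	if (!prog)
		return XDP_PASS;

	return bpf_prog_run_xdp(prog, xdp);
}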
10437 struct ixgbe_ring *rx_ring) in ixgbe_disable_rxr_hw() argument
10441 u8 reg_idx = rx_ring->reg_idx; in ixgbe_disable_rxr_hw()
10481 static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) in ixgbe_reset_rxr_stats() argument
10483 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); in ixgbe_reset_rxr_stats()
10484 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); in ixgbe_reset_rxr_stats()
10497 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; in ixgbe_txrx_ring_disable() local
10499 rx_ring = adapter->rx_ring[ring]; in ixgbe_txrx_ring_disable()
10506 ixgbe_disable_rxr_hw(adapter, rx_ring); in ixgbe_txrx_ring_disable()
10512 napi_disable(&rx_ring->q_vector->napi); in ixgbe_txrx_ring_disable()
10517 ixgbe_clean_rx_ring(rx_ring); in ixgbe_txrx_ring_disable()
10522 ixgbe_reset_rxr_stats(rx_ring); in ixgbe_txrx_ring_disable()
10535 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; in ixgbe_txrx_ring_enable() local
10537 rx_ring = adapter->rx_ring[ring]; in ixgbe_txrx_ring_enable()
10542 napi_enable(&rx_ring->q_vector->napi); in ixgbe_txrx_ring_enable()
10547 ixgbe_configure_rx_ring(adapter, rx_ring); in ixgbe_txrx_ring_enable()
11034 u64_stats_init(&adapter->rx_ring[i]->syncp); in ixgbe_probe()