Lines matching refs: rx_ring
Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument"/"local" notes whether rx_ring is a function parameter or a local variable at that site.
115 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
508 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, in ixgbevf_process_skb_fields() argument
512 ixgbevf_rx_hash(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
513 ixgbevf_rx_checksum(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
517 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); in ixgbevf_process_skb_fields()
524 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
526 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ixgbevf_process_skb_fields()
530 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring, in ixgbevf_get_rx_buffer() argument
535 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbevf_get_rx_buffer()
539 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_get_rx_buffer()
550 static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring, in ixgbevf_put_rx_buffer() argument
556 ixgbevf_reuse_rx_page(rx_ring, rx_buffer); in ixgbevf_put_rx_buffer()
562 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbevf_put_rx_buffer()
563 ixgbevf_rx_pg_size(rx_ring), in ixgbevf_put_rx_buffer()
584 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, in ixgbevf_is_non_eop() argument
587 u32 ntc = rx_ring->next_to_clean + 1; in ixgbevf_is_non_eop()
590 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbevf_is_non_eop()
591 rx_ring->next_to_clean = ntc; in ixgbevf_is_non_eop()
593 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); in ixgbevf_is_non_eop()
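The matches in ixgbevf_is_non_eop() show the driver's canonical consumer-index advance: increment next_to_clean, wrap to zero with a compare rather than a modulo, store it back, and prefetch the descriptor it now points at. A minimal userspace sketch of the same wrap idiom follows; the ring type and names are illustrative, not the driver's.

#include <stdio.h>

struct ring {
        unsigned int next_to_clean;
        unsigned int count;
};

/* Advance the consumer index with a compare-based wrap, as the
 * matched lines in ixgbevf_is_non_eop() do; the compare avoids a
 * division and works for any ring size, not just powers of two. */
static unsigned int ring_advance(struct ring *r)
{
        unsigned int ntc = r->next_to_clean + 1;

        ntc = (ntc < r->count) ? ntc : 0;
        r->next_to_clean = ntc;
        return ntc;
}

int main(void)
{
        struct ring r = { .next_to_clean = 510, .count = 512 };

        for (int i = 0; i < 4; i++)
                printf("next_to_clean = %u\n", ring_advance(&r));
        /* prints 511, 0, 1, 2: the index wraps without a modulo */
        return 0;
}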
601 static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring) in ixgbevf_rx_offset() argument
603 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0; in ixgbevf_rx_offset()
606 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, in ixgbevf_alloc_mapped_page() argument
617 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring)); in ixgbevf_alloc_mapped_page()
619 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
624 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbevf_alloc_mapped_page()
625 ixgbevf_rx_pg_size(rx_ring), in ixgbevf_alloc_mapped_page()
631 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbevf_alloc_mapped_page()
632 __free_pages(page, ixgbevf_rx_pg_order(rx_ring)); in ixgbevf_alloc_mapped_page()
634 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
640 bi->page_offset = ixgbevf_rx_offset(rx_ring); in ixgbevf_alloc_mapped_page()
642 rx_ring->rx_stats.alloc_rx_page++; in ixgbevf_alloc_mapped_page()
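The ixgbevf_alloc_mapped_page() matches capture an allocate/map/unwind pattern: get a page, DMA-map it, and if the mapping fails, free the page and count the failure rather than leak it. A userspace model of that unwind; dma_map()/dma_mapping_error() below are stand-ins for the kernel's dma_map_page_attrs()/dma_mapping_error(), and the page size is assumed.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long alloc_rx_page_failed;

/* Stand-ins for the kernel DMA mapping helpers. */
static uint64_t dma_map(void *page) { return (uint64_t)(uintptr_t)page; }
static bool dma_mapping_error(uint64_t dma) { return dma == 0; }

/* Allocate a page and DMA-map it; on mapping failure, free the page
 * and bump the failure counter instead of leaking it, mirroring the
 * matched ixgbevf_alloc_mapped_page() lines. */
static bool alloc_mapped_page(void **pagep, uint64_t *dmap)
{
        void *page = malloc(4096);

        if (!page) {
                alloc_rx_page_failed++;
                return false;
        }

        *dmap = dma_map(page);
        if (dma_mapping_error(*dmap)) {
                free(page);                     /* unwind the allocation */
                alloc_rx_page_failed++;
                return false;
        }

        *pagep = page;
        return true;
}

int main(void)
{
        void *page = NULL;
        uint64_t dma;

        if (alloc_mapped_page(&page, &dma))
                printf("mapped at 0x%llx\n", (unsigned long long)dma);
        printf("failures so far: %lu\n", alloc_rx_page_failed);
        free(page);
        return 0;
}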
652 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, in ixgbevf_alloc_rx_buffers() argument
657 unsigned int i = rx_ring->next_to_use; in ixgbevf_alloc_rx_buffers()
660 if (!cleaned_count || !rx_ring->netdev) in ixgbevf_alloc_rx_buffers()
663 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); in ixgbevf_alloc_rx_buffers()
664 bi = &rx_ring->rx_buffer_info[i]; in ixgbevf_alloc_rx_buffers()
665 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
668 if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) in ixgbevf_alloc_rx_buffers()
672 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbevf_alloc_rx_buffers()
674 ixgbevf_rx_bufsz(rx_ring), in ixgbevf_alloc_rx_buffers()
686 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); in ixgbevf_alloc_rx_buffers()
687 bi = rx_ring->rx_buffer_info; in ixgbevf_alloc_rx_buffers()
688 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
697 i += rx_ring->count; in ixgbevf_alloc_rx_buffers()
699 if (rx_ring->next_to_use != i) { in ixgbevf_alloc_rx_buffers()
701 rx_ring->next_to_use = i; in ixgbevf_alloc_rx_buffers()
704 rx_ring->next_to_alloc = i; in ixgbevf_alloc_rx_buffers()
712 ixgbevf_write_tail(rx_ring, i); in ixgbevf_alloc_rx_buffers()
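The ixgbevf_alloc_rx_buffers() matches show an unusual index idiom: right after loading next_to_use, the loop does i -= rx_ring->count, so i runs as a negative offset from the ring's end and the wrap test collapses to "did i reach zero". After the loop, i += rx_ring->count restores the real index, which is stored to next_to_use and next_to_alloc and written to the hardware tail via ixgbevf_write_tail() only if it changed. A standalone sketch of just that arithmetic, with illustrative names and ring size:

#include <stdio.h>

#define RING_COUNT 8

/* Model of the refill-index idiom in ixgbevf_alloc_rx_buffers():
 * keep i as (position - count), a negative offset from the ring's
 * end, so wrap-around is simply "i reached zero". */
static unsigned int refill(unsigned int next_to_use, unsigned int budget)
{
        int i = (int)next_to_use - RING_COUNT;  /* negative offset */

        while (budget--) {
                printf("fill slot %d\n", i + RING_COUNT);
                i++;
                if (!i)                         /* walked off the end */
                        i -= RING_COUNT;        /* back to slot 0 */
        }
        return i + RING_COUNT;                  /* real index again */
}

int main(void)
{
        /* start near the end so the wrap is exercised:
         * fills slots 6, 7, 0, 1, 2 and returns 3 */
        printf("new next_to_use = %u\n", refill(6, 5));
        return 0;
}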
734 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, in ixgbevf_cleanup_headers() argument
745 struct net_device *netdev = rx_ring->netdev; in ixgbevf_cleanup_headers()
767 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, in ixgbevf_reuse_rx_page() argument
771 u16 nta = rx_ring->next_to_alloc; in ixgbevf_reuse_rx_page()
773 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbevf_reuse_rx_page()
777 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbevf_reuse_rx_page()
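ixgbevf_reuse_rx_page() is what makes the refill above cheap: when a received page can still be reused, its bookkeeping is copied into the slot at next_to_alloc, so the allocator finds a ready-mapped page instead of calling dev_alloc_pages() and dma_map_page_attrs() again. A compact model with an illustrative buffer struct (the driver's ixgbevf_rx_buffer also carries the DMA address, page offset, and a page-count bias):

#include <stdio.h>

struct rx_buffer {
        void *page;     /* illustrative; see note above */
};

struct rx_ring {
        struct rx_buffer buf[512];
        unsigned int next_to_alloc;
        unsigned int count;
};

/* Hand the old buffer's page to the next slot the refill path will
 * visit, then advance next_to_alloc with the usual compare-based
 * wrap, as in the matched ixgbevf_reuse_rx_page() lines. */
static void reuse_rx_page(struct rx_ring *r, struct rx_buffer *old)
{
        unsigned int nta = r->next_to_alloc;

        r->buf[nta] = *old;             /* transfer ownership */

        nta++;
        r->next_to_alloc = (nta < r->count) ? nta : 0;
}

int main(void)
{
        static struct rx_ring ring = { .next_to_alloc = 511, .count = 512 };
        char pg[4096];
        struct rx_buffer old = { .page = pg };

        reuse_rx_page(&ring, &old);
        printf("next_to_alloc = %u\n", ring.next_to_alloc);     /* 0 */
        return 0;
}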
834 static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, in ixgbevf_add_rx_frag() argument
840 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_add_rx_frag()
842 unsigned int truesize = ring_uses_build_skb(rx_ring) ? in ixgbevf_add_rx_frag()
856 struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring, in ixgbevf_construct_skb() argument
863 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_construct_skb()
892 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); in ixgbevf_construct_skb()
933 static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, in ixgbevf_build_skb() argument
940 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_build_skb()
1061 struct ixgbevf_ring *rx_ring, in ixgbevf_run_xdp() argument
1070 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbevf_run_xdp()
1080 xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_run_xdp()
1087 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbevf_run_xdp()
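The ixgbevf_run_xdp() matches show the per-packet XDP hook: the ring's program pointer is read once (READ_ONCE), XDP_TX verdicts go out on the xdp_ring paired with this RX queue by index, and unexpected verdicts are reported through trace_xdp_exception() before the frame is dropped. A userspace sketch of the verdict dispatch; the enum values follow the kernel's xdp_action ordering, while the strings and function name are illustrative:

#include <stdio.h>

/* Verdict values as in the kernel's enum xdp_action. */
enum xdp_action { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX };

static const char *handle_verdict(enum xdp_action act)
{
        switch (act) {
        case XDP_PASS:
                return "build an skb and hand it to the stack";
        case XDP_TX:
                return "transmit on the paired xdp_ring";
        case XDP_ABORTED:       /* trace_xdp_exception() in the driver */
        case XDP_DROP:
        default:
                return "recycle the buffer back to the ring";
        }
}

int main(void)
{
        printf("XDP_TX: %s\n", handle_verdict(XDP_TX));
        return 0;
}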
1098 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring, in ixgbevf_rx_buffer_flip() argument
1103 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_rx_buffer_flip()
1107 unsigned int truesize = ring_uses_build_skb(rx_ring) ? in ixgbevf_rx_buffer_flip()
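The repeated truesize = ixgbevf_rx_pg_size(rx_ring) / 2 lines encode the page-split scheme: on systems where PAGE_SIZE < 8192, each RX page is cut in half, and after a frame is consumed the buffer flips to the other half (in the upstream driver, ixgbevf_rx_buffer_flip() does this by XORing page_offset with truesize). On larger pages, truesize is instead derived from the padded buffer length, which is what the ring_uses_build_skb() ternaries compute. A runnable model of the flip, assuming 4 KiB pages:

#include <stdio.h>

#define PG_SIZE 4096u

int main(void)
{
        /* Half-page split behind the matched truesize lines: flipping
         * a buffer to the other half of its page is an XOR with
         * truesize, as in the upstream ixgbevf_rx_buffer_flip() on
         * PAGE_SIZE < 8192 systems. */
        unsigned int truesize = PG_SIZE / 2;
        unsigned int page_offset = 0;

        for (int i = 0; i < 4; i++) {
                printf("use bytes [%u, %u)\n",
                       page_offset, page_offset + truesize);
                page_offset ^= truesize;        /* 0 <-> 2048 */
        }
        return 0;
}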
1116 struct ixgbevf_ring *rx_ring, in ixgbevf_clean_rx_irq() argument
1121 u16 cleaned_count = ixgbevf_desc_unused(rx_ring); in ixgbevf_clean_rx_irq()
1122 struct sk_buff *skb = rx_ring->skb; in ixgbevf_clean_rx_irq()
1126 xdp.rxq = &rx_ring->xdp_rxq; in ixgbevf_clean_rx_irq()
1135 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); in ixgbevf_clean_rx_irq()
1139 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbevf_clean_rx_irq()
1150 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size); in ixgbevf_clean_rx_irq()
1158 ixgbevf_rx_offset(rx_ring); in ixgbevf_clean_rx_irq()
1161 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp); in ixgbevf_clean_rx_irq()
1167 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer, in ixgbevf_clean_rx_irq()
1175 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size); in ixgbevf_clean_rx_irq()
1176 } else if (ring_uses_build_skb(rx_ring)) { in ixgbevf_clean_rx_irq()
1177 skb = ixgbevf_build_skb(rx_ring, rx_buffer, in ixgbevf_clean_rx_irq()
1180 skb = ixgbevf_construct_skb(rx_ring, rx_buffer, in ixgbevf_clean_rx_irq()
1186 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbevf_clean_rx_irq()
1191 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb); in ixgbevf_clean_rx_irq()
1195 if (ixgbevf_is_non_eop(rx_ring, rx_desc)) in ixgbevf_clean_rx_irq()
1199 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { in ixgbevf_clean_rx_irq()
1212 ether_addr_equal(rx_ring->netdev->dev_addr, in ixgbevf_clean_rx_irq()
1219 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb); in ixgbevf_clean_rx_irq()
1231 rx_ring->skb = skb; in ixgbevf_clean_rx_irq()
1235 adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_clean_rx_irq()
1244 u64_stats_update_begin(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1245 rx_ring->stats.packets += total_rx_packets; in ixgbevf_clean_rx_irq()
1246 rx_ring->stats.bytes += total_rx_bytes; in ixgbevf_clean_rx_irq()
1247 u64_stats_update_end(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
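The tail of ixgbevf_clean_rx_irq() shows the NAPI bookkeeping pattern: packet and byte totals are accumulated in locals during the poll loop and folded into rx_ring->stats exactly once, inside a u64_stats_update_begin()/end() pair so 32-bit readers get a consistent 64-bit snapshot. A sketch of that shape; the loop body and frame size are placeholders, and the seqcount protection is elided:

#include <stdio.h>

struct ring_stats {
        unsigned long long packets, bytes;
};

/* Accumulate locally, flush once: the shape of the matched
 * ixgbevf_clean_rx_irq() lines, minus descriptors and skbs. In the
 * driver, the final flush sits between u64_stats_update_begin()
 * and u64_stats_update_end(). */
static int clean_rx_irq(struct ring_stats *stats, int budget)
{
        unsigned int total_rx_packets = 0;
        unsigned long long total_rx_bytes = 0;

        while (total_rx_packets < (unsigned int)budget) {
                /* ...fetch descriptor, run XDP, build skb... */
                total_rx_bytes += 1500;         /* placeholder size */
                total_rx_packets++;
        }

        stats->packets += total_rx_packets;     /* one flush per poll */
        stats->bytes += total_rx_bytes;
        return total_rx_packets;
}

int main(void)
{
        struct ring_stats stats = { 0 };

        printf("cleaned %d packets\n", clean_rx_irq(&stats, 64));
        printf("ring totals: %llu pkts, %llu bytes\n",
               stats.packets, stats.bytes);
        return 0;
}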
1967 struct ixgbevf_ring *rx_ring) in ixgbevf_set_rx_buffer_len() argument
1973 clear_ring_build_skb_enabled(rx_ring); in ixgbevf_set_rx_buffer_len()
1974 clear_ring_uses_large_buffer(rx_ring); in ixgbevf_set_rx_buffer_len()
1979 set_ring_build_skb_enabled(rx_ring); in ixgbevf_set_rx_buffer_len()
1985 set_ring_uses_large_buffer(rx_ring); in ixgbevf_set_rx_buffer_len()
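The clear/set pairs matched in ixgbevf_set_rx_buffer_len() pick the RX buffer scheme per ring: both flags are cleared first, legacy-rx mode keeps them clear, otherwise build_skb is enabled, and a frame too large for a half-page buffer on 4 KiB-page systems additionally selects 3K "large" buffers. A model of that decision; the threshold constant is a hypothetical stand-in for the driver's IXGBEVF_MAX_FRAME_BUILD_SKB, and the PAGE_SIZE guard is folded away:

#include <stdbool.h>
#include <stdio.h>

#define MAX_FRAME_BUILD_SKB 1536u       /* hypothetical stand-in */

struct ring_flags {
        bool build_skb;
        bool large_buffer;
};

/* Mirror of the clear/set sequence in the matched lines: clear both
 * flags, bail out for legacy-rx, enable build_skb, and fall back to
 * large (3K) buffers only when the frame outgrows a half page. */
static void set_rx_buffer_len(struct ring_flags *f, bool legacy_rx,
                              unsigned int max_frame)
{
        f->build_skb = false;           /* clear_ring_build_skb_enabled */
        f->large_buffer = false;        /* clear_ring_uses_large_buffer */

        if (legacy_rx)
                return;

        f->build_skb = true;            /* set_ring_build_skb_enabled */

        if (max_frame > MAX_FRAME_BUILD_SKB)
                f->large_buffer = true; /* set_ring_uses_large_buffer */
}

int main(void)
{
        struct ring_flags f;

        set_rx_buffer_len(&f, false, 9000);     /* jumbo frame */
        printf("build_skb=%d large_buffer=%d\n", f.build_skb, f.large_buffer);
        return 0;
}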
2017 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_configure_rx() local
2019 ixgbevf_set_rx_buffer_len(adapter, rx_ring); in ixgbevf_configure_rx()
2020 ixgbevf_configure_rx_ring(adapter, rx_ring); in ixgbevf_configure_rx()
2332 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) in ixgbevf_clean_rx_ring() argument
2334 u16 i = rx_ring->next_to_clean; in ixgbevf_clean_rx_ring()
2337 if (rx_ring->skb) { in ixgbevf_clean_rx_ring()
2338 dev_kfree_skb(rx_ring->skb); in ixgbevf_clean_rx_ring()
2339 rx_ring->skb = NULL; in ixgbevf_clean_rx_ring()
2343 while (i != rx_ring->next_to_alloc) { in ixgbevf_clean_rx_ring()
2346 rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbevf_clean_rx_ring()
2351 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_clean_rx_ring()
2354 ixgbevf_rx_bufsz(rx_ring), in ixgbevf_clean_rx_ring()
2358 dma_unmap_page_attrs(rx_ring->dev, in ixgbevf_clean_rx_ring()
2360 ixgbevf_rx_pg_size(rx_ring), in ixgbevf_clean_rx_ring()
2368 if (i == rx_ring->count) in ixgbevf_clean_rx_ring()
2372 rx_ring->next_to_alloc = 0; in ixgbevf_clean_rx_ring()
2373 rx_ring->next_to_clean = 0; in ixgbevf_clean_rx_ring()
2374 rx_ring->next_to_use = 0; in ixgbevf_clean_rx_ring()
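ixgbevf_clean_rx_ring() walks only the populated span of the ring, from next_to_clean up to next_to_alloc with a wrap at count, releasing each buffer along the way (CPU-sync, unmap, and page release in the driver), then zeroes all three indices so the ring starts out empty. The wrap-walk in isolation, with an illustrative ring size:

#include <stdio.h>

#define RING_COUNT 8

/* Walk the populated region of the ring the way the matched
 * ixgbevf_clean_rx_ring() loop does: next_to_clean up to
 * next_to_alloc, wrapping at the ring size, then reset. */
int main(void)
{
        unsigned int next_to_clean = 6, next_to_alloc = 3;
        unsigned int i = next_to_clean;

        while (i != next_to_alloc) {
                printf("release buffer %u\n", i);  /* sync, unmap, free */
                i++;
                if (i == RING_COUNT)
                        i = 0;
        }

        /* ring is empty: next_to_alloc/clean/use all return to 0 */
        next_to_clean = next_to_alloc = 0;
        printf("indices reset to %u\n", next_to_clean);
        return 0;
}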
2448 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); in ixgbevf_clean_all_rx_rings()
2477 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbevf_down()
2802 adapter->rx_ring[rxr_idx] = ring; in ixgbevf_alloc_q_vector()
2837 adapter->rx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
3131 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_update_stats() local
3133 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbevf_update_stats()
3134 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbevf_update_stats()
3135 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbevf_update_stats()
3136 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbevf_update_stats()
3467 struct ixgbevf_ring *rx_ring) in ixgbevf_setup_rx_resources() argument
3471 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; in ixgbevf_setup_rx_resources()
3472 rx_ring->rx_buffer_info = vmalloc(size); in ixgbevf_setup_rx_resources()
3473 if (!rx_ring->rx_buffer_info) in ixgbevf_setup_rx_resources()
3476 u64_stats_init(&rx_ring->syncp); in ixgbevf_setup_rx_resources()
3479 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbevf_setup_rx_resources()
3480 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbevf_setup_rx_resources()
3482 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, in ixgbevf_setup_rx_resources()
3483 &rx_ring->dma, GFP_KERNEL); in ixgbevf_setup_rx_resources()
3485 if (!rx_ring->desc) in ixgbevf_setup_rx_resources()
3489 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbevf_setup_rx_resources()
3490 rx_ring->queue_index) < 0) in ixgbevf_setup_rx_resources()
3493 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbevf_setup_rx_resources()
3497 vfree(rx_ring->rx_buffer_info); in ixgbevf_setup_rx_resources()
3498 rx_ring->rx_buffer_info = NULL; in ixgbevf_setup_rx_resources()
3499 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); in ixgbevf_setup_rx_resources()
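The sizing lines in ixgbevf_setup_rx_resources() compute the descriptor area as count * sizeof(union ixgbe_adv_rx_desc) rounded up to a 4096-byte boundary before dma_alloc_coherent(), so the coherent mapping always covers whole pages. The sketch below reproduces the arithmetic; ALIGN matches the kernel macro's round-up-to-power-of-two behavior, and the 16-byte size is that of the advanced RX descriptor:

#include <stdio.h>

/* The kernel's ALIGN(): round x up to a power-of-two boundary a. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        const unsigned long desc_size = 16;     /* union ixgbe_adv_rx_desc */

        /* 512 descriptors are exactly two pages... */
        printf("count=512 -> %lu bytes\n", ALIGN(512 * desc_size, 4096));
        /* ...while a smaller count still rounds up to a whole page */
        printf("count=80  -> %lu bytes\n", ALIGN(80 * desc_size, 4096));
        return 0;
}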
3518 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3529 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3539 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) in ixgbevf_free_rx_resources() argument
3541 ixgbevf_clean_rx_ring(rx_ring); in ixgbevf_free_rx_resources()
3543 rx_ring->xdp_prog = NULL; in ixgbevf_free_rx_resources()
3544 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbevf_free_rx_resources()
3545 vfree(rx_ring->rx_buffer_info); in ixgbevf_free_rx_resources()
3546 rx_ring->rx_buffer_info = NULL; in ixgbevf_free_rx_resources()
3548 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, in ixgbevf_free_rx_resources()
3549 rx_ring->dma); in ixgbevf_free_rx_resources()
3551 rx_ring->desc = NULL; in ixgbevf_free_rx_resources()
3565 if (adapter->rx_ring[i]->desc) in ixgbevf_free_all_rx_resources()
3566 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_free_all_rx_resources()
4394 ring = adapter->rx_ring[i]; in ixgbevf_get_stats()
4458 struct ixgbevf_ring *ring = adapter->rx_ring[i]; in ixgbevf_xdp_setup()
4482 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); in ixgbevf_xdp_setup()