
Lines matching refs: rx_ring — cross-reference hits for the Rx ring state (struct ice_ring) in the Intel ice Ethernet driver's Rx path. Each hit shows its source line number, the code fragment, and the enclosing function; "argument" marks lines where rx_ring appears as a function parameter.

267 void ice_clean_rx_ring(struct ice_ring *rx_ring) in ice_clean_rx_ring() argument
269 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
273 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
277 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
278 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
303 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); in ice_clean_rx_ring()
306 memset(rx_ring->desc, 0, rx_ring->size); in ice_clean_rx_ring()
308 rx_ring->next_to_alloc = 0; in ice_clean_rx_ring()
309 rx_ring->next_to_clean = 0; in ice_clean_rx_ring()
310 rx_ring->next_to_use = 0; in ice_clean_rx_ring()
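
A minimal userspace sketch of the cleanup pattern these hits trace through ice_clean_rx_ring(): walk every software buffer slot, release its page, then zero the buffer array and descriptor memory and rewind the three ring indexes. The demo_* structs and names are illustrative stand-ins, not the driver's types; later sketches in this listing reuse them.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct demo_rx_buf { void *page; uint64_t dma; };

struct demo_rx_ring {
	struct demo_rx_buf *rx_buf;   /* software bookkeeping array      */
	void *desc;                   /* stands in for the HW desc ring  */
	size_t size;                  /* bytes in the descriptor ring    */
	uint16_t count;               /* number of descriptors           */
	uint16_t next_to_use, next_to_clean, next_to_alloc;
};

static void demo_clean_rx_ring(struct demo_rx_ring *ring)
{
	if (!ring->rx_buf)
		return;

	for (uint16_t i = 0; i < ring->count; i++) {
		struct demo_rx_buf *buf = &ring->rx_buf[i];

		/* real driver: DMA-unmaps and releases the page here */
		free(buf->page);
		buf->page = NULL;
	}

	memset(ring->rx_buf, 0, sizeof(*ring->rx_buf) * ring->count);
	memset(ring->desc, 0, ring->size);

	ring->next_to_alloc = 0;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}
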
319 void ice_free_rx_ring(struct ice_ring *rx_ring) in ice_free_rx_ring() argument
321 ice_clean_rx_ring(rx_ring); in ice_free_rx_ring()
322 devm_kfree(rx_ring->dev, rx_ring->rx_buf); in ice_free_rx_ring()
323 rx_ring->rx_buf = NULL; in ice_free_rx_ring()
325 if (rx_ring->desc) { in ice_free_rx_ring()
326 dmam_free_coherent(rx_ring->dev, rx_ring->size, in ice_free_rx_ring()
327 rx_ring->desc, rx_ring->dma); in ice_free_rx_ring()
328 rx_ring->desc = NULL; in ice_free_rx_ring()
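
The ice_free_rx_ring() hits show the teardown order: clean the ring, release the software buffer array, then the descriptor memory. A short sketch on the demo types above; plain free() stands in for the driver's managed devm_kfree()/dmam_free_coherent() releases.

static void demo_free_rx_ring(struct demo_rx_ring *ring)
{
	demo_clean_rx_ring(ring);

	free(ring->rx_buf);
	ring->rx_buf = NULL;

	if (ring->desc) {
		free(ring->desc);
		ring->desc = NULL;
	}
}
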
338 int ice_setup_rx_ring(struct ice_ring *rx_ring) in ice_setup_rx_ring() argument
340 struct device *dev = rx_ring->dev; in ice_setup_rx_ring()
346 WARN_ON(rx_ring->rx_buf); in ice_setup_rx_ring()
347 rx_ring->rx_buf = in ice_setup_rx_ring()
348 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, in ice_setup_rx_ring()
350 if (!rx_ring->rx_buf) in ice_setup_rx_ring()
354 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_setup_rx_ring()
356 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, in ice_setup_rx_ring()
358 if (!rx_ring->desc) { in ice_setup_rx_ring()
360 rx_ring->size); in ice_setup_rx_ring()
364 rx_ring->next_to_use = 0; in ice_setup_rx_ring()
365 rx_ring->next_to_clean = 0; in ice_setup_rx_ring()
369 devm_kfree(dev, rx_ring->rx_buf); in ice_setup_rx_ring()
370 rx_ring->rx_buf = NULL; in ice_setup_rx_ring()
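
Setup, as traced by the ice_setup_rx_ring() hits: allocate a zeroed buffer array of count entries, round the descriptor ring up to a 4 KiB multiple (the driver uses ALIGN(count * sizeof(union ice_32byte_rx_desc), 4096) with dmam_alloc_coherent()), and unwind by freeing the buffer array if the descriptor allocation fails. Sketch on the demo types above; DEMO_ALIGN and DEMO_DESC_SZ are stand-ins.

#define DEMO_ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))
#define DEMO_DESC_SZ      32u   /* sizeof(union ice_32byte_rx_desc) */

static int demo_setup_rx_ring(struct demo_rx_ring *ring)
{
	ring->rx_buf = calloc(ring->count, sizeof(*ring->rx_buf));
	if (!ring->rx_buf)
		return -1;

	ring->size = DEMO_ALIGN((size_t)ring->count * DEMO_DESC_SZ, 4096);
	ring->desc = calloc(1, ring->size);
	if (!ring->desc)
		goto err;

	ring->next_to_use = 0;
	ring->next_to_clean = 0;
	return 0;

err:
	free(ring->rx_buf);
	ring->rx_buf = NULL;
	return -1;
}
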
379 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) in ice_release_rx_desc() argument
381 u16 prev_ntu = rx_ring->next_to_use; in ice_release_rx_desc()
383 rx_ring->next_to_use = val; in ice_release_rx_desc()
386 rx_ring->next_to_alloc = val; in ice_release_rx_desc()
401 writel(val, rx_ring->tail); in ice_release_rx_desc()
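
ice_release_rx_desc() publishes the new producer index: record it in next_to_use and next_to_alloc, then let the value reach the hardware tail register (the lines elided from this listing decide when the tail write actually happens and order it against the descriptor writes). In this sketch a volatile pointer and __sync_synchronize() stand in for the MMIO tail register and the driver's write barrier.

static void demo_release_rx_desc(struct demo_rx_ring *ring,
				 volatile uint32_t *tail_reg, uint32_t val)
{
	uint16_t prev_ntu = ring->next_to_use;   /* kept so the elided lines can
						  * skip redundant tail writes */
	(void)prev_ntu;

	ring->next_to_use = (uint16_t)val;
	ring->next_to_alloc = (uint16_t)val;     /* ring is refilled up to val */

	__sync_synchronize();                    /* descriptors visible before tail */
	*tail_reg = val;
}
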
414 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) in ice_alloc_mapped_page() argument
421 rx_ring->rx_stats.page_reuse_count++; in ice_alloc_mapped_page()
428 rx_ring->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
433 dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, in ice_alloc_mapped_page()
439 if (dma_mapping_error(rx_ring->dev, dma)) { in ice_alloc_mapped_page()
441 rx_ring->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
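
ice_alloc_mapped_page(), per these hits, populates one buffer: keep an already-present page (counting page_reuse_count), otherwise allocate a fresh page, DMA-map it with dma_map_page_attrs(), and count alloc_page_failed on failure. The sketch below uses a hypothetical demo_rx_stats struct for those counters; aligned_alloc() and a fake mapping address stand in for page allocation and DMA mapping.

struct demo_rx_stats {
	uint64_t page_reuse_count;
	uint64_t alloc_page_failed;
};

static int demo_alloc_mapped_page(struct demo_rx_stats *stats,
				  struct demo_rx_buf *buf)
{
	if (buf->page) {                 /* slot still holds a reusable page */
		stats->page_reuse_count++;
		return 1;
	}

	buf->page = aligned_alloc(4096, 4096);
	if (!buf->page) {
		stats->alloc_page_failed++;
		return 0;
	}

	/* real driver: dma_map_page_attrs(); a mapping failure also bumps
	 * alloc_page_failed and releases the page */
	buf->dma = (uint64_t)(uintptr_t)buf->page;
	return 1;
}
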
467 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) in ice_alloc_rx_bufs() argument
470 u16 ntu = rx_ring->next_to_use; in ice_alloc_rx_bufs()
474 if (!rx_ring->netdev || !cleaned_count) in ice_alloc_rx_bufs()
478 rx_desc = ICE_RX_DESC(rx_ring, ntu); in ice_alloc_rx_bufs()
479 bi = &rx_ring->rx_buf[ntu]; in ice_alloc_rx_bufs()
483 if (!ice_alloc_mapped_page(rx_ring, bi)) in ice_alloc_rx_bufs()
487 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ice_alloc_rx_bufs()
500 if (unlikely(ntu == rx_ring->count)) { in ice_alloc_rx_bufs()
501 rx_desc = ICE_RX_DESC(rx_ring, 0); in ice_alloc_rx_bufs()
502 bi = rx_ring->rx_buf; in ice_alloc_rx_bufs()
512 if (rx_ring->next_to_use != ntu) in ice_alloc_rx_bufs()
513 ice_release_rx_desc(rx_ring, ntu); in ice_alloc_rx_bufs()
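
The ice_alloc_rx_bufs() hits outline the refill loop: start at next_to_use, post one buffer per descriptor, wrap the index back to 0 at ring->count, and only touch the tail if any descriptors were actually posted; the return value reports whether allocation fell short. Sketch built on the demo_* helpers above.

static int demo_alloc_rx_bufs(struct demo_rx_ring *ring,
			      struct demo_rx_stats *stats,
			      volatile uint32_t *tail_reg,
			      uint16_t cleaned_count)
{
	uint16_t ntu = ring->next_to_use;

	if (!cleaned_count)
		return 0;

	do {
		struct demo_rx_buf *buf = &ring->rx_buf[ntu];

		if (!demo_alloc_mapped_page(stats, buf))
			break;           /* out of pages; retry on a later poll */

		/* real driver: dma_sync_single_range_for_device(), then write
		 * the page's DMA address into the Rx descriptor at ntu */

		ntu++;
		if (ntu == ring->count)
			ntu = 0;         /* wrap to the start of the ring */
	} while (--cleaned_count);

	if (ring->next_to_use != ntu)
		demo_release_rx_desc(ring, tail_reg, ntu);

	return cleaned_count != 0;       /* nonzero: ran out of buffers */
}
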
628 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) in ice_reuse_rx_page() argument
630 u16 nta = rx_ring->next_to_alloc; in ice_reuse_rx_page()
633 new_buf = &rx_ring->rx_buf[nta]; in ice_reuse_rx_page()
637 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ice_reuse_rx_page()
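
ice_reuse_rx_page() recycles a half-used page into the slot the allocator will fill next: copy the buffer into rx_buf[next_to_alloc] and advance next_to_alloc with the same wrap-at-count arithmetic used elsewhere in the ring. Minimal sketch on the demo types above.

static void demo_reuse_rx_page(struct demo_rx_ring *ring,
			       const struct demo_rx_buf *old_buf)
{
	uint16_t nta = ring->next_to_alloc;

	ring->rx_buf[nta] = *old_buf;    /* hand the page to the next alloc slot */

	nta++;
	ring->next_to_alloc = (nta < ring->count) ? nta : 0;
}
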
659 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, in ice_get_rx_buf() argument
664 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; in ice_get_rx_buf()
671 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, in ice_get_rx_buf()
692 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, in ice_construct_skb() argument
706 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, in ice_construct_skb()
711 skb_record_rx_queue(skb, rx_ring->q_index); in ice_construct_skb()
751 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) in ice_put_rx_buf() argument
758 ice_reuse_rx_page(rx_ring, rx_buf); in ice_put_rx_buf()
759 rx_ring->rx_stats.page_reuse_count++; in ice_put_rx_buf()
762 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE, in ice_put_rx_buf()
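
ice_put_rx_buf() decides a buffer's fate once its data has been consumed: if the page can still serve another buffer it is recycled via ice_reuse_rx_page() and page_reuse_count is bumped, otherwise it is DMA-unmapped and released. The reuse check itself sits in lines not shown in this listing, so the sketch takes it as a caller-supplied flag.

static void demo_put_rx_buf(struct demo_rx_ring *ring,
			    struct demo_rx_stats *stats,
			    struct demo_rx_buf *buf, int reusable)
{
	if (reusable) {
		demo_reuse_rx_page(ring, buf);
		stats->page_reuse_count++;
	} else {
		/* real driver: dma_unmap_page_attrs(), then release the page */
		free(buf->page);
	}

	buf->page = NULL;                /* this slot no longer owns the page */
}
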
822 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, in ice_is_non_eop() argument
825 u32 ntc = rx_ring->next_to_clean + 1; in ice_is_non_eop()
828 ntc = (ntc < rx_ring->count) ? ntc : 0; in ice_is_non_eop()
829 rx_ring->next_to_clean = ntc; in ice_is_non_eop()
831 prefetch(ICE_RX_DESC(rx_ring, ntc)); in ice_is_non_eop()
839 rx_ring->rx_buf[ntc].skb = skb; in ice_is_non_eop()
840 rx_ring->rx_stats.non_eop_descs++; in ice_is_non_eop()
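
ice_is_non_eop() advances next_to_clean (wrapping at ring->count) and prefetches the next descriptor, then checks the end-of-packet bit; for a multi-descriptor packet the partial skb is parked on the next buffer slot and non_eop_descs is counted. DEMO_EOP_BIT and struct demo_rx_desc are illustrative stand-ins for the flex descriptor's status field.

#define DEMO_EOP_BIT 0x1u

struct demo_rx_desc { uint32_t status; };

static int demo_is_non_eop(struct demo_rx_ring *ring,
			   const struct demo_rx_desc *desc)
{
	uint32_t ntc = ring->next_to_clean + 1;

	ntc = (ntc < ring->count) ? ntc : 0;     /* wrap at end of ring */
	ring->next_to_clean = (uint16_t)ntc;

	if (desc->status & DEMO_EOP_BIT)
		return 0;                        /* packet is complete */

	/* real driver: rx_ring->rx_buf[ntc].skb = skb; non_eop_descs++ */
	return 1;
}
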
864 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, in ice_rx_hash() argument
870 if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) in ice_rx_hash()
963 ice_process_skb_fields(struct ice_ring *rx_ring, in ice_process_skb_fields() argument
967 ice_rx_hash(rx_ring, rx_desc, skb, ptype); in ice_process_skb_fields()
970 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ice_process_skb_fields()
972 ice_rx_csum(rx_ring, skb, rx_desc, ptype); in ice_process_skb_fields()
985 ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) in ice_receive_skb() argument
987 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && in ice_receive_skb()
990 napi_gro_receive(&rx_ring->q_vector->napi, skb); in ice_receive_skb()
1005 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) in ice_clean_rx_irq() argument
1008 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); in ice_clean_rx_irq()
1022 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ice_clean_rx_irq()
1043 rx_buf = ice_get_rx_buf(rx_ring, &skb, size); in ice_clean_rx_irq()
1048 skb = ice_construct_skb(rx_ring, rx_buf, size); in ice_clean_rx_irq()
1052 rx_ring->rx_stats.alloc_buf_failed++; in ice_clean_rx_irq()
1058 ice_put_rx_buf(rx_ring, rx_buf); in ice_clean_rx_irq()
1062 if (ice_is_non_eop(rx_ring, rx_desc, skb)) in ice_clean_rx_irq()
1090 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); in ice_clean_rx_irq()
1093 ice_receive_skb(rx_ring, skb, vlan_tag); in ice_clean_rx_irq()
1100 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); in ice_clean_rx_irq()
1103 u64_stats_update_begin(&rx_ring->syncp); in ice_clean_rx_irq()
1104 rx_ring->stats.pkts += total_rx_pkts; in ice_clean_rx_irq()
1105 rx_ring->stats.bytes += total_rx_bytes; in ice_clean_rx_irq()
1106 u64_stats_update_end(&rx_ring->syncp); in ice_clean_rx_irq()
1107 rx_ring->q_vector->rx.total_pkts += total_rx_pkts; in ice_clean_rx_irq()
1108 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in ice_clean_rx_irq()
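
The tail of ice_clean_rx_irq() refills the descriptors consumed during the poll and then publishes the totals: per-ring packet/byte counters inside u64_stats_update_begin()/end() (so 64-bit reads stay consistent on 32-bit systems) and running totals on the queue vector used by the driver's interrupt moderation logic. A plain struct stands in for the synchronized counters in this sketch.

struct demo_ring_totals { uint64_t pkts, bytes; };

static int demo_finish_rx_poll(struct demo_rx_ring *ring,
			       struct demo_rx_stats *stats,
			       volatile uint32_t *tail_reg,
			       struct demo_ring_totals *totals,
			       uint16_t cleaned_count,
			       uint64_t rx_pkts, uint64_t rx_bytes)
{
	int failure = demo_alloc_rx_bufs(ring, stats, tail_reg, cleaned_count);

	totals->pkts  += rx_pkts;     /* rx_ring->stats.pkts  += total_rx_pkts  */
	totals->bytes += rx_bytes;    /* rx_ring->stats.bytes += total_rx_bytes */

	return failure;
}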