Lines matching refs:tx_ring in the ENA network driver (ena_netdev.c)

175 		txr = &adapter->tx_ring[i];  in ena_init_io_rings()
207 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_setup_tx_resources() local
211 if (tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
217 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; in ena_setup_tx_resources()
220 tx_ring->tx_buffer_info = vzalloc_node(size, node); in ena_setup_tx_resources()
221 if (!tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
222 tx_ring->tx_buffer_info = vzalloc(size); in ena_setup_tx_resources()
223 if (!tx_ring->tx_buffer_info) in ena_setup_tx_resources()
227 size = sizeof(u16) * tx_ring->ring_size; in ena_setup_tx_resources()
228 tx_ring->free_tx_ids = vzalloc_node(size, node); in ena_setup_tx_resources()
229 if (!tx_ring->free_tx_ids) { in ena_setup_tx_resources()
230 tx_ring->free_tx_ids = vzalloc(size); in ena_setup_tx_resources()
231 if (!tx_ring->free_tx_ids) { in ena_setup_tx_resources()
232 vfree(tx_ring->tx_buffer_info); in ena_setup_tx_resources()
238 for (i = 0; i < tx_ring->ring_size; i++) in ena_setup_tx_resources()
239 tx_ring->free_tx_ids[i] = i; in ena_setup_tx_resources()
242 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); in ena_setup_tx_resources()
244 tx_ring->next_to_use = 0; in ena_setup_tx_resources()
245 tx_ring->next_to_clean = 0; in ena_setup_tx_resources()
246 tx_ring->cpu = ena_irq->cpu; in ena_setup_tx_resources()
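
The ena_setup_tx_resources() fragments above show the per-queue allocation pattern: a tx_buffer_info array and a free_tx_ids array, each sized by ring_size, are allocated NUMA-locally with vzalloc_node() and fall back to vzalloc() on failure, and the free-id table starts out as the identity mapping. The following is a condensed reconstruction from the listed lines, not the verbatim driver source; the function signature, the ena_irq lookup and the early-exit error code are assumptions.

static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
        struct ena_ring *tx_ring = &adapter->tx_ring[qid];
        struct ena_irq *ena_irq = &adapter->irq_tbl[qid];  /* lookup assumed */
        int node = cpu_to_node(ena_irq->cpu);   /* allocate near the queue's IRQ CPU */
        int size, i;

        if (tx_ring->tx_buffer_info)
                return -EEXIST;                 /* assumed: refuse to allocate twice */

        /* One ena_tx_buffer per descriptor; try the local NUMA node first */
        size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
        tx_ring->tx_buffer_info = vzalloc_node(size, node);
        if (!tx_ring->tx_buffer_info) {
                tx_ring->tx_buffer_info = vzalloc(size);
                if (!tx_ring->tx_buffer_info)
                        return -ENOMEM;
        }

        /* Table of free request ids, initialised to the identity mapping */
        size = sizeof(u16) * tx_ring->ring_size;
        tx_ring->free_tx_ids = vzalloc_node(size, node);
        if (!tx_ring->free_tx_ids) {
                tx_ring->free_tx_ids = vzalloc(size);
                if (!tx_ring->free_tx_ids) {
                        vfree(tx_ring->tx_buffer_info);
                        return -ENOMEM;
                }
        }

        for (i = 0; i < tx_ring->ring_size; i++)
                tx_ring->free_tx_ids[i] = i;

        memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        tx_ring->cpu = ena_irq->cpu;
        return 0;
}
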
258 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_free_tx_resources() local
260 vfree(tx_ring->tx_buffer_info); in ena_free_tx_resources()
261 tx_ring->tx_buffer_info = NULL; in ena_free_tx_resources()
263 vfree(tx_ring->free_tx_ids); in ena_free_tx_resources()
264 tx_ring->free_tx_ids = NULL; in ena_free_tx_resources()
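
The matching teardown in ena_free_tx_resources() is the mirror image: both vzalloc'd arrays are released with vfree() and the pointers cleared so a later setup starts from a clean state. A minimal sketch of the fragments above (signature assumed):

static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
        struct ena_ring *tx_ring = &adapter->tx_ring[qid];

        vfree(tx_ring->tx_buffer_info);         /* vfree(NULL) is a no-op, so this is safe either way */
        tx_ring->tx_buffer_info = NULL;

        vfree(tx_ring->free_tx_ids);
        tx_ring->free_tx_ids = NULL;
}
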
567 static void ena_free_tx_bufs(struct ena_ring *tx_ring) in ena_free_tx_bufs() argument
571 for (i = 0; i < tx_ring->ring_size; i++) { in ena_free_tx_bufs()
572 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; in ena_free_tx_bufs()
580 netdev_notice(tx_ring->netdev, in ena_free_tx_bufs()
582 tx_ring->qid, i); in ena_free_tx_bufs()
585 dma_unmap_single(tx_ring->dev, in ena_free_tx_bufs()
594 dma_unmap_page(tx_ring->dev, in ena_free_tx_bufs()
602 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in ena_free_tx_bufs()
603 tx_ring->qid)); in ena_free_tx_bufs()
608 struct ena_ring *tx_ring; in ena_free_all_tx_bufs() local
612 tx_ring = &adapter->tx_ring[i]; in ena_free_all_tx_bufs()
613 ena_free_tx_bufs(tx_ring); in ena_free_all_tx_bufs()
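
ena_free_tx_bufs() walks every slot of tx_buffer_info, unmaps whatever is still DMA-mapped (dma_unmap_single() for the linear part, dma_unmap_page() for page fragments), releases the skb and finally resets the netdev queue's byte accounting; ena_free_all_tx_bufs() just repeats that for every queue. A hedged sketch of the per-ring routine; the ena_tx_buffer fields (skb, num_of_bufs, bufs[]) and the notice text are assumptions inferred from the surrounding fragments:

static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
        u32 i;

        for (i = 0; i < tx_ring->ring_size; i++) {
                struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
                struct ena_com_buf *ena_buf;
                int nr;

                if (!tx_info->skb)
                        continue;               /* slot unused or already completed */

                /* Exact message text is not shown in the listing */
                netdev_notice(tx_ring->netdev,
                              "freeing uncompleted tx skb, qid %d idx %u\n",
                              tx_ring->qid, i);

                /* First buffer maps the linear part of the skb */
                ena_buf = tx_info->bufs;
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(ena_buf, paddr),
                                 dma_unmap_len(ena_buf, len),
                                 DMA_TO_DEVICE);

                /* Remaining buffers map page fragments */
                for (nr = 1; nr < tx_info->num_of_bufs; nr++) {
                        ena_buf++;
                        dma_unmap_page(tx_ring->dev,
                                       dma_unmap_addr(ena_buf, paddr),
                                       dma_unmap_len(ena_buf, len),
                                       DMA_TO_DEVICE);
                }

                dev_kfree_skb_any(tx_info->skb);
                tx_info->skb = NULL;
        }
        netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                  tx_ring->qid));
}
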
645 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) in validate_tx_req_id() argument
649 if (likely(req_id < tx_ring->ring_size)) { in validate_tx_req_id()
650 tx_info = &tx_ring->tx_buffer_info[req_id]; in validate_tx_req_id()
656 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, in validate_tx_req_id()
659 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, in validate_tx_req_id()
662 u64_stats_update_begin(&tx_ring->syncp); in validate_tx_req_id()
663 tx_ring->tx_stats.bad_req_id++; in validate_tx_req_id()
664 u64_stats_update_end(&tx_ring->syncp); in validate_tx_req_id()
667 set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags); in validate_tx_req_id()
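
validate_tx_req_id() is the defensive check run on every completion: a req_id outside the ring, or one whose buffer slot no longer carries an skb, is logged, counted in tx_stats.bad_req_id under the u64_stats sequence counter, and escalated to a device reset via ENA_FLAG_TRIGGER_RESET. A sketch based on the fragments above; the error messages, the skb test and the return code are assumptions:

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
        struct ena_tx_buffer *tx_info;

        if (likely(req_id < tx_ring->ring_size)) {
                tx_info = &tx_ring->tx_buffer_info[req_id];
                if (likely(tx_info->skb))
                        return 0;               /* valid, in-flight request */

                netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
                          "tx_info has no skb for req_id %u\n", req_id);
        } else {
                netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
                          "req_id %u out of range\n", req_id);
        }

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.bad_req_id++;
        u64_stats_update_end(&tx_ring->syncp);

        /* A bogus completion means driver and device disagree: ask for a reset */
        set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
        return -EFAULT;
}
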
671 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) in ena_clean_tx_irq() argument
682 next_to_clean = tx_ring->next_to_clean; in ena_clean_tx_irq()
683 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); in ena_clean_tx_irq()
691 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, in ena_clean_tx_irq()
696 rc = validate_tx_req_id(tx_ring, req_id); in ena_clean_tx_irq()
700 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_clean_tx_irq()
712 dma_unmap_single(tx_ring->dev, in ena_clean_tx_irq()
721 dma_unmap_page(tx_ring->dev, in ena_clean_tx_irq()
728 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
729 "tx_poll: q %d skb %p completed\n", tx_ring->qid, in ena_clean_tx_irq()
737 tx_ring->free_tx_ids[next_to_clean] = req_id; in ena_clean_tx_irq()
739 tx_ring->ring_size); in ena_clean_tx_irq()
742 tx_ring->next_to_clean = next_to_clean; in ena_clean_tx_irq()
743 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); in ena_clean_tx_irq()
744 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); in ena_clean_tx_irq()
748 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
750 tx_ring->qid, tx_pkts); in ena_clean_tx_irq()
757 above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > in ena_clean_tx_irq()
761 above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > in ena_clean_tx_irq()
765 u64_stats_update_begin(&tx_ring->syncp); in ena_clean_tx_irq()
766 tx_ring->tx_stats.queue_wakeup++; in ena_clean_tx_irq()
767 u64_stats_update_end(&tx_ring->syncp); in ena_clean_tx_irq()
772 tx_ring->per_napi_bytes += tx_bytes; in ena_clean_tx_irq()
773 tx_ring->per_napi_packets += tx_pkts; in ena_clean_tx_irq()
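
ena_clean_tx_irq() is the completion path run from NAPI: it pops completed request ids from the completion queue with ena_com_tx_comp_req_id_get(), validates each one, releases the associated buffers, pushes the id back onto free_tx_ids, then acknowledges the whole batch to the submission queue and wakes the stopped netdev queue once enough descriptors are free again. The loop below is a condensed reconstruction of that flow; the unmapping/free of each skb is elided, and the descriptor accounting, wake-up threshold and locking around the wake-up are simplified assumptions:

static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
        struct netdev_queue *txq;
        u16 next_to_clean = tx_ring->next_to_clean;
        u32 tx_bytes = 0, tx_pkts = 0, total_done = 0;
        u16 req_id;

        txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

        while (tx_pkts < budget) {
                struct ena_tx_buffer *tx_info;

                /* Pop the next completed request id from the CQ */
                if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id))
                        break;                  /* no more completions */

                if (validate_tx_req_id(tx_ring, req_id))
                        break;                  /* bogus id; a reset has been scheduled */

                tx_info = &tx_ring->tx_buffer_info[req_id];
                tx_bytes += tx_info->skb->len;
                tx_pkts++;
                total_done += tx_info->tx_descs;        /* descriptors consumed; field name assumed */

                /* DMA unmapping and dev_kfree_skb() elided here; the unmap
                 * pattern matches ena_free_tx_bufs() above */

                /* Recycle the request id and advance the clean pointer */
                tx_ring->free_tx_ids[next_to_clean] = req_id;
                next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
                                                     tx_ring->ring_size);
        }

        tx_ring->next_to_clean = next_to_clean;
        ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
        ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

        netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

        /* Restart the queue once there is room for a worst-case packet again */
        if (unlikely(netif_tx_queue_stopped(txq)) &&
            ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
            ENA_TX_WAKEUP_THRESH) {             /* threshold name assumed */
                netif_tx_wake_queue(txq);
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.queue_wakeup++;
                u64_stats_update_end(&tx_ring->syncp);
        }

        tx_ring->per_napi_bytes += tx_bytes;
        tx_ring->per_napi_packets += tx_pkts;
        return tx_pkts;
}
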
1058 struct ena_ring *tx_ring) in ena_adjust_intr_moderation() argument
1070 tx_ring->per_napi_packets = 0; in ena_adjust_intr_moderation()
1071 tx_ring->per_napi_bytes = 0; in ena_adjust_intr_moderation()
1076 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, in ena_update_ring_numa_node() argument
1083 if (likely(tx_ring->cpu == cpu)) in ena_update_ring_numa_node()
1090 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); in ena_update_ring_numa_node()
1094 tx_ring->cpu = cpu; in ena_update_ring_numa_node()
1105 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1114 tx_ring = ena_napi->tx_ring; in ena_io_poll()
1117 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; in ena_io_poll()
1119 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { in ena_io_poll()
1124 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); in ena_io_poll()
1133 ena_adjust_intr_moderation(rx_ring, tx_ring); in ena_io_poll()
1140 tx_ring->smoothed_interval, in ena_io_poll()
1148 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
1155 u64_stats_update_begin(&tx_ring->syncp); in ena_io_poll()
1156 tx_ring->tx_stats.napi_comp += napi_comp_call; in ena_io_poll()
1157 tx_ring->tx_stats.tx_poll++; in ena_io_poll()
1158 u64_stats_update_end(&tx_ring->syncp); in ena_io_poll()
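
In ena_io_poll() the TX side of the NAPI handler budgets the clean-up by the ring size (ring_size / ENA_TX_POLL_BUDGET_DIVIDER), bails out if the interface is no longer up, and folds the result into the napi_comp/tx_poll statistics and the interrupt-moderation bookkeeping. A trimmed sketch of just that TX half; the RX clean-up, the napi completion handling, the interrupt unmask and the struct ena_napi layout are assumptions or omissions:

static int ena_io_poll(struct napi_struct *napi, int budget)
{
        struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); /* member name assumed */
        struct ena_ring *tx_ring = ena_napi->tx_ring;
        u32 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
        int tx_work_done;

        if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
                napi_complete(napi);            /* interface went down mid-poll */
                return 0;
        }

        tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.napi_comp++;          /* simplified; the driver adds napi_comp_call */
        tx_ring->tx_stats.tx_poll++;
        u64_stats_update_end(&tx_ring->syncp);

        /* The full handler also cleans RX, runs ena_adjust_intr_moderation(),
         * ena_update_ring_numa_node() and unmasks the interrupt using the
         * ring's smoothed_interval before returning */
        return min(tx_work_done, budget);       /* return-value handling assumed */
}
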
1414 napi->tx_ring = &adapter->tx_ring[i]; in ena_init_napi()
1511 struct ena_ring *tx_ring; in ena_create_io_tx_queue() local
1518 tx_ring = &adapter->tx_ring[qid]; in ena_create_io_tx_queue()
1527 ctx.numa_node = cpu_to_node(tx_ring->cpu); in ena_create_io_tx_queue()
1538 &tx_ring->ena_com_io_sq, in ena_create_io_tx_queue()
1539 &tx_ring->ena_com_io_cq); in ena_create_io_tx_queue()
1548 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_tx_queue()
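
ena_create_io_tx_queue() builds the queue-creation context with numa_node taken from the CPU recorded for the ring, asks the ena_com layer to create the hardware queue, stores the resulting I/O SQ/CQ handles in the ring, and propagates the NUMA node to the completion queue. A rough sketch of that sequence; only the numa_node handling and the handle fields come from the listing, while the context fields and the create/get-handle calls are assumptions based on the ena_com API:

static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
        struct ena_com_dev *ena_dev = adapter->ena_dev;         /* field name assumed */
        struct ena_ring *tx_ring = &adapter->tx_ring[qid];
        struct ena_com_create_io_ctx ctx = {};                  /* type name assumed */
        int rc;

        ctx.qid = qid;                                          /* TX queue index mapping assumed */
        ctx.numa_node = cpu_to_node(tx_ring->cpu);              /* create on the ring's NUMA node */

        rc = ena_com_create_io_queue(ena_dev, &ctx);
        if (rc)
                return rc;

        rc = ena_com_get_io_handlers(ena_dev, ctx.qid,
                                     &tx_ring->ena_com_io_sq,
                                     &tx_ring->ena_com_io_cq);
        if (rc)
                return rc;

        ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
        return 0;
}
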
1836 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, in ena_check_and_linearize_skb() argument
1844 if (num_frags < tx_ring->sgl_size) in ena_check_and_linearize_skb()
1847 if ((num_frags == tx_ring->sgl_size) && in ena_check_and_linearize_skb()
1848 (header_len < tx_ring->tx_max_header_size)) in ena_check_and_linearize_skb()
1851 u64_stats_update_begin(&tx_ring->syncp); in ena_check_and_linearize_skb()
1852 tx_ring->tx_stats.linearize++; in ena_check_and_linearize_skb()
1853 u64_stats_update_end(&tx_ring->syncp); in ena_check_and_linearize_skb()
1857 u64_stats_update_begin(&tx_ring->syncp); in ena_check_and_linearize_skb()
1858 tx_ring->tx_stats.linearize_failed++; in ena_check_and_linearize_skb()
1859 u64_stats_update_end(&tx_ring->syncp); in ena_check_and_linearize_skb()
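
ena_check_and_linearize_skb() enforces the device's scatter-gather limit before any mapping happens: an skb passes as-is when it has fewer fragments than sgl_size, or exactly sgl_size fragments with a header short enough to be sent separately; otherwise it is flattened with skb_linearize() and the outcome is recorded in the linearize/linearize_failed counters. Sketched from the fragments above; deriving header_len from skb_headlen() is an assumption:

static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
                                       struct sk_buff *skb)
{
        u32 num_frags = skb_shinfo(skb)->nr_frags;
        u32 header_len = skb_headlen(skb);
        int rc;

        /* Fits the device's scatter-gather list as-is */
        if (num_frags < tx_ring->sgl_size)
                return 0;

        /* Exactly at the limit is still fine when the header can be carried
         * separately instead of occupying its own descriptor */
        if ((num_frags == tx_ring->sgl_size) &&
            (header_len < tx_ring->tx_max_header_size))
                return 0;

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.linearize++;
        u64_stats_update_end(&tx_ring->syncp);

        rc = skb_linearize(skb);                /* copy all frags into one buffer */
        if (unlikely(rc)) {
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.linearize_failed++;
                u64_stats_update_end(&tx_ring->syncp);
        }
        return rc;
}
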
1871 struct ena_ring *tx_ring; in ena_start_xmit() local
1887 tx_ring = &adapter->tx_ring[qid]; in ena_start_xmit()
1890 rc = ena_check_and_linearize_skb(tx_ring, skb); in ena_start_xmit()
1897 next_to_use = tx_ring->next_to_use; in ena_start_xmit()
1898 req_id = tx_ring->free_tx_ids[next_to_use]; in ena_start_xmit()
1899 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_start_xmit()
1906 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_start_xmit()
1908 push_len = min_t(u32, len, tx_ring->tx_max_header_size); in ena_start_xmit()
1913 header_len = min_t(u32, len, tx_ring->tx_max_header_size); in ena_start_xmit()
1922 dma = dma_map_single(tx_ring->dev, skb->data + push_len, in ena_start_xmit()
1924 if (dma_mapping_error(tx_ring->dev, dma)) in ena_start_xmit()
1940 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, in ena_start_xmit()
1942 if (dma_mapping_error(tx_ring->dev, dma)) in ena_start_xmit()
1963 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, in ena_start_xmit()
1969 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
1970 tx_ring->tx_stats.queue_stop++; in ena_start_xmit()
1971 tx_ring->tx_stats.prepare_ctx_err++; in ena_start_xmit()
1972 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
1979 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
1980 tx_ring->tx_stats.cnt++; in ena_start_xmit()
1981 tx_ring->tx_stats.bytes += skb->len; in ena_start_xmit()
1982 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
1987 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, in ena_start_xmit()
1988 tx_ring->ring_size); in ena_start_xmit()
2000 if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < in ena_start_xmit()
2001 (tx_ring->sgl_size + 2))) { in ena_start_xmit()
2006 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
2007 tx_ring->tx_stats.queue_stop++; in ena_start_xmit()
2008 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
2019 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) in ena_start_xmit()
2022 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
2023 tx_ring->tx_stats.queue_wakeup++; in ena_start_xmit()
2024 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
2030 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); in ena_start_xmit()
2031 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
2032 tx_ring->tx_stats.doorbells++; in ena_start_xmit()
2033 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
2039 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
2040 tx_ring->tx_stats.dma_mapping_err++; in ena_start_xmit()
2041 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
2054 dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), in ena_start_xmit()
2060 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), in ena_start_xmit()
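
ena_start_xmit() ties the pieces together: the skb is linearized if necessary, a free request id is taken from free_tx_ids at next_to_use, the header is either pushed to the device (device placement) or kept in host memory, the linear remainder and each page fragment are DMA-mapped, ena_com_prepare_tx() queues the descriptors, next_to_use advances, the netdev queue is stopped when fewer than sgl_size + 2 descriptors remain, and the doorbell is written. The sketch below compresses that flow; the checksum/TSO context, the push-header handling, the stop/wake re-check and the DMA-error unwind are elided, and the tx_info/ena_tx_ctx field names are assumptions:

static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ena_adapter *adapter = netdev_priv(dev);
        struct ena_com_tx_ctx ena_tx_ctx = {};
        struct ena_tx_buffer *tx_info;
        struct ena_ring *tx_ring;
        struct netdev_queue *txq;
        u16 next_to_use, req_id;
        u16 qid = skb_get_queue_mapping(skb);
        int rc, nb_hw_desc = 0;

        tx_ring = &adapter->tx_ring[qid];
        txq = netdev_get_tx_queue(dev, qid);

        if (ena_check_and_linearize_skb(tx_ring, skb))
                goto drop;

        /* Claim a free request id for this skb */
        next_to_use = tx_ring->next_to_use;
        req_id = tx_ring->free_tx_ids[next_to_use];
        tx_info = &tx_ring->tx_buffer_info[req_id];
        tx_info->skb = skb;

        /* Elided: push or DMA-map the header depending on tx_mem_queue_type,
         * dma_map_single() the linear remainder, skb_frag_dma_map() each
         * fragment, and record the mappings in tx_info / ena_tx_ctx */

        ena_tx_ctx.req_id = req_id;             /* field name assumed */
        rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
                                &nb_hw_desc);
        if (rc) {
                /* The driver also bumps queue_stop and prepare_ctx_err here */
                tx_info->skb = NULL;
                goto drop;
        }

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.cnt++;
        tx_ring->tx_stats.bytes += skb->len;
        u64_stats_update_end(&tx_ring->syncp);

        tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
                                                    tx_ring->ring_size);

        /* Stop the queue while there may be no room for a worst-case packet */
        if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
                     (tx_ring->sgl_size + 2)))
                netif_tx_stop_queue(txq);

        ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.doorbells++;
        u64_stats_update_end(&tx_ring->syncp);

        return NETDEV_TX_OK;

drop:
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
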
2529 struct ena_ring *tx_ring; in check_for_missing_tx_completions() local
2542 tx_ring = &adapter->tx_ring[i]; in check_for_missing_tx_completions()
2544 for (j = 0; j < tx_ring->ring_size; j++) { in check_for_missing_tx_completions()
2545 tx_buf = &tx_ring->tx_buffer_info[j]; in check_for_missing_tx_completions()
2550 tx_ring->qid, j); in check_for_missing_tx_completions()
2552 u64_stats_update_begin(&tx_ring->syncp); in check_for_missing_tx_completions()
2553 missed_tx = tx_ring->tx_stats.missing_tx_comp++; in check_for_missing_tx_completions()
2554 u64_stats_update_end(&tx_ring->syncp); in check_for_missing_tx_completions()
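
check_for_missing_tx_completions() is the watchdog counterpart of this bookkeeping: it walks every TX ring and every tx_buffer_info slot, and when a submitted skb has waited too long for a completion it logs the queue and index and bumps tx_stats.missing_tx_comp (with a reset triggered once too many are outstanding). A skeletal sketch; the per-buffer timestamp/timeout test, the message text, the queue-count field and the reset-threshold handling are assumptions or omissions:

static void check_for_missing_tx_completions(struct ena_adapter *adapter)
{
        struct ena_ring *tx_ring;
        struct ena_tx_buffer *tx_buf;
        int i, j;

        for (i = 0; i < adapter->num_queues; i++) {     /* field name assumed */
                tx_ring = &adapter->tx_ring[i];

                for (j = 0; j < tx_ring->ring_size; j++) {
                        tx_buf = &tx_ring->tx_buffer_info[j];

                        /* Empty slot; the timeout comparison against the skb's
                         * submit time is elided in this sketch */
                        if (!tx_buf->skb)
                                continue;

                        netif_notice(adapter, tx_err, adapter->netdev,
                                     "tx completion missing, qid %d idx %d\n",
                                     tx_ring->qid, j);

                        u64_stats_update_begin(&tx_ring->syncp);
                        tx_ring->tx_stats.missing_tx_comp++;
                        u64_stats_update_end(&tx_ring->syncp);

                        /* The driver triggers ENA_FLAG_TRIGGER_RESET once the
                         * running count crosses its threshold */
                }
        }
}
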