/kernel/linux/linux-5.10/drivers/net/ethernet/intel/i40e/ |
D | i40e_xsk.c |
      12  void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)    in i40e_clear_rx_bi_zc() argument
      14  memset(rx_ring->rx_bi_zc, 0,    in i40e_clear_rx_bi_zc()
      15  sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);    in i40e_clear_rx_bi_zc()
      18  static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)    in i40e_rx_bi() argument
      20  return &rx_ring->rx_bi_zc[idx];    in i40e_rx_bi()
      32  static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)    in i40e_realloc_rx_xdp_bi() argument
      34  size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :    in i40e_realloc_rx_xdp_bi()
      35  sizeof(*rx_ring->rx_bi);    in i40e_realloc_rx_xdp_bi()
      36  void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);    in i40e_realloc_rx_xdp_bi()
      42  kfree(rx_ring->rx_bi);    in i40e_realloc_rx_xdp_bi()
      [all …]
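The i40e fragment above swaps the per-descriptor software ring between struct i40e_rx_buffer entries and struct xdp_buff * entries when an AF_XDP pool is attached or detached, picking the element size first and then doing a single zeroed allocation. A minimal user-space sketch of that pattern (struct rx_buffer and struct ring are hypothetical; calloc/free stand in for kcalloc/kfree):

```c
#include <stdlib.h>
#include <stdbool.h>

struct rx_buffer { void *page; unsigned int offset; };	/* hypothetical */

struct ring {
	unsigned int count;
	void *sw_ring;	/* holds either rx_buffer[] or xdp_buff *[] */
};

/* Reallocate the software ring for whichever element type the new
 * mode needs; the old array is freed regardless of its type. */
static int realloc_sw_ring(struct ring *r, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(void *)	/* xdp_buff * */
					: sizeof(struct rx_buffer);
	void *sw = calloc(r->count, elem_size);

	if (!sw)
		return -1;
	free(r->sw_ring);
	r->sw_ring = sw;
	return 0;
}
```

Keeping a single pointer that can hold either array type lets the hot path index one sw_ring field no matter which mode is active.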
|
D | i40e_txrx.c |
     531  static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,    in i40e_fd_handle_status() argument
     534  struct i40e_pf *pf = rx_ring->vsi->back;    in i40e_fd_handle_status()
    1199  static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)    in i40e_rx_bi() argument
    1201  return &rx_ring->rx_bi[idx];    in i40e_rx_bi()
    1211  static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,    in i40e_reuse_rx_page() argument
    1215  u16 nta = rx_ring->next_to_alloc;    in i40e_reuse_rx_page()
    1217  new_buff = i40e_rx_bi(rx_ring, nta);    in i40e_reuse_rx_page()
    1221  rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;    in i40e_reuse_rx_page()
    1229  rx_ring->rx_stats.page_reuse_count++;    in i40e_reuse_rx_page()
    1247  void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,    in i40e_clean_programming_status()
    [all …]
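i40e_reuse_rx_page() advances next_to_alloc with a compare-and-reset rather than a modulo, a pattern that recurs in most of the rings in this listing. A one-function sketch:

```c
/* Bump a ring index and wrap it at 'count' without a divide, as in
 * i40e_reuse_rx_page() above: increment, then reset to 0 at the end. */
static unsigned short ring_next(unsigned short nta, unsigned short count)
{
	nta++;
	return (nta < count) ? nta : 0;
}
```

The branch form works for any ring size; the mask form used by the nfp driver further down needs a power-of-two count.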
|
D | i40e_txrx_common.h |
       8  void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
      10  void i40e_process_skb_fields(struct i40e_ring *rx_ring,
      13  void i40e_update_rx_stats(struct i40e_ring *rx_ring,
      16  void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
      17  void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
     102  void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
|
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
      98  struct ixgbe_ring *rx_ring,    in ixgbe_run_xdp_zc() argument
     107  xdp_prog = READ_ONCE(rx_ring->xdp_prog);    in ixgbe_run_xdp_zc()
     111  err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);    in ixgbe_run_xdp_zc()
     134  trace_xdp_exception(rx_ring->netdev, xdp_prog, act);    in ixgbe_run_xdp_zc()
     144  bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)    in ixgbe_alloc_rx_buffers_zc() argument
     148  u16 i = rx_ring->next_to_use;    in ixgbe_alloc_rx_buffers_zc()
     156  rx_desc = IXGBE_RX_DESC(rx_ring, i);    in ixgbe_alloc_rx_buffers_zc()
     157  bi = &rx_ring->rx_buffer_info[i];    in ixgbe_alloc_rx_buffers_zc()
     158  i -= rx_ring->count;    in ixgbe_alloc_rx_buffers_zc()
     161  bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);    in ixgbe_alloc_rx_buffers_zc()
     [all …]
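ixgbe_alloc_rx_buffers_zc() biases the loop index negative (i -= count) so the wrap test inside the refill loop becomes a cheap test for zero instead of a compare against count. A self-contained sketch of that loop shape, with a hypothetical alloc_frag() callback and the DMA mapping omitted:

```c
#include <stdbool.h>

struct desc { unsigned long long addr; };
struct buf  { void *frag; };

struct ring {
	unsigned int count;		/* descriptors in the ring */
	unsigned int next_to_use;
	struct desc *descs;
	struct buf  *bufs;
};

/* Refill up to n slots; returns true if all were filled. */
static bool refill(struct ring *r, unsigned int n, void *(*alloc_frag)(void))
{
	unsigned int i = r->next_to_use;
	struct desc *d = &r->descs[i];
	struct buf  *b = &r->bufs[i];

	if (!n)
		return true;

	i -= r->count;			/* bias negative: i == 0 means "wrapped" */
	do {
		b->frag = alloc_frag();
		if (!b->frag)
			break;		/* out of buffers: publish what we have */
		/* a real driver writes the DMA address here */
		d->addr = (unsigned long long)(unsigned long)b->frag;
		d++; b++; i++;
		if (!i) {		/* reached the end of the ring */
			d = r->descs;
			b = r->bufs;
			i -= r->count;
		}
	} while (--n);

	r->next_to_use = i + r->count;	/* undo the bias */
	return n == 0;
}
```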
|
D | ixgbe_txrx_common.h |
      17  bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
      20  void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
      39  bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
      41  struct ixgbe_ring *rx_ring,
      43  void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
|
D | ixgbe_main.c |
     574  struct ixgbe_ring *rx_ring;    in ixgbe_dump() local
     713  rx_ring = adapter->rx_ring[n];    in ixgbe_dump()
     715  n, rx_ring->next_to_use, rx_ring->next_to_clean);    in ixgbe_dump()
     770  rx_ring = adapter->rx_ring[n];    in ixgbe_dump()
     772  pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);    in ixgbe_dump()
     783  for (i = 0; i < rx_ring->count; i++) {    in ixgbe_dump()
     786  if (i == rx_ring->next_to_use)    in ixgbe_dump()
     788  else if (i == rx_ring->next_to_clean)    in ixgbe_dump()
     793  rx_buffer_info = &rx_ring->rx_buffer_info[i];    in ixgbe_dump()
     794  rx_desc = IXGBE_RX_DESC(rx_ring, i);    in ixgbe_dump()
     [all …]
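ixgbe_dump() walks the descriptor array and tags the two ring cursors: next_to_use, where software will place the next buffer, and next_to_clean, the next descriptor to check for completion. A sketch of the marker logic:

```c
#include <stdio.h>

/* Print each descriptor slot, tagging next_to_use (NTU) and
 * next_to_clean (NTC) the way ixgbe_dump() does in its legend. */
static void dump_ring(unsigned int count, unsigned int ntu, unsigned int ntc)
{
	for (unsigned int i = 0; i < count; i++) {
		const char *mark = "";

		if (i == ntu)
			mark = " NTU";
		else if (i == ntc)
			mark = " NTC";
		printf("R [0x%03X]%s\n", i, mark);
	}
}
```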
|
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/iavf/ |
D | iavf_txrx.c |
     658  void iavf_clean_rx_ring(struct iavf_ring *rx_ring)    in iavf_clean_rx_ring() argument
     664  if (!rx_ring->rx_bi)    in iavf_clean_rx_ring()
     667  if (rx_ring->skb) {    in iavf_clean_rx_ring()
     668  dev_kfree_skb(rx_ring->skb);    in iavf_clean_rx_ring()
     669  rx_ring->skb = NULL;    in iavf_clean_rx_ring()
     673  for (i = 0; i < rx_ring->count; i++) {    in iavf_clean_rx_ring()
     674  struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];    in iavf_clean_rx_ring()
     682  dma_sync_single_range_for_cpu(rx_ring->dev,    in iavf_clean_rx_ring()
     685  rx_ring->rx_buf_len,    in iavf_clean_rx_ring()
     689  dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,    in iavf_clean_rx_ring()
     [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ice/ |
D | ice_txrx.c |
     369  void ice_clean_rx_ring(struct ice_ring *rx_ring)    in ice_clean_rx_ring() argument
     371  struct device *dev = rx_ring->dev;    in ice_clean_rx_ring()
     375  if (!rx_ring->rx_buf)    in ice_clean_rx_ring()
     378  if (rx_ring->xsk_pool) {    in ice_clean_rx_ring()
     379  ice_xsk_clean_rx_ring(rx_ring);    in ice_clean_rx_ring()
     384  for (i = 0; i < rx_ring->count; i++) {    in ice_clean_rx_ring()
     385  struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];    in ice_clean_rx_ring()
     399  rx_ring->rx_buf_len,    in ice_clean_rx_ring()
     403  dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),    in ice_clean_rx_ring()
     412  memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);    in ice_clean_rx_ring()
     [all …]
|
D | ice_xsk.c |
      72  ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,    in ice_qvec_dis_irq() argument
      84  reg = rx_ring->reg_idx;    in ice_qvec_dis_irq()
     152  struct ice_ring *tx_ring, *rx_ring;    in ice_qp_dis() local
     161  rx_ring = vsi->rx_rings[q_idx];    in ice_qp_dis()
     162  q_vector = rx_ring->q_vector;    in ice_qp_dis()
     172  ice_qvec_dis_irq(vsi, rx_ring, q_vector);    in ice_qp_dis()
     209  struct ice_ring *tx_ring, *rx_ring;    in ice_qp_ena() local
     225  rx_ring = vsi->rx_rings[q_idx];    in ice_qp_ena()
     226  q_vector = rx_ring->q_vector;    in ice_qp_ena()
     244  err = ice_setup_rx_ctx(rx_ring);    in ice_qp_ena()
     [all …]
|
D | ice_txrx_lib.c |
      11  void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)    in ice_release_rx_desc() argument
      13  u16 prev_ntu = rx_ring->next_to_use & ~0x7;    in ice_release_rx_desc()
      15  rx_ring->next_to_use = val;    in ice_release_rx_desc()
      18  rx_ring->next_to_alloc = val;    in ice_release_rx_desc()
      33  writel(val, rx_ring->tail);    in ice_release_rx_desc()
      56  ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,    in ice_rx_hash() argument
      62  if (!(rx_ring->netdev->features & NETIF_F_RXHASH))    in ice_rx_hash()
     167  ice_process_skb_fields(struct ice_ring *rx_ring,    in ice_process_skb_fields() argument
     171  ice_rx_hash(rx_ring, rx_desc, skb, ptype);    in ice_process_skb_fields()
     174  skb->protocol = eth_type_trans(skb, rx_ring->netdev);    in ice_process_skb_fields()
     [all …]
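ice_release_rx_desc() masks next_to_use down to a multiple of 8 and writes the tail register only when that rounded value changes; the hardware ignores the low three bits of the tail anyway, so this batches the expensive MMIO write to one per eight descriptors. A sketch of that logic (the kernel's wmb() barrier is noted but omitted):

```c
#include <stdint.h>

struct ring {
	uint16_t next_to_use;
	uint16_t next_to_alloc;
	volatile uint32_t *tail;	/* hypothetical MMIO tail register */
};

/* Publish new buffers, bumping the tail only when next_to_use crosses
 * an 8-descriptor boundary, as ice_release_rx_desc() does above. */
static void release_rx_desc(struct ring *r, uint16_t val)
{
	uint16_t prev_ntu = r->next_to_use & ~0x7;

	r->next_to_use = val;
	r->next_to_alloc = val;

	val &= ~0x7;
	if (prev_ntu != val) {
		/* in the kernel a wmb() orders the descriptor writes
		 * before the tail write; omitted in this sketch */
		*r->tail = val;
	}
}
```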
|
D | ice_xsk.h |
      14  int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
      17  bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
      19  void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
      31  ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,    in ice_clean_rx_irq_zc() argument
      45  ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,    in ice_alloc_rx_bufs_zc() argument
      63  #define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)    argument
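The ice_xsk.h fragment shows the usual kernel pattern for a compile-time optional feature: real prototypes under the config option, no-op static inline stubs (or a do { } while (0) macro) otherwise, so callers never need an #ifdef of their own. A generic sketch, where CONFIG_MYFEATURE, struct ring, and the function names are all hypothetical:

```c
struct ring;	/* opaque to this header */

#ifdef CONFIG_MYFEATURE
int  feature_clean_ring(struct ring *r, int budget);
void feature_clean_all(struct ring *r);
#else
static inline int feature_clean_ring(struct ring *r, int budget)
{
	(void)r; (void)budget;
	return 0;	/* "no work done" */
}
#define feature_clean_all(r) do { } while (0)
#endif
```

The do { } while (0) macro swallows a trailing semicolon safely, which is why it is preferred over an empty define for statement-like stubs.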
|
/kernel/linux/linux-5.10/drivers/net/ethernet/amazon/ena/ |
D | ena_netdev.c |
      73  struct ena_ring *rx_ring);
      75  struct ena_ring *rx_ring);
     105  adapter->rx_ring[i].mtu = mtu;    in update_rx_ring_mtu()
     335  static int ena_xdp_execute(struct ena_ring *rx_ring,    in ena_xdp_execute() argument
     344  xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);    in ena_xdp_execute()
     352  ena_xdp_xmit_buff(rx_ring->netdev,    in ena_xdp_execute()
     354  rx_ring->qid + rx_ring->adapter->num_io_queues,    in ena_xdp_execute()
     357  xdp_stat = &rx_ring->rx_stats.xdp_tx;    in ena_xdp_execute()
     359  trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);    in ena_xdp_execute()
     360  xdp_stat = &rx_ring->rx_stats.xdp_aborted;    in ena_xdp_execute()
     [all …]
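ena_xdp_execute() runs the attached XDP program and routes each verdict to its own per-ring statistics counter, tracing an exception for aborted packets. A sketch of the verdict-to-counter mapping (the enum and counter names here are hypothetical stand-ins):

```c
#include <stdint.h>

enum xdp_verdict { V_PASS, V_TX, V_ABORTED, V_DROP };	/* hypothetical */

struct rx_stats {
	uint64_t xdp_pass, xdp_tx, xdp_aborted, xdp_drop;
};

/* Pick the counter that matches the program's verdict, following the
 * shape of ena_xdp_execute() above. */
static uint64_t *verdict_stat(struct rx_stats *s, enum xdp_verdict v)
{
	switch (v) {
	case V_TX:
		return &s->xdp_tx;
	case V_ABORTED:
		return &s->xdp_aborted;	/* the driver also traces here */
	case V_DROP:
		return &s->xdp_drop;
	case V_PASS:
	default:
		return &s->xdp_pass;
	}
}
```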
|
/kernel/linux/linux-5.10/drivers/staging/qlge/ |
D | qlge_main.c |
     967  struct rx_ring *rx_ring)    in ql_get_curr_lchunk() argument
     969  struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);    in ql_get_curr_lchunk()
     985  static void ql_update_cq(struct rx_ring *rx_ring)    in ql_update_cq() argument
     987  rx_ring->cnsmr_idx++;    in ql_update_cq()
     988  rx_ring->curr_entry++;    in ql_update_cq()
     989  if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {    in ql_update_cq()
     990  rx_ring->cnsmr_idx = 0;    in ql_update_cq()
     991  rx_ring->curr_entry = rx_ring->cq_base;    in ql_update_cq()
     995  static void ql_write_cq_idx(struct rx_ring *rx_ring)    in ql_write_cq_idx() argument
     997  ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);    in ql_write_cq_idx()
     [all …]
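ql_update_cq() keeps both a numeric consumer index and a cursor pointer into the completion queue, and resets the pair together when the index reaches cq_len; ql_write_cq_idx() then publishes the index through a doorbell register. A sketch of the advance step:

```c
struct cq_entry { unsigned int flags; };	/* hypothetical payload */

struct cq {
	unsigned int cnsmr_idx;		/* consumer index, 0..cq_len-1 */
	unsigned int cq_len;
	struct cq_entry *curr_entry;	/* cursor into the entry array */
	struct cq_entry *cq_base;
};

/* Advance the consumer, resetting index and cursor together at the
 * end of the ring, as ql_update_cq() does above. */
static void cq_advance(struct cq *cq)
{
	cq->cnsmr_idx++;
	cq->curr_entry++;
	if (cq->cnsmr_idx == cq->cq_len) {
		cq->cnsmr_idx = 0;
		cq->curr_entry = cq->cq_base;
	}
}
```

Carrying the cursor alongside the index trades a few bytes of state for avoiding an indexed load on every completion.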
|
/kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/ |
D | nfp_net_debugfs.c |
      14  struct nfp_net_rx_ring *rx_ring;    in nfp_rx_q_show() local
      23  if (!r_vec->nfp_net || !r_vec->rx_ring)    in nfp_rx_q_show()
      26  rx_ring = r_vec->rx_ring;    in nfp_rx_q_show()
      30  rxd_cnt = rx_ring->cnt;    in nfp_rx_q_show()
      32  fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);    in nfp_rx_q_show()
      33  fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);    in nfp_rx_q_show()
      36  rx_ring->idx, rx_ring->fl_qcidx,    in nfp_rx_q_show()
      37  rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,    in nfp_rx_q_show()
      38  rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);    in nfp_rx_q_show()
      41  rxd = &rx_ring->rxds[i];    in nfp_rx_q_show()
      [all …]
|
D | nfp_net_common.c |
     595  nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,    in nfp_net_rx_ring_init() argument
     600  rx_ring->idx = idx;    in nfp_net_rx_ring_init()
     601  rx_ring->r_vec = r_vec;    in nfp_net_rx_ring_init()
     602  u64_stats_init(&rx_ring->r_vec->rx_sync);    in nfp_net_rx_ring_init()
     604  rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;    in nfp_net_rx_ring_init()
     605  rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);    in nfp_net_rx_ring_init()
    1433  struct nfp_net_rx_ring *rx_ring,    in nfp_net_rx_give_one() argument
    1438  wr_idx = D_IDX(rx_ring, rx_ring->wr_p);    in nfp_net_rx_give_one()
    1443  rx_ring->rxbufs[wr_idx].frag = frag;    in nfp_net_rx_give_one()
    1444  rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;    in nfp_net_rx_give_one()
    [all …]
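Unlike the Intel drivers, nfp keeps free-running read/write pointers and masks them into the ring with D_IDX(), which requires a power-of-two ring size; wr_p - rd_p is then always the exact fill level, with no full/empty ambiguity. A sketch of the idiom (RING_IDX stands in for D_IDX):

```c
#include <stdint.h>

/* cnt must be a power of two for the mask to equal "mod cnt". */
#define RING_IDX(cnt, ptr)	((ptr) & ((cnt) - 1))

struct ring {
	uint32_t cnt;		/* power-of-two ring size */
	uint32_t wr_p;		/* free-running write pointer */
	uint32_t rd_p;		/* free-running read pointer */
	void **slots;
};

/* Store one buffer at the masked write index, then advance the
 * free-running pointer, as nfp_net_rx_give_one() does above. */
static void give_one(struct ring *r, void *frag)
{
	uint32_t wr_idx = RING_IDX(r->cnt, r->wr_p);

	r->slots[wr_idx] = frag;
	r->wr_p++;	/* wr_p - rd_p is the fill level at any time */
}
```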
|
/kernel/linux/linux-5.10/drivers/net/ethernet/freescale/enetc/ |
D | enetc.c |
     286  static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
     311  v->rx_ring.stats.packets,    in enetc_rx_net_dim()
     312  v->rx_ring.stats.bytes,    in enetc_rx_net_dim()
     331  work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);    in enetc_poll()
     473  static bool enetc_new_page(struct enetc_bdr *rx_ring,    in enetc_new_page() argument
     483  addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);    in enetc_new_page()
     484  if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {    in enetc_new_page()
     497  static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)    in enetc_refill_rx_ring() argument
     503  i = rx_ring->next_to_use;    in enetc_refill_rx_ring()
     504  rx_swbd = &rx_ring->rx_swbd[i];    in enetc_refill_rx_ring()
     [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/igbvf/ |
D | netdev.c |
     100  napi_gro_receive(&adapter->rx_ring->napi, skb);    in igbvf_receive_skb()
     133  static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,    in igbvf_alloc_rx_buffers() argument
     136  struct igbvf_adapter *adapter = rx_ring->adapter;    in igbvf_alloc_rx_buffers()
     145  i = rx_ring->next_to_use;    in igbvf_alloc_rx_buffers()
     146  buffer_info = &rx_ring->buffer_info[i];    in igbvf_alloc_rx_buffers()
     154  rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);    in igbvf_alloc_rx_buffers()
     212  if (i == rx_ring->count)    in igbvf_alloc_rx_buffers()
     214  buffer_info = &rx_ring->buffer_info[i];    in igbvf_alloc_rx_buffers()
     218  if (rx_ring->next_to_use != i) {    in igbvf_alloc_rx_buffers()
     219  rx_ring->next_to_use = i;    in igbvf_alloc_rx_buffers()
     [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c |
      75  static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,    in fm10k_alloc_mapped_page() argument
      88  rx_ring->rx_stats.alloc_failed++;    in fm10k_alloc_mapped_page()
      93  dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);    in fm10k_alloc_mapped_page()
      98  if (dma_mapping_error(rx_ring->dev, dma)) {    in fm10k_alloc_mapped_page()
     101  rx_ring->rx_stats.alloc_failed++;    in fm10k_alloc_mapped_page()
     117  void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)    in fm10k_alloc_rx_buffers() argument
     121  u16 i = rx_ring->next_to_use;    in fm10k_alloc_rx_buffers()
     127  rx_desc = FM10K_RX_DESC(rx_ring, i);    in fm10k_alloc_rx_buffers()
     128  bi = &rx_ring->rx_buffer[i];    in fm10k_alloc_rx_buffers()
     129  i -= rx_ring->count;    in fm10k_alloc_rx_buffers()
     [all …]
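fm10k_alloc_mapped_page() bumps the same alloc_failed counter on two distinct failures: the page allocation itself and the subsequent DMA mapping, freeing the page again in the second case. A user-space sketch with a map_page() callback standing in for dma_map_page()/dma_mapping_error():

```c
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>

struct rx_buffer { void *page; uint64_t dma; };	/* hypothetical */
struct ring_stats { uint64_t alloc_failed; };

/* Allocate and "map" one page, counting either failure, in the shape
 * of fm10k_alloc_mapped_page(); map_page() returns 0 on failure. */
static bool alloc_mapped_page(struct rx_buffer *bi, struct ring_stats *st,
			      uint64_t (*map_page)(void *))
{
	void *page;
	uint64_t dma;

	if (bi->page)		/* slot already populated: nothing to do */
		return true;

	page = malloc(4096);
	if (!page) {
		st->alloc_failed++;
		return false;
	}

	dma = map_page(page);
	if (!dma) {		/* mapping failed: give the page back */
		free(page);
		st->alloc_failed++;
		return false;
	}

	bi->page = page;
	bi->dma = dma;
	return true;
}
```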
|
D | fm10k_netdev.c |
      83  int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)    in fm10k_setup_rx_resources() argument
      85  struct device *dev = rx_ring->dev;    in fm10k_setup_rx_resources()
      88  size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;    in fm10k_setup_rx_resources()
      90  rx_ring->rx_buffer = vzalloc(size);    in fm10k_setup_rx_resources()
      91  if (!rx_ring->rx_buffer)    in fm10k_setup_rx_resources()
      94  u64_stats_init(&rx_ring->syncp);    in fm10k_setup_rx_resources()
      97  rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);    in fm10k_setup_rx_resources()
      98  rx_ring->size = ALIGN(rx_ring->size, 4096);    in fm10k_setup_rx_resources()
     100  rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,    in fm10k_setup_rx_resources()
     101  &rx_ring->dma, GFP_KERNEL);    in fm10k_setup_rx_resources()
     [all …]
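fm10k_setup_rx_resources() sizes the descriptor area as count * sizeof(desc) rounded up to a 4096-byte multiple before the coherent DMA allocation, so the ring occupies whole pages and never shares one with other data. A sketch of the rounding (ALIGN_UP mirrors the kernel's ALIGN macro):

```c
#include <stdint.h>

/* Round x up to a multiple of a; a must be a power of two. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Descriptor memory sizing as in fm10k_setup_rx_resources() above. */
static uint64_t ring_bytes(unsigned int count, unsigned int desc_size)
{
	return ALIGN_UP((uint64_t)count * desc_size, 4096);
}
```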
|
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c |
     112  static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
     506  static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,    in ixgbevf_process_skb_fields() argument
     510  ixgbevf_rx_hash(rx_ring, rx_desc, skb);    in ixgbevf_process_skb_fields()
     511  ixgbevf_rx_checksum(rx_ring, rx_desc, skb);    in ixgbevf_process_skb_fields()
     515  unsigned long *active_vlans = netdev_priv(rx_ring->netdev);    in ixgbevf_process_skb_fields()
     522  ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);    in ixgbevf_process_skb_fields()
     524  skb->protocol = eth_type_trans(skb, rx_ring->netdev);    in ixgbevf_process_skb_fields()
     528  struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,    in ixgbevf_get_rx_buffer() argument
     533  rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];    in ixgbevf_get_rx_buffer()
     537  dma_sync_single_range_for_cpu(rx_ring->dev,    in ixgbevf_get_rx_buffer()
     [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/oki-semi/pch_gbe/ |
D | pch_gbe_main.c |
     572  adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,    in pch_gbe_alloc_queues()
     573  sizeof(*adapter->rx_ring), GFP_KERNEL);    in pch_gbe_alloc_queues()
     574  if (!adapter->rx_ring)    in pch_gbe_alloc_queues()
     848  (unsigned long long)adapter->rx_ring->dma,    in pch_gbe_configure_rx()
     849  adapter->rx_ring->size);    in pch_gbe_configure_rx()
     867  rdba = adapter->rx_ring->dma;    in pch_gbe_configure_rx()
     868  rdlen = adapter->rx_ring->size - 0x10;    in pch_gbe_configure_rx()
     952  struct pch_gbe_rx_ring *rx_ring)    in pch_gbe_clean_rx_ring() argument
     960  for (i = 0; i < rx_ring->count; i++) {    in pch_gbe_clean_rx_ring()
     961  buffer_info = &rx_ring->buffer_info[i];    in pch_gbe_clean_rx_ring()
     [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/igc/ |
D | igc_dump.c |
     119  struct igc_ring *rx_ring;    in igc_rings_dump() local
     214  rx_ring = adapter->rx_ring[n];    in igc_rings_dump()
     215  netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use,    in igc_rings_dump()
     216  rx_ring->next_to_clean);    in igc_rings_dump()
     247  rx_ring = adapter->rx_ring[n];    in igc_rings_dump()
     250  rx_ring->queue_index);    in igc_rings_dump()
     255  for (i = 0; i < rx_ring->count; i++) {    in igc_rings_dump()
     259  buffer_info = &rx_ring->rx_buffer_info[i];    in igc_rings_dump()
     260  rx_desc = IGC_RX_DESC(rx_ring, i);    in igc_rings_dump()
     264  if (i == rx_ring->next_to_use)    in igc_rings_dump()
     [all …]
|
D | igc_main.c |
     349  static void igc_clean_rx_ring(struct igc_ring *rx_ring)    in igc_clean_rx_ring() argument
     351  u16 i = rx_ring->next_to_clean;    in igc_clean_rx_ring()
     353  dev_kfree_skb(rx_ring->skb);    in igc_clean_rx_ring()
     354  rx_ring->skb = NULL;    in igc_clean_rx_ring()
     357  while (i != rx_ring->next_to_alloc) {    in igc_clean_rx_ring()
     358  struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];    in igc_clean_rx_ring()
     363  dma_sync_single_range_for_cpu(rx_ring->dev,    in igc_clean_rx_ring()
     366  igc_rx_bufsz(rx_ring),    in igc_clean_rx_ring()
     370  dma_unmap_page_attrs(rx_ring->dev,    in igc_clean_rx_ring()
     372  igc_rx_pg_size(rx_ring),    in igc_clean_rx_ring()
     [all …]
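igc_clean_rx_ring() walks only the occupied span of the ring, from next_to_clean up to next_to_alloc with wraparound, rather than all count slots; unoccupied slots have nothing to release. A sketch of that traversal, with a hypothetical free_buf() standing in for the driver's sync/unmap/free sequence:

```c
/* Release every occupied slot between ntc and nta, wrapping at count,
 * as igc_clean_rx_ring() does above. */
static void clean_ring(unsigned int count, unsigned int ntc,
		       unsigned int nta, void (*free_buf)(unsigned int))
{
	unsigned int i = ntc;

	while (i != nta) {
		free_buf(i);	/* sync + unmap + page free in the driver */
		i++;
		if (i == count)
			i = 0;
	}
}
```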
|
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/e1000e/ |
D | netdev.c |
     203  struct e1000_ring *rx_ring = adapter->rx_ring;    in e1000e_dump() local
     319  0, rx_ring->next_to_use, rx_ring->next_to_clean);    in e1000e_dump()
     355  for (i = 0; i < rx_ring->count; i++) {    in e1000e_dump()
     357  buffer_info = &rx_ring->buffer_info[i];    in e1000e_dump()
     358  rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);    in e1000e_dump()
     363  if (i == rx_ring->next_to_use)    in e1000e_dump()
     365  else if (i == rx_ring->next_to_clean)    in e1000e_dump()
     421  for (i = 0; i < rx_ring->count; i++) {    in e1000e_dump()
     424  buffer_info = &rx_ring->buffer_info[i];    in e1000e_dump()
     425  rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);    in e1000e_dump()
     [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/agere/ |
D | et131x.c |
     288  struct rx_ring {    struct
     492  struct rx_ring rx_ring;    member
     740  struct rx_ring *rx_ring = &adapter->rx_ring;    in et131x_rx_dma_enable() local
     742  if (rx_ring->fbr[1]->buffsize == 4096)    in et131x_rx_dma_enable()
     744  else if (rx_ring->fbr[1]->buffsize == 8192)    in et131x_rx_dma_enable()
     746  else if (rx_ring->fbr[1]->buffsize == 16384)    in et131x_rx_dma_enable()
     750  if (rx_ring->fbr[0]->buffsize == 256)    in et131x_rx_dma_enable()
     752  else if (rx_ring->fbr[0]->buffsize == 512)    in et131x_rx_dma_enable()
     754  else if (rx_ring->fbr[0]->buffsize == 1024)    in et131x_rx_dma_enable()
    1539  struct rx_ring *rx_local = &adapter->rx_ring;    in et131x_config_rx_dma_regs()
     [all …]
|