Uses of the identifier "txr", grouped by directory and file:

/drivers/net/ethernet/broadcom/bnxt/
bnxt_xdp.c
  24    struct bnxt_tx_ring_info *txr,    in bnxt_xmit_bd() argument
  32    prod = txr->tx_prod;    in bnxt_xmit_bd()
  33    tx_buf = &txr->tx_buf_ring[prod];    in bnxt_xmit_bd()
  35    txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];    in bnxt_xmit_bd()
  43    txr->tx_prod = prod;    in bnxt_xmit_bd()
  47    static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,    in __bnxt_xmit_xdp() argument
  52    tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);    in __bnxt_xmit_xdp()
  58    struct bnxt_tx_ring_info *txr,    in __bnxt_xmit_xdp_redirect() argument
  64    tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);    in __bnxt_xmit_xdp_redirect()
  73    struct bnxt_tx_ring_info *txr = bnapi->tx_ring;    in bnxt_tx_int_xdp() local
  [all …]
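The bnxt_xmit_bd() hits above show the two-level descriptor ring: TX_RING(prod) picks a descriptor page and TX_IDX(prod) the slot within it. A minimal userspace sketch of that split, assuming a power-of-two page size; DESC_PER_PAGE, TX_PAGE_SHIFT, ring_page() and ring_idx() are illustrative names, not the driver's real macros:

    /* Split one free-running producer counter into (page, index). */
    #include <stdint.h>
    #include <stdio.h>

    #define DESC_PER_PAGE  256     /* assumed power of two */
    #define TX_PAGE_SHIFT  8       /* log2(DESC_PER_PAGE) */

    static unsigned int ring_page(uint16_t prod)
    {
        return prod >> TX_PAGE_SHIFT;        /* which descriptor page */
    }

    static unsigned int ring_idx(uint16_t prod)
    {
        return prod & (DESC_PER_PAGE - 1);   /* slot within that page */
    }

    int main(void)
    {
        uint16_t prod = 517;                 /* arbitrary producer value */

        printf("prod %u -> page %u, index %u\n",
               prod, ring_page(prod), ring_idx(prod));
        return 0;
    }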
bnxt.c
  365   static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,    in bnxt_txr_db_kick() argument
  368   bnxt_db_write(bp, &txr->tx_db, prod);    in bnxt_txr_db_kick()
  369   txr->kick_pending = 0;    in bnxt_txr_db_kick()
  373   struct bnxt_tx_ring_info *txr,    in bnxt_txr_netif_try_stop_queue() argument
  384   if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {    in bnxt_txr_netif_try_stop_queue()
  404   struct bnxt_tx_ring_info *txr;    in bnxt_start_xmit() local
  415   txr = &bp->tx_ring[bp->tx_ring_map[i]];    in bnxt_start_xmit()
  416   prod = txr->tx_prod;    in bnxt_start_xmit()
  418   free_size = bnxt_tx_avail(bp, txr);    in bnxt_start_xmit()
  421   if (net_ratelimit() && txr->kick_pending)    in bnxt_start_xmit()
  [all …]
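bnxt_start_xmit() checks bnxt_tx_avail() before queueing, and bnxt_txr_netif_try_stop_queue() stops the queue when the ring runs low, then re-reads availability in case the completion path freed slots in the meantime. A sketch of that stop-then-recheck pattern, assuming a power-of-two ring; struct txq and its fields are stand-ins, not the driver's types:

    #include <stdbool.h>
    #include <stdio.h>

    struct txq {
        unsigned int prod, cons, size;   /* size is a power of two */
        bool stopped;
    };

    static unsigned int tx_avail(const struct txq *q)
    {
        return q->size - ((q->prod - q->cons) & (q->size - 1));
    }

    /* Returns true if the queue stays stopped. */
    static bool try_stop_queue(struct txq *q, unsigned int wake_thresh)
    {
        q->stopped = true;               /* ~netif_tx_stop_queue() */
        /* The completion side may have advanced cons meanwhile; recheck. */
        if (tx_avail(q) >= wake_thresh) {
            q->stopped = false;          /* ~netif_tx_start_queue() */
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct txq q = { .prod = 1010, .cons = 0, .size = 1024 };

        if (try_stop_queue(&q, 16))
            printf("queue stopped, %u slots free\n", tx_avail(&q));
        return 0;
    }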
bnxt_xdp.h
  14    struct bnxt_tx_ring_info *txr,
bnxt.h
  2065  static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)    in bnxt_tx_avail() argument
  2071  ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);    in bnxt_tx_avail()
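bnxt_tx_avail() derives the free-descriptor count from two free-running indices as ring_size - ((tx_prod - tx_cons) & tx_ring_mask). Because the counters are unsigned and the ring size is a power of two, the arithmetic stays correct even after the producer wraps past the counter's maximum, as this self-contained sketch shows (the 1024-entry ring size is an assumed value):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t mask = 1023;     /* 1024-entry ring */
        uint16_t prod = 5;        /* producer has wrapped past 65535 */
        uint16_t cons = 65531;    /* consumer has not wrapped yet */
        uint16_t in_flight = (uint16_t)(prod - cons) & mask;
        unsigned int avail = 1024u - in_flight;

        /* 5 - 65531 underflows to 10 (mod 2^16): 10 buffers in flight. */
        printf("in flight: %u, free: %u\n", in_flight, avail);
        return 0;
    }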
bnxt_ethtool.c
  3248  struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];    in bnxt_run_loopback() local
  3278  bnxt_xmit_bd(bp, txr, map, pkt_size);    in bnxt_run_loopback()
  3283  bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);    in bnxt_run_loopback()
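The loopback self-test posts a single buffer descriptor with bnxt_xmit_bd() and then rings the doorbell with bnxt_db_write() to publish the new producer index to the NIC. A schematic of that post-then-kick ordering; the release fence stands in for the kernel's DMA write barrier, and all names and types here are made up for illustration:

    #include <stdint.h>

    struct tx_desc { uint64_t addr; uint32_t len_flags; uint32_t pad; };

    struct ring {
        struct tx_desc *desc;
        volatile uint32_t *doorbell;   /* device register, mapped elsewhere */
        uint32_t prod, mask;
    };

    static void xmit_one(struct ring *r, uint64_t dma_addr, uint32_t len)
    {
        struct tx_desc *d = &r->desc[r->prod & r->mask];

        d->addr = dma_addr;
        d->len_flags = len;            /* descriptor is written first ... */

        __atomic_thread_fence(__ATOMIC_RELEASE);   /* ~dma_wmb() */

        r->prod++;
        *r->doorbell = r->prod;        /* ... then the producer is published */
    }

    int main(void)
    {
        static struct tx_desc descs[8];
        static volatile uint32_t fake_db;   /* fake doorbell for the demo */
        struct ring r = { descs, &fake_db, 0, 7 };

        xmit_one(&r, 0x1000, 64);
        return 0;
    }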
/drivers/net/ethernet/qualcomm/
qca_spi.c
  287   if (qca->txr.skb[qca->txr.head] == NULL)    in qcaspi_transmit()
  300   while (qca->txr.skb[qca->txr.head]) {    in qcaspi_transmit()
  301   pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;    in qcaspi_transmit()
  309   if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {    in qcaspi_transmit()
  316   n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;    in qcaspi_transmit()
  324   dev_kfree_skb(qca->txr.skb[qca->txr.head]);    in qcaspi_transmit()
  325   qca->txr.skb[qca->txr.head] = NULL;    in qcaspi_transmit()
  326   qca->txr.size -= pkt_len;    in qcaspi_transmit()
  327   new_head = qca->txr.head + 1;    in qcaspi_transmit()
  328   if (new_head >= qca->txr.count)    in qcaspi_transmit()
  [all …]
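qcaspi_transmit() drains the TX ring from head: send the head skb over SPI, account its bytes, free it, NULL the slot, and wrap head at txr.count. A simplified userspace model of that loop (struct pkt replaces sk_buff, and the per-packet SPI framing overhead QCASPI_HW_PKT_LEN is omitted):

    #include <stdio.h>
    #include <stdlib.h>

    #define RING_COUNT 4

    struct pkt { int len; };

    struct tx_ring {
        struct pkt *skb[RING_COUNT];   /* NULL slots are empty */
        int head, tail, size, count;
    };

    static void drain(struct tx_ring *r)
    {
        while (r->skb[r->head]) {              /* stop at first empty slot */
            struct pkt *p = r->skb[r->head];

            printf("sent %d bytes\n", p->len); /* ~qcaspi_tx_frame() */
            r->size -= p->len;
            free(p);                           /* ~dev_kfree_skb() */
            r->skb[r->head] = NULL;
            if (++r->head >= r->count)         /* wrap like the driver */
                r->head = 0;
        }
    }

    int main(void)
    {
        struct tx_ring r = { .count = RING_COUNT };
        struct pkt *p = malloc(sizeof(*p));

        p->len = 60;
        r.skb[0] = p;      /* one queued packet at the head */
        r.size = p->len;
        drain(&r);
        return 0;
    }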
qca_debug.c
  81    if (qca->txr.skb[qca->txr.head] == NULL)    in qcaspi_info_show()
  83    else if (qca->txr.skb[qca->txr.tail])    in qcaspi_info_show()
  91    qca->txr.size);    in qcaspi_info_show()
  257   ring->tx_pending = qca->txr.count;    in qcaspi_get_ringparam()
  273   qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);    in qcaspi_set_ringparam()
  274   qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);    in qcaspi_set_ringparam()
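qcaspi_set_ringparam() clamps the requested TX ring length into [TX_RING_MIN_LEN, TX_RING_MAX_LEN] with a max_t/min_t pair. The same clamp in plain C; the bound values below are placeholders, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define TX_RING_MIN_LEN 2    /* illustrative bounds, not the driver's */
    #define TX_RING_MAX_LEN 16

    static uint16_t clamp_ring_len(uint32_t requested)
    {
        /* max_t(): raise to the floor ... */
        uint32_t n = requested > TX_RING_MIN_LEN ? requested : TX_RING_MIN_LEN;

        /* ... min_t(): cap at the ceiling. */
        return n < TX_RING_MAX_LEN ? n : TX_RING_MAX_LEN;
    }

    int main(void)
    {
        printf("%u %u %u\n", clamp_ring_len(0), clamp_ring_len(8),
               clamp_ring_len(1000));   /* -> 2 8 16 */
        return 0;
    }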
qca_spi.h
  85    struct tx_ring txr;    member
/drivers/net/ethernet/broadcom/
bnx2.c
  247   static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)    in bnx2_tx_avail() argument
  254   diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);    in bnx2_tx_avail()
  697   struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;    in bnx2_free_tx_mem() local
  699   if (txr->tx_desc_ring) {    in bnx2_free_tx_mem()
  701   txr->tx_desc_ring,    in bnx2_free_tx_mem()
  702   txr->tx_desc_mapping);    in bnx2_free_tx_mem()
  703   txr->tx_desc_ring = NULL;    in bnx2_free_tx_mem()
  705   kfree(txr->tx_buf_ring);    in bnx2_free_tx_mem()
  706   txr->tx_buf_ring = NULL;    in bnx2_free_tx_mem()
  749   struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;    in bnx2_alloc_tx_mem() local
  [all …]
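bnx2_tx_avail() wraps both index loads in READ_ONCE() because tx_prod and tx_cons are advanced from different contexts (transmit path vs. NAPI completion), and the subtraction must not see torn or refetched values. A userspace analogue using C11 relaxed atomic loads; MAX_TX_DESC and the struct layout are assumptions, and the real function also folds the ring mask into the result:

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_TX_DESC 256

    struct txr {
        atomic_uint prod;   /* written by the transmit path */
        atomic_uint cons;   /* written by the completion path */
    };

    static unsigned int tx_avail(struct txr *t)
    {
        /* Each index is loaded exactly once, like READ_ONCE(). */
        unsigned int p = atomic_load_explicit(&t->prod, memory_order_relaxed);
        unsigned int c = atomic_load_explicit(&t->cons, memory_order_relaxed);

        return MAX_TX_DESC - (p - c);   /* free slots; indices free-run */
    }

    int main(void)
    {
        struct txr t;

        atomic_init(&t.prod, 10);
        atomic_init(&t.cons, 4);
        printf("free: %u\n", tx_avail(&t));   /* -> 250 */
        return 0;
    }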
bcmsysport.c
  1126  struct bcm_sysport_tx_ring *txr;    in bcm_sysport_rx_isr() local
  1161  txr = &priv->tx_rings[ring];    in bcm_sysport_rx_isr()
  1163  if (likely(napi_schedule_prep(&txr->napi))) {    in bcm_sysport_rx_isr()
  1165  __napi_schedule(&txr->napi);    in bcm_sysport_rx_isr()
  1177  struct bcm_sysport_tx_ring *txr;    in bcm_sysport_tx_isr() local
  1193  txr = &priv->tx_rings[ring];    in bcm_sysport_tx_isr()
  1195  if (likely(napi_schedule_prep(&txr->napi))) {    in bcm_sysport_tx_isr()
  1197  __napi_schedule_irqoff(&txr->napi);    in bcm_sysport_tx_isr()
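Both ISRs map a completion source to its TX ring and schedule that ring's NAPI context; napi_schedule_prep() guards against double-scheduling. A stripped-down sketch of the status-to-ring dispatch (the bit-per-ring layout is an assumption, and schedule_ring() stands in for the prep-then-__napi_schedule() pair):

    #include <stdio.h>

    #define NUM_TX_RINGS 8

    static void schedule_ring(int ring)
    {
        printf("poll ring %d\n", ring);   /* ~__napi_schedule() */
    }

    static void tx_isr(unsigned int status)
    {
        /* One status bit per ring: wake each poller that has work. */
        for (int ring = 0; ring < NUM_TX_RINGS; ring++)
            if (status & (1u << ring))
                schedule_ring(ring);
    }

    int main(void)
    {
        tx_isr(0x05);   /* rings 0 and 2 completed */
        return 0;
    }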
/drivers/net/ethernet/freescale/enetc/
enetc.c
  850   static int enetc_alloc_txbdr(struct enetc_bdr *txr)    in enetc_alloc_txbdr() argument
  854   txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));    in enetc_alloc_txbdr()
  855   if (!txr->tx_swbd)    in enetc_alloc_txbdr()
  858   err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));    in enetc_alloc_txbdr()
  860   vfree(txr->tx_swbd);    in enetc_alloc_txbdr()
  864   txr->next_to_clean = 0;    in enetc_alloc_txbdr()
  865   txr->next_to_use = 0;    in enetc_alloc_txbdr()
  870   static void enetc_free_txbdr(struct enetc_bdr *txr)    in enetc_free_txbdr() argument
  874   for (i = 0; i < txr->bd_count; i++)    in enetc_free_txbdr()
  875   enetc_free_tx_skb(txr, &txr->tx_swbd[i]);    in enetc_free_txbdr()
  [all …]
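enetc_alloc_txbdr() allocates the software BD array first (vzalloc) and the DMA descriptor ring second, unwinding the first allocation when the second fails, then zeroes the ring cursors. The same two-step allocate-and-unwind shape in portable C, with calloc standing in for both kernel allocators:

    #include <stdlib.h>

    struct swbd { void *buf; };
    struct hwbd { unsigned long long addr; };

    struct bdr {
        struct swbd *sw;   /* per-descriptor bookkeeping */
        struct hwbd *hw;   /* hardware-visible descriptor ring */
        int bd_count, next_to_use, next_to_clean;
    };

    static int alloc_txbdr(struct bdr *r)
    {
        r->sw = calloc(r->bd_count, sizeof(*r->sw));
        if (!r->sw)
            return -1;

        r->hw = calloc(r->bd_count, sizeof(*r->hw));
        if (!r->hw) {
            free(r->sw);           /* unwind the earlier allocation */
            r->sw = NULL;
            return -1;
        }

        r->next_to_use = 0;
        r->next_to_clean = 0;
        return 0;
    }

    int main(void)
    {
        struct bdr r = { .bd_count = 64 };

        if (alloc_txbdr(&r) == 0) {
            free(r.hw);
            free(r.sw);
        }
        return 0;
    }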
/drivers/net/ethernet/sgi/
ioc3-eth.c
  86    struct ioc3_etxd *txr;    member
  615   desc = &ip->txr[entry];    in ioc3_tx_unmap()
  642   ip->txr[i].cmd = 0;    in ioc3_clean_tx_ring()
  902   ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);    in ioc3eth_probe()
  1041  desc = &ip->txr[produce];    in ioc3_start_xmit()
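At probe time the driver derives txr from the raw tx_ring allocation with PTR_ALIGN(ip->tx_ring, SZ_16K), so the descriptor base lands on a 16 KiB boundary. The align-up pointer math behind PTR_ALIGN, assuming a power-of-two alignment (the allocation carries extra slack so the aligned pointer stays in bounds):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SZ_16K 0x4000u

    /* Round p up to the next multiple of a (a must be a power of two). */
    static void *ptr_align(void *p, uintptr_t a)
    {
        return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
    }

    int main(void)
    {
        void *raw = malloc(SZ_16K * 2);       /* slack for alignment */
        void *txr = ptr_align(raw, SZ_16K);

        printf("raw %p -> aligned %p\n", raw, txr);
        free(raw);
        return 0;
    }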
/drivers/net/ethernet/amazon/ena/
ena_netdev.c
  622   struct ena_ring *txr, *rxr;    in ena_init_io_rings() local
  628   txr = &adapter->tx_ring[i];    in ena_init_io_rings()
  632   ena_init_io_rings_common(adapter, txr, i);    in ena_init_io_rings()
  635   txr->ring_size = adapter->requested_tx_ring_size;    in ena_init_io_rings()
  636   txr->tx_max_header_size = ena_dev->tx_max_header_size;    in ena_init_io_rings()
  637   txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;    in ena_init_io_rings()
  638   txr->sgl_size = adapter->max_tx_sgl_size;    in ena_init_io_rings()
  639   txr->smoothed_interval =    in ena_init_io_rings()
  641   txr->disable_meta_caching = adapter->disable_meta_caching;    in ena_init_io_rings()
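ena_init_io_rings() walks the adapter's queues and seeds each per-queue TX ring struct from adapter- and device-wide defaults (ring size, SGL size, and so on). A stripped-down version of that loop; the field set here is a small assumed subset of the real struct ena_ring:

    #include <stdio.h>

    #define NUM_QUEUES 4

    struct ring { int qid, size, sgl_size; };

    struct adapter {
        struct ring tx_ring[NUM_QUEUES];
        int requested_tx_ring_size, max_tx_sgl_size;
    };

    static void init_io_rings(struct adapter *a)
    {
        for (int i = 0; i < NUM_QUEUES; i++) {
            struct ring *txr = &a->tx_ring[i];

            /* Copy adapter-wide defaults into the per-queue ring. */
            txr->qid = i;
            txr->size = a->requested_tx_ring_size;
            txr->sgl_size = a->max_tx_sgl_size;
        }
    }

    int main(void)
    {
        struct adapter a = { .requested_tx_ring_size = 1024,
                             .max_tx_sgl_size = 16 };

        init_io_rings(&a);
        printf("ring 0 size %d\n", a.tx_ring[0].size);
        return 0;
    }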