
Searched refs:tx_ring (Results 1 – 25 of 223) sorted by relevance


/drivers/net/ethernet/intel/i40evf/
i40e_txrx.c
80 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) in i40evf_clean_tx_ring() argument
86 if (!tx_ring->tx_bi) in i40evf_clean_tx_ring()
90 for (i = 0; i < tx_ring->count; i++) in i40evf_clean_tx_ring()
91 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in i40evf_clean_tx_ring()
93 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40evf_clean_tx_ring()
94 memset(tx_ring->tx_bi, 0, bi_size); in i40evf_clean_tx_ring()
97 memset(tx_ring->desc, 0, tx_ring->size); in i40evf_clean_tx_ring()
99 tx_ring->next_to_use = 0; in i40evf_clean_tx_ring()
100 tx_ring->next_to_clean = 0; in i40evf_clean_tx_ring()
102 if (!tx_ring->netdev) in i40evf_clean_tx_ring()
[all …]
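
The hits above show the usual shape of a TX ring teardown: release every buffer still on the ring, zero the bookkeeping array and the descriptor memory, then rewind both ring indices. Below is a minimal userspace sketch of that pattern, with malloc/free standing in for the kernel's DMA unmap and skb free; all type and field names are illustrative, not the driver's.

#include <stdlib.h>
#include <string.h>

struct tx_buffer { void *skb; };                /* stand-in for i40e_tx_buffer */

struct tx_ring {
	struct tx_buffer *tx_bi;                /* per-descriptor bookkeeping */
	void *desc;                             /* descriptor memory */
	size_t size;                            /* bytes of descriptor memory */
	unsigned int count;                     /* number of descriptors */
	unsigned int next_to_use, next_to_clean;
};

static void clean_tx_ring(struct tx_ring *ring)
{
	unsigned int i;

	if (!ring->tx_bi)
		return;

	/* release anything still queued on the ring */
	for (i = 0; i < ring->count; i++) {
		free(ring->tx_bi[i].skb);       /* kernel: DMA unmap + skb free */
		ring->tx_bi[i].skb = NULL;
	}

	/* wipe bookkeeping and descriptors, then rewind both indices */
	memset(ring->tx_bi, 0, sizeof(*ring->tx_bi) * ring->count);
	memset(ring->desc, 0, ring->size);
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

int main(void)
{
	struct tx_ring ring = { .count = 4, .size = 4 * 16 };

	ring.tx_bi = calloc(ring.count, sizeof(*ring.tx_bi));
	ring.desc = calloc(1, ring.size);
	if (!ring.tx_bi || !ring.desc)
		return 1;

	ring.tx_bi[1].skb = malloc(64);         /* one packet still queued */
	ring.next_to_use = 2;

	clean_tx_ring(&ring);
	free(ring.desc);
	free(ring.tx_bi);
	return 0;
}
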
i40e_txrx.h
358 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
360 int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
362 void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
367 int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
377 static inline u32 i40e_get_head(struct i40e_ring *tx_ring) in i40e_get_head() argument
379 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; in i40e_get_head()
418 static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in i40e_maybe_stop_tx() argument
420 if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) in i40e_maybe_stop_tx()
422 return __i40evf_maybe_stop_tx(tx_ring, size); in i40e_maybe_stop_tx()
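
The i40e_maybe_stop_tx() hits reflect a two-stage flow-control idiom: an inline fast path that only compares the free-descriptor count against the space a packet needs, and an out-of-line slow path that stops the queue and re-checks to close the race against concurrent cleanup. A runnable sketch of the idiom follows, using the same unused-descriptor arithmetic as the I40E_DESC_UNUSED() macro; the queue stop itself is modeled by a flag, since netif_stop_subqueue() has no userspace analog.

#include <stdbool.h>
#include <stdio.h>

struct tx_ring { unsigned int count, next_to_use, next_to_clean; };

/* same arithmetic as the I40E_DESC_UNUSED() macro */
static unsigned int desc_unused(const struct tx_ring *r)
{
	return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
	       r->next_to_clean - r->next_to_use - 1;
}

/* slow path: stop the queue, then re-check to close the race with the
 * completion side (the kernel stops the netdev subqueue and issues a
 * memory barrier here; a flag models the stopped state) */
static int maybe_stop_tx_slow(struct tx_ring *r, unsigned int size, bool *stopped)
{
	*stopped = true;
	if (desc_unused(r) < size)
		return -1;                      /* genuinely full */
	*stopped = false;                       /* raced with cleanup: restart */
	return 0;
}

/* fast path: the common case is a single comparison */
static int maybe_stop_tx(struct tx_ring *r, unsigned int size, bool *stopped)
{
	if (desc_unused(r) >= size)
		return 0;
	return maybe_stop_tx_slow(r, size, stopped);
}

int main(void)
{
	struct tx_ring r = { .count = 8, .next_to_use = 6, .next_to_clean = 0 };
	bool stopped = false;
	int ret = maybe_stop_tx(&r, 4, &stopped);

	printf("unused=%u ret=%d stopped=%d\n", desc_unused(&r), ret, stopped);
	return 0;
}
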
/drivers/net/ethernet/intel/i40e/
i40e_txrx.c
50 static void i40e_fdir(struct i40e_ring *tx_ring, in i40e_fdir() argument
54 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
59 i = tx_ring->next_to_use; in i40e_fdir()
60 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_fdir()
63 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
120 struct i40e_ring *tx_ring; in i40e_program_fdir_filter() local
136 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
137 dev = tx_ring->dev; in i40e_program_fdir_filter()
141 if (I40E_DESC_UNUSED(tx_ring) > 1) in i40e_program_fdir_filter()
147 if (!(I40E_DESC_UNUSED(tx_ring) > 1)) in i40e_program_fdir_filter()
[all …]
i40e_fcoe.c
995 static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring, in i40e_fcoe_program_ddp() argument
1002 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fcoe_program_ddp()
1003 u16 i = tx_ring->next_to_use; in i40e_fcoe_program_ddp()
1025 ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i); in i40e_fcoe_program_ddp()
1027 if (i == tx_ring->count) in i40e_fcoe_program_ddp()
1047 queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++); in i40e_fcoe_program_ddp()
1048 if (i == tx_ring->count) in i40e_fcoe_program_ddp()
1057 filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i); in i40e_fcoe_program_ddp()
1059 if (i == tx_ring->count) in i40e_fcoe_program_ddp()
1082 tx_ring->next_to_use = i; in i40e_fcoe_program_ddp()
[all …]
i40e_txrx.h
361 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
363 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
365 void i40e_free_tx_resources(struct i40e_ring *tx_ring);
369 void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
373 struct i40e_ring *tx_ring, u32 *flags);
377 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
387 static inline u32 i40e_get_head(struct i40e_ring *tx_ring) in i40e_get_head() argument
389 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; in i40e_get_head()
428 static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in i40e_maybe_stop_tx() argument
430 if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) in i40e_maybe_stop_tx()
[all …]
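
i40e_get_head(), visible in both the i40evf and i40e headers, reads a head value that the hardware writes back immediately after the last descriptor, so the ring allocation must reserve one extra descriptor-sized slot. A small standalone sketch of that layout; a plain calloc models the DMA-coherent allocation, and the descriptor struct is a stand-in.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct tx_desc { uint64_t addr; uint64_t cmd; };   /* 16-byte stand-in */

struct tx_ring { struct tx_desc *desc; unsigned int count; };

/* the head-writeback word lives immediately after the last descriptor,
 * which is why the allocation below reserves count + 1 slots */
static uint32_t get_head(const struct tx_ring *r)
{
	const void *head = r->desc + r->count;
	return *(const volatile uint32_t *)head;
}

int main(void)
{
	struct tx_ring r = { .count = 4 };

	/* allocate ring + one extra slot for the write-back area */
	r.desc = calloc(r.count + 1, sizeof(*r.desc));
	if (!r.desc)
		return 1;

	/* pretend the hardware wrote back a head value of 3 */
	*(uint32_t *)(r.desc + r.count) = 3;
	printf("head = %u\n", get_head(&r));
	free(r.desc);
	return 0;
}
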
/drivers/net/ethernet/intel/fm10k/
fm10k_main.c
767 static int fm10k_tso(struct fm10k_ring *tx_ring, in fm10k_tso() argument
800 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tso()
806 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; in fm10k_tso()
808 netdev_err(tx_ring->netdev, in fm10k_tso()
813 static void fm10k_tx_csum(struct fm10k_ring *tx_ring, in fm10k_tx_csum() argument
835 dev_warn(tx_ring->dev, in fm10k_tx_csum()
837 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
877 dev_warn(tx_ring->dev, in fm10k_tx_csum()
882 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
888 tx_ring->tx_stats.csum_good++; in fm10k_tx_csum()
[all …]
fm10k_netdev.c
31 int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring) in fm10k_setup_tx_resources() argument
33 struct device *dev = tx_ring->dev; in fm10k_setup_tx_resources()
36 size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; in fm10k_setup_tx_resources()
38 tx_ring->tx_buffer = vzalloc(size); in fm10k_setup_tx_resources()
39 if (!tx_ring->tx_buffer) in fm10k_setup_tx_resources()
42 u64_stats_init(&tx_ring->syncp); in fm10k_setup_tx_resources()
45 tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc); in fm10k_setup_tx_resources()
46 tx_ring->size = ALIGN(tx_ring->size, 4096); in fm10k_setup_tx_resources()
48 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in fm10k_setup_tx_resources()
49 &tx_ring->dma, GFP_KERNEL); in fm10k_setup_tx_resources()
[all …]
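
fm10k_setup_tx_resources() shows the canonical two-part allocation: a zeroed bookkeeping array sized per descriptor, plus descriptor memory rounded up to a 4 KiB boundary. Here is a userspace sketch of the same shape, with calloc and aligned_alloc standing in for vzalloc and dma_alloc_coherent; the struct layouts are illustrative only.

#include <stdio.h>
#include <stdlib.h>

#define RING_ALIGN 4096u        /* rings are padded to a page, as in the
                                 * ALIGN(size, 4096) hit above */

struct tx_buffer { void *skb; };
struct tx_desc { unsigned long long addr; unsigned int len, flags; };

struct tx_ring {
	struct tx_buffer *tx_buffer;
	struct tx_desc *desc;
	size_t size;
	unsigned int count;
};

static int setup_tx_resources(struct tx_ring *ring)
{
	/* bookkeeping array: the kernel uses vzalloc, we use calloc */
	ring->tx_buffer = calloc(ring->count, sizeof(*ring->tx_buffer));
	if (!ring->tx_buffer)
		return -1;

	/* descriptor memory, rounded up to the alignment boundary;
	 * the kernel obtains this with dma_alloc_coherent instead */
	ring->size = ring->count * sizeof(*ring->desc);
	ring->size = (ring->size + RING_ALIGN - 1) & ~(size_t)(RING_ALIGN - 1);
	ring->desc = aligned_alloc(RING_ALIGN, ring->size);
	if (!ring->desc) {
		free(ring->tx_buffer);
		ring->tx_buffer = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct tx_ring ring = { .count = 512 };

	if (setup_tx_resources(&ring))
		return 1;
	printf("ring of %u descriptors, %zu bytes\n", ring.count, ring.size);
	free(ring.desc);
	free(ring.tx_buffer);
	return 0;
}
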
/drivers/net/ethernet/netronome/nfp/
nfp_net_common.c
474 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, in nfp_net_tx_ring_init() argument
479 tx_ring->idx = idx; in nfp_net_tx_ring_init()
480 tx_ring->r_vec = r_vec; in nfp_net_tx_ring_init()
482 tx_ring->qcidx = tx_ring->idx * nn->stride_tx; in nfp_net_tx_ring_init()
483 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); in nfp_net_tx_ring_init()
608 static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt) in nfp_net_tx_full() argument
610 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt); in nfp_net_tx_full()
614 static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring) in nfp_net_tx_ring_should_wake() argument
616 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4); in nfp_net_tx_ring_should_wake()
619 static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring) in nfp_net_tx_ring_should_stop() argument
[all …]
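
The nfp ring keeps wr_p and rd_p as free-running counters, so occupancy is plain unsigned subtraction and survives wrap-around; nfp_net_tx_full() then compares that against the ring size minus a headroom of descriptors, and stop/wake both key off the same headroom. A runnable sketch of the arithmetic; MAX_FRAGS is a stand-in for MAX_SKB_FRAGS, and 17 is only an assumed value.

#include <stdio.h>

/* wr_p and rd_p are free-running counters that are never masked;
 * unsigned subtraction gives the occupancy even after wrap-around */
struct tx_ring { unsigned int cnt, wr_p, rd_p; };

#define MAX_FRAGS 17            /* assumed stand-in for MAX_SKB_FRAGS */

static int tx_full(const struct tx_ring *r, unsigned int dcnt)
{
	/* full once fewer than dcnt descriptors remain free */
	return (r->wr_p - r->rd_p) >= (r->cnt - dcnt);
}

/* stop well before the ring is exhausted so a maximally fragmented
 * packet still fits; wake only once the same headroom is back */
static int should_stop(const struct tx_ring *r) { return tx_full(r, MAX_FRAGS * 4); }
static int should_wake(const struct tx_ring *r) { return !tx_full(r, MAX_FRAGS * 4); }

int main(void)
{
	/* counters close to wrap-around still compute correctly */
	struct tx_ring r = { .cnt = 256, .wr_p = 0xFFFFFFF0u, .rd_p = 0xFFFFFF50u };

	printf("occupancy=%u stop=%d wake=%d\n",
	       r.wr_p - r.rd_p, should_stop(&r), should_wake(&r));
	return 0;
}
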
nfp_net_debugfs.c
121 struct nfp_net_tx_ring *tx_ring; in nfp_net_debugfs_tx_q_read() local
130 if (!r_vec->nfp_net || !r_vec->tx_ring) in nfp_net_debugfs_tx_q_read()
133 tx_ring = r_vec->tx_ring; in nfp_net_debugfs_tx_q_read()
137 txd_cnt = tx_ring->cnt; in nfp_net_debugfs_tx_q_read()
139 d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); in nfp_net_debugfs_tx_q_read()
140 d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q); in nfp_net_debugfs_tx_q_read()
143 tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); in nfp_net_debugfs_tx_q_read()
146 txd = &tx_ring->txds[i]; in nfp_net_debugfs_tx_q_read()
151 skb = READ_ONCE(tx_ring->txbufs[i].skb); in nfp_net_debugfs_tx_q_read()
155 if (tx_ring->txbufs[i].dma_addr) in nfp_net_debugfs_tx_q_read()
[all …]
/drivers/net/ethernet/intel/igbvf/
netdev.c
438 struct igbvf_ring *tx_ring) in igbvf_setup_tx_resources() argument
443 size = sizeof(struct igbvf_buffer) * tx_ring->count; in igbvf_setup_tx_resources()
444 tx_ring->buffer_info = vzalloc(size); in igbvf_setup_tx_resources()
445 if (!tx_ring->buffer_info) in igbvf_setup_tx_resources()
449 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); in igbvf_setup_tx_resources()
450 tx_ring->size = ALIGN(tx_ring->size, 4096); in igbvf_setup_tx_resources()
452 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, in igbvf_setup_tx_resources()
453 &tx_ring->dma, GFP_KERNEL); in igbvf_setup_tx_resources()
454 if (!tx_ring->desc) in igbvf_setup_tx_resources()
457 tx_ring->adapter = adapter; in igbvf_setup_tx_resources()
[all …]
/drivers/net/ethernet/amazon/ena/
ena_netdev.c
175 txr = &adapter->tx_ring[i]; in ena_init_io_rings()
207 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_setup_tx_resources() local
211 if (tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
217 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; in ena_setup_tx_resources()
220 tx_ring->tx_buffer_info = vzalloc_node(size, node); in ena_setup_tx_resources()
221 if (!tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
222 tx_ring->tx_buffer_info = vzalloc(size); in ena_setup_tx_resources()
223 if (!tx_ring->tx_buffer_info) in ena_setup_tx_resources()
227 size = sizeof(u16) * tx_ring->ring_size; in ena_setup_tx_resources()
228 tx_ring->free_tx_ids = vzalloc_node(size, node); in ena_setup_tx_resources()
[all …]
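
ena_setup_tx_resources() first tries a NUMA-node-local allocation and only then falls back to an ordinary one, preferring remote memory over a failed queue setup. A sketch of that fallback; zalloc_on_node() is a hypothetical stand-in for vzalloc_node(), wired here to always fail so the fallback path runs.

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for vzalloc_node: pretend node-local memory
 * is unavailable so the fallback below is exercised */
static void *zalloc_on_node(size_t size, int node)
{
	(void)size; (void)node;
	return NULL;
}

/* zeroed fallback allocation, as vzalloc would provide */
static void *zalloc_anywhere(size_t size)
{
	return calloc(1, size);
}

int main(void)
{
	size_t size = 1024 * sizeof(unsigned short);    /* free_tx_ids array */
	int node = 0;
	void *buf;

	/* prefer memory on the queue's NUMA node, but a remote
	 * allocation is better than failing queue setup outright */
	buf = zalloc_on_node(size, node);
	if (!buf) {
		buf = zalloc_anywhere(size);
		if (!buf)
			return 1;
	}
	printf("allocated %zu bytes\n", size);
	free(buf);
	return 0;
}
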
/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_io.c
277 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; in qlcnic_82xx_change_filter() local
281 producer = tx_ring->producer; in qlcnic_82xx_change_filter()
282 hwdesc = &tx_ring->desc_head[tx_ring->producer]; in qlcnic_82xx_change_filter()
298 tx_ring->producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_82xx_change_filter()
373 struct qlcnic_host_tx_ring *tx_ring) in qlcnic_tx_encap_pkt() argument
377 u32 producer = tx_ring->producer; in qlcnic_tx_encap_pkt()
407 hwdesc = &tx_ring->desc_head[producer]; in qlcnic_tx_encap_pkt()
408 tx_ring->cmd_buf_arr[producer].skb = NULL; in qlcnic_tx_encap_pkt()
413 producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_tx_encap_pkt()
416 tx_ring->producer = producer; in qlcnic_tx_encap_pkt()
[all …]
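
The qlcnic producer index advances through get_next_index(), which wraps to zero at num_desc. A sketch of that compare-and-reset wrap; the helper body below is a plausible reconstruction, not necessarily the driver's exact code.

#include <stdio.h>

/* advance a producer/consumer index with explicit wrap-around, the
 * pattern behind the get_next_index() calls in the hits above */
static unsigned int get_next_index(unsigned int index, unsigned int num_desc)
{
	return (index + 1 == num_desc) ? 0 : index + 1;
}

int main(void)
{
	unsigned int producer = 0, num_desc = 4;
	int i;

	for (i = 0; i < 6; i++) {
		printf("producer=%u\n", producer);
		producer = get_next_index(producer, num_desc);
	}
	return 0;
}
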
/drivers/net/ethernet/intel/ixgbevf/
ixgbevf_main.c
208 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, in ixgbevf_unmap_and_free_tx_resource() argument
214 dma_unmap_single(tx_ring->dev, in ixgbevf_unmap_and_free_tx_resource()
219 dma_unmap_page(tx_ring->dev, in ixgbevf_unmap_and_free_tx_resource()
250 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) in ixgbevf_check_tx_hang() argument
252 u32 tx_done = ixgbevf_get_tx_completed(tx_ring); in ixgbevf_check_tx_hang()
253 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbevf_check_tx_hang()
254 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); in ixgbevf_check_tx_hang()
256 clear_check_for_tx_hang(tx_ring); in ixgbevf_check_tx_hang()
266 &tx_ring->state); in ixgbevf_check_tx_hang()
269 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); in ixgbevf_check_tx_hang()
[all …]
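
ixgbevf_check_tx_hang() follows a two-strike watchdog pattern: if two successive checks observe the same completion count while work is still pending, the first check arms a flag and the second reports a hang; any progress re-snapshots the count and disarms. A standalone sketch of that state machine; field names are illustrative, and the kernel keeps the armed flag as a ring state bit rather than a bool.

#include <stdbool.h>
#include <stdio.h>

struct tx_ring {
	unsigned int tx_done;           /* completions counted so far */
	unsigned int tx_done_old;       /* snapshot from the previous check */
	unsigned int tx_pending;        /* descriptors still outstanding */
	bool hang_check_armed;
};

/* two-strike detector: first stalled check arms, second reports */
static bool check_tx_hang(struct tx_ring *r)
{
	if (r->tx_done_old == r->tx_done && r->tx_pending) {
		if (r->hang_check_armed)
			return true;
		r->hang_check_armed = true;
		return false;
	}
	/* progress was made: take a fresh snapshot and disarm */
	r->tx_done_old = r->tx_done;
	r->hang_check_armed = false;
	return false;
}

int main(void)
{
	struct tx_ring r = { .tx_done = 10, .tx_done_old = 10, .tx_pending = 3 };
	bool first = check_tx_hang(&r);
	bool second = check_tx_hang(&r);

	printf("first=%d second=%d\n", first, second);
	return 0;
}
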
/drivers/net/ethernet/agere/
et131x.c
357 struct tx_ring { struct
487 struct tx_ring tx_ring; member
1641 struct tx_ring *tx_ring = &adapter->tx_ring; in et131x_config_tx_dma_regs() local
1644 writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi); in et131x_config_tx_dma_regs()
1645 writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo); in et131x_config_tx_dma_regs()
1651 writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi); in et131x_config_tx_dma_regs()
1652 writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo); in et131x_config_tx_dma_regs()
1654 *tx_ring->tx_status = 0; in et131x_config_tx_dma_regs()
1657 tx_ring->send_idx = 0; in et131x_config_tx_dma_regs()
1754 struct tx_ring *tx_ring = &adapter->tx_ring; in et131x_init_send() local
[all …]
/drivers/net/ethernet/oki-semi/pch_gbe/
pch_gbe_main.c
631 adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev, in pch_gbe_alloc_queues()
632 sizeof(*adapter->tx_ring), GFP_KERNEL); in pch_gbe_alloc_queues()
633 if (!adapter->tx_ring) in pch_gbe_alloc_queues()
858 (unsigned long long)adapter->tx_ring->dma, in pch_gbe_configure_tx()
859 adapter->tx_ring->size); in pch_gbe_configure_tx()
862 tdba = adapter->tx_ring->dma; in pch_gbe_configure_tx()
863 tdlen = adapter->tx_ring->size - 0x10; in pch_gbe_configure_tx()
977 struct pch_gbe_tx_ring *tx_ring) in pch_gbe_clean_tx_ring() argument
985 for (i = 0; i < tx_ring->count; i++) { in pch_gbe_clean_tx_ring()
986 buffer_info = &tx_ring->buffer_info[i]; in pch_gbe_clean_tx_ring()
[all …]
/drivers/net/ethernet/intel/ixgb/
ixgb_main.c
702 struct ixgb_desc_ring *txdr = &adapter->tx_ring; in ixgb_setup_tx_resources()
739 u64 tdba = adapter->tx_ring.dma; in ixgb_configure_tx()
740 u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc); in ixgb_configure_tx()
916 vfree(adapter->tx_ring.buffer_info); in ixgb_free_tx_resources()
917 adapter->tx_ring.buffer_info = NULL; in ixgb_free_tx_resources()
919 dma_free_coherent(&pdev->dev, adapter->tx_ring.size, in ixgb_free_tx_resources()
920 adapter->tx_ring.desc, adapter->tx_ring.dma); in ixgb_free_tx_resources()
922 adapter->tx_ring.desc = NULL; in ixgb_free_tx_resources()
957 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; in ixgb_clean_tx_ring() local
964 for (i = 0; i < tx_ring->count; i++) { in ixgb_clean_tx_ring()
[all …]
/drivers/net/ethernet/atheros/atl1e/
atl1e_main.c
636 hw->tpd_thresh = adapter->tx_ring.count / 2; in atl1e_sw_init()
663 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; in atl1e_clean_tx_ring() local
668 if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL) in atl1e_clean_tx_ring()
671 ring_count = tx_ring->count; in atl1e_clean_tx_ring()
674 tx_buffer = &tx_ring->tx_buffer[index]; in atl1e_clean_tx_ring()
687 tx_buffer = &tx_ring->tx_buffer[index]; in atl1e_clean_tx_ring()
694 memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) * in atl1e_clean_tx_ring()
696 memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) * in atl1e_clean_tx_ring()
727 *ring_size = ((u32)(adapter->tx_ring.count * in atl1e_cal_ring_size()
753 rwlock_init(&adapter->tx_ring.tx_lock); in atl1e_init_ring_resources()
[all …]
/drivers/net/ethernet/intel/e1000e/
netdev.c
218 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000e_dump() local
262 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; in e1000e_dump()
264 0, tx_ring->next_to_use, tx_ring->next_to_clean, in e1000e_dump()
306 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in e1000e_dump()
308 tx_desc = E1000_TX_DESC(*tx_ring, i); in e1000e_dump()
309 buffer_info = &tx_ring->buffer_info[i]; in e1000e_dump()
311 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) in e1000e_dump()
313 else if (i == tx_ring->next_to_use) in e1000e_dump()
315 else if (i == tx_ring->next_to_clean) in e1000e_dump()
640 static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) in e1000e_update_tdt_wa() argument
[all …]
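
The e1000e_dump() loop annotates each descriptor slot relative to the ring indices: NTU for next_to_use, NTC for next_to_clean, and a combined marker when they coincide. A tiny sketch of just that labeling logic:

#include <stdio.h>

struct tx_ring { unsigned int count, next_to_use, next_to_clean; };

int main(void)
{
	struct tx_ring r = { .count = 8, .next_to_use = 5, .next_to_clean = 2 };
	unsigned int i;

	for (i = 0; i < r.count; i++) {
		const char *mark = "";

		if (i == r.next_to_use && i == r.next_to_clean)
			mark = " NTC/U";        /* both indices on this slot */
		else if (i == r.next_to_use)
			mark = " NTU";
		else if (i == r.next_to_clean)
			mark = " NTC";
		printf("desc[%u]%s\n", i, mark);
	}
	return 0;
}
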
/drivers/net/ethernet/intel/e1000/
e1000_main.c
106 struct e1000_tx_ring *tx_ring);
125 struct e1000_tx_ring *tx_ring);
139 struct e1000_tx_ring *tx_ring);
1246 kfree(adapter->tx_ring); in e1000_probe()
1287 kfree(adapter->tx_ring); in e1000_remove()
1342 adapter->tx_ring = kcalloc(adapter->num_tx_queues, in e1000_alloc_queues()
1344 if (!adapter->tx_ring) in e1000_alloc_queues()
1350 kfree(adapter->tx_ring); in e1000_alloc_queues()
1584 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); in e1000_setup_all_tx_resources()
1589 &adapter->tx_ring[i]); in e1000_setup_all_tx_resources()
[all …]
/drivers/net/ethernet/intel/igb/
igb_main.c
372 struct igb_ring *tx_ring; in igb_dump() local
407 tx_ring = adapter->tx_ring[n]; in igb_dump()
408 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in igb_dump()
410 n, tx_ring->next_to_use, tx_ring->next_to_clean, in igb_dump()
435 tx_ring = adapter->tx_ring[n]; in igb_dump()
437 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in igb_dump()
441 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in igb_dump()
444 tx_desc = IGB_TX_DESC(tx_ring, i); in igb_dump()
445 buffer_info = &tx_ring->tx_buffer_info[i]; in igb_dump()
447 if (i == tx_ring->next_to_use && in igb_dump()
[all …]
/drivers/net/ethernet/packetengines/
yellowfin.c
310 struct yellowfin_desc *tx_ring; member
441 np->tx_ring = ring_space; in yellowfin_init_one()
514 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); in yellowfin_init_one()
700 pr_warn(" Tx ring %p: ", yp->tx_ring); in yellowfin_tx_timeout()
704 yp->tx_ring[i].result_status); in yellowfin_tx_timeout()
762 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_init_ring()
763 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma + in yellowfin_init_ring()
767 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS); in yellowfin_init_ring()
775 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_init_ring()
776 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + in yellowfin_init_ring()
[all …]
/drivers/net/ethernet/qlogic/netxen/
netxen_nic_ctx.c
448 struct nx_host_tx_ring *tx_ring = adapter->tx_ring; in nx_fw_cmd_create_tx_ctx() local
490 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); in nx_fw_cmd_create_tx_ctx()
491 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); in nx_fw_cmd_create_tx_ctx()
503 tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, in nx_fw_cmd_create_tx_ctx()
706 struct nx_host_tx_ring *tx_ring; in netxen_init_old_ctx() local
712 tx_ring = adapter->tx_ring; in netxen_init_old_ctx()
716 hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr); in netxen_init_old_ctx()
717 hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc); in netxen_init_old_ctx()
762 struct nx_host_tx_ring *tx_ring; in netxen_alloc_hw_resources() local
769 tx_ring = adapter->tx_ring; in netxen_alloc_hw_resources()
[all …]
/drivers/net/ethernet/intel/ixgbe/
ixgbe_main.c
592 struct ixgbe_ring *tx_ring; in ixgbe_dump() local
634 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
635 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in ixgbe_dump()
637 n, tx_ring->next_to_use, tx_ring->next_to_clean, in ixgbe_dump()
686 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
688 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in ixgbe_dump()
695 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in ixgbe_dump()
696 tx_desc = IXGBE_TX_DESC(tx_ring, i); in ixgbe_dump()
697 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_dump()
709 if (i == tx_ring->next_to_use && in ixgbe_dump()
[all …]
/drivers/net/ethernet/qlogic/qlge/
qlge_main.c
2110 struct tx_ring *tx_ring; in ql_process_mac_tx_intr() local
2114 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; in ql_process_mac_tx_intr()
2115 tx_ring_desc = &tx_ring->q[mac_rsp->tid]; in ql_process_mac_tx_intr()
2117 tx_ring->tx_bytes += (tx_ring_desc->skb)->len; in ql_process_mac_tx_intr()
2118 tx_ring->tx_packets++; in ql_process_mac_tx_intr()
2143 atomic_inc(&tx_ring->tx_count); in ql_process_mac_tx_intr()
2212 struct tx_ring *tx_ring; in ql_clean_outbound_rx_ring() local
2240 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; in ql_clean_outbound_rx_ring()
2241 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { in ql_clean_outbound_rx_ring()
2242 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) in ql_clean_outbound_rx_ring()
[all …]
/drivers/net/ethernet/apm/xgene/
xgene_enet_main.c
326 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, in xgene_enet_setup_tx_desc() argument
329 struct device *dev = ndev_to_dev(tx_ring->ndev); in xgene_enet_setup_tx_desc()
330 struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev); in xgene_enet_setup_tx_desc()
335 u16 tail = tx_ring->tail; in xgene_enet_setup_tx_desc()
344 raw_desc = &tx_ring->raw_desc[tail]; in xgene_enet_setup_tx_desc()
345 tail = (tail + 1) & (tx_ring->slots - 1); in xgene_enet_setup_tx_desc()
352 raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) | in xgene_enet_setup_tx_desc()
360 netdev_err(tx_ring->ndev, "DMA mapping error\n"); in xgene_enet_setup_tx_desc()
374 exp_desc = (void *)&tx_ring->raw_desc[tail]; in xgene_enet_setup_tx_desc()
375 tail = (tail + 1) & (tx_ring->slots - 1); in xgene_enet_setup_tx_desc()
[all …]
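
xgene advances its tail with a mask rather than a compare, which works because the slot count is a power of two: (tail + 1) & (slots - 1). A sketch contrasting with the compare-and-reset style seen in the qlcnic hits:

#include <stdio.h>

/* when the slot count is a power of two, wrap-around is a single AND
 * instead of a compare-and-reset, as in the xgene hits above */
static unsigned short next_tail(unsigned short tail, unsigned short slots)
{
	return (tail + 1) & (slots - 1);
}

int main(void)
{
	unsigned short tail = 0, slots = 8;     /* must be a power of two */
	int i;

	for (i = 0; i < 10; i++) {
		printf("tail=%u\n", tail);
		tail = next_tail(tail, slots);
	}
	return 0;
}
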
