/drivers/net/ethernet/intel/i40evf/ |
D | i40e_txrx.c |
     79  void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)  in i40evf_clean_tx_ring() argument
     85  if (!tx_ring->tx_bi)  in i40evf_clean_tx_ring()
     89  for (i = 0; i < tx_ring->count; i++)  in i40evf_clean_tx_ring()
     90  i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);  in i40evf_clean_tx_ring()
     92  bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;  in i40evf_clean_tx_ring()
     93  memset(tx_ring->tx_bi, 0, bi_size);  in i40evf_clean_tx_ring()
     96  memset(tx_ring->desc, 0, tx_ring->size);  in i40evf_clean_tx_ring()
     98  tx_ring->next_to_use = 0;  in i40evf_clean_tx_ring()
     99  tx_ring->next_to_clean = 0;  in i40evf_clean_tx_ring()
    101  if (!tx_ring->netdev)  in i40evf_clean_tx_ring()
    [all …]
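The excerpt above is fragmentary, so here is a hedged sketch of the whole cleanup shape it follows: unmap and free every pending buffer, wipe both the software bookkeeping array and the hardware descriptor memory, then reset both ring cursors. The my_ring type and the my_unmap_and_free() helper are hypothetical stand-ins, not the driver's names.

    /* assumes kernel context (memset from <linux/string.h>) */
    static void my_clean_tx_ring(struct my_ring *ring)
    {
            unsigned int i;

            if (!ring->tx_bi)       /* ring was never set up */
                    return;

            /* release every buffer still owned by the ring */
            for (i = 0; i < ring->count; i++)
                    my_unmap_and_free(ring, &ring->tx_bi[i]);

            /* zero software bookkeeping and hardware descriptors */
            memset(ring->tx_bi, 0, sizeof(*ring->tx_bi) * ring->count);
            memset(ring->desc, 0, ring->size);

            /* both cursors back to slot 0 so the ring restarts cleanly */
            ring->next_to_use = 0;
            ring->next_to_clean = 0;
    }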
|
/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.c |
     56  struct i40e_ring *tx_ring;  in i40e_program_fdir_filter() local
     73  tx_ring = vsi->tx_rings[0];  in i40e_program_fdir_filter()
     74  dev = tx_ring->dev;  in i40e_program_fdir_filter()
     78  if (I40E_DESC_UNUSED(tx_ring) > 1)  in i40e_program_fdir_filter()
     84  if (!(I40E_DESC_UNUSED(tx_ring) > 1))  in i40e_program_fdir_filter()
     93  i = tx_ring->next_to_use;  in i40e_program_fdir_filter()
     94  fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);  in i40e_program_fdir_filter()
     95  first = &tx_ring->tx_bi[i];  in i40e_program_fdir_filter()
     98  tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;  in i40e_program_fdir_filter()
    146  i = tx_ring->next_to_use;  in i40e_program_fdir_filter()
    [all …]
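The next_to_use update above, "((i + 1) < tx_ring->count) ? i + 1 : 0", is the canonical open-coded wrap-around advance for a ring cursor. A minimal userspace model of just that arithmetic, runnable as-is:

    #include <assert.h>

    /* mirror of "next_to_use = ((i + 1) < count) ? i + 1 : 0" */
    static unsigned int ring_next(unsigned int i, unsigned int count)
    {
            return ((i + 1) < count) ? i + 1 : 0;
    }

    int main(void)
    {
            assert(ring_next(0, 512) == 1);
            assert(ring_next(510, 512) == 511);
            assert(ring_next(511, 512) == 0);  /* wraps back to the start */
            return 0;
    }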
|
D | i40e_fcoe.c |
    1010  static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring,  in i40e_fcoe_program_ddp() argument
    1017  struct i40e_pf *pf = tx_ring->vsi->back;  in i40e_fcoe_program_ddp()
    1018  u16 i = tx_ring->next_to_use;  in i40e_fcoe_program_ddp()
    1040  ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i);  in i40e_fcoe_program_ddp()
    1042  if (i == tx_ring->count)  in i40e_fcoe_program_ddp()
    1062  queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++);  in i40e_fcoe_program_ddp()
    1063  if (i == tx_ring->count)  in i40e_fcoe_program_ddp()
    1072  filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i);  in i40e_fcoe_program_ddp()
    1074  if (i == tx_ring->count)  in i40e_fcoe_program_ddp()
    1097  tx_ring->next_to_use = i;  in i40e_fcoe_program_ddp()
    [all …]
|
/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c |
    771  static int fm10k_tso(struct fm10k_ring *tx_ring,  in fm10k_tso() argument
    804  tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);  in fm10k_tso()
    810  tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;  in fm10k_tso()
    812  netdev_err(tx_ring->netdev,  in fm10k_tso()
    817  static void fm10k_tx_csum(struct fm10k_ring *tx_ring,  in fm10k_tx_csum() argument
    837  dev_warn(tx_ring->dev,  in fm10k_tx_csum()
    839  tx_ring->tx_stats.csum_err++;  in fm10k_tx_csum()
    858  dev_warn(tx_ring->dev,  in fm10k_tx_csum()
    862  tx_ring->tx_stats.csum_err++;  in fm10k_tx_csum()
    875  dev_warn(tx_ring->dev,  in fm10k_tx_csum()
    [all …]
|
D | fm10k_netdev.c |
    33  int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)  in fm10k_setup_tx_resources() argument
    35  struct device *dev = tx_ring->dev;  in fm10k_setup_tx_resources()
    38  size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;  in fm10k_setup_tx_resources()
    40  tx_ring->tx_buffer = vzalloc(size);  in fm10k_setup_tx_resources()
    41  if (!tx_ring->tx_buffer)  in fm10k_setup_tx_resources()
    44  u64_stats_init(&tx_ring->syncp);  in fm10k_setup_tx_resources()
    47  tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);  in fm10k_setup_tx_resources()
    48  tx_ring->size = ALIGN(tx_ring->size, 4096);  in fm10k_setup_tx_resources()
    50  tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,  in fm10k_setup_tx_resources()
    51  &tx_ring->dma, GFP_KERNEL);  in fm10k_setup_tx_resources()
    [all …]
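fm10k's setup path shows the two-allocation pattern most of these drivers share: vzalloc() for the per-descriptor software state and dma_alloc_coherent() for the descriptor ring itself, padded to a 4 KiB boundary. A hedged sketch with hypothetical my_ring/my_tx_desc types (kernel context assumed):

    #include <linux/vmalloc.h>
    #include <linux/dma-mapping.h>

    static int my_setup_tx_resources(struct my_ring *ring)
    {
            struct device *dev = ring->dev;

            /* software bookkeeping: one zeroed entry per descriptor */
            ring->tx_buffer = vzalloc(sizeof(*ring->tx_buffer) * ring->count);
            if (!ring->tx_buffer)
                    return -ENOMEM;

            /* hardware descriptors: DMA-coherent, rounded up to a page */
            ring->size = ALIGN(ring->count * sizeof(struct my_tx_desc), 4096);
            ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
                                            GFP_KERNEL);
            if (!ring->desc) {
                    vfree(ring->tx_buffer);   /* unwind on failure */
                    ring->tx_buffer = NULL;
                    return -ENOMEM;
            }
            return 0;
    }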
|
/drivers/net/ethernet/intel/igbvf/ |
D | netdev.c |
    434  struct igbvf_ring *tx_ring)  in igbvf_setup_tx_resources() argument
    439  size = sizeof(struct igbvf_buffer) * tx_ring->count;  in igbvf_setup_tx_resources()
    440  tx_ring->buffer_info = vzalloc(size);  in igbvf_setup_tx_resources()
    441  if (!tx_ring->buffer_info)  in igbvf_setup_tx_resources()
    445  tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);  in igbvf_setup_tx_resources()
    446  tx_ring->size = ALIGN(tx_ring->size, 4096);  in igbvf_setup_tx_resources()
    448  tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,  in igbvf_setup_tx_resources()
    449  &tx_ring->dma, GFP_KERNEL);  in igbvf_setup_tx_resources()
    450  if (!tx_ring->desc)  in igbvf_setup_tx_resources()
    453  tx_ring->adapter = adapter;  in igbvf_setup_tx_resources()
    [all …]
|
/drivers/net/ethernet/qlogic/qlcnic/ |
D | qlcnic_io.c |
    277  struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;  in qlcnic_82xx_change_filter() local
    281  producer = tx_ring->producer;  in qlcnic_82xx_change_filter()
    282  hwdesc = &tx_ring->desc_head[tx_ring->producer];  in qlcnic_82xx_change_filter()
    298  tx_ring->producer = get_next_index(producer, tx_ring->num_desc);  in qlcnic_82xx_change_filter()
    373  struct qlcnic_host_tx_ring *tx_ring)  in qlcnic_tx_encap_pkt() argument
    377  u32 producer = tx_ring->producer;  in qlcnic_tx_encap_pkt()
    407  hwdesc = &tx_ring->desc_head[producer];  in qlcnic_tx_encap_pkt()
    408  tx_ring->cmd_buf_arr[producer].skb = NULL;  in qlcnic_tx_encap_pkt()
    413  producer = get_next_index(producer, tx_ring->num_desc);  in qlcnic_tx_encap_pkt()
    416  tx_ring->producer = producer;  in qlcnic_tx_encap_pkt()
    [all …]
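qlcnic funnels every producer update through get_next_index(). One plausible shape for that helper is a plain modular increment; the driver may instead mask against a power-of-two ring size, so treat this runnable snippet as a behavioral model, not qlcnic's code:

    #include <stdio.h>

    static unsigned int get_next(unsigned int index, unsigned int num_desc)
    {
            return (index + 1) % num_desc;
    }

    int main(void)
    {
            unsigned int producer = 0;

            /* post five descriptors into a four-slot ring; watch the wrap */
            for (int i = 0; i < 5; i++) {
                    printf("fill slot %u\n", producer);
                    producer = get_next(producer, 4);
            }
            return 0;
    }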
|
D | qlcnic_ctx.c |
    416  struct qlcnic_host_tx_ring *tx_ring,  in qlcnic_82xx_fw_cmd_create_tx_ctx() argument
    434  tx_ring->producer = 0;  in qlcnic_82xx_fw_cmd_create_tx_ctx()
    435  tx_ring->sw_consumer = 0;  in qlcnic_82xx_fw_cmd_create_tx_ctx()
    436  *(tx_ring->hw_consumer) = 0;  in qlcnic_82xx_fw_cmd_create_tx_ctx()
    477  prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);  in qlcnic_82xx_fw_cmd_create_tx_ctx()
    481  prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);  in qlcnic_82xx_fw_cmd_create_tx_ctx()
    482  prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);  in qlcnic_82xx_fw_cmd_create_tx_ctx()
    496  tx_ring->state = le32_to_cpu(prsp->host_ctx_state);  in qlcnic_82xx_fw_cmd_create_tx_ctx()
    498  tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;  in qlcnic_82xx_fw_cmd_create_tx_ctx()
    499  tx_ring->ctx_id = le16_to_cpu(prsp->context_id);  in qlcnic_82xx_fw_cmd_create_tx_ctx()
    [all …]
|
/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c |
    191  static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,  in ixgbevf_unmap_and_free_tx_resource() argument
    197  dma_unmap_single(tx_ring->dev,  in ixgbevf_unmap_and_free_tx_resource()
    202  dma_unmap_page(tx_ring->dev,  in ixgbevf_unmap_and_free_tx_resource()
    228  struct ixgbevf_ring *tx_ring)  in ixgbevf_clean_tx_irq() argument
    234  unsigned int budget = tx_ring->count / 2;  in ixgbevf_clean_tx_irq()
    235  unsigned int i = tx_ring->next_to_clean;  in ixgbevf_clean_tx_irq()
    240  tx_buffer = &tx_ring->tx_buffer_info[i];  in ixgbevf_clean_tx_irq()
    241  tx_desc = IXGBEVF_TX_DESC(tx_ring, i);  in ixgbevf_clean_tx_irq()
    242  i -= tx_ring->count;  in ixgbevf_clean_tx_irq()
    269  dma_unmap_single(tx_ring->dev,  in ixgbevf_clean_tx_irq()
    [all …]
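The "i -= tx_ring->count" near the top of ixgbevf_clean_tx_irq() is a deliberate trick: the loop index runs in [-count, 0) so the wrap test collapses to a cheap check for zero. A runnable userspace model of that bias:

    #include <assert.h>

    int main(void)
    {
            const int count = 8;
            int i = 5;              /* next_to_clean */
            int cleaned = 0;

            i -= count;             /* bias index into the negative range */
            while (cleaned < 6) {
                    cleaned++;
                    if (!++i)       /* reached the end of the ring? */
                            i -= count;     /* wrap back to slot 0 */
            }
            i += count;             /* un-bias before storing it back */
            assert(i == (5 + 6) % count);
            return 0;
    }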
|
D | ethtool.c |
    264  struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;  in ixgbevf_set_ringparam() local
    289  adapter->tx_ring[i]->count = new_tx_count;  in ixgbevf_set_ringparam()
    298  tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));  in ixgbevf_set_ringparam()
    299  if (!tx_ring) {  in ixgbevf_set_ringparam()
    306  tx_ring[i] = *adapter->tx_ring[i];  in ixgbevf_set_ringparam()
    307  tx_ring[i].count = new_tx_count;  in ixgbevf_set_ringparam()
    308  err = ixgbevf_setup_tx_resources(&tx_ring[i]);  in ixgbevf_set_ringparam()
    312  ixgbevf_free_tx_resources(&tx_ring[i]);  in ixgbevf_set_ringparam()
    315  vfree(tx_ring);  in ixgbevf_set_ringparam()
    316  tx_ring = NULL;  in ixgbevf_set_ringparam()
    [all …]
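ixgbevf resizes its rings defensively: clones of the live rings go into a temporary array, resources are allocated for the clones, and only if every allocation succeeds are they swapped in. A hedged sketch of that dance (the my_* names are hypothetical):

    static int my_resize_tx_rings(struct my_adapter *adapter, u32 new_count)
    {
            struct my_ring *tmp;
            int i, err = 0;

            tmp = vmalloc(adapter->num_tx_queues * sizeof(*tmp));
            if (!tmp)
                    return -ENOMEM;

            for (i = 0; i < adapter->num_tx_queues; i++) {
                    tmp[i] = *adapter->tx_ring[i];  /* clone, then resize */
                    tmp[i].count = new_count;
                    err = my_setup_tx_resources(&tmp[i]);
                    if (err) {
                            while (i--)     /* free what was built so far */
                                    my_free_tx_resources(&tmp[i]);
                            goto out;
                    }
            }
            /* all allocations succeeded: swap the new rings in */
            for (i = 0; i < adapter->num_tx_queues; i++) {
                    my_free_tx_resources(adapter->tx_ring[i]);
                    *adapter->tx_ring[i] = tmp[i];
            }
    out:
            vfree(tmp);
            return err;
    }

The live rings are only torn down after their replacements exist, so a failed ethtool -G leaves networking intact.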
|
/drivers/net/ethernet/agere/ |
D | et131x.c |
     357  struct tx_ring {  struct
     488  struct tx_ring tx_ring;  member
    1642  struct tx_ring *tx_ring = &adapter->tx_ring;  in et131x_config_tx_dma_regs() local
    1645  writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);  in et131x_config_tx_dma_regs()
    1646  writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);  in et131x_config_tx_dma_regs()
    1652  writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);  in et131x_config_tx_dma_regs()
    1653  writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);  in et131x_config_tx_dma_regs()
    1655  *tx_ring->tx_status = 0;  in et131x_config_tx_dma_regs()
    1658  tx_ring->send_idx = 0;  in et131x_config_tx_dma_regs()
    1755  struct tx_ring *tx_ring = &adapter->tx_ring;  in et131x_init_send() local
    [all …]
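et131x programs the 64-bit descriptor-ring address into two 32-bit registers via upper_32_bits()/lower_32_bits(). The kernel's upper_32_bits() shifts by 16 twice so the expression stays well defined even when the argument is only 32 bits wide; a runnable model of the split:

    #include <assert.h>
    #include <stdint.h>

    /* same double-shift the kernel macro uses to avoid a >=width shift */
    #define upper_32(n) ((uint32_t)(((n) >> 16) >> 16))
    #define lower_32(n) ((uint32_t)(n))

    int main(void)
    {
            uint64_t pa = 0x0000000123456000ULL;   /* example DMA address */

            assert(upper_32(pa) == 0x00000001);
            assert(lower_32(pa) == 0x23456000);
            /* recombining the halves restores the original address */
            assert((((uint64_t)upper_32(pa) << 32) | lower_32(pa)) == pa);
            return 0;
    }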
|
/drivers/net/ethernet/oki-semi/pch_gbe/ |
D | pch_gbe_main.c |
    631  adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,  in pch_gbe_alloc_queues()
    632  sizeof(*adapter->tx_ring), GFP_KERNEL);  in pch_gbe_alloc_queues()
    633  if (!adapter->tx_ring)  in pch_gbe_alloc_queues()
    858  (unsigned long long)adapter->tx_ring->dma,  in pch_gbe_configure_tx()
    859  adapter->tx_ring->size);  in pch_gbe_configure_tx()
    862  tdba = adapter->tx_ring->dma;  in pch_gbe_configure_tx()
    863  tdlen = adapter->tx_ring->size - 0x10;  in pch_gbe_configure_tx()
    977  struct pch_gbe_tx_ring *tx_ring)  in pch_gbe_clean_tx_ring() argument
    985  for (i = 0; i < tx_ring->count; i++) {  in pch_gbe_clean_tx_ring()
    986  buffer_info = &tx_ring->buffer_info[i];  in pch_gbe_clean_tx_ring()
    [all …]
|
/drivers/net/ethernet/intel/ixgb/ |
D | ixgb_main.c |
    701  struct ixgb_desc_ring *txdr = &adapter->tx_ring;  in ixgb_setup_tx_resources()
    738  u64 tdba = adapter->tx_ring.dma;  in ixgb_configure_tx()
    739  u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);  in ixgb_configure_tx()
    915  vfree(adapter->tx_ring.buffer_info);  in ixgb_free_tx_resources()
    916  adapter->tx_ring.buffer_info = NULL;  in ixgb_free_tx_resources()
    918  dma_free_coherent(&pdev->dev, adapter->tx_ring.size,  in ixgb_free_tx_resources()
    919  adapter->tx_ring.desc, adapter->tx_ring.dma);  in ixgb_free_tx_resources()
    921  adapter->tx_ring.desc = NULL;  in ixgb_free_tx_resources()
    956  struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;  in ixgb_clean_tx_ring() local
    963  for (i = 0; i < tx_ring->count; i++) {  in ixgb_clean_tx_ring()
    [all …]
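ixgb_free_tx_resources() is the mirror image of the setup path: vfree() the bookkeeping, dma_free_coherent() the descriptor memory, and NULL both pointers so a repeated free is harmless. Sketched with the same hypothetical my_* types as above:

    static void my_free_tx_resources(struct my_adapter *adapter)
    {
            struct my_ring *ring = &adapter->tx_ring;

            my_clean_tx_ring(ring);        /* unmap in-flight buffers first */

            vfree(ring->tx_buffer);
            ring->tx_buffer = NULL;

            dma_free_coherent(&adapter->pdev->dev, ring->size,
                              ring->desc, ring->dma);
            ring->desc = NULL;
    }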
|
/drivers/infiniband/hw/amso1100/ |
D | c2.c |
    111  static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,  in c2_tx_ring_alloc() argument
    119  tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);  in c2_tx_ring_alloc()
    120  if (!tx_ring->start)  in c2_tx_ring_alloc()
    123  elem = tx_ring->start;  in c2_tx_ring_alloc()
    126  for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {  in c2_tx_ring_alloc()
    141  if (i == tx_ring->count - 1) {  in c2_tx_ring_alloc()
    142  elem->next = tx_ring->start;  in c2_tx_ring_alloc()
    151  tx_ring->to_use = tx_ring->to_clean = tx_ring->start;  in c2_tx_ring_alloc()
    326  struct c2_ring *tx_ring = &c2_port->tx_ring;  in c2_tx_clean() local
    334  elem = tx_ring->start;  in c2_tx_clean()
    [all …]
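Unlike the index-based rings above, c2 builds its ring as a singly linked list whose last element points back to the first, with to_use/to_clean cursors starting together. A self-contained userspace model:

    #include <stdio.h>
    #include <stdlib.h>

    struct elem { int idx; struct elem *next; };
    struct ring { struct elem *start, *to_use, *to_clean; int count; };

    static int ring_alloc(struct ring *r, int count)
    {
            struct elem *elem;
            int i;

            r->start = malloc(sizeof(*elem) * count);
            if (!r->start)
                    return -1;
            r->count = count;

            for (i = 0, elem = r->start; i < count; i++, elem++) {
                    elem->idx = i;
                    /* the last element closes the loop */
                    elem->next = (i == count - 1) ? r->start : elem + 1;
            }
            r->to_use = r->to_clean = r->start;
            return 0;
    }

    int main(void)
    {
            struct ring r;

            if (ring_alloc(&r, 4))
                    return 1;
            /* one full lap plus one step: prints 0 1 2 3 0 */
            for (int i = 0; i < 5; i++, r.to_use = r.to_use->next)
                    printf("%d ", r.to_use->idx);
            printf("\n");
            free(r.start);
            return 0;
    }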
|
/drivers/net/ethernet/intel/e1000e/ |
D | netdev.c |
    217  struct e1000_ring *tx_ring = adapter->tx_ring;  in e1000e_dump() local
    261  buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];  in e1000e_dump()
    263  0, tx_ring->next_to_use, tx_ring->next_to_clean,  in e1000e_dump()
    305  for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {  in e1000e_dump()
    307  tx_desc = E1000_TX_DESC(*tx_ring, i);  in e1000e_dump()
    308  buffer_info = &tx_ring->buffer_info[i];  in e1000e_dump()
    310  if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)  in e1000e_dump()
    312  else if (i == tx_ring->next_to_use)  in e1000e_dump()
    314  else if (i == tx_ring->next_to_clean)  in e1000e_dump()
    639  static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)  in e1000e_update_tdt_wa() argument
    [all …]
|
D | ethtool.c |
     696  adapter->tx_ring->count = new_tx_count;  in e1000_set_ringparam()
     731  memcpy(temp_tx, adapter->tx_ring, size);  in e1000_set_ringparam()
     747  e1000e_free_tx_resources(adapter->tx_ring);  in e1000_set_ringparam()
     748  memcpy(adapter->tx_ring, temp_tx, size);  in e1000_set_ringparam()
    1114  struct e1000_ring *tx_ring = &adapter->test_tx_ring;  in e1000_free_desc_rings() local
    1120  if (tx_ring->desc && tx_ring->buffer_info) {  in e1000_free_desc_rings()
    1121  for (i = 0; i < tx_ring->count; i++) {  in e1000_free_desc_rings()
    1122  buffer_info = &tx_ring->buffer_info[i];  in e1000_free_desc_rings()
    1147  if (tx_ring->desc) {  in e1000_free_desc_rings()
    1148  dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,  in e1000_free_desc_rings()
    [all …]
|
/drivers/net/ethernet/atheros/atl1e/ |
D | atl1e_main.c |
    636  hw->tpd_thresh = adapter->tx_ring.count / 2;  in atl1e_sw_init()
    664  struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;  in atl1e_clean_tx_ring() local
    669  if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)  in atl1e_clean_tx_ring()
    672  ring_count = tx_ring->count;  in atl1e_clean_tx_ring()
    675  tx_buffer = &tx_ring->tx_buffer[index];  in atl1e_clean_tx_ring()
    688  tx_buffer = &tx_ring->tx_buffer[index];  in atl1e_clean_tx_ring()
    695  memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *  in atl1e_clean_tx_ring()
    697  memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *  in atl1e_clean_tx_ring()
    728  *ring_size = ((u32)(adapter->tx_ring.count *  in atl1e_cal_ring_size()
    754  rwlock_init(&adapter->tx_ring.tx_lock);  in atl1e_init_ring_resources()
    [all …]
|
/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_main.c |
    538  struct ixgbe_ring *tx_ring;  in ixgbe_dump() local
    580  tx_ring = adapter->tx_ring[n];  in ixgbe_dump()
    581  tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];  in ixgbe_dump()
    583  n, tx_ring->next_to_use, tx_ring->next_to_clean,  in ixgbe_dump()
    632  tx_ring = adapter->tx_ring[n];  in ixgbe_dump()
    634  pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);  in ixgbe_dump()
    641  for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {  in ixgbe_dump()
    642  tx_desc = IXGBE_TX_DESC(tx_ring, i);  in ixgbe_dump()
    643  tx_buffer = &tx_ring->tx_buffer_info[i];  in ixgbe_dump()
    655  if (i == tx_ring->next_to_use &&  in ixgbe_dump()
    [all …]
|
/drivers/net/ethernet/intel/e1000/ |
D | e1000_main.c |
     106  struct e1000_tx_ring *tx_ring);
     125  struct e1000_tx_ring *tx_ring);
     139  struct e1000_tx_ring *tx_ring);
    1245  kfree(adapter->tx_ring);  in e1000_probe()
    1283  kfree(adapter->tx_ring);  in e1000_remove()
    1336  adapter->tx_ring = kcalloc(adapter->num_tx_queues,  in e1000_alloc_queues()
    1338  if (!adapter->tx_ring)  in e1000_alloc_queues()
    1344  kfree(adapter->tx_ring);  in e1000_alloc_queues()
    1578  err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);  in e1000_setup_all_tx_resources()
    1583  &adapter->tx_ring[i]);  in e1000_setup_all_tx_resources()
    [all …]
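e1000 sizes its ring arrays at runtime with kcalloc() and frees the TX array again if a later allocation fails, which is what the kfree(adapter->tx_ring) inside e1000_alloc_queues() is doing. A hedged sketch of that unwind pattern (hypothetical types, kernel context):

    #include <linux/slab.h>

    static int my_alloc_queues(struct my_adapter *adapter)
    {
            adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                       sizeof(struct my_tx_ring), GFP_KERNEL);
            if (!adapter->tx_ring)
                    return -ENOMEM;

            adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                       sizeof(struct my_rx_ring), GFP_KERNEL);
            if (!adapter->rx_ring) {
                    kfree(adapter->tx_ring);  /* unwind earlier allocation */
                    adapter->tx_ring = NULL;
                    return -ENOMEM;
            }
            return 0;
    }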
|
/drivers/net/ethernet/intel/igb/ |
D | igb_main.c |
    369  struct igb_ring *tx_ring;  in igb_dump() local
    404  tx_ring = adapter->tx_ring[n];  in igb_dump()
    405  buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];  in igb_dump()
    407  n, tx_ring->next_to_use, tx_ring->next_to_clean,  in igb_dump()
    432  tx_ring = adapter->tx_ring[n];  in igb_dump()
    434  pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);  in igb_dump()
    438  for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {  in igb_dump()
    441  tx_desc = IGB_TX_DESC(tx_ring, i);  in igb_dump()
    442  buffer_info = &tx_ring->tx_buffer_info[i];  in igb_dump()
    444  if (i == tx_ring->next_to_use &&  in igb_dump()
    [all …]
|
/drivers/net/ethernet/packetengines/ |
D | yellowfin.c |
    310  struct yellowfin_desc *tx_ring;  member
    441  np->tx_ring = ring_space;  in yellowfin_init_one()
    514  pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);  in yellowfin_init_one()
    700  pr_warn(" Tx ring %p: ", yp->tx_ring);  in yellowfin_tx_timeout()
    704  yp->tx_ring[i].result_status);  in yellowfin_tx_timeout()
    762  yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);  in yellowfin_init_ring()
    763  yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +  in yellowfin_init_ring()
    767  yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);  in yellowfin_init_ring()
    775  yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);  in yellowfin_init_ring()
    776  yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +  in yellowfin_init_ring()
    [all …]
|
/drivers/net/ethernet/apm/xgene/ |
D | xgene_enet_main.c |
    223  static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,  in xgene_enet_setup_tx_desc() argument
    226  struct device *dev = ndev_to_dev(tx_ring->ndev);  in xgene_enet_setup_tx_desc()
    229  u16 tail = tx_ring->tail;  in xgene_enet_setup_tx_desc()
    232  raw_desc = &tx_ring->raw_desc[tail];  in xgene_enet_setup_tx_desc()
    237  netdev_err(tx_ring->ndev, "DMA mapping error\n");  in xgene_enet_setup_tx_desc()
    247  raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |  in xgene_enet_setup_tx_desc()
    249  tx_ring->cp_ring->cp_skb[tail] = skb;  in xgene_enet_setup_tx_desc()
    258  struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;  in xgene_enet_start_xmit() local
    259  struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;  in xgene_enet_start_xmit()
    262  tx_level = xgene_enet_ring_len(tx_ring);  in xgene_enet_start_xmit()
    [all …]
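xgene checks every streaming DMA mapping before handing the address to hardware, which is mandatory with dma_map_single(). A hedged sketch of that rule (my_ring and its fields are hypothetical stand-ins):

    #include <linux/dma-mapping.h>

    static int my_map_tx_skb(struct my_ring *ring, struct sk_buff *skb)
    {
            struct device *dev = ring->dev;
            dma_addr_t dma;

            dma = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, dma)) {
                    /* never give an unchecked address to the NIC */
                    netdev_err(ring->ndev, "DMA mapping error\n");
                    return -EIO;
            }
            ring->desc[ring->tail].addr = dma;
            ring->desc[ring->tail].len  = skb->len;
            return 0;
    }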
|
/drivers/net/ethernet/qlogic/netxen/ |
D | netxen_nic_ctx.c |
    448  struct nx_host_tx_ring *tx_ring = adapter->tx_ring;  in nx_fw_cmd_create_tx_ctx() local
    490  prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);  in nx_fw_cmd_create_tx_ctx()
    491  prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);  in nx_fw_cmd_create_tx_ctx()
    503  tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,  in nx_fw_cmd_create_tx_ctx()
    706  struct nx_host_tx_ring *tx_ring;  in netxen_init_old_ctx() local
    712  tx_ring = adapter->tx_ring;  in netxen_init_old_ctx()
    716  hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);  in netxen_init_old_ctx()
    717  hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);  in netxen_init_old_ctx()
    762  struct nx_host_tx_ring *tx_ring;  in netxen_alloc_hw_resources() local
    769  tx_ring = adapter->tx_ring;  in netxen_alloc_hw_resources()
    [all …]
|
/drivers/net/ethernet/qlogic/qlge/ |
D | qlge_main.c |
    2111  struct tx_ring *tx_ring;  in ql_process_mac_tx_intr() local
    2115  tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];  in ql_process_mac_tx_intr()
    2116  tx_ring_desc = &tx_ring->q[mac_rsp->tid];  in ql_process_mac_tx_intr()
    2118  tx_ring->tx_bytes += (tx_ring_desc->skb)->len;  in ql_process_mac_tx_intr()
    2119  tx_ring->tx_packets++;  in ql_process_mac_tx_intr()
    2144  atomic_inc(&tx_ring->tx_count);  in ql_process_mac_tx_intr()
    2213  struct tx_ring *tx_ring;  in ql_clean_outbound_rx_ring() local
    2241  tx_ring = &qdev->tx_ring[net_rsp->txq_idx];  in ql_clean_outbound_rx_ring()
    2242  if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {  in ql_clean_outbound_rx_ring()
    2243  if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))  in ql_clean_outbound_rx_ring()
    [all …]
|
/drivers/net/vmxnet3/ |
D | vmxnet3_drv.c |
    336  BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);  in vmxnet3_unmap_pkt()
    337  BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);  in vmxnet3_unmap_pkt()
    343  VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);  in vmxnet3_unmap_pkt()
    345  while (tq->tx_ring.next2comp != eop_idx) {  in vmxnet3_unmap_pkt()
    346  vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,  in vmxnet3_unmap_pkt()
    354  vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);  in vmxnet3_unmap_pkt()
    383  vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >  in vmxnet3_tq_tx_complete()
    400  while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {  in vmxnet3_tq_cleanup()
    403  tbi = tq->buf_info + tq->tx_ring.next2comp;  in vmxnet3_tq_cleanup()
    410  vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);  in vmxnet3_tq_cleanup()
    [all …]
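vmxnet3 tracks its command ring with a fill cursor (next2fill) and a completion cursor (next2comp); free space is the gap between them minus one slot, so "full" and "empty" stay distinguishable. A runnable userspace model matching the behavior of vmxnet3_cmd_ring_desc_avail():

    #include <assert.h>

    static unsigned int desc_avail(unsigned int next2fill,
                                   unsigned int next2comp, unsigned int size)
    {
            return (next2comp > next2fill) ?
                    next2comp - next2fill - 1 :
                    size - next2fill + next2comp - 1;
    }

    int main(void)
    {
            assert(desc_avail(0, 0, 8) == 7);  /* empty: size - 1 usable */
            assert(desc_avail(3, 0, 8) == 4);  /* three slots in flight */
            assert(desc_avail(7, 0, 8) == 0);  /* full: fill must not catch comp */
            return 0;
    }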
|