Lines matching refs: tx_ring
2110 struct tx_ring *tx_ring; in ql_process_mac_tx_intr() local
2114 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; in ql_process_mac_tx_intr()
2115 tx_ring_desc = &tx_ring->q[mac_rsp->tid]; in ql_process_mac_tx_intr()
2117 tx_ring->tx_bytes += (tx_ring_desc->skb)->len; in ql_process_mac_tx_intr()
2118 tx_ring->tx_packets++; in ql_process_mac_tx_intr()
2143 atomic_inc(&tx_ring->tx_count); in ql_process_mac_tx_intr()
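The completion handler resolves the response back to a ring and a slot, charges the byte and packet counters before the skb is freed, and returns the slot to the free pool. A condensed sketch of that flow, using only the fields visible in the listing; the DMA unmap and skb-free steps between lines 2118 and 2143 are elided:

    /* TX completion: mac_rsp->txq_idx picks the ring, mac_rsp->tid the
     * slot. Stats must be read from the skb before it is freed.
     */
    tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
    tx_ring_desc = &tx_ring->q[mac_rsp->tid];
    tx_ring->tx_bytes += tx_ring_desc->skb->len;
    tx_ring->tx_packets++;
    /* ... unmap DMA segments, free the skb ... */
    atomic_inc(&tx_ring->tx_count);   /* one more slot for qlge_send() */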
2212 struct tx_ring *tx_ring; in ql_clean_outbound_rx_ring() local
2240 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; in ql_clean_outbound_rx_ring()
2241 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { in ql_clean_outbound_rx_ring()
2242 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) in ql_clean_outbound_rx_ring()
2247 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in ql_clean_outbound_rx_ring()
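The lines above are the back-pressure release for the send path. Rearranged for readability (a condensation of lines 2240-2247, with the intervening checks elided):

    /* Only wake a stopped subqueue once at least a quarter of the work
     * queue is free again; the hysteresis keeps the stack from
     * oscillating between stop and wake under load.
     */
    tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
    if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
        atomic_read(&tx_ring->tx_count) > tx_ring->wq_len / 4)
        netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);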
2649 struct tx_ring *tx_ring; in qlge_send() local
2652 tx_ring = &qdev->tx_ring[tx_ring_idx]; in qlge_send()
2657 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { in qlge_send()
2661 netif_stop_subqueue(ndev, tx_ring->wq_id); in qlge_send()
2662 tx_ring->tx_errors++; in qlge_send()
2665 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; in qlge_send()
2697 tx_ring->tx_errors++; in qlge_send()
2701 tx_ring->prod_idx++; in qlge_send()
2702 if (tx_ring->prod_idx == tx_ring->wq_len) in qlge_send()
2703 tx_ring->prod_idx = 0; in qlge_send()
2706 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); in qlge_send()
2709 tx_ring->prod_idx, skb->len); in qlge_send()
2711 atomic_dec(&tx_ring->tx_count); in qlge_send()
2713 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { in qlge_send()
2714 netif_stop_subqueue(ndev, tx_ring->wq_id); in qlge_send()
2715 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) in qlge_send()
2720 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in qlge_send()
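The send path brackets the hardware hand-off with two occupancy checks: a guard before building the IOCB (lines 2657-2662, which stops the subqueue and counts an error) and a re-check after the doorbell (lines 2713-2720), which closes the race where the last completion fires just before the queue is stopped. A runnable user-space model of the producer flow, with illustrative names; the IOCB build and the ql_write_db_reg() doorbell write are stubbed out:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct model_ring {
        atomic_int tx_count;   /* free descriptor slots */
        uint16_t prod_idx;     /* next slot to fill */
        uint16_t wq_len;       /* entries in the work queue */
    };

    static bool model_send(struct model_ring *r)
    {
        if (atomic_load(&r->tx_count) < 2)
            return false;              /* ring full: stop the subqueue */

        /* ... build the IOCB in slot r->prod_idx ... */

        if (++r->prod_idx == r->wq_len)
            r->prod_idx = 0;           /* wrap, as lines 2701-2703 do */
        /* doorbell write telling hardware about the new prod_idx */

        atomic_fetch_sub(&r->tx_count, 1);
        if (atomic_load(&r->tx_count) < 2) {
            /* stop the subqueue, then wake it again if completions
             * freed more than wq_len / 4 slots in the meantime */
        }
        return true;
    }

The `< 2` test keeps one slot in reserve, the usual ring convention that stops the producer index from catching the consumer so a full ring is never confused with an empty one.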
2773 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_init_tx_ring() argument
2779 mac_iocb_ptr = tx_ring->wq_base; in ql_init_tx_ring()
2780 tx_ring_desc = tx_ring->q; in ql_init_tx_ring()
2781 for (i = 0; i < tx_ring->wq_len; i++) { in ql_init_tx_ring()
2788 atomic_set(&tx_ring->tx_count, tx_ring->wq_len); in ql_init_tx_ring()
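Initialization pairs each software descriptor with its hardware IOCB slot once, so the send and completion paths can index both arrays with the same cursor. The loop body elided from the listing looks roughly like this; the index and queue_entry field names are reconstructed from the driver, not shown above:

    mac_iocb_ptr = tx_ring->wq_base;
    tx_ring_desc = tx_ring->q;
    for (i = 0; i < tx_ring->wq_len; i++) {
        tx_ring_desc->index = i;       /* echoed back as the tid in completions */
        tx_ring_desc->skb = NULL;
        tx_ring_desc->queue_entry = mac_iocb_ptr;
        mac_iocb_ptr++;
        tx_ring_desc++;
    }
    atomic_set(&tx_ring->tx_count, tx_ring->wq_len);   /* all slots free */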
2792 struct tx_ring *tx_ring) in ql_free_tx_resources() argument
2794 if (tx_ring->wq_base) { in ql_free_tx_resources()
2795 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_free_tx_resources()
2796 tx_ring->wq_base, tx_ring->wq_base_dma); in ql_free_tx_resources()
2797 tx_ring->wq_base = NULL; in ql_free_tx_resources()
2799 kfree(tx_ring->q); in ql_free_tx_resources()
2800 tx_ring->q = NULL; in ql_free_tx_resources()
2804 struct tx_ring *tx_ring) in ql_alloc_tx_resources() argument
2806 tx_ring->wq_base = in ql_alloc_tx_resources()
2807 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2808 &tx_ring->wq_base_dma); in ql_alloc_tx_resources()
2810 if ((tx_ring->wq_base == NULL) || in ql_alloc_tx_resources()
2811 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) in ql_alloc_tx_resources()
2814 tx_ring->q = in ql_alloc_tx_resources()
2815 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); in ql_alloc_tx_resources()
2816 if (tx_ring->q == NULL) in ql_alloc_tx_resources()
2821 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2822 tx_ring->wq_base, tx_ring->wq_base_dma); in ql_alloc_tx_resources()
2823 tx_ring->wq_base = NULL; in ql_alloc_tx_resources()
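Allocation couples two buffers whose lifetimes must match: the DMA-coherent work queue the hardware reads, and the kmalloc()ed shadow array of tx_ring_desc the driver indexes in parallel. Note the alignment test folded into the failure check: WQ_ADDR_ALIGN masks low address bits that must be clear in the queue base the chip is given. A condensed sketch with paraphrased error labels:

    tx_ring->wq_base = pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
                                            &tx_ring->wq_base_dma);
    if (tx_ring->wq_base == NULL ||
        (tx_ring->wq_base_dma & WQ_ADDR_ALIGN))
        goto err;                       /* no buffer, or base misaligned */

    tx_ring->q = kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc),
                         GFP_KERNEL);
    if (tx_ring->q == NULL)
        goto free_wq;                   /* unwind: lines 2821-2823 */
    return 0;

    free_wq:
        pci_free_consistent(qdev->pdev, tx_ring->wq_size,
                            tx_ring->wq_base, tx_ring->wq_base_dma);
        tx_ring->wq_base = NULL;
    err:
        return -ENOMEM;

pci_alloc_consistent() is the legacy PCI wrapper around dma_alloc_coherent(); modern code would call dma_alloc_coherent() directly and use kmalloc_array() to guard the length multiplication against overflow.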
3063 struct tx_ring *tx_ring; in ql_tx_ring_clean() local
3072 tx_ring = &qdev->tx_ring[j]; in ql_tx_ring_clean()
3073 for (i = 0; i < tx_ring->wq_len; i++) { in ql_tx_ring_clean()
3074 tx_ring_desc = &tx_ring->q[i]; in ql_tx_ring_clean()
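ql_tx_ring_clean() runs at teardown or reset, when completions will never arrive for frames still in flight; any descriptor that still holds an skb must be unmapped and freed by hand or it leaks. A hypothetical condensation of the walk (the driver's DMA unmap helper is elided):

    for (j = 0; j < qdev->tx_ring_count; j++) {
        tx_ring = &qdev->tx_ring[j];
        for (i = 0; i < tx_ring->wq_len; i++) {
            tx_ring_desc = &tx_ring->q[i];
            if (tx_ring_desc->skb) {
                /* driver unmaps the DMA segments here, then: */
                dev_kfree_skb(tx_ring_desc->skb);
                tx_ring_desc->skb = NULL;
            }
        }
    }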
3094 ql_free_tx_resources(qdev, &qdev->tx_ring[i]); in ql_free_mem_resources()
3117 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { in ql_alloc_mem_resources()
3266 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_start_tx_ring() argument
3268 struct wqicb *wqicb = (struct wqicb *)tx_ring; in ql_start_tx_ring()
3270 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); in ql_start_tx_ring()
3272 (tx_ring->wq_id * sizeof(u64)); in ql_start_tx_ring()
3274 (tx_ring->wq_id * sizeof(u64)); in ql_start_tx_ring()
3281 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; in ql_start_tx_ring()
3282 tx_ring->prod_idx = 0; in ql_start_tx_ring()
3284 tx_ring->valid_db_reg = doorbell_area + 0x04; in ql_start_tx_ring()
3289 tx_ring->cnsmr_idx_sh_reg = shadow_reg; in ql_start_tx_ring()
3290 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; in ql_start_tx_ring()
3292 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); in ql_start_tx_ring()
3295 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); in ql_start_tx_ring()
3297 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); in ql_start_tx_ring()
3299 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); in ql_start_tx_ring()
3301 ql_init_tx_ring(qdev, tx_ring); in ql_start_tx_ring()
3304 (u16) tx_ring->wq_id); in ql_start_tx_ring()
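ql_start_tx_ring() maps the queue's control surfaces before loading the work-queue initialization control block (WQICB) into the chip: the producer doorbell sits in a per-queue DB_PAGE_SIZE window of the adapter's doorbell region, while the consumer index is written back by hardware into a per-queue u64 slot of a shadow area in host memory, so the driver can poll completions without an MMIO read. A condensed sketch; the qdev shadow-area field names (tx_ring_shadow_reg_area/_dma) are reconstructed from the driver, not shown in the listing:

    void __iomem *doorbell_area =
        qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
    void *shadow_reg = qdev->tx_ring_shadow_reg_area +
                       (tx_ring->wq_id * sizeof(u64));
    dma_addr_t shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
                                (tx_ring->wq_id * sizeof(u64));

    tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
    tx_ring->prod_idx = 0;
    tx_ring->valid_db_reg = doorbell_area + 0x04;
    tx_ring->cnsmr_idx_sh_reg = shadow_reg;
    tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

    /* Control block handed to the chip: the length field carries the
     * Q_LEN_V and Q_LEN_CPP_CONT flags in its high bits, and every
     * field is byte-swapped to the hardware's little-endian layout.
     */
    wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
    wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
    wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
    wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);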
3842 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); in ql_adapter_initialize()
4109 struct tx_ring *tx_ring; in ql_configure_rings() local
4131 tx_ring = &qdev->tx_ring[i]; in ql_configure_rings()
4132 memset((void *)tx_ring, 0, sizeof(*tx_ring)); in ql_configure_rings()
4133 tx_ring->qdev = qdev; in ql_configure_rings()
4134 tx_ring->wq_id = i; in ql_configure_rings()
4135 tx_ring->wq_len = qdev->tx_ring_size; in ql_configure_rings()
4136 tx_ring->wq_size = in ql_configure_rings()
4137 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); in ql_configure_rings()
4143 tx_ring->cq_id = qdev->rss_ring_count + i; in ql_configure_rings()
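Ring identity is wired up here once and relied on everywhere above: wq_id is both the index into qdev->tx_ring[] and the subqueue number passed to netif_stop_subqueue()/netif_wake_subqueue(), and cq_id points the queue's completions at an outbound completion ring placed after the rss_ring_count inbound rings. A compact restatement of the per-ring setup from lines 4131-4143:

    for (i = 0; i < qdev->tx_ring_count; i++) {
        tx_ring = &qdev->tx_ring[i];
        memset(tx_ring, 0, sizeof(*tx_ring));
        tx_ring->qdev = qdev;
        tx_ring->wq_id = i;                 /* doubles as subqueue index */
        tx_ring->wq_len = qdev->tx_ring_size;
        tx_ring->wq_size =
            tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
        /* outbound completions land after the inbound RSS rings */
        tx_ring->cq_id = qdev->rss_ring_count + i;
    }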
4302 struct tx_ring *tx_ring = &qdev->tx_ring[0]; in qlge_get_stats() local
4323 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { in qlge_get_stats()
4324 pkts += tx_ring->tx_packets; in qlge_get_stats()
4325 bytes += tx_ring->tx_bytes; in qlge_get_stats()
4326 errors += tx_ring->tx_errors; in qlge_get_stats()
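The stats path takes a snapshot sum of the per-ring counters maintained by qlge_send() and the completion handler above. A runnable model of the aggregation loop, with illustrative types:

    #include <stdint.h>

    struct model_tx_ring { uint64_t tx_packets, tx_bytes, tx_errors; };

    static void sum_tx_stats(const struct model_tx_ring *ring, int count,
                             uint64_t *pkts, uint64_t *bytes, uint64_t *errs)
    {
        *pkts = *bytes = *errs = 0;
        for (int i = 0; i < count; i++, ring++) {  /* same walk as line 4323 */
            *pkts  += ring->tx_packets;
            *bytes += ring->tx_bytes;
            *errs  += ring->tx_errors;
        }
    }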