Lines matching refs: txq
1234 netif_tx_stop_queue(q->txq); in eth_txq_stop()
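eth_txq_stop() is the driver's low-credit guard: when a transmit path sees it may run out of descriptors, it stops the backing netdev queue so the stack stops handing it packets. A minimal sketch of the pattern, assuming only the q->txq back-pointer visible in the listing:

/* Sketch: stop the netdev TX queue backing this SGE Ethernet queue. */
static inline void eth_txq_stop_sketch(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);	/* stack stops calling ndo_start_xmit */
}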
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq)) in t4_sge_eth_txq_egress_update()
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { in t4_sge_eth_txq_egress_update()
1442 netif_tx_wake_queue(eq->txq); in t4_sge_eth_txq_egress_update()
1446 __netif_tx_unlock(eq->txq); in t4_sge_eth_txq_egress_update()
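t4_sge_eth_txq_egress_update() is the matching wake side. It takes the per-queue lock opportunistically with __netif_tx_trylock() so it never stalls a concurrent xmit, and it only wakes the stack once hardware has drained the ring below half capacity, which gives hysteresis against stop/wake thrashing. A hedged sketch of that shape (the descriptor-reclaim step that produces hw_in_use is elided):

/* Sketch: wake the stack only when the ring has drained below half
 * capacity; the trylock keeps this off the hot xmit path.
 */
static void egress_update_sketch(struct sge_eth_txq *eq, unsigned int hw_in_use)
{
	if (!__netif_tx_trylock(eq->txq))
		return;			/* xmit path holds the lock; retry later */

	if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (eq->q.size / 2))
		netif_tx_wake_queue(eq->txq);	/* resume ndo_start_xmit */

	__netif_tx_unlock(eq->txq);
}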
1831 struct sge_eth_txq *txq; in cxgb4_vf_eth_xmit() local
1854 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit()
1859 reclaim_completed_tx(adapter, &txq->q, -1, true); in cxgb4_vf_eth_xmit()
1867 credits = txq_avail(&txq->q) - ndesc; in cxgb4_vf_eth_xmit()
1875 eth_txq_stop(txq); in cxgb4_vf_eth_xmit()
1882 last_desc = txq->q.pidx + ndesc - 1; in cxgb4_vf_eth_xmit()
1883 if (last_desc >= txq->q.size) in cxgb4_vf_eth_xmit()
1884 last_desc -= txq->q.size; in cxgb4_vf_eth_xmit()
1885 sgl_sdesc = &txq->q.sdesc[last_desc]; in cxgb4_vf_eth_xmit()
1895 txq->mapping_err++; in cxgb4_vf_eth_xmit()
1909 eth_txq_stop(txq); in cxgb4_vf_eth_xmit()
1919 wr = (void *)&txq->q.desc[txq->q.pidx]; in cxgb4_vf_eth_xmit()
1971 txq->tso++; in cxgb4_vf_eth_xmit()
1972 txq->tx_cso += ssi->gso_segs; in cxgb4_vf_eth_xmit()
1990 txq->tx_cso++; in cxgb4_vf_eth_xmit()
2000 txq->vlan_ins++; in cxgb4_vf_eth_xmit()
2019 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); in cxgb4_vf_eth_xmit()
2059 struct sge_txq *tq = &txq->q; in cxgb4_vf_eth_xmit()
2081 txq_advance(&txq->q, ndesc); in cxgb4_vf_eth_xmit()
2083 cxgb4_ring_tx_db(adapter, &txq->q, ndesc); in cxgb4_vf_eth_xmit()
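Taken together, the cxgb4_vf_eth_xmit() hits trace the canonical descriptor-ring transmit flow: reclaim completed work, check credits, stop the queue before it can overflow, note which descriptor will carry the DMA-mapping record, build the work request (with TSO/checksum/VLAN stats accounting and inlining of small packets), then advance the producer index and ring the doorbell. A condensed, hedged skeleton; tx_desc_needed() is a hypothetical stand-in for the driver's flit-sizing helpers, and the error/unmap paths are trimmed:

/* Sketch of the xmit flow, not the full function. */
static netdev_tx_t vf_eth_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	unsigned int ndesc, last_desc;
	struct sge_eth_txq *txq;
	int credits;

	txq = &adapter->sge.ethtxq[pi->first_qset + skb_get_queue_mapping(skb)];

	reclaim_completed_tx(adapter, &txq->q, -1, true); /* free finished work */

	ndesc = tx_desc_needed(skb);		/* hypothetical sizing helper */
	credits = txq_avail(&txq->q) - ndesc;
	if (unlikely(credits < 0)) {
		eth_txq_stop(txq);		/* stop before the ring overflows */
		return NETDEV_TX_BUSY;
	}

	last_desc = txq->q.pidx + ndesc - 1;	/* descriptor holding the SGL */
	if (last_desc >= txq->q.size)
		last_desc -= txq->q.size;	/* ring wrap */

	/* ... DMA-map, build the FW work request at txq->q.desc[txq->q.pidx],
	 * bump txq->tso / txq->tx_cso / txq->vlan_ins as features apply ...
	 */

	txq_advance(&txq->q, ndesc);			/* publish new pidx */
	cxgb4_ring_tx_db(adapter, &txq->q, ndesc);	/* doorbell to HW */
	return NETDEV_TX_OK;
}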
2681 __netif_tx_lock(q->txq, smp_processor_id()); in cxgb4_selftest_lb_pkt()
2686 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
2721 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
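cxgb4_selftest_lb_pkt() injects a loopback packet from outside the normal xmit path, so it must take the same per-queue lock the stack takes around ndo_start_xmit; the second unlock in the listing is the error path. Sketch of the bracketing, with packet construction elided:

/* Sketch: direct descriptor writes are serialized against the xmit path. */
static void lb_pkt_locked_sketch(struct sge_eth_txq *q)
{
	__netif_tx_lock(q->txq, smp_processor_id());
	/* ... check credits, write the loopback WR, ring the doorbell ... */
	__netif_tx_unlock(q->txq);
}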
2948 struct sge_txq *txq; in service_ofldq() local
2998 txq = &q->q; in service_ofldq()
3003 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3004 end = (void *)txq->desc + left; in service_ofldq()
3011 if (pos == (u64 *)txq->stat) { in service_ofldq()
3012 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3013 end = (void *)txq->desc + left; in service_ofldq()
3014 pos = (void *)txq->desc; in service_ofldq()
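These service_ofldq() hits are the ring-wrap idiom: the space after the last real descriptor holds a status page (txq->stat), so an end pointer that would land past it is recomputed to spill over to txq->desc at the start of the ring, and a write cursor that reaches the status page wraps the same way. A hedged sketch of the wrap step:

/* Sketch: wrap an inline-copy cursor around the status page at the end
 * of the descriptor ring.
 */
static void wrap_at_status_page(struct sge_txq *txq, u64 **pos, void **end)
{
	if (*pos == (u64 *)txq->stat) {			 /* hit the status page */
		int left = (u8 *)*end - (u8 *)txq->stat; /* bytes past it */

		*end = (void *)txq->desc + left;	/* wrapped end pointer */
		*pos = (void *)txq->desc;		/* continue at ring start */
	}
}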
3128 struct sge_uld_txq *txq; in uld_send() local
3145 txq = &txq_info->uldtxq[idx]; in uld_send()
3146 return ofld_xmit(txq, skb); in uld_send()
3258 struct sge_uld_txq *txq; in cxgb4_immdata_send() local
3271 txq = &txq_info->uldtxq[idx]; in cxgb4_immdata_send()
3273 ret = ofld_xmit_direct(txq, src, len); in cxgb4_immdata_send()
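uld_send() and cxgb4_immdata_send() resolve their queue the same way: index into the per-ULD transmit-queue array, then hand off to the offload xmit helper (skb-based in one case, immediate data in the other). A hedged sketch of the skb-based dispatch; the txq_info lookup and flow control live elsewhere:

/* Sketch: pick a ULD TX queue by index and dispatch to it. */
static int uld_send_sketch(struct sge_uld_txq_info *txq_info,
			   struct sk_buff *skb, unsigned int idx)
{
	struct sge_uld_txq *txq = &txq_info->uldtxq[idx];

	return ofld_xmit(txq, skb);	/* backlogs on txq->sendq if the ring is full */
}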
3592 struct sge_eth_txq *txq; in t4_tx_completion_handler() local
3613 txq = &s->ethtxq[pi->first_qset + rspq->idx]; in t4_tx_completion_handler()
3614 t4_sge_eth_txq_egress_update(adapter, txq, -1); in t4_tx_completion_handler()
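t4_tx_completion_handler() closes the loop: a TX completion message on a response queue is mapped back to its Ethernet TX queue, and the egress-update path above reclaims descriptors and wakes the queue; the -1 budget means reclaim everything available. Sketch (the rspq->netdev back-pointer is an assumption):

/* Sketch: route a TX completion to the queue's egress-update reclaim. */
static void tx_completion_sketch(struct adapter *adapter, struct sge_rspq *rspq)
{
	struct port_info *pi = netdev_priv(rspq->netdev);	/* assumed field */
	struct sge_eth_txq *txq;

	txq = &adapter->sge.ethtxq[pi->first_qset + rspq->idx];
	t4_sge_eth_txq_egress_update(adapter, txq, -1);	/* -1: unbounded reclaim */
}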
4268 struct sge_uld_txq *txq = s->egr_map[id]; in sge_tx_timer_cb() local
4271 tasklet_schedule(&txq->qresume_tsk); in sge_tx_timer_cb()
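The TX timer walks the egress map and kicks stalled offload queues by scheduling their resume tasklet, which later drains the queue's sendq backlog. Sketch of one kick:

/* Sketch: resume a stalled offload queue from timer context. */
static void kick_stalled_txq(struct sge *s, unsigned int id)
{
	struct sge_uld_txq *txq = s->egr_map[id];	/* as in the listing */

	if (txq)
		tasklet_schedule(&txq->qresume_tsk);	/* drains txq->sendq */
}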
4565 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, in t4_sge_alloc_eth_txq() argument
4576 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
4578 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
4580 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
4582 if (!txq->q.desc) in t4_sge_alloc_eth_txq()
4616 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4627 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); in t4_sge_alloc_eth_txq()
4631 kfree(txq->q.sdesc); in t4_sge_alloc_eth_txq()
4632 txq->q.sdesc = NULL; in t4_sge_alloc_eth_txq()
4635 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4636 txq->q.desc = NULL; in t4_sge_alloc_eth_txq()
4640 txq->q.q_type = CXGB4_TXQ_ETH; in t4_sge_alloc_eth_txq()
4641 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
4642 txq->txq = netdevq; in t4_sge_alloc_eth_txq()
4643 txq->tso = 0; in t4_sge_alloc_eth_txq()
4644 txq->uso = 0; in t4_sge_alloc_eth_txq()
4645 txq->tx_cso = 0; in t4_sge_alloc_eth_txq()
4646 txq->vlan_ins = 0; in t4_sge_alloc_eth_txq()
4647 txq->mapping_err = 0; in t4_sge_alloc_eth_txq()
4648 txq->dbqt = dbqt; in t4_sge_alloc_eth_txq()
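t4_sge_alloc_eth_txq() follows a fixed alloc/commit/unwind shape: size the ring with room for the trailing status page (s->stat_len), DMA-allocate descriptors plus software descriptors, issue a FW_EQ_ETH_CMD carrying the ring's bus address, and commit software state (queue type, netdev back-pointer, zeroed stats) only after firmware accepts; on failure everything is freed and q.desc reset to NULL so later teardown can tell the queue never existed. A condensed, hedged sketch; post_fw_eq_eth_cmd() is a hypothetical stand-in for the mailbox command setup:

/* Sketch of the alloc/commit/unwind shape. */
static int alloc_eth_txq_sketch(struct adapter *adap, struct sge_eth_txq *txq,
				struct netdev_queue *netdevq)
{
	struct sge *s = &adap->sge;
	unsigned int nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
	int ret;

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
				 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
				 dev_to_node(adap->pdev_dev));
	if (!txq->q.desc)
		return -ENOMEM;

	ret = post_fw_eq_eth_cmd(adap, txq);	/* hypothetical mailbox wrapper */
	if (ret) {
		kfree(txq->q.sdesc);		/* unwind in reverse order */
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;		/* mark the queue as never created */
		return ret;
	}

	txq->q.q_type = CXGB4_TXQ_ETH;
	txq->txq = netdevq;			/* back-pointer for stop/wake */
	txq->tso = txq->uso = txq->tx_cso = 0;
	txq->vlan_ins = txq->mapping_err = 0;
	return 0;
}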
4653 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, in t4_sge_alloc_ctrl_txq() argument
4664 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ctrl_txq()
4666 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
4667 sizeof(struct tx_desc), 0, &txq->q.phys_addr, in t4_sge_alloc_ctrl_txq()
4669 if (!txq->q.desc) in t4_sge_alloc_ctrl_txq()
4691 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4697 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4698 txq->q.desc = NULL; in t4_sge_alloc_ctrl_txq()
4702 txq->q.q_type = CXGB4_TXQ_CTRL; in t4_sge_alloc_ctrl_txq()
4703 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
4704 txq->adap = adap; in t4_sge_alloc_ctrl_txq()
4705 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_ctrl_txq()
4706 tasklet_setup(&txq->qresume_tsk, restart_ctrlq); in t4_sge_alloc_ctrl_txq()
4707 txq->full = 0; in t4_sge_alloc_ctrl_txq()
4779 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, in t4_sge_alloc_uld_txq() argument
4789 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); in t4_sge_alloc_uld_txq()
4793 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_uld_txq()
4794 txq->adap = adap; in t4_sge_alloc_uld_txq()
4795 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_uld_txq()
4796 tasklet_setup(&txq->qresume_tsk, restart_ofldq); in t4_sge_alloc_uld_txq()
4797 txq->full = 0; in t4_sge_alloc_uld_txq()
4798 txq->mapping_err = 0; in t4_sge_alloc_uld_txq()
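The control and ULD constructors end identically: each queue gets a software backlog (sendq) and a resume tasklet, so that when the hardware ring fills, packets park on sendq, full is set, and the tasklet drains the backlog once credits return; the listing shows restart_ctrlq and restart_ofldq as the respective callbacks. Sketch of the shared tail, written for the ULD type (the ctrl queue carries the same fields):

/* Sketch: common software-state init for backlogged TX queues. */
static void init_backlogged_txq(struct adapter *adap, struct sge_uld_txq *txq,
				void (*resume)(struct tasklet_struct *))
{
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);	  /* backlog while ring is full */
	tasklet_setup(&txq->qresume_tsk, resume); /* drains sendq on credits */
	txq->full = 0;				  /* ring not yet exhausted */
	txq->mapping_err = 0;
}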
4802 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, in t4_sge_alloc_ethofld_txq() argument
4807 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); in t4_sge_alloc_ethofld_txq()
4811 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_ethofld_txq()
4812 spin_lock_init(&txq->lock); in t4_sge_alloc_ethofld_txq()
4813 txq->adap = adap; in t4_sge_alloc_ethofld_txq()
4814 txq->tso = 0; in t4_sge_alloc_ethofld_txq()
4815 txq->uso = 0; in t4_sge_alloc_ethofld_txq()
4816 txq->tx_cso = 0; in t4_sge_alloc_ethofld_txq()
4817 txq->vlan_ins = 0; in t4_sge_alloc_ethofld_txq()
4818 txq->mapping_err = 0; in t4_sge_alloc_ethofld_txq()
4877 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq) in t4_sge_free_ethofld_txq() argument
4879 if (txq->q.desc) { in t4_sge_free_ethofld_txq()
4881 txq->q.cntxt_id); in t4_sge_free_ethofld_txq()
4882 free_tx_desc(adap, &txq->q, txq->q.in_use, false); in t4_sge_free_ethofld_txq()
4883 kfree(txq->q.sdesc); in t4_sge_free_ethofld_txq()
4884 free_txq(adap, &txq->q); in t4_sge_free_ethofld_txq()
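Teardown mirrors allocation in reverse: release the hardware egress context by cntxt_id, free any still-in-flight software descriptors, then the sdesc array, then the ring itself; the q.desc check makes the function safe on a queue whose allocation never completed. Hedged sketch; release_eq_context() is a hypothetical stand-in for the firmware EQ-free call:

/* Sketch: free an ETHOFLD TX queue, tolerating a half-built one. */
static void free_ethofld_txq_sketch(struct adapter *adap,
				    struct sge_eohw_txq *txq)
{
	if (!txq->q.desc)
		return;				/* never allocated, nothing to do */

	release_eq_context(adap, txq->q.cntxt_id);	   /* hypothetical FW call */
	free_tx_desc(adap, &txq->q, txq->q.in_use, false); /* unmap in-flight skbs */
	kfree(txq->q.sdesc);
	free_txq(adap, &txq->q);	/* DMA ring free, per the listing */
}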
4926 __netif_tx_lock_bh(etq->txq); in t4_free_sge_resources()
4928 __netif_tx_unlock_bh(etq->txq); in t4_free_sge_resources()
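The _bh lock variants in t4_free_sge_resources() give the same per-queue serialization as in cxgb4_selftest_lb_pkt() above while also disabling bottom halves, presumably because teardown can race with transmit activity running in softirq context.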
5008 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop() local
5011 if (txq->q.desc) in t4_sge_stop()
5012 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5022 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop() local
5025 if (txq->q.desc) in t4_sge_stop()
5026 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
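t4_sge_stop() runs the same quiesce loop once per ULD direction: every allocated offload queue has its resume tasklet killed so no deferred restart can touch a ring while it is being torn down; tasklet_kill() also waits out a tasklet that is currently running. Sketch of one loop (the queue count parameter is an assumption):

/* Sketch: quiesce resume tasklets before tearing down offload rings. */
static void quiesce_uld_txqs(struct sge_uld_txq_info *txq_info, unsigned int nq)
{
	struct sge_uld_txq *txq = txq_info->uldtxq;
	unsigned int i;

	for (i = 0; i < nq; i++, txq++)
		if (txq->q.desc)		/* skip never-allocated queues */
			tasklet_kill(&txq->qresume_tsk);
}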