
Lines matching refs:txq in drivers/net/ethernet/chelsio/cxgb4/sge.c (Linux cxgb4 driver)

1234 netif_tx_stop_queue(q->txq); in eth_txq_stop()
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq)) in t4_sge_eth_txq_egress_update()
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { in t4_sge_eth_txq_egress_update()
1442 netif_tx_wake_queue(eq->txq); in t4_sge_eth_txq_egress_update()
1446 __netif_tx_unlock(eq->txq); in t4_sge_eth_txq_egress_update()
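Lines 1234 and 1425-1446 are the two halves of the standard netdev TX flow-control pairing: eth_txq_stop() stops the software queue when the hardware ring runs out of descriptors, and t4_sge_eth_txq_egress_update() wakes it once the ring drains below half occupancy. A minimal sketch of the wake side, assuming hw_in_use is the current ring occupancy (the listing does not show how it is computed):

    /* Wake side of the stop/wake pairing; eq->txq is the struct
     * netdev_queue bound to this hardware ring. */
    if (!q->in_use || !__netif_tx_trylock(eq->txq))
            return 0;  /* nothing in flight, or another CPU holds the lock */

    /* Waking only below half occupancy adds hysteresis, so a queue that
     * just stopped is not immediately re-woken and re-stopped. */
    if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2))
            netif_tx_wake_queue(eq->txq);

    __netif_tx_unlock(eq->txq);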
1832 struct sge_eth_txq *txq; in cxgb4_vf_eth_xmit() local
1858 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit()
1863 reclaim_completed_tx(adapter, &txq->q, -1, true); in cxgb4_vf_eth_xmit()
1871 credits = txq_avail(&txq->q) - ndesc; in cxgb4_vf_eth_xmit()
1879 eth_txq_stop(txq); in cxgb4_vf_eth_xmit()
1886 last_desc = txq->q.pidx + ndesc - 1; in cxgb4_vf_eth_xmit()
1887 if (last_desc >= txq->q.size) in cxgb4_vf_eth_xmit()
1888 last_desc -= txq->q.size; in cxgb4_vf_eth_xmit()
1889 sgl_sdesc = &txq->q.sdesc[last_desc]; in cxgb4_vf_eth_xmit()
1899 txq->mapping_err++; in cxgb4_vf_eth_xmit()
1914 eth_txq_stop(txq); in cxgb4_vf_eth_xmit()
1925 wr = (void *)&txq->q.desc[txq->q.pidx]; in cxgb4_vf_eth_xmit()
1977 txq->tso++; in cxgb4_vf_eth_xmit()
1978 txq->tx_cso += ssi->gso_segs; in cxgb4_vf_eth_xmit()
1996 txq->tx_cso++; in cxgb4_vf_eth_xmit()
2006 txq->vlan_ins++; in cxgb4_vf_eth_xmit()
2025 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); in cxgb4_vf_eth_xmit()
2065 struct sge_txq *tq = &txq->q; in cxgb4_vf_eth_xmit()
2087 txq_advance(&txq->q, ndesc); in cxgb4_vf_eth_xmit()
2089 cxgb4_ring_tx_db(adapter, &txq->q, ndesc); in cxgb4_vf_eth_xmit()
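Taken together, the cxgb4_vf_eth_xmit() hits trace a typical descriptor-ring send: reclaim completed descriptors (1863), compute remaining credits (1871), stop the queue if the packet does not fit (1879), locate the packet's last descriptor with wrap-around (1886-1889), then advance the producer index and ring the doorbell (2087-2089). A condensed sketch of the credit and wrap arithmetic, using the helpers named in the listing (their exact behavior is an assumption):

    credits = txq_avail(&txq->q) - ndesc;      /* free slots after this packet */
    if (unlikely(credits < 0)) {
            eth_txq_stop(txq);                 /* netif_tx_stop_queue(), per line 1234 */
            return NETDEV_TX_BUSY;             /* assumption: stack retries later */
    }

    /* The ring is circular: wrap the index of the packet's last
     * descriptor and keep its software state for later unmapping. */
    last_desc = txq->q.pidx + ndesc - 1;
    if (last_desc >= txq->q.size)
            last_desc -= txq->q.size;
    sgl_sdesc = &txq->q.sdesc[last_desc];

    txq_advance(&txq->q, ndesc);               /* bump the producer index */
    cxgb4_ring_tx_db(adapter, &txq->q, ndesc); /* doorbell: ndesc new descriptors */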
2687 __netif_tx_lock_bh(q->txq); in cxgb4_selftest_lb_pkt()
2692 __netif_tx_unlock_bh(q->txq); in cxgb4_selftest_lb_pkt()
2727 __netif_tx_unlock_bh(q->txq); in cxgb4_selftest_lb_pkt()
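cxgb4_selftest_lb_pkt() injects a loopback packet from outside the normal ndo_start_xmit() path, so it takes the TX queue lock itself with bottom halves disabled; a sketch of the bracketing (the work-request construction between the two calls is not part of the match):

    __netif_tx_lock_bh(q->txq);   /* process context, so disable BHs too */
    /* ... build the loopback work request in q and ring the doorbell ... */
    __netif_tx_unlock_bh(q->txq);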
2954 struct sge_txq *txq; in service_ofldq() local
3004 txq = &q->q; in service_ofldq()
3009 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3010 end = (void *)txq->desc + left; in service_ofldq()
3017 if (pos == (u64 *)txq->stat) { in service_ofldq()
3018 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3019 end = (void *)txq->desc + left; in service_ofldq()
3020 pos = (void *)txq->desc; in service_ofldq()
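The service_ofldq() hits show how an inline copy into the ring wraps around the status page: the last bytes of the ring, starting at txq->stat, are reserved for hardware status, so a copy that would run into them is redirected to the start of the descriptor array. A sketch, with pos and end as the 64-bit write cursor and end-of-copy pointer per the listing:

    /* If 'end' ran past the status page, carry the overflow back to the
     * start of the ring; 'left' is how many bytes still need writing. */
    left = (u8 *)end - (u8 *)txq->stat;
    end = (void *)txq->desc + left;

    /* Likewise, if the cursor itself landed on the status page, the
     * next word to write is descriptor 0. */
    if (pos == (u64 *)txq->stat)
            pos = (void *)txq->desc;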
3134 struct sge_uld_txq *txq; in uld_send() local
3151 txq = &txq_info->uldtxq[idx]; in uld_send()
3152 return ofld_xmit(txq, skb); in uld_send()
3264 struct sge_uld_txq *txq; in cxgb4_immdata_send() local
3277 txq = &txq_info->uldtxq[idx]; in cxgb4_immdata_send()
3279 ret = ofld_xmit_direct(txq, src, len); in cxgb4_immdata_send()
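uld_send() and cxgb4_immdata_send() share one dispatch shape: index into the per-ULD queue array, then hand off. The difference is the payload; ofld_xmit() takes an skb, while ofld_xmit_direct() copies len bytes from src straight into the work request, which suits small immediate-data sends. A sketch of the dispatch (how idx is derived is not shown in the listing):

    txq = &txq_info->uldtxq[idx];       /* idx: assumption, picked per flow/CPU */
    return ofld_xmit(txq, skb);         /* or: ofld_xmit_direct(txq, src, len) */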
3598 struct sge_eth_txq *txq; in t4_tx_completion_handler() local
3619 txq = &s->ethtxq[pi->first_qset + rspq->idx]; in t4_tx_completion_handler()
3636 WRITE_ONCE(txq->q.stat->cidx, egr->cidx); in t4_tx_completion_handler()
3639 t4_sge_eth_txq_egress_update(adapter, txq, -1); in t4_tx_completion_handler()
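t4_tx_completion_handler() closes the loop: the completion message carries the hardware's consumer index, which the handler publishes into the ring's status page before running the same egress update that wakes stopped queues (1425-1446):

    /* WRITE_ONCE() pairs with lock-free readers of the status page. */
    WRITE_ONCE(txq->q.stat->cidx, egr->cidx);

    /* -1 plausibly means "no cap on reclaim", matching the -1 passed to
     * reclaim_completed_tx() at line 1863 (assumption). */
    t4_sge_eth_txq_egress_update(adapter, txq, -1);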
4293 struct sge_uld_txq *txq = s->egr_map[id]; in sge_tx_timer_cb() local
4296 tasklet_schedule(&txq->qresume_tsk); in sge_tx_timer_cb()
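sge_tx_timer_cb() is the backstop for offload queues that stalled with skbs parked on their sendq: the timer looks the queue up by its egress-queue id and schedules its resume tasklet. tasklet_schedule() only marks the tasklet runnable; the actual drain (restart_ofldq(), line 4826) runs later in softirq context.

    struct sge_uld_txq *txq = s->egr_map[id];  /* egress-queue id -> txq */
    tasklet_schedule(&txq->qresume_tsk);       /* drains txq->sendq in softirq */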
4590 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, in t4_sge_alloc_eth_txq() argument
4601 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
4603 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
4605 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
4607 if (!txq->q.desc) in t4_sge_alloc_eth_txq()
4646 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4657 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); in t4_sge_alloc_eth_txq()
4661 kfree(txq->q.sdesc); in t4_sge_alloc_eth_txq()
4662 txq->q.sdesc = NULL; in t4_sge_alloc_eth_txq()
4665 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4666 txq->q.desc = NULL; in t4_sge_alloc_eth_txq()
4670 txq->q.q_type = CXGB4_TXQ_ETH; in t4_sge_alloc_eth_txq()
4671 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
4672 txq->txq = netdevq; in t4_sge_alloc_eth_txq()
4673 txq->tso = 0; in t4_sge_alloc_eth_txq()
4674 txq->uso = 0; in t4_sge_alloc_eth_txq()
4675 txq->tx_cso = 0; in t4_sge_alloc_eth_txq()
4676 txq->vlan_ins = 0; in t4_sge_alloc_eth_txq()
4677 txq->mapping_err = 0; in t4_sge_alloc_eth_txq()
4678 txq->dbqt = dbqt; in t4_sge_alloc_eth_txq()
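t4_sge_alloc_eth_txq() shows the full allocate/program/rollback shape: size the ring for its descriptors plus a trailing status page (4601), DMA-allocate it (4603-4607), hand the bus address to firmware in an EQ command (4646), unwind in reverse order on failure (4661-4666), and only then initialize type, netdev binding, and stat counters (4670-4678). A compressed sketch; the unshown alloc_ring() arguments and the send_eq_cmd() mailbox call are assumptions:

    /* Ring length = data descriptors + trailing status page. */
    nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

    txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
                             sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
                             &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
                             dev_to_node(adap->pdev_dev));
    if (!txq->q.desc)
            return -ENOMEM;

    c.eqaddr = cpu_to_be64(txq->q.phys_addr);  /* ring bus address -> firmware */
    ret = send_eq_cmd(adap, &c);               /* hypothetical mailbox write */
    if (ret) {
            kfree(txq->q.sdesc);               /* unwind in reverse order */
            txq->q.sdesc = NULL;
            dma_free_coherent(adap->pdev_dev,
                              nentries * sizeof(struct tx_desc),
                              txq->q.desc, txq->q.phys_addr);
            txq->q.desc = NULL;
            return ret;
    }

    txq->q.q_type = CXGB4_TXQ_ETH;
    txq->txq = netdevq;                        /* bind to the struct netdev_queue */
    txq->tso = txq->uso = txq->tx_cso = 0;     /* zero the per-queue counters */
    txq->vlan_ins = txq->mapping_err = 0;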
4683 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, in t4_sge_alloc_ctrl_txq() argument
4694 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ctrl_txq()
4696 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
4697 sizeof(struct tx_desc), 0, &txq->q.phys_addr, in t4_sge_alloc_ctrl_txq()
4699 if (!txq->q.desc) in t4_sge_alloc_ctrl_txq()
4721 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4727 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4728 txq->q.desc = NULL; in t4_sge_alloc_ctrl_txq()
4732 txq->q.q_type = CXGB4_TXQ_CTRL; in t4_sge_alloc_ctrl_txq()
4733 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
4734 txq->adap = adap; in t4_sge_alloc_ctrl_txq()
4735 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_ctrl_txq()
4736 tasklet_setup(&txq->qresume_tsk, restart_ctrlq); in t4_sge_alloc_ctrl_txq()
4737 txq->full = 0; in t4_sge_alloc_ctrl_txq()
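Unlike Ethernet queues, control queues (and the ULD queues below) can accept work even when the hardware ring is full: excess skbs park on txq->sendq, txq->full is set, and the qresume tasklet (restart_ctrlq() here, restart_ofldq() at 4826) drains the backlog once credits return. A sketch of the sender side of that defer/resume pattern, with hypothetical names for everything the listing does not show:

    /* Hypothetical sender: fall back to the backlog when the ring is
     * full (the driver also serializes on sendq's lock, omitted here). */
    static int ctrlq_send(struct sge_ctrl_txq *txq, struct sk_buff *skb)
    {
            if (txq->full) {
                    __skb_queue_tail(&txq->sendq, skb); /* tasklet sends it later */
                    return NET_XMIT_CN;                 /* assumption: congestion hint */
            }
            /* ... copy the work request into txq->q and ring the doorbell ... */
            return NET_XMIT_SUCCESS;
    }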
4809 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, in t4_sge_alloc_uld_txq() argument
4819 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); in t4_sge_alloc_uld_txq()
4823 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_uld_txq()
4824 txq->adap = adap; in t4_sge_alloc_uld_txq()
4825 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_uld_txq()
4826 tasklet_setup(&txq->qresume_tsk, restart_ofldq); in t4_sge_alloc_uld_txq()
4827 txq->full = 0; in t4_sge_alloc_uld_txq()
4828 txq->mapping_err = 0; in t4_sge_alloc_uld_txq()
4832 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, in t4_sge_alloc_ethofld_txq() argument
4837 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); in t4_sge_alloc_ethofld_txq()
4841 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_ethofld_txq()
4842 spin_lock_init(&txq->lock); in t4_sge_alloc_ethofld_txq()
4843 txq->adap = adap; in t4_sge_alloc_ethofld_txq()
4844 txq->tso = 0; in t4_sge_alloc_ethofld_txq()
4845 txq->uso = 0; in t4_sge_alloc_ethofld_txq()
4846 txq->tx_cso = 0; in t4_sge_alloc_ethofld_txq()
4847 txq->vlan_ins = 0; in t4_sge_alloc_ethofld_txq()
4848 txq->mapping_err = 0; in t4_sge_alloc_ethofld_txq()
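t4_sge_alloc_ethofld_txq() reuses the offload allocation path (4837), but the resulting EOHW queue is not bound to a struct netdev_queue, so it gets its own spinlock (4842) instead of the netdev TX lock. Senders would then serialize along these lines (BH-safe locking is an assumption):

    spin_lock_bh(&txq->lock);
    /* ... reclaim, build the work request, ring the doorbell ... */
    spin_unlock_bh(&txq->lock);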
4907 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq) in t4_sge_free_ethofld_txq() argument
4909 if (txq->q.desc) { in t4_sge_free_ethofld_txq()
4911 txq->q.cntxt_id); in t4_sge_free_ethofld_txq()
4912 free_tx_desc(adap, &txq->q, txq->q.in_use, false); in t4_sge_free_ethofld_txq()
4913 kfree(txq->q.sdesc); in t4_sge_free_ethofld_txq()
4914 free_txq(adap, &txq->q); in t4_sge_free_ethofld_txq()
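t4_sge_free_ethofld_txq() mirrors allocation in reverse: quiesce the firmware EQ first (the call ending at 4911 is keyed by txq->q.cntxt_id), then free packets still held by the ring, then the software state and the DMA ring itself:

    if (txq->q.desc) {                  /* only if the queue was allocated */
            /* assumption: the call truncated at 4911 is a firmware
             * EQ-free command taking txq->q.cntxt_id */
            free_tx_desc(adap, &txq->q, txq->q.in_use, false); /* drop pending skbs */
            kfree(txq->q.sdesc);        /* per-descriptor software state */
            free_txq(adap, &txq->q);    /* frees the DMA ring */
    }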
4956 __netif_tx_lock_bh(etq->txq); in t4_free_sge_resources()
4958 __netif_tx_unlock_bh(etq->txq); in t4_free_sge_resources()
5038 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop() local
5041 if (txq->q.desc) in t4_sge_stop()
5042 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5052 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop() local
5055 if (txq->q.desc) in t4_sge_stop()
5056 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
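t4_sge_stop() repeats the same pattern for two sets of ULD queues: only a queue that was actually allocated (q.desc != NULL) has a live resume tasklet to kill. tasklet_kill() both unschedules the tasklet and waits for any running instance, so after it returns no restart_ofldq() can touch a ring that is about to be freed. A sketch of one pass (loop bounds are assumptions):

    for (i = 0; i < txq_info->ntxq; i++) {
            struct sge_uld_txq *txq = &txq_info->uldtxq[i];

            if (txq->q.desc)                         /* allocated? */
                    tasklet_kill(&txq->qresume_tsk); /* unschedule + wait */
    }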