Lines matching refs: txq

1234 netif_tx_stop_queue(q->txq); in eth_txq_stop()
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq)) in t4_sge_eth_txq_egress_update()
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { in t4_sge_eth_txq_egress_update()
1442 netif_tx_wake_queue(eq->txq); in t4_sge_eth_txq_egress_update()
1446 __netif_tx_unlock(eq->txq); in t4_sge_eth_txq_egress_update()
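
The two functions above form a stop/wake pair: eth_txq_stop() parks the netdev
queue when the xmit path runs short of descriptors, and
t4_sge_eth_txq_egress_update() wakes it only once hardware has drained the ring
below half occupancy (line 1441). A minimal userspace model of that hysteresis
follows; every name in it is hypothetical, not driver API.

#include <stdbool.h>

struct model_txq {
	unsigned int size;    /* ring capacity in descriptors */
	unsigned int in_use;  /* descriptors still owned by hardware */
	bool stopped;         /* mirrors netif_tx_queue_stopped() */
};

/* Completion-path wake check: waking only below size/2 gives the
 * stop/wake decision hysteresis and avoids ping-ponging at the
 * full mark. */
static void model_egress_update(struct model_txq *q, unsigned int hw_in_use)
{
	q->in_use = hw_in_use;
	if (q->stopped && hw_in_use < q->size / 2)
		q->stopped = false;	/* i.e. netif_tx_wake_queue() */
}
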
1832 struct sge_eth_txq *txq; in cxgb4_vf_eth_xmit() local
1856 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit()
1861 reclaim_completed_tx(adapter, &txq->q, -1, true); in cxgb4_vf_eth_xmit()
1869 credits = txq_avail(&txq->q) - ndesc; in cxgb4_vf_eth_xmit()
1877 eth_txq_stop(txq); in cxgb4_vf_eth_xmit()
1884 last_desc = txq->q.pidx + ndesc - 1; in cxgb4_vf_eth_xmit()
1885 if (last_desc >= txq->q.size) in cxgb4_vf_eth_xmit()
1886 last_desc -= txq->q.size; in cxgb4_vf_eth_xmit()
1887 sgl_sdesc = &txq->q.sdesc[last_desc]; in cxgb4_vf_eth_xmit()
1897 txq->mapping_err++; in cxgb4_vf_eth_xmit()
1912 eth_txq_stop(txq); in cxgb4_vf_eth_xmit()
1923 wr = (void *)&txq->q.desc[txq->q.pidx]; in cxgb4_vf_eth_xmit()
1975 txq->tso++; in cxgb4_vf_eth_xmit()
1976 txq->tx_cso += ssi->gso_segs; in cxgb4_vf_eth_xmit()
1994 txq->tx_cso++; in cxgb4_vf_eth_xmit()
2004 txq->vlan_ins++; in cxgb4_vf_eth_xmit()
2023 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); in cxgb4_vf_eth_xmit()
2063 struct sge_txq *tq = &txq->q; in cxgb4_vf_eth_xmit()
2085 txq_advance(&txq->q, ndesc); in cxgb4_vf_eth_xmit()
2087 cxgb4_ring_tx_db(adapter, &txq->q, ndesc); in cxgb4_vf_eth_xmit()
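
The cxgb4_vf_eth_xmit() excerpts trace the hot path: reclaim completed
descriptors, check remaining credits, stop the queue if the packet would
exhaust them (line 1877), place the work request at the producer index,
account TSO/checksum/VLAN offload stats, and ring the doorbell. The two pieces
of arithmetic worth modelling are the credit check and the wrapped index of
the packet's last descriptor; a sketch, assuming the common
one-slot-kept-free convention for txq_avail():

struct model_ring {
	unsigned int size;   /* descriptors in the ring */
	unsigned int pidx;   /* producer index, always < size */
	unsigned int in_use; /* outstanding descriptors */
};

/* Assumed definition of txq_avail(): one slot stays free so a full
 * ring and an empty ring remain distinguishable. */
static unsigned int model_txq_avail(const struct model_ring *q)
{
	return q->size - 1 - q->in_use;
}

/* Wrapped index of the packet's last descriptor, exactly the
 * computation on lines 1884-1886. */
static unsigned int model_last_desc(const struct model_ring *q,
				    unsigned int ndesc)
{
	unsigned int last = q->pidx + ndesc - 1;

	if (last >= q->size)
		last -= q->size;
	return last;
}
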
2685 __netif_tx_lock(q->txq, smp_processor_id()); in cxgb4_selftest_lb_pkt()
2690 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
2725 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
2952 struct sge_txq *txq; in service_ofldq() local
3002 txq = &q->q; in service_ofldq()
3007 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3008 end = (void *)txq->desc + left; in service_ofldq()
3015 if (pos == (u64 *)txq->stat) { in service_ofldq()
3016 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3017 end = (void *)txq->desc + left; in service_ofldq()
3018 pos = (void *)txq->desc; in service_ofldq()
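
service_ofldq() copies work requests into the ring by pointer, and a request
written near the top can run into the status page that sits just past the
last descriptor (txq->stat). Lines 3015-3018 handle that case by carrying the
overshoot back to the start of the ring. The same logic in self-contained
form, with stand-in parameters:

#include <stddef.h>
#include <stdint.h>

/* If the write position has reached the status page, redirect the
 * remaining bytes of the request to the first descriptor. */
static void model_wrap(uint64_t *desc, const uint64_t *stat,
		       uint64_t **pos, void **end)
{
	if (*pos == stat) {
		ptrdiff_t left = (uint8_t *)*end - (const uint8_t *)stat;

		*end = (uint8_t *)desc + left;
		*pos = desc;
	}
}
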
3132 struct sge_uld_txq *txq; in uld_send() local
3149 txq = &txq_info->uldtxq[idx]; in uld_send()
3150 return ofld_xmit(txq, skb); in uld_send()
3262 struct sge_uld_txq *txq; in cxgb4_immdata_send() local
3275 txq = &txq_info->uldtxq[idx]; in cxgb4_immdata_send()
3277 ret = ofld_xmit_direct(txq, src, len); in cxgb4_immdata_send()
3596 struct sge_eth_txq *txq; in t4_tx_completion_handler() local
3617 txq = &s->ethtxq[pi->first_qset + rspq->idx]; in t4_tx_completion_handler()
3634 WRITE_ONCE(txq->q.stat->cidx, egr->cidx); in t4_tx_completion_handler()
3637 t4_sge_eth_txq_egress_update(adapter, txq, -1); in t4_tx_completion_handler()
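
t4_tx_completion_handler() shows the other half of the flow control: the
completion message carries the hardware consumer index, which the driver
publishes into the ring's status page (line 3634) before asking
t4_sge_eth_txq_egress_update() to reclaim and possibly wake the queue.
Outstanding usage then falls out of the pidx/cidx distance; a sketch with
hypothetical names:

/* (pidx - cidx) mod size, written without '%' since both indices
 * always stay below size. */
static unsigned int model_hw_in_use(unsigned int pidx, unsigned int cidx,
				    unsigned int size)
{
	return pidx >= cidx ? pidx - cidx : pidx + size - cidx;
}
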
4291 struct sge_uld_txq *txq = s->egr_map[id]; in sge_tx_timer_cb() local
4294 tasklet_schedule(&txq->qresume_tsk); in sge_tx_timer_cb()
4588 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, in t4_sge_alloc_eth_txq() argument
4599 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
4601 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
4603 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
4605 if (!txq->q.desc) in t4_sge_alloc_eth_txq()
4644 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4655 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); in t4_sge_alloc_eth_txq()
4659 kfree(txq->q.sdesc); in t4_sge_alloc_eth_txq()
4660 txq->q.sdesc = NULL; in t4_sge_alloc_eth_txq()
4663 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4664 txq->q.desc = NULL; in t4_sge_alloc_eth_txq()
4668 txq->q.q_type = CXGB4_TXQ_ETH; in t4_sge_alloc_eth_txq()
4669 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
4670 txq->txq = netdevq; in t4_sge_alloc_eth_txq()
4671 txq->tso = 0; in t4_sge_alloc_eth_txq()
4672 txq->uso = 0; in t4_sge_alloc_eth_txq()
4673 txq->tx_cso = 0; in t4_sge_alloc_eth_txq()
4674 txq->vlan_ins = 0; in t4_sge_alloc_eth_txq()
4675 txq->mapping_err = 0; in t4_sge_alloc_eth_txq()
4676 txq->dbqt = dbqt; in t4_sge_alloc_eth_txq()
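
t4_sge_alloc_eth_txq() sizes the ring as the requested descriptor count plus
room for the hardware status page (nentries on line 4599; the ctrl allocator
below does the same on line 4692). A userspace stand-in for that sizing, with
calloc() in place of the DMA-coherent allocation and an assumed 64-byte
descriptor:

#include <stdlib.h>

struct model_tx_desc { unsigned char flit[64]; };  /* size assumed */

static struct model_tx_desc *model_alloc_ring(unsigned int qsize,
					      unsigned int stat_len)
{
	/* Extra entries so the status page fits past the last real
	 * descriptor, mirroring the nentries computation. */
	unsigned int nentries = qsize +
		stat_len / sizeof(struct model_tx_desc);

	return calloc(nentries, sizeof(struct model_tx_desc));
}
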
4681 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, in t4_sge_alloc_ctrl_txq() argument
4692 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ctrl_txq()
4694 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
4695 sizeof(struct tx_desc), 0, &txq->q.phys_addr, in t4_sge_alloc_ctrl_txq()
4697 if (!txq->q.desc) in t4_sge_alloc_ctrl_txq()
4719 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4725 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4726 txq->q.desc = NULL; in t4_sge_alloc_ctrl_txq()
4730 txq->q.q_type = CXGB4_TXQ_CTRL; in t4_sge_alloc_ctrl_txq()
4731 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
4732 txq->adap = adap; in t4_sge_alloc_ctrl_txq()
4733 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_ctrl_txq()
4734 tasklet_setup(&txq->qresume_tsk, restart_ctrlq); in t4_sge_alloc_ctrl_txq()
4735 txq->full = 0; in t4_sge_alloc_ctrl_txq()
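
The control and ULD queues share a deferred-send pattern: when the ring is
full, packets park on sendq and the qresume tasklet (restart_ctrlq or
restart_ofldq, rescheduled from sge_tx_timer_cb() at line 4294) drains the
backlog once credits return. A plain-C miniature of that resume step, with
stand-in fields:

#include <stdbool.h>

struct model_ctrlq {
	bool full;    /* set when the ring ran out of credits */
	int backlog;  /* stands in for skb_queue_len(&sendq) */
};

/* The resume tasklet in miniature: drain while credits last, and
 * clear the full flag only when the backlog is empty. */
static void model_qresume(struct model_ctrlq *q, int credits)
{
	while (q->backlog > 0 && credits-- > 0)
		q->backlog--;
	if (q->backlog == 0)
		q->full = false;
}
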
4807 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, in t4_sge_alloc_uld_txq() argument
4817 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); in t4_sge_alloc_uld_txq()
4821 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_uld_txq()
4822 txq->adap = adap; in t4_sge_alloc_uld_txq()
4823 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_uld_txq()
4824 tasklet_setup(&txq->qresume_tsk, restart_ofldq); in t4_sge_alloc_uld_txq()
4825 txq->full = 0; in t4_sge_alloc_uld_txq()
4826 txq->mapping_err = 0; in t4_sge_alloc_uld_txq()
4830 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, in t4_sge_alloc_ethofld_txq() argument
4835 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); in t4_sge_alloc_ethofld_txq()
4839 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_ethofld_txq()
4840 spin_lock_init(&txq->lock); in t4_sge_alloc_ethofld_txq()
4841 txq->adap = adap; in t4_sge_alloc_ethofld_txq()
4842 txq->tso = 0; in t4_sge_alloc_ethofld_txq()
4843 txq->uso = 0; in t4_sge_alloc_ethofld_txq()
4844 txq->tx_cso = 0; in t4_sge_alloc_ethofld_txq()
4845 txq->vlan_ins = 0; in t4_sge_alloc_ethofld_txq()
4846 txq->mapping_err = 0; in t4_sge_alloc_ethofld_txq()
4905 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq) in t4_sge_free_ethofld_txq() argument
4907 if (txq->q.desc) { in t4_sge_free_ethofld_txq()
4909 txq->q.cntxt_id); in t4_sge_free_ethofld_txq()
4910 free_tx_desc(adap, &txq->q, txq->q.in_use, false); in t4_sge_free_ethofld_txq()
4911 kfree(txq->q.sdesc); in t4_sge_free_ethofld_txq()
4912 free_txq(adap, &txq->q); in t4_sge_free_ethofld_txq()
4954 __netif_tx_lock_bh(etq->txq); in t4_free_sge_resources()
4956 __netif_tx_unlock_bh(etq->txq); in t4_free_sge_resources()
5036 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop() local
5039 if (txq->q.desc) in t4_sge_stop()
5040 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5050 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop() local
5053 if (txq->q.desc) in t4_sge_stop()
5054 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
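
Teardown follows allocation state: t4_sge_stop() runs tasklet_kill() only on
ULD queues whose descriptor ring was actually allocated, which is why both
loops gate on txq->q.desc. The same guard in sketch form, illustrative only:

#include <stdbool.h>

struct model_uldq {
	void *desc;        /* non-NULL once the ring is allocated */
	bool tasklet_live; /* stands in for the qresume tasklet */
};

static void model_sge_stop(struct model_uldq *q, unsigned int nq)
{
	for (unsigned int i = 0; i < nq; i++, q++)
		if (q->desc)			 /* ring exists, so a  */
			q->tasklet_live = false; /* tasklet may be live */
}
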