Lines matching refs: txq
1138 static void txq_stop(struct sge_eth_txq *txq) in txq_stop() argument
1140 netif_tx_stop_queue(txq->txq); in txq_stop()
1141 txq->q.stops++; in txq_stop()
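
The references above group by function, and the t4vf_* names suggest the Chelsio cxgb4vf virtual-function driver's SGE code. The txq_stop() hits (lines 1138-1141) show the helper that stalls a hardware TX queue: it stops the backing kernel netdev_queue and counts the stall. A minimal sketch of that pattern, using simplified *_sketch stand-ins for the driver's structs (only the fields the sketch touches are shown):

#include <linux/netdevice.h>

/* Simplified stand-ins for struct sge_txq / struct sge_eth_txq. */
struct sge_txq_sketch {
        unsigned long stops;            /* times this queue was stopped */
};

struct sge_eth_txq_sketch {
        struct sge_txq_sketch q;        /* descriptor-ring bookkeeping */
        struct netdev_queue *txq;       /* kernel TX queue backing this ring */
};

static void txq_stop_sketch(struct sge_eth_txq_sketch *txq)
{
        netif_tx_stop_queue(txq->txq);  /* stop the stack from queueing more */
        txq->q.stops++;                 /* account the stall for ethtool stats */
}
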
1169 struct sge_eth_txq *txq; in t4vf_eth_xmit() local
1203 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in t4vf_eth_xmit()
1209 reclaim_completed_tx(adapter, &txq->q, true); in t4vf_eth_xmit()
1218 credits = txq_avail(&txq->q) - ndesc; in t4vf_eth_xmit()
1227 txq_stop(txq); in t4vf_eth_xmit()
1241 txq->mapping_err++; in t4vf_eth_xmit()
1256 txq_stop(txq); in t4vf_eth_xmit()
1267 wr = (void *)&txq->q.desc[txq->q.pidx]; in t4vf_eth_xmit()
1323 txq->tso++; in t4vf_eth_xmit()
1324 txq->tx_cso += ssi->gso_segs; in t4vf_eth_xmit()
1341 txq->tx_cso++; in t4vf_eth_xmit()
1351 txq->vlan_ins++; in t4vf_eth_xmit()
1366 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7], in t4vf_eth_xmit()
1368 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags); in t4vf_eth_xmit()
1380 inline_tx_skb(skb, &txq->q, cpl + 1); in t4vf_eth_xmit()
1421 struct sge_txq *tq = &txq->q; in t4vf_eth_xmit()
1450 txq_advance(&txq->q, ndesc); in t4vf_eth_xmit()
1452 ring_tx_db(adapter, &txq->q, ndesc); in t4vf_eth_xmit()
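
Within t4vf_eth_xmit() (lines 1169-1452 above), the first txq-related step is flow control: reclaim_completed_tx() returns finished descriptors to the ring, credits is the free space left after reserving ndesc slots for this packet, and txq_stop() is called either because the packet does not fit (line 1227) or, after queuing it, because the ring has dropped below a stop threshold (line 1256); mapping_err (line 1241) counts DMA-mapping failures on the same path. A hedged sketch of that credit check, with stand-in types and an illustrative threshold:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Stand-in types; only the fields used below. */
struct sge_txq_sketch {
        unsigned int size;              /* descriptors in the ring */
        unsigned int in_use;            /* descriptors currently outstanding */
        unsigned long stops;
};

struct sge_eth_txq_sketch {
        struct sge_txq_sketch q;
        struct netdev_queue *txq;
        unsigned long mapping_err;      /* DMA-mapping failures (line 1241) */
};

/* Illustrative low-water mark; not the driver's actual stop threshold. */
#define TXQ_STOP_THRES_SKETCH   (MAX_SKB_FRAGS + 2)

static inline unsigned int txq_avail_sketch(const struct sge_txq_sketch *q)
{
        return q->size - 1 - q->in_use; /* free slots on the ring */
}

static netdev_tx_t xmit_credit_check_sketch(struct sge_eth_txq_sketch *txq,
                                            unsigned int ndesc)
{
        int credits = txq_avail_sketch(&txq->q) - ndesc;

        if (unlikely(credits < 0)) {
                /* No room for this packet's work request: stop the queue and
                 * report busy; reclaim restarts it once space frees up.
                 */
                netif_tx_stop_queue(txq->txq);
                txq->q.stops++;
                return NETDEV_TX_BUSY;
        }

        if (unlikely(credits < TXQ_STOP_THRES_SKETCH)) {
                /* The packet fits, but the ring is nearly full: stop now.
                 * The real driver also flags the work request so hardware
                 * sends an egress-queue update that triggers a restart.
                 */
                netif_tx_stop_queue(txq->txq);
                txq->q.stops++;
        }
        return NETDEV_TX_OK;
}
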
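
Lines 1323-1351 are the per-queue offload statistics on the same path: a TSO packet bumps tso and adds its gso_segs to tx_cso, a non-TSO packet with a partial checksum bumps tx_cso once, and hardware VLAN insertion bumps vlan_ins. A small sketch of that accounting, with a stand-in stats struct and the actual CPL TSO/checksum header construction elided:

#include <linux/skbuff.h>
#include <linux/if_vlan.h>

/* Stand-in for the offload counters kept in struct sge_eth_txq. */
struct eth_txq_stats_sketch {
        unsigned long tso;              /* TSO work requests sent */
        unsigned long tx_cso;           /* checksum offloads requested */
        unsigned long vlan_ins;         /* hardware VLAN-tag insertions */
};

static void count_tx_offloads_sketch(const struct sk_buff *skb,
                                     struct eth_txq_stats_sketch *st)
{
        const struct skb_shared_info *ssi = skb_shinfo(skb);

        if (ssi->gso_size) {
                st->tso++;                      /* one TSO request ... */
                st->tx_cso += ssi->gso_segs;    /* ... checksummed per segment */
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                st->tx_cso++;                   /* single checksum offload */
        }

        if (skb_vlan_tag_present(skb))
                st->vlan_ins++;                 /* hardware inserts the tag */
}
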
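
Lines 1380-1452 finish the transmit: small packets are copied inline into the work request (inline_tx_skb()), larger ones are attached through a scatter/gather list built over the ring (the tq pointer at line 1421), then txq_advance() moves the producer index and ring_tx_db() notifies hardware. A sketch of the advance-and-kick step; the doorbell word layout and offset below are assumptions, whereas the real ring_tx_db() chooses between a legacy doorbell register and a BAR2 user doorbell:

#include <linux/io.h>
#include <asm/barrier.h>

/* Stand-in ring state; only the fields used below. */
struct sge_txq_sketch {
        unsigned int size;              /* descriptors in the ring */
        unsigned int in_use;            /* descriptors outstanding */
        unsigned int pidx;              /* producer index (next slot to fill) */
        unsigned int bar2_qid;          /* queue id used with the BAR2 doorbell */
        void __iomem *bar2_addr;        /* mapped BAR2 doorbell region */
};

/* Account n just-written descriptors and wrap the producer index. */
static inline void txq_advance_sketch(struct sge_txq_sketch *q, unsigned int n)
{
        q->in_use += n;
        q->pidx += n;
        if (q->pidx >= q->size)
                q->pidx -= q->size;
}

/* Tell hardware about n new descriptors.  The doorbell word layout and
 * register offset here are illustrative assumptions only.
 */
static inline void ring_tx_db_sketch(struct sge_txq_sketch *q, unsigned int n)
{
        wmb();  /* descriptors must be visible before the doorbell write */
        writel((q->bar2_qid << 16) | n, q->bar2_addr /* + doorbell offset */);
}
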
2129 struct sge_eth_txq *txq = &s->ethtxq[i]; in sge_tx_timer_cb() local
2131 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) { in sge_tx_timer_cb()
2132 int avail = reclaimable(&txq->q); in sge_tx_timer_cb()
2137 free_tx_desc(adapter, &txq->q, avail, true); in sge_tx_timer_cb()
2138 txq->q.in_use -= avail; in sge_tx_timer_cb()
2139 __netif_tx_unlock(txq->txq); in sge_tx_timer_cb()
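
The sge_tx_timer_cb() hits (lines 2129-2139) show deferred reclaim: a periodic timer walks the Ethernet TX queues and, only when it can take a queue's TX lock without blocking, frees completed descriptors up to a budget, so it never stalls a concurrent transmitter. A sketch of that pattern, with reclaimable() and free_tx_desc() reduced to stand-ins:

#include <linux/netdevice.h>

/* Stand-in queue state; 'cleaned' approximates how many descriptors the
 * hardware has completed (the real reclaimable() derives this from the
 * consumer index in the ring's status page).
 */
struct sge_txq_sketch {
        unsigned int in_use;            /* descriptors outstanding */
        unsigned int cleaned;           /* completed but not yet reclaimed */
};

struct sge_eth_txq_sketch {
        struct sge_txq_sketch q;
        struct netdev_queue *txq;
};

static unsigned int reclaimable_sketch(const struct sge_txq_sketch *q)
{
        return q->cleaned;
}

static void tx_timer_reclaim_sketch(struct sge_eth_txq_sketch *ethtxq,
                                    unsigned int nqueues, int budget)
{
        unsigned int i;

        for (i = 0; i < nqueues; i++) {
                struct sge_eth_txq_sketch *txq = &ethtxq[i];

                /* Skip the queue entirely if a transmitter holds its lock. */
                if (reclaimable_sketch(&txq->q) &&
                    __netif_tx_trylock(txq->txq)) {
                        int avail = reclaimable_sketch(&txq->q);

                        if (avail > budget)
                                avail = budget;

                        /* free_tx_desc() would unmap and free avail skbs here */
                        txq->q.cleaned -= avail;
                        txq->q.in_use -= avail;
                        __netif_tx_unlock(txq->txq);

                        budget -= avail;
                        if (!budget)
                                break;
                }
        }
}
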
2403 int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, in t4vf_sge_alloc_eth_txq() argument
2416 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4vf_sge_alloc_eth_txq()
2422 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, in t4vf_sge_alloc_eth_txq()
2425 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len); in t4vf_sge_alloc_eth_txq()
2426 if (!txq->q.desc) in t4vf_sge_alloc_eth_txq()
2456 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4vf_sge_alloc_eth_txq()
2468 kfree(txq->q.sdesc); in t4vf_sge_alloc_eth_txq()
2469 txq->q.sdesc = NULL; in t4vf_sge_alloc_eth_txq()
2472 txq->q.desc, txq->q.phys_addr); in t4vf_sge_alloc_eth_txq()
2473 txq->q.desc = NULL; in t4vf_sge_alloc_eth_txq()
2477 txq->q.in_use = 0; in t4vf_sge_alloc_eth_txq()
2478 txq->q.cidx = 0; in t4vf_sge_alloc_eth_txq()
2479 txq->q.pidx = 0; in t4vf_sge_alloc_eth_txq()
2480 txq->q.stat = (void *)&txq->q.desc[txq->q.size]; in t4vf_sge_alloc_eth_txq()
2481 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd)); in t4vf_sge_alloc_eth_txq()
2482 txq->q.bar2_addr = bar2_address(adapter, in t4vf_sge_alloc_eth_txq()
2483 txq->q.cntxt_id, in t4vf_sge_alloc_eth_txq()
2485 &txq->q.bar2_qid); in t4vf_sge_alloc_eth_txq()
2486 txq->q.abs_id = in t4vf_sge_alloc_eth_txq()
2488 txq->txq = devq; in t4vf_sge_alloc_eth_txq()
2489 txq->tso = 0; in t4vf_sge_alloc_eth_txq()
2490 txq->tx_cso = 0; in t4vf_sge_alloc_eth_txq()
2491 txq->vlan_ins = 0; in t4vf_sge_alloc_eth_txq()
2492 txq->q.stops = 0; in t4vf_sge_alloc_eth_txq()
2493 txq->q.restarts = 0; in t4vf_sge_alloc_eth_txq()
2494 txq->mapping_err = 0; in t4vf_sge_alloc_eth_txq()
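
In t4vf_sge_alloc_eth_txq() (lines 2403-2494), the ring is sized for q.size descriptors plus a trailing status page (s->stat_len), allocated DMA-coherent alongside a software descriptor array (alloc_ring() at 2422-2425), handed to firmware by its bus address (line 2456), and unwound if the firmware step fails (2468-2473); on success the producer/consumer indices, status-page pointer, queue ids and statistics are initialised (2477-2494). A hedged sketch of the allocation and reset, with the FW_EQ_ETH_CMD exchange elided and simplified stand-in types:

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/skbuff.h>

struct tx_desc_sketch { __be64 flit[8]; };              /* 64-byte hardware descriptor */
struct tx_sw_desc_sketch { struct sk_buff *skb; };      /* per-slot software state */

struct sge_txq_sketch {
        unsigned int size, in_use, cidx, pidx;
        struct tx_desc_sketch *desc;    /* DMA-coherent descriptor ring */
        struct tx_sw_desc_sketch *sdesc;/* parallel software descriptor array */
        dma_addr_t phys_addr;           /* bus address handed to firmware */
        void *stat;                     /* status page after the last descriptor */
};

static int alloc_eth_txq_sketch(struct device *dev, struct sge_txq_sketch *q,
                                unsigned int stat_len)
{
        size_t len = q->size * sizeof(struct tx_desc_sketch) + stat_len;

        q->sdesc = kcalloc(q->size, sizeof(*q->sdesc), GFP_KERNEL);
        if (!q->sdesc)
                return -ENOMEM;

        q->desc = dma_alloc_coherent(dev, len, &q->phys_addr, GFP_KERNEL);
        if (!q->desc) {
                /* Unwind: free the software array and leave the pointers
                 * NULL so a later teardown pass can skip this queue.
                 */
                kfree(q->sdesc);
                q->sdesc = NULL;
                return -ENOMEM;
        }

        /* The real driver now issues FW_EQ_ETH_CMD with q->phys_addr so the
         * firmware creates the egress queue and returns its context id.
         */

        q->in_use = 0;
        q->cidx = 0;                            /* consumer index */
        q->pidx = 0;                            /* producer index */
        q->stat = (void *)&q->desc[q->size];    /* status page follows the ring */
        return 0;
}
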
2555 struct sge_eth_txq *txq = s->ethtxq; in t4vf_free_sge_resources() local
2560 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { in t4vf_free_sge_resources()
2563 if (txq->q.desc) { in t4vf_free_sge_resources()
2564 t4vf_eth_eq_free(adapter, txq->q.cntxt_id); in t4vf_free_sge_resources()
2565 free_tx_desc(adapter, &txq->q, txq->q.in_use, true); in t4vf_free_sge_resources()
2566 kfree(txq->q.sdesc); in t4vf_free_sge_resources()
2567 free_txq(adapter, &txq->q); in t4vf_free_sge_resources()
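
Finally, t4vf_free_sge_resources() (lines 2555-2567) tears each queue down in the reverse order: only queues that were actually created (q.desc non-NULL) are touched, firmware destroys the egress queue first (t4vf_eth_eq_free()), any skbs still held by in-flight descriptors are unmapped and freed (free_tx_desc() with q.in_use), then the software descriptor array and the DMA ring are released. A sketch of that ordering, with the firmware and skb-unmap steps left as comments:

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/skbuff.h>

struct tx_desc_sketch { __be64 flit[8]; };
struct tx_sw_desc_sketch { struct sk_buff *skb; };

struct sge_txq_sketch {
        unsigned int size, in_use;
        struct tx_desc_sketch *desc;
        struct tx_sw_desc_sketch *sdesc;
        dma_addr_t phys_addr;
};

static void free_eth_txq_sketch(struct device *dev, struct sge_txq_sketch *q,
                                unsigned int stat_len)
{
        size_t len = q->size * sizeof(struct tx_desc_sketch) + stat_len;

        if (!q->desc)                   /* queue was never brought up */
                return;

        /* 1. t4vf_eth_eq_free(): ask firmware to destroy the egress queue so
         *    hardware stops touching the ring.
         * 2. free_tx_desc(..., q->in_use, ...): unmap and free any skbs still
         *    referenced by outstanding descriptors.
         */
        kfree(q->sdesc);                /* 3. software descriptor array */
        q->sdesc = NULL;
        dma_free_coherent(dev, len, q->desc, q->phys_addr);     /* 4. the ring */
        q->desc = NULL;
}
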