
Searched refs:txq (Results 1 – 25 of 286) sorted by relevance


/drivers/net/wwan/t7xx/
t7xx_hif_dpmaif_tx.c
54 struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; in t7xx_dpmaif_update_drb_rd_idx() local
58 if (!txq->que_started) in t7xx_dpmaif_update_drb_rd_idx()
61 old_sw_rd_idx = txq->drb_rd_idx; in t7xx_dpmaif_update_drb_rd_idx()
71 drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx; in t7xx_dpmaif_update_drb_rd_idx()
73 spin_lock_irqsave(&txq->tx_lock, flags); in t7xx_dpmaif_update_drb_rd_idx()
74 txq->drb_rd_idx = new_hw_rd_idx; in t7xx_dpmaif_update_drb_rd_idx()
75 spin_unlock_irqrestore(&txq->tx_lock, flags); in t7xx_dpmaif_update_drb_rd_idx()
83 struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; in t7xx_dpmaif_release_tx_buffer() local
90 drb_skb_base = txq->drb_skb_base; in t7xx_dpmaif_release_tx_buffer()
91 drb_base = txq->drb_base; in t7xx_dpmaif_release_tx_buffer()
[all …]
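
The delta at line 71 above is the wraparound branch of a standard circular-ring computation: when the hardware read index has passed the end of the ring, the consumed count is the distance to the ring end plus the new index. A minimal standalone sketch of that arithmetic, with illustrative names rather than the driver's:

    /* Entries consumed between two read indices of a ring with
     * ring_size slots, allowing for one wrap of the hardware index. */
    static unsigned int ring_consumed(unsigned int old_rd, unsigned int new_rd,
                                      unsigned int ring_size)
    {
            if (new_rd >= old_rd)
                    return new_rd - old_rd;             /* no wrap */
            return ring_size - old_rd + new_rd;         /* wrapped */
    }
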
/drivers/infiniband/hw/hfi1/
ipoib_tx.c
29 struct hfi1_ipoib_txq *txq; member
47 static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_used() argument
49 return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, in hfi1_ipoib_used()
50 txq->tx_ring.complete_txreqs); in hfi1_ipoib_used()
53 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_stop_txq() argument
55 trace_hfi1_txq_stop(txq); in hfi1_ipoib_stop_txq()
56 if (atomic_inc_return(&txq->tx_ring.stops) == 1) in hfi1_ipoib_stop_txq()
57 netif_stop_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_stop_txq()
60 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_wake_txq() argument
62 trace_hfi1_txq_wake(txq); in hfi1_ipoib_wake_txq()
[all …]
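
hfi1_ipoib_stop_txq stops the subqueue only when its stops counter goes 0 to 1, so several independent stop reasons can stack and the queue wakes only when the last one is released. A hedged standalone sketch of that refcounted stop/wake pattern, with C11 atomics standing in for the kernel's atomic_t (all names illustrative):

    #include <stdatomic.h>

    struct txq_ctl {
            atomic_int stops;       /* active stop requests on this queue */
    };

    /* Stop the hardware queue only on the first stop request. */
    static void txq_ctl_stop(struct txq_ctl *q, void (*stop_hw)(void))
    {
            if (atomic_fetch_add(&q->stops, 1) == 0)
                    stop_hw();
    }

    /* Wake it only when the last stop request is released. */
    static void txq_ctl_wake(struct txq_ctl *q, void (*wake_hw)(void))
    {
            if (atomic_fetch_sub(&q->stops, 1) == 1)
                    wake_hw();
    }
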
trace_tx.h
897 TP_PROTO(struct hfi1_ipoib_txq *txq),
898 TP_ARGS(txq),
900 DD_DEV_ENTRY(txq->priv->dd)
901 __field(struct hfi1_ipoib_txq *, txq)
913 DD_DEV_ASSIGN(txq->priv->dd);
914 __entry->txq = txq;
915 __entry->sde = txq->sde;
916 __entry->head = txq->tx_ring.head;
917 __entry->tail = txq->tx_ring.tail;
918 __entry->idx = txq->q_idx;
[all …]
/drivers/net/wireless/intel/iwlwifi/queue/
tx.c
23 struct iwl_txq *txq, u16 byte_cnt, in iwl_pcie_gen2_update_byte_tbl() argument
26 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_pcie_gen2_update_byte_tbl()
31 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) in iwl_pcie_gen2_update_byte_tbl()
47 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; in iwl_pcie_gen2_update_byte_tbl()
55 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; in iwl_pcie_gen2_update_byte_tbl()
69 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_inc_wr_ptr() argument
71 lockdep_assert_held(&txq->lock); in iwl_txq_inc_wr_ptr()
73 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); in iwl_txq_inc_wr_ptr()
79 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); in iwl_txq_inc_wr_ptr()
162 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_gen2_free_tfd() argument
[all …]
tx.h
16 iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx) in iwl_txq_get_first_tb_dma() argument
18 return txq->first_tb_dma + in iwl_txq_get_first_tb_dma()
30 struct iwl_txq *txq) in iwl_wake_queue() argument
32 if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) { in iwl_wake_queue()
33 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); in iwl_wake_queue()
34 iwl_op_mode_queue_not_full(trans->op_mode, txq->id); in iwl_wake_queue()
39 struct iwl_txq *txq, int idx) in iwl_txq_get_tfd() argument
42 idx = iwl_txq_get_cmd_index(txq, idx); in iwl_txq_get_tfd()
44 return (u8 *)txq->tfds + trans->txqs.tfd.size * idx; in iwl_txq_get_tfd()
47 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
[all …]
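
iwl_wake_queue (line 32 above) notifies the op mode only if it is the caller that atomically clears the queue_stopped bit, so concurrent wakers cannot double-report a queue as not-full. A standalone sketch of that claim-by-clearing idiom, with a C11 atomic flag standing in for the kernel's test_and_clear_bit() (illustrative, not the driver's API):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* True only for the single caller that flips stopped from 1 to 0;
     * every other concurrent caller sees false and skips the wake. */
    static bool claim_wake(atomic_bool *stopped)
    {
            return atomic_exchange(stopped, false);
    }
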
/drivers/net/wireless/intel/iwlwifi/pcie/
tx.c
73 struct iwl_txq *txq) in iwl_pcie_txq_inc_wr_ptr() argument
76 int txq_id = txq->id; in iwl_pcie_txq_inc_wr_ptr()
78 lockdep_assert_held(&txq->lock); in iwl_pcie_txq_inc_wr_ptr()
101 txq->need_update = true; in iwl_pcie_txq_inc_wr_ptr()
110 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); in iwl_pcie_txq_inc_wr_ptr()
111 if (!txq->block) in iwl_pcie_txq_inc_wr_ptr()
113 txq->write_ptr | (txq_id << 8)); in iwl_pcie_txq_inc_wr_ptr()
121 struct iwl_txq *txq = trans->txqs.txq[i]; in iwl_pcie_txq_check_wrptrs() local
126 spin_lock_bh(&txq->lock); in iwl_pcie_txq_check_wrptrs()
127 if (txq->need_update) { in iwl_pcie_txq_check_wrptrs()
[all …]
tx-gen2.c
31 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; in iwl_pcie_gen2_enqueue_hcmd() local
112 spin_lock_irqsave(&txq->lock, flags); in iwl_pcie_gen2_enqueue_hcmd()
114 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_pcie_gen2_enqueue_hcmd()
115 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); in iwl_pcie_gen2_enqueue_hcmd()
118 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { in iwl_pcie_gen2_enqueue_hcmd()
119 spin_unlock_irqrestore(&txq->lock, flags); in iwl_pcie_gen2_enqueue_hcmd()
127 out_cmd = txq->entries[idx].cmd; in iwl_pcie_gen2_enqueue_hcmd()
128 out_meta = &txq->entries[idx].meta; in iwl_pcie_gen2_enqueue_hcmd()
144 INDEX_TO_SEQ(txq->write_ptr)); in iwl_pcie_gen2_enqueue_hcmd()
191 cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); in iwl_pcie_gen2_enqueue_hcmd()
[all …]
/drivers/net/ethernet/huawei/hinic/
hinic_tx.c
77 static void hinic_txq_clean_stats(struct hinic_txq *txq) in hinic_txq_clean_stats() argument
79 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_clean_stats()
96 void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) in hinic_txq_get_stats() argument
98 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_get_stats()
116 static void txq_stats_init(struct hinic_txq *txq) in txq_stats_init() argument
118 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in txq_stats_init()
121 hinic_txq_clean_stats(txq); in txq_stats_init()
499 struct hinic_txq *txq; in hinic_lb_xmit_frame() local
502 txq = &nic_dev->txqs[q_id]; in hinic_lb_xmit_frame()
503 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_lb_xmit_frame()
[all …]
/drivers/net/wireless/ath/ath9k/
xmit.c
56 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
59 int tx_flags, struct ath_txq *txq,
62 struct ath_txq *txq, struct list_head *bf_q,
65 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
73 struct ath_txq *txq,
107 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) in ath_txq_unlock_complete() argument
108 __releases(&txq->axq_lock) in ath_txq_unlock_complete()
115 skb_queue_splice_init(&txq->complete_q, &q); in ath_txq_unlock_complete()
116 spin_unlock_bh(&txq->axq_lock); in ath_txq_unlock_complete()
135 struct ath_txq *txq = tid->txq; in ath9k_wake_tx_queue() local
[all …]
/drivers/net/ethernet/marvell/
mv643xx_eth.c
179 #define IS_TSO_HEADER(txq, addr) \ argument
180 ((addr >= txq->tso_hdrs_dma) && \
181 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
408 struct tx_queue txq[8]; member
446 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) in txq_to_mp() argument
448 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); in txq_to_mp()
467 static void txq_reset_hw_ptr(struct tx_queue *txq) in txq_reset_hw_ptr() argument
469 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reset_hw_ptr()
472 addr = (u32)txq->tx_desc_dma; in txq_reset_hw_ptr()
473 addr += txq->tx_curr_desc * sizeof(struct tx_desc); in txq_reset_hw_ptr()
[all …]
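
IS_TSO_HEADER works because all TSO headers for a queue are carved from one contiguous DMA region of tx_ring_size * TSO_HEADER_SIZE bytes, so "was this buffer a TSO header?" reduces to an address range check at unmap time. A hedged sketch; the real TSO_HEADER_SIZE comes from <net/tso.h>, the value below is only a placeholder:

    #include <stdbool.h>
    #include <stdint.h>

    #define HDR_SLOT_SIZE 128   /* placeholder for the kernel's TSO_HEADER_SIZE */

    /* Is addr inside the queue's contiguous TSO-header DMA region? */
    static bool is_tso_header(uint64_t addr, uint64_t hdrs_base,
                              unsigned int ring_size)
    {
            return addr >= hdrs_base &&
                   addr < hdrs_base + (uint64_t)ring_size * HDR_SLOT_SIZE;
    }
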
mvneta.c
135 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) argument
774 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) in mvneta_txq_inc_get() argument
776 txq->txq_get_index++; in mvneta_txq_inc_get()
777 if (txq->txq_get_index == txq->size) in mvneta_txq_inc_get()
778 txq->txq_get_index = 0; in mvneta_txq_inc_get()
782 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) in mvneta_txq_inc_put() argument
784 txq->txq_put_index++; in mvneta_txq_inc_put()
785 if (txq->txq_put_index == txq->size) in mvneta_txq_inc_put()
786 txq->txq_put_index = 0; in mvneta_txq_inc_put()
964 struct mvneta_tx_queue *txq, in mvneta_txq_pend_desc_add() argument
[all …]
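
mvneta_txq_inc_get/put advance their ring indices with a compare-and-reset instead of a modulo, avoiding a division per descriptor. The same step in standalone form:

    /* Advance a ring index by one, wrapping to 0 at size. */
    static unsigned int ring_idx_inc(unsigned int idx, unsigned int size)
    {
            return (idx + 1 == size) ? 0 : idx + 1;
    }
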
/drivers/net/ethernet/qlogic/qede/
qede_fp.c
76 int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) in qede_free_tx_pkt() argument
78 u16 idx = txq->sw_tx_cons; in qede_free_tx_pkt()
79 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
84 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
90 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt()
96 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
104 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
114 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
120 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
124 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
[all …]
qede_main.c
544 struct qede_fastpath *fp, struct qede_tx_queue *txq) in qede_txq_fp_log_metadata() argument
546 struct qed_chain *p_chain = &txq->tx_pbl; in qede_txq_fp_log_metadata()
551 fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos, in qede_txq_fp_log_metadata()
557 le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons, in qede_txq_fp_log_metadata()
562 qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq) in qede_tx_log_print() argument
574 sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]); in qede_tx_log_print()
579 txq->index, le16_to_cpu(*txq->hw_cons_ptr), in qede_tx_log_print()
580 qed_chain_get_cons_idx(&txq->tx_pbl), in qede_tx_log_print()
581 qed_chain_get_prod_idx(&txq->tx_pbl), jiffies); in qede_tx_log_print()
585 txq->index, fp->sb_info->igu_sb_id, in qede_tx_log_print()
[all …]
/drivers/net/ethernet/atheros/alx/
main.c
53 static void alx_free_txbuf(struct alx_tx_queue *txq, int entry) in alx_free_txbuf() argument
55 struct alx_buffer *txb = &txq->bufs[entry]; in alx_free_txbuf()
58 dma_unmap_single(txq->dev, in alx_free_txbuf()
149 return alx->qnapi[r_idx]->txq; in alx_tx_queue_mapping()
152 static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq) in alx_get_tx_queue() argument
154 return netdev_get_tx_queue(txq->netdev, txq->queue_idx); in alx_get_tx_queue()
157 static inline int alx_tpd_avail(struct alx_tx_queue *txq) in alx_tpd_avail() argument
159 if (txq->write_idx >= txq->read_idx) in alx_tpd_avail()
160 return txq->count + txq->read_idx - txq->write_idx - 1; in alx_tpd_avail()
161 return txq->read_idx - txq->write_idx - 1; in alx_tpd_avail()
[all …]
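
alx_tpd_avail is the classic occupancy formula for a producer/consumer ring that keeps one slot unused so that write_idx == read_idx unambiguously means empty. Standalone, with illustrative names:

    /* Free descriptors in a ring of count slots, one slot reserved. */
    static int ring_space(int write_idx, int read_idx, int count)
    {
            if (write_idx >= read_idx)
                    return count + read_idx - write_idx - 1;
            return read_idx - write_idx - 1;
    }
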
/drivers/net/ethernet/mellanox/mlx5/core/en/
selq.h
30 static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels) in mlx5e_txq_to_ch_ix() argument
32 while (unlikely(txq >= num_channels)) in mlx5e_txq_to_ch_ix()
33 txq -= num_channels; in mlx5e_txq_to_ch_ix()
34 return txq; in mlx5e_txq_to_ch_ix()
37 static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels) in mlx5e_txq_to_ch_ix_htb() argument
39 if (unlikely(txq >= num_channels)) { in mlx5e_txq_to_ch_ix_htb()
40 if (unlikely(txq >= num_channels << 3)) in mlx5e_txq_to_ch_ix_htb()
41 txq %= num_channels; in mlx5e_txq_to_ch_ix_htb()
44 txq -= num_channels; in mlx5e_txq_to_ch_ix_htb()
45 while (txq >= num_channels); in mlx5e_txq_to_ch_ix_htb()
[all …]
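
Both selq helpers fold a txq index into the channel range without an unconditional modulo: the plain mapper loops a subtraction (the index rarely exceeds one multiple of num_channels), and the HTB variant falls back to % only past eight times the channel count. A standalone rendering of the HTB variant, with u16 widened to unsigned int for portability:

    /* Map an arbitrary txq index onto [0, num_channels) cheaply. */
    static unsigned int txq_to_channel(unsigned int txq, unsigned int num_channels)
    {
            if (txq >= num_channels) {
                    if (txq >= num_channels << 3)
                            txq %= num_channels;    /* far out of range: divide */
                    else
                            do {                    /* close: subtract instead */
                                    txq -= num_channels;
                            } while (txq >= num_channels);
            }
            return txq;
    }
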
/drivers/net/ethernet/freescale/
fec_main.c
352 #define IS_TSO_HEADER(txq, addr) \ argument
353 ((addr >= txq->tso_hdrs_dma) && \
354 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
378 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) in fec_enet_get_free_txdesc_num() argument
382 entries = (((const char *)txq->dirty_tx - in fec_enet_get_free_txdesc_num()
383 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; in fec_enet_get_free_txdesc_num()
385 return entries >= 0 ? entries : entries + txq->bd.ring_size; in fec_enet_get_free_txdesc_num()
401 struct fec_enet_priv_tx_q *txq; in fec_dump() local
407 txq = fep->tx_queue[0]; in fec_dump()
408 bdp = txq->bd.base; in fec_dump()
[all …]
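
fec_enet_get_free_txdesc_num derives the free-slot count from the byte distance between the cleaned pointer (dirty_tx) and the producer (bd.cur), converted to descriptors by a log2 shift, with one slot reserved and a negative result wrapped back into range. A hedged standalone sketch (it mirrors the driver's reliance on an arithmetic right shift of a negative difference):

    /* Free TX descriptors between consumer dirty and producer cur;
     * dsize_log2 is log2 of one descriptor's size in bytes. */
    static int free_txdesc(const char *dirty, const char *cur,
                           unsigned int dsize_log2, int ring_size)
    {
            int entries = (int)((dirty - cur) >> dsize_log2) - 1;
            return entries >= 0 ? entries : entries + ring_size;
    }
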
/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c
1133 static void txq_stop(struct sge_eth_txq *txq) in txq_stop() argument
1135 netif_tx_stop_queue(txq->txq); in txq_stop()
1136 txq->q.stops++; in txq_stop()
1164 struct sge_eth_txq *txq; in t4vf_eth_xmit() local
1195 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in t4vf_eth_xmit()
1205 reclaim_completed_tx(adapter, &txq->q, true); in t4vf_eth_xmit()
1214 credits = txq_avail(&txq->q) - ndesc; in t4vf_eth_xmit()
1223 txq_stop(txq); in t4vf_eth_xmit()
1237 txq->mapping_err++; in t4vf_eth_xmit()
1252 txq_stop(txq); in t4vf_eth_xmit()
[all …]
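
t4vf_eth_xmit follows the usual xmit credit discipline: reclaim completed descriptors first, compute how many the frame needs, and stop the subqueue before the ring can overflow so the stack stops calling xmit until completions free space. The core check, reduced to a standalone sketch (names illustrative):

    #include <stdbool.h>

    struct eth_ring {
            unsigned int avail;     /* free descriptors */
            bool stopped;
    };

    /* Reserve ndesc descriptors, or stop the queue on shortfall. */
    static bool tx_reserve(struct eth_ring *q, unsigned int ndesc)
    {
            if (q->avail < ndesc) {
                    q->stopped = true;  /* stands in for netif_tx_stop_queue() */
                    return false;
            }
            q->avail -= ndesc;
            return true;
    }
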
/drivers/net/wireless/mediatek/mt76/
tx.c
9 mt76_txq_get_qid(struct ieee80211_txq *txq) in mt76_txq_get_qid() argument
11 if (!txq->sta) in mt76_txq_get_qid()
14 return txq->ac; in mt76_txq_get_qid()
21 struct ieee80211_txq *txq; in mt76_tx_check_agg_ssn() local
30 txq = sta->txq[tid]; in mt76_tx_check_agg_ssn()
31 mtxq = (struct mt76_txq *)txq->drv_priv; in mt76_tx_check_agg_ssn()
364 struct ieee80211_txq *txq = mtxq_to_txq(mtxq); in mt76_txq_dequeue() local
368 skb = ieee80211_tx_dequeue(phy->hw, txq); in mt76_txq_dequeue()
408 struct ieee80211_txq *txq = sta->txq[i]; in mt76_release_buffered_frames() local
409 struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv; in mt76_release_buffered_frames()
[all …]
/drivers/net/ethernet/fungible/funeth/
funeth_trace.h
15 TP_PROTO(const struct funeth_txq *txq,
20 TP_ARGS(txq, len, sqe_idx, ngle),
27 __string(devname, txq->netdev->name)
31 __entry->qidx = txq->qidx;
35 __assign_str(devname, txq->netdev->name);
45 TP_PROTO(const struct funeth_txq *txq,
50 TP_ARGS(txq, sqe_idx, num_sqes, hw_head),
57 __string(devname, txq->netdev->name)
61 __entry->qidx = txq->qidx;
65 __assign_str(devname, txq->netdev->name);
/drivers/net/ethernet/hisilicon/
hisi_femac.c
120 struct hisi_femac_queue txq; member
147 dma_addr = priv->txq.dma_phys[pos]; in hisi_femac_tx_dma_unmap()
155 struct hisi_femac_queue *txq = &priv->txq; in hisi_femac_xmit_reclaim() local
163 skb = txq->skb[txq->tail]; in hisi_femac_xmit_reclaim()
169 hisi_femac_tx_dma_unmap(priv, skb, txq->tail); in hisi_femac_xmit_reclaim()
177 txq->skb[txq->tail] = NULL; in hisi_femac_xmit_reclaim()
178 txq->tail = (txq->tail + 1) % txq->num; in hisi_femac_xmit_reclaim()
372 ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM); in hisi_femac_init_tx_and_rx_queues()
387 struct hisi_femac_queue *txq = &priv->txq; in hisi_femac_free_skb_rings() local
412 pos = txq->tail; in hisi_femac_free_skb_rings()
[all …]
/drivers/net/wireless/ath/ath5k/
base.c
769 struct ath5k_txq *txq, int padsize, in ath5k_txbuf_setup() argument
875 spin_lock_bh(&txq->lock); in ath5k_txbuf_setup()
876 list_add_tail(&bf->list, &txq->q); in ath5k_txbuf_setup()
877 txq->txq_len++; in ath5k_txbuf_setup()
878 if (txq->link == NULL) /* is this first packet? */ in ath5k_txbuf_setup()
879 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); in ath5k_txbuf_setup()
881 *txq->link = bf->daddr; in ath5k_txbuf_setup()
883 txq->link = &ds->ds_link; in ath5k_txbuf_setup()
884 ath5k_hw_start_tx_dma(ah, txq->qnum); in ath5k_txbuf_setup()
885 spin_unlock_bh(&txq->lock); in ath5k_txbuf_setup()
[all …]
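
ath5k_txbuf_setup appends to a hardware-walked descriptor list: for the first packet it programs the queue's TXDP head register, otherwise it patches the previous tail descriptor's link field, then records the new tail, all under the queue lock. A standalone sketch of the append (locking and the DMA kick are omitted; names illustrative):

    #include <stdint.h>

    struct hw_desc {
            uint32_t ds_link;       /* DMA address of the next descriptor */
    };

    struct sw_txq {
            uint32_t *link;         /* &last_desc->ds_link, NULL when empty */
            unsigned int len;
    };

    /* Append a buffer whose last descriptor is d at DMA address daddr. */
    static void txq_append(struct sw_txq *q, struct hw_desc *d, uint32_t daddr,
                           void (*set_txdp)(uint32_t))
    {
            q->len++;
            if (!q->link)
                    set_txdp(daddr);    /* empty queue: set the head register */
            else
                    *q->link = daddr;   /* chain onto the previous tail */
            q->link = &d->ds_link;
    }
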
/drivers/net/ethernet/chelsio/cxgb3/
sge.c
176 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
691 if (q->txq[i].desc) { in t3_free_qset()
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
695 if (q->txq[i].sdesc) { in t3_free_qset()
696 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
697 q->txq[i].in_use); in t3_free_qset()
698 kfree(q->txq[i].sdesc); in t3_free_qset()
701 q->txq[i].size * in t3_free_qset()
703 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
[all …]
/drivers/net/wireless/intel/iwlegacy/
common.c
364 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; in il_send_cmd_sync()
2707 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) in il_txq_update_write_ptr() argument
2710 int txq_id = txq->q.id; in il_txq_update_write_ptr()
2712 if (txq->need_update == 0) in il_txq_update_write_ptr()
2730 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr()
2738 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr()
2739 txq->need_update = 0; in il_txq_update_write_ptr()
2749 struct il_tx_queue *txq = &il->txq[txq_id]; in il_tx_queue_unmap() local
2750 struct il_queue *q = &txq->q; in il_tx_queue_unmap()
2756 il->ops->txq_free_tfd(il, txq); in il_tx_queue_unmap()
[all …]
/drivers/net/ethernet/chelsio/cxgb4/
sge.c
1234 netif_tx_stop_queue(q->txq); in eth_txq_stop()
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq)) in t4_sge_eth_txq_egress_update()
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { in t4_sge_eth_txq_egress_update()
1442 netif_tx_wake_queue(eq->txq); in t4_sge_eth_txq_egress_update()
1446 __netif_tx_unlock(eq->txq); in t4_sge_eth_txq_egress_update()
1832 struct sge_eth_txq *txq; in cxgb4_vf_eth_xmit() local
1858 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit()
1863 reclaim_completed_tx(adapter, &txq->q, -1, true); in cxgb4_vf_eth_xmit()
1871 credits = txq_avail(&txq->q) - ndesc; in cxgb4_vf_eth_xmit()
1879 eth_txq_stop(txq); in cxgb4_vf_eth_xmit()
[all …]
/drivers/net/wireless/ath/ath10k/
txrx.c
50 struct ieee80211_txq *txq; in ath10k_txrx_tx_unref() local
76 txq = skb_cb->txq; in ath10k_txrx_tx_unref()
78 if (txq) { in ath10k_txrx_tx_unref()
79 artxq = (void *)txq->drv_priv; in ath10k_txrx_tx_unref()
89 if (txq && txq->sta && skb_cb->airtime_est) in ath10k_txrx_tx_unref()
90 ieee80211_sta_register_airtime(txq->sta, txq->tid, in ath10k_txrx_tx_unref()
138 if (txq) in ath10k_txrx_tx_unref()
139 status.sta = txq->sta; in ath10k_txrx_tx_unref()
