/drivers/net/wireless/intel/iwlwifi/queue/ |
D | tx.c |
     20  struct iwl_txq *txq, u16 byte_cnt,  in iwl_pcie_gen2_update_byte_tbl() argument
     23  int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);  in iwl_pcie_gen2_update_byte_tbl()
     28  if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))  in iwl_pcie_gen2_update_byte_tbl()
     44  struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;  in iwl_pcie_gen2_update_byte_tbl()
     52  struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;  in iwl_pcie_gen2_update_byte_tbl()
     66  void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)  in iwl_txq_inc_wr_ptr() argument
     68  lockdep_assert_held(&txq->lock);  in iwl_txq_inc_wr_ptr()
     70  IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);  in iwl_txq_inc_wr_ptr()
     76  iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));  in iwl_txq_inc_wr_ptr()
    115  void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)  in iwl_txq_gen2_free_tfd() argument
    [all …]
|
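The iwl_txq_inc_wr_ptr() hits above show the gen2 doorbell idiom: the write pointer is advanced under txq->lock (hence the lockdep_assert_held) and then published to the device in one register write that packs the queue id into the upper half of the word. A minimal user-space sketch of that encoding follows; the MMIO register is modeled as a plain variable and everything outside the excerpt is a stand-in.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hbus_targ_wrptr;  /* stand-in for the HBUS_TARG_WRPTR MMIO register */

    struct txq {
        uint32_t id;         /* hardware queue number */
        uint32_t write_ptr;  /* next-to-use slot, already wrapped by the caller */
    };

    /* Sketch of iwl_txq_inc_wr_ptr(): queue id in bits 16+, index in bits 0-15. */
    static void txq_inc_wr_ptr(const struct txq *q)
    {
        hbus_targ_wrptr = q->write_ptr | (q->id << 16);
    }

    int main(void)
    {
        const struct txq q = { .id = 3, .write_ptr = 0x2a };

        txq_inc_wr_ptr(&q);
        printf("doorbell = 0x%08x\n", (unsigned)hbus_targ_wrptr);  /* 0x0003002a */
        return 0;
    }

Note that the older PCIe path further down (iwl_pcie_txq_inc_wr_ptr, and il_txq_update_write_ptr in iwlegacy) writes the same register with `txq_id << 8`, so the shift is generation-specific.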
D | tx.h |
     16  iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)  in iwl_txq_get_first_tb_dma() argument
     18  return txq->first_tb_dma +  in iwl_txq_get_first_tb_dma()
     30  struct iwl_txq *txq)  in iwl_wake_queue() argument
     32  if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {  in iwl_wake_queue()
     33  IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);  in iwl_wake_queue()
     34  iwl_op_mode_queue_not_full(trans->op_mode, txq->id);  in iwl_wake_queue()
     39  struct iwl_txq *txq, int idx)  in iwl_txq_get_tfd() argument
     42  idx = iwl_txq_get_cmd_index(txq, idx);  in iwl_txq_get_tfd()
     44  return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;  in iwl_txq_get_tfd()
     47  int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
    [all …]
|
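iwl_wake_queue() is a one-shot wake: whichever caller clears the queue's bit in the queue_stopped bitmap reports the queue as not-full, and every other concurrent caller sees the bit already clear and does nothing. A C11 sketch of the same race-free edge detection (bitmap width and names are stand-ins):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong queue_stopped;  /* one stop bit per hardware queue */

    /* Sketch of iwl_wake_queue(): only the caller that actually flips the
     * bit from 1 to 0 performs the wake, mirroring test_and_clear_bit(). */
    static void wake_queue(unsigned id)
    {
        unsigned long mask = 1UL << id;

        if (atomic_fetch_and(&queue_stopped, ~mask) & mask)
            printf("Wake hwq %u\n", id);  /* iwl_op_mode_queue_not_full() goes here */
    }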
/drivers/infiniband/hw/hfi1/ |
D | ipoib_tx.c |
     39  struct hfi1_ipoib_txq *txq;  member
     47  struct hfi1_ipoib_txq *txq;  member
     59  static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)  in hfi1_ipoib_used() argument
     61  return hfi1_ipoib_txreqs(txq->sent_txreqs,  in hfi1_ipoib_used()
     62  atomic64_read(&txq->complete_txreqs));  in hfi1_ipoib_used()
     65  static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)  in hfi1_ipoib_stop_txq() argument
     67  trace_hfi1_txq_stop(txq);  in hfi1_ipoib_stop_txq()
     68  if (atomic_inc_return(&txq->stops) == 1)  in hfi1_ipoib_stop_txq()
     69  netif_stop_subqueue(txq->priv->netdev, txq->q_idx);  in hfi1_ipoib_stop_txq()
     72  static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)  in hfi1_ipoib_wake_txq() argument
    [all …]
|
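Two patterns are visible in the ipoib_tx.c hits: occupancy is derived from a pair of counters (sent_txreqs minus complete_txreqs, so producer and consumer never contend on a single counter), and stopping is reference-counted, with only the 0-to-1 transition of txq->stops touching the netdev subqueue. A sketch of the stop/wake half; the wake-side decrement is assumed symmetric, since the excerpt truncates before the body of hfi1_ipoib_wake_txq():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int stops;  /* models txq->stops */

    /* Sketch of hfi1_ipoib_stop_txq(): several independent conditions may
     * stop the queue; only the first stopper stops the netdev subqueue. */
    static void stop_txq(void)
    {
        if (atomic_fetch_add(&stops, 1) == 0)
            printf("netif_stop_subqueue\n");
    }

    /* Assumed counterpart: only the last waker restarts the subqueue. */
    static void wake_txq(void)
    {
        if (atomic_fetch_sub(&stops, 1) == 1)
            printf("netif_wake_subqueue\n");
    }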
D | trace_tx.h |
    897  TP_PROTO(struct hfi1_ipoib_txq *txq),
    898  TP_ARGS(txq),
    900  DD_DEV_ENTRY(txq->priv->dd)
    901  __field(struct hfi1_ipoib_txq *, txq)
    913  DD_DEV_ASSIGN(txq->priv->dd);
    914  __entry->txq = txq;
    915  __entry->sde = txq->sde;
    916  __entry->head = txq->tx_ring.head;
    917  __entry->tail = txq->tx_ring.tail;
    918  __entry->idx = txq->q_idx;
    [all …]
|
/drivers/net/wireless/intel/iwlwifi/pcie/ |
D | tx.c |
     73  struct iwl_txq *txq)  in iwl_pcie_txq_inc_wr_ptr() argument
     76  int txq_id = txq->id;  in iwl_pcie_txq_inc_wr_ptr()
     78  lockdep_assert_held(&txq->lock);  in iwl_pcie_txq_inc_wr_ptr()
    101  txq->need_update = true;  in iwl_pcie_txq_inc_wr_ptr()
    110  IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);  in iwl_pcie_txq_inc_wr_ptr()
    111  if (!txq->block)  in iwl_pcie_txq_inc_wr_ptr()
    113  txq->write_ptr | (txq_id << 8));  in iwl_pcie_txq_inc_wr_ptr()
    121  struct iwl_txq *txq = trans->txqs.txq[i];  in iwl_pcie_txq_check_wrptrs() local
    126  spin_lock_bh(&txq->lock);  in iwl_pcie_txq_check_wrptrs()
    127  if (txq->need_update) {  in iwl_pcie_txq_check_wrptrs()
    [all …]
|
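iwl_pcie_txq_inc_wr_ptr() adds a wrinkle to the doorbell write: when the device may be asleep, or the queue is blocked, it only sets txq->need_update, and iwl_pcie_txq_check_wrptrs() flushes the pending write later under txq->lock. A sketch of that deferral, with the wake/sleep check reduced to a boolean and locking omitted:

    #include <stdbool.h>
    #include <stdio.h>

    struct txq {
        unsigned id, write_ptr;
        bool need_update;  /* a doorbell write is pending */
    };

    static void ring_doorbell(const struct txq *q)
    {
        printf("HBUS_TARG_WRPTR <- 0x%x\n", q->write_ptr | (q->id << 8));
    }

    /* Sketch of iwl_pcie_txq_inc_wr_ptr(): defer the MMIO if the NIC sleeps. */
    static void inc_wr_ptr(struct txq *q, bool device_awake)
    {
        if (!device_awake) {
            q->need_update = true;
            return;
        }
        ring_doorbell(q);
    }

    /* Sketch of iwl_pcie_txq_check_wrptrs(): flush any deferred doorbell. */
    static void check_wrptrs(struct txq *q)
    {
        if (q->need_update) {
            ring_doorbell(q);
            q->need_update = false;
        }
    }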
D | tx-gen2.c |
     31  struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];  in iwl_pcie_gen2_enqueue_hcmd() local
    112  spin_lock_irqsave(&txq->lock, flags);  in iwl_pcie_gen2_enqueue_hcmd()
    114  idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);  in iwl_pcie_gen2_enqueue_hcmd()
    115  tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);  in iwl_pcie_gen2_enqueue_hcmd()
    118  if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {  in iwl_pcie_gen2_enqueue_hcmd()
    119  spin_unlock_irqrestore(&txq->lock, flags);  in iwl_pcie_gen2_enqueue_hcmd()
    127  out_cmd = txq->entries[idx].cmd;  in iwl_pcie_gen2_enqueue_hcmd()
    128  out_meta = &txq->entries[idx].meta;  in iwl_pcie_gen2_enqueue_hcmd()
    144  INDEX_TO_SEQ(txq->write_ptr));  in iwl_pcie_gen2_enqueue_hcmd()
    191  cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);  in iwl_pcie_gen2_enqueue_hcmd()
    [all …]
|
/drivers/net/ethernet/huawei/hinic/ |
D | hinic_tx.c |
     76  void hinic_txq_clean_stats(struct hinic_txq *txq)  in hinic_txq_clean_stats() argument
     78  struct hinic_txq_stats *txq_stats = &txq->txq_stats;  in hinic_txq_clean_stats()
     95  void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)  in hinic_txq_get_stats() argument
     97  struct hinic_txq_stats *txq_stats = &txq->txq_stats;  in hinic_txq_get_stats()
    115  static void txq_stats_init(struct hinic_txq *txq)  in txq_stats_init() argument
    117  struct hinic_txq_stats *txq_stats = &txq->txq_stats;  in txq_stats_init()
    120  hinic_txq_clean_stats(txq);  in txq_stats_init()
    498  struct hinic_txq *txq;  in hinic_lb_xmit_frame() local
    501  txq = &nic_dev->txqs[q_id];  in hinic_lb_xmit_frame()
    502  qp = container_of(txq->sq, struct hinic_qp, sq);  in hinic_lb_xmit_frame()
    [all …]
|
/drivers/net/wireless/ath/ath9k/ |
D | xmit.c |
     50  static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
     53  int tx_flags, struct ath_txq *txq,
     56  struct ath_txq *txq, struct list_head *bf_q,
     59  static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
     67  struct ath_txq *txq,
    101  void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)  in ath_txq_unlock_complete() argument
    102  __releases(&txq->axq_lock)  in ath_txq_unlock_complete()
    109  skb_queue_splice_init(&txq->complete_q, &q);  in ath_txq_unlock_complete()
    110  spin_unlock_bh(&txq->axq_lock);  in ath_txq_unlock_complete()
    129  struct ath_txq *txq = tid->txq;  in ath9k_wake_tx_queue() local
    [all …]
|
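ath_txq_unlock_complete() shows the classic drain-then-complete shape: completed frames accumulate on txq->complete_q, the whole list is spliced to a private head in one step while axq_lock is held, and the completions run after the unlock so they can feed frames back into the TX path without recursing on the lock. A pthread sketch; the caller is assumed to hold the lock already, matching the __releases() annotation:

    #include <pthread.h>
    #include <stdio.h>

    struct frame { struct frame *next; };

    struct txq {
        pthread_mutex_t axq_lock;
        struct frame *complete_q;  /* models the skb queue */
    };

    /* Sketch of ath_txq_unlock_complete(): splice under the lock,
     * complete outside it. The caller must already hold axq_lock. */
    static void txq_unlock_complete(struct txq *txq)
    {
        struct frame *q = txq->complete_q;  /* skb_queue_splice_init() */

        txq->complete_q = NULL;
        pthread_mutex_unlock(&txq->axq_lock);

        for (struct frame *f = q; f; f = f->next)
            printf("complete %p\n", (void *)f);  /* tx status processing */
    }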
/drivers/net/ethernet/marvell/ |
D | mv643xx_eth.c |
    178  #define IS_TSO_HEADER(txq, addr) \  argument
    179  ((addr >= txq->tso_hdrs_dma) && \
    180  (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
    407  struct tx_queue txq[8];  member
    445  static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)  in txq_to_mp() argument
    447  return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);  in txq_to_mp()
    466  static void txq_reset_hw_ptr(struct tx_queue *txq)  in txq_reset_hw_ptr() argument
    468  struct mv643xx_eth_private *mp = txq_to_mp(txq);  in txq_reset_hw_ptr()
    471  addr = (u32)txq->tx_desc_dma;  in txq_reset_hw_ptr()
    472  addr += txq->tx_curr_desc * sizeof(struct tx_desc);  in txq_reset_hw_ptr()
    [all …]
|
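IS_TSO_HEADER() exists because TSO headers are carved out of one per-queue coherent DMA block (tso_hdrs_dma) rather than mapped per packet, so the reclaim path must recognize those addresses and skip dma_unmap_single() for them. A sketch of the range test; TSO_HEADER_SIZE is given a placeholder value here, the driver takes the real one from <net/tso.h>:

    #include <stdbool.h>
    #include <stdint.h>

    #define TSO_HEADER_SIZE 128  /* placeholder; see <net/tso.h> for the real value */

    struct tx_queue {
        uint64_t tso_hdrs_dma;   /* base of the per-queue TSO header block */
        unsigned tx_ring_size;   /* one header slot per descriptor */
    };

    /* Sketch of IS_TSO_HEADER(): true if addr lies inside the header block,
     * i.e. it was never mapped per-packet and must not be unmapped. */
    static bool is_tso_header(const struct tx_queue *txq, uint64_t addr)
    {
        return addr >= txq->tso_hdrs_dma &&
               addr <  txq->tso_hdrs_dma +
                       (uint64_t)txq->tx_ring_size * TSO_HEADER_SIZE;
    }

The same macro reappears in mvneta.c and fec_main.c below, with only the field names adjusted.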
D | mvneta.c |
    132  #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)  argument
    337  #define IS_TSO_HEADER(txq, addr) \  argument
    338  ((addr >= txq->tso_hdrs_phys) && \
    339  (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
    737  static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)  in mvneta_txq_inc_get() argument
    739  txq->txq_get_index++;  in mvneta_txq_inc_get()
    740  if (txq->txq_get_index == txq->size)  in mvneta_txq_inc_get()
    741  txq->txq_get_index = 0;  in mvneta_txq_inc_get()
    745  static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)  in mvneta_txq_inc_put() argument
    747  txq->txq_put_index++;  in mvneta_txq_inc_put()
    [all …]
|
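mvneta keeps separate get (reclaim) and put (submit) indices and wraps them with a compare instead of a modulo, which avoids a division on every descriptor; contrast hisi_femac below, which uses `% txq->num`. A sketch of the two helpers:

    struct mvneta_txq_model {
        int size;           /* descriptors in the ring */
        int txq_get_index;  /* next descriptor to reclaim */
        int txq_put_index;  /* next descriptor to fill */
    };

    /* Sketch of mvneta_txq_inc_get(): advance the reclaim index with wrap. */
    static void txq_inc_get(struct mvneta_txq_model *txq)
    {
        if (++txq->txq_get_index == txq->size)
            txq->txq_get_index = 0;
    }

    /* Sketch of mvneta_txq_inc_put(): the same wrap on the submit side. */
    static void txq_inc_put(struct mvneta_txq_model *txq)
    {
        if (++txq->txq_put_index == txq->size)
            txq->txq_put_index = 0;
    }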
/drivers/net/ethernet/qlogic/qede/ |
D | qede_fp.c |
     75  int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)  in qede_free_tx_pkt() argument
     77  u16 idx = txq->sw_tx_cons;  in qede_free_tx_pkt()
     78  struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;  in qede_free_tx_pkt()
     83  bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;  in qede_free_tx_pkt()
     89  idx, txq->sw_tx_cons, txq->sw_tx_prod);  in qede_free_tx_pkt()
     95  first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
    103  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
    113  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
    119  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
    123  txq->sw_tx_ring.skbs[idx].skb = NULL;  in qede_free_tx_pkt()
    [all …]
|
D | qede_main.c |
    519  static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)  in qede_tx_log_print() argument
    523  txq->index, le16_to_cpu(*txq->hw_cons_ptr),  in qede_tx_log_print()
    524  qed_chain_get_cons_idx(&txq->tx_pbl),  in qede_tx_log_print()
    525  qed_chain_get_prod_idx(&txq->tx_pbl),  in qede_tx_log_print()
    532  struct qede_tx_queue *txq;  in qede_tx_timeout() local
    542  txq = &edev->fp_array[txqueue].txq[cos];  in qede_tx_timeout()
    544  if (qed_chain_get_cons_idx(&txq->tx_pbl) !=  in qede_tx_timeout()
    545  qed_chain_get_prod_idx(&txq->tx_pbl))  in qede_tx_timeout()
    546  qede_tx_log_print(edev, txq);  in qede_tx_timeout()
    895  kfree(fp->txq);  in qede_free_fp_array()
    [all …]
|
/drivers/net/ethernet/atheros/alx/ |
D | main.c |
     54  static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)  in alx_free_txbuf() argument
     56  struct alx_buffer *txb = &txq->bufs[entry];  in alx_free_txbuf()
     59  dma_unmap_single(txq->dev,  in alx_free_txbuf()
    150  return alx->qnapi[r_idx]->txq;  in alx_tx_queue_mapping()
    153  static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)  in alx_get_tx_queue() argument
    155  return netdev_get_tx_queue(txq->netdev, txq->queue_idx);  in alx_get_tx_queue()
    158  static inline int alx_tpd_avail(struct alx_tx_queue *txq)  in alx_tpd_avail() argument
    160  if (txq->write_idx >= txq->read_idx)  in alx_tpd_avail()
    161  return txq->count + txq->read_idx - txq->write_idx - 1;  in alx_tpd_avail()
    162  return txq->read_idx - txq->write_idx - 1;  in alx_tpd_avail()
    [all …]
|
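alx_tpd_avail() is the standard free-space computation for a ring with a software write index and a hardware read index: one slot is sacrificed so that read_idx == write_idx unambiguously means empty, hence the trailing "- 1". A sketch plus a worked case:

    #include <stdio.h>

    /* Sketch of alx_tpd_avail(): free descriptors between write and read. */
    static int tpd_avail(int count, int read_idx, int write_idx)
    {
        if (write_idx >= read_idx)
            return count + read_idx - write_idx - 1;
        return read_idx - write_idx - 1;
    }

    int main(void)
    {
        /* ring of 8: producer at 6, consumer at 2 -> 8 + 2 - 6 - 1 = 3 free */
        printf("%d\n", tpd_avail(8, 2, 6));
        return 0;
    }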
/drivers/vdpa/vdpa_sim/ |
D | vdpa_sim_net.c |
     41  struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];  in vdpasim_net_work() local
     53  if (!txq->ready || !rxq->ready)  in vdpasim_net_work()
     58  err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,  in vdpasim_net_work()
     59  &txq->head, GFP_ATOMIC);  in vdpasim_net_work()
     66  vringh_complete_iotlb(&txq->vring, txq->head, 0);  in vdpasim_net_work()
     71  read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,  in vdpasim_net_work()
     88  vringh_complete_iotlb(&txq->vring, txq->head, 0);  in vdpasim_net_work()
     95  if (vringh_need_notify_iotlb(&txq->vring) > 0)  in vdpasim_net_work()
     96  vringh_notify(&txq->vring);  in vdpasim_net_work()
|
/drivers/net/wireless/mediatek/mt76/ |
D | tx.c |
      9  mt76_txq_get_qid(struct ieee80211_txq *txq)  in mt76_txq_get_qid() argument
     11  if (!txq->sta)  in mt76_txq_get_qid()
     14  return txq->ac;  in mt76_txq_get_qid()
     21  struct ieee80211_txq *txq;  in mt76_tx_check_agg_ssn() local
     30  txq = sta->txq[tid];  in mt76_tx_check_agg_ssn()
     31  mtxq = (struct mt76_txq *)txq->drv_priv;  in mt76_tx_check_agg_ssn()
    327  struct ieee80211_txq *txq = mtxq_to_txq(mtxq);  in mt76_txq_dequeue() local
    332  skb = ieee80211_tx_dequeue(phy->hw, txq);  in mt76_txq_dequeue()
    373  struct ieee80211_txq *txq = sta->txq[i];  in mt76_release_buffered_frames() local
    374  struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;  in mt76_release_buffered_frames()
    [all …]
|
/drivers/net/ethernet/freescale/ |
D | fec_main.c |
    308  #define IS_TSO_HEADER(txq, addr) \  argument
    309  ((addr >= txq->tso_hdrs_dma) && \
    310  (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
    334  static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)  in fec_enet_get_free_txdesc_num() argument
    338  entries = (((const char *)txq->dirty_tx -  in fec_enet_get_free_txdesc_num()
    339  (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;  in fec_enet_get_free_txdesc_num()
    341  return entries >= 0 ? entries : entries + txq->bd.ring_size;  in fec_enet_get_free_txdesc_num()
    367  struct fec_enet_priv_tx_q *txq;  in fec_dump() local
    373  txq = fep->tx_queue[0];  in fec_dump()
    374  bdp = txq->bd.base;  in fec_dump()
    [all …]
|
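fec_enet_get_free_txdesc_num() computes the same free-space quantity from raw descriptor pointers: dirty_tx (last reclaimed) minus bd.cur (next to use), with the byte difference turned into a slot count by a shift with dsize_log2, and ring_size added back when the subtraction goes negative across the wrap. A sketch over a fictitious 32-byte descriptor; like the driver, it relies on arithmetic right shift of a negative difference:

    #include <stdio.h>

    struct bd { char pad[32]; };  /* 32-byte descriptor, so dsize_log2 = 5 */

    /* Sketch of fec_enet_get_free_txdesc_num(). */
    static int free_txdesc_num(const struct bd *dirty_tx, const struct bd *cur,
                               unsigned dsize_log2, int ring_size)
    {
        int entries = (int)(((const char *)dirty_tx -
                             (const char *)cur) >> dsize_log2) - 1;

        return entries >= 0 ? entries : entries + ring_size;
    }

    int main(void)
    {
        struct bd ring[16];

        /* cur wrapped past dirty_tx: (4 - 9) - 1 = -6, then -6 + 16 = 10 free */
        printf("%d\n", free_txdesc_num(&ring[4], &ring[9], 5, 16));
        return 0;
    }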
/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | sge.c |
    1133  static void txq_stop(struct sge_eth_txq *txq)  in txq_stop() argument
    1135  netif_tx_stop_queue(txq->txq);  in txq_stop()
    1136  txq->q.stops++;  in txq_stop()
    1164  struct sge_eth_txq *txq;  in t4vf_eth_xmit() local
    1198  txq = &adapter->sge.ethtxq[pi->first_qset + qidx];  in t4vf_eth_xmit()
    1208  reclaim_completed_tx(adapter, &txq->q, true);  in t4vf_eth_xmit()
    1217  credits = txq_avail(&txq->q) - ndesc;  in t4vf_eth_xmit()
    1226  txq_stop(txq);  in t4vf_eth_xmit()
    1240  txq->mapping_err++;  in t4vf_eth_xmit()
    1255  txq_stop(txq);  in t4vf_eth_xmit()
    [all …]
|
/drivers/net/ethernet/hisilicon/ |
D | hisi_femac.c |
    120  struct hisi_femac_queue txq;  member
    147  dma_addr = priv->txq.dma_phys[pos];  in hisi_femac_tx_dma_unmap()
    155  struct hisi_femac_queue *txq = &priv->txq;  in hisi_femac_xmit_reclaim() local
    163  skb = txq->skb[txq->tail];  in hisi_femac_xmit_reclaim()
    169  hisi_femac_tx_dma_unmap(priv, skb, txq->tail);  in hisi_femac_xmit_reclaim()
    177  txq->skb[txq->tail] = NULL;  in hisi_femac_xmit_reclaim()
    178  txq->tail = (txq->tail + 1) % txq->num;  in hisi_femac_xmit_reclaim()
    372  ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);  in hisi_femac_init_tx_and_rx_queues()
    387  struct hisi_femac_queue *txq = &priv->txq;  in hisi_femac_free_skb_rings() local
    412  pos = txq->tail;  in hisi_femac_free_skb_rings()
    [all …]
|
/drivers/net/wireless/ath/ath5k/ |
D | base.c |
    732  struct ath5k_txq *txq, int padsize,  in ath5k_txbuf_setup() argument
    829  spin_lock_bh(&txq->lock);  in ath5k_txbuf_setup()
    830  list_add_tail(&bf->list, &txq->q);  in ath5k_txbuf_setup()
    831  txq->txq_len++;  in ath5k_txbuf_setup()
    832  if (txq->link == NULL) /* is this first packet? */  in ath5k_txbuf_setup()
    833  ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);  in ath5k_txbuf_setup()
    835  *txq->link = bf->daddr;  in ath5k_txbuf_setup()
    837  txq->link = &ds->ds_link;  in ath5k_txbuf_setup()
    838  ath5k_hw_start_tx_dma(ah, txq->qnum);  in ath5k_txbuf_setup()
    839  spin_unlock_bh(&txq->lock);  in ath5k_txbuf_setup()
    [all …]
|
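ath5k_txbuf_setup() appends to a live hardware DMA chain: if txq->link is NULL the queue was empty and the hardware TXDP register is pointed at the new buffer, otherwise the previous descriptor's ds_link word is patched; either way txq->link is left pointing at the new tail's ds_link. A sketch with the register modeled as a variable and the descriptor reduced to its link word:

    #include <stddef.h>
    #include <stdint.h>

    struct desc { uint32_t ds_link; /* DMA address of the next descriptor */ };

    struct txq {
        uint32_t *link;  /* &ds_link of the current tail, or NULL if empty */
        uint32_t txdp;   /* stand-in for the hardware TXDP register */
    };

    /* Sketch of the chaining step in ath5k_txbuf_setup(). */
    static void txq_chain(struct txq *txq, struct desc *ds, uint32_t daddr)
    {
        if (txq->link == NULL)
            txq->txdp = daddr;    /* first packet: point the DMA engine at it */
        else
            *txq->link = daddr;   /* patch the old tail's link word */
        txq->link = &ds->ds_link; /* the new descriptor is now the tail */
    }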
/drivers/net/ethernet/microsoft/mana/ |
D | mana_en.c |
    140  struct mana_txq *txq;  in mana_start_xmit() local
    150  txq = &apc->tx_qp[txq_idx].txq;  in mana_start_xmit()
    151  gdma_sq = txq->gdma_sq;  in mana_start_xmit()
    155  pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;  in mana_start_xmit()
    157  if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {  in mana_start_xmit()
    158  pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;  in mana_start_xmit()
    161  pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;  in mana_start_xmit()
    244  skb_queue_tail(&txq->pending_skbs, skb);  in mana_start_xmit()
    257  (void)skb_dequeue_tail(&txq->pending_skbs);  in mana_start_xmit()
    264  atomic_inc(&txq->pending_sends);  in mana_start_xmit()
    [all …]
|
/drivers/net/ethernet/chelsio/cxgb3/ |
D | sge.c |
    179  return container_of(q, struct sge_qset, txq[qidx]);  in txq_to_qset()
    657  memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);  in t3_reset_qset()
    694  if (q->txq[i].desc) {  in t3_free_qset()
    696  t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);  in t3_free_qset()
    698  if (q->txq[i].sdesc) {  in t3_free_qset()
    699  free_tx_desc(adapter, &q->txq[i],  in t3_free_qset()
    700  q->txq[i].in_use);  in t3_free_qset()
    701  kfree(q->txq[i].sdesc);  in t3_free_qset()
    704  q->txq[i].size *  in t3_free_qset()
    706  q->txq[i].desc, q->txq[i].phys_addr);  in t3_free_qset()
    [all …]
|
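txq_to_qset() recovers the enclosing qset from a pointer to one entry of its embedded txq[] array; container_of() with an indexed member is just offsetof arithmetic. A portable sketch (the struct layout is illustrative, and SGE_TXQ_PER_SET is assumed to be 3):

    #include <stddef.h>
    #include <stdio.h>

    struct sge_txq { unsigned cntxt_id; };

    struct sge_qset {
        int id;
        struct sge_txq txq[3];  /* assumes SGE_TXQ_PER_SET == 3 */
    };

    /* Sketch of txq_to_qset(): walk back from &qs->txq[qidx] to qs. */
    static struct sge_qset *txq_to_qset(struct sge_txq *q, int qidx)
    {
        return (struct sge_qset *)((char *)q -
                                   offsetof(struct sge_qset, txq) -
                                   qidx * sizeof(struct sge_txq));
    }

    int main(void)
    {
        struct sge_qset qs = { .id = 7 };

        printf("%d\n", txq_to_qset(&qs.txq[2], 2)->id);  /* prints 7 */
        return 0;
    }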
/drivers/net/wireless/intel/iwlegacy/ |
D | common.c |
     364  il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;  in il_send_cmd_sync()
    2707  il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)  in il_txq_update_write_ptr() argument
    2710  int txq_id = txq->q.id;  in il_txq_update_write_ptr()
    2712  if (txq->need_update == 0)  in il_txq_update_write_ptr()
    2730  il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));  in il_txq_update_write_ptr()
    2738  _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));  in il_txq_update_write_ptr()
    2739  txq->need_update = 0;  in il_txq_update_write_ptr()
    2749  struct il_tx_queue *txq = &il->txq[txq_id];  in il_tx_queue_unmap() local
    2750  struct il_queue *q = &txq->q;  in il_tx_queue_unmap()
    2756  il->ops->txq_free_tfd(il, txq);  in il_tx_queue_unmap()
    [all …]
|
/drivers/atm/ |
D | ambassador.c |
    613  amb_txq * txq = &dev->txq;  in tx_give() local
    621  spin_lock_irqsave (&txq->lock, flags);  in tx_give()
    623  if (txq->pending < txq->maximum) {  in tx_give()
    624  PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);  in tx_give()
    626  *txq->in.ptr = *tx;  in tx_give()
    627  txq->pending++;  in tx_give()
    628  txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);  in tx_give()
    630  wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));  in tx_give()
    633  if (txq->pending > txq->high)  in tx_give()
    634  txq->high = txq->pending;  in tx_give()
    [all …]
|
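tx_give() is a bounded producer queue in adapter memory: refuse when pending hits maximum, copy the request into the slot at the in-pointer, advance with a wrap-to-start macro, kick the adapter mailbox, and track a high-water mark for diagnostics. A single-producer sketch with locking omitted; NEXTQ() is reconstructed from its use above, and the mailbox write is left as a comment:

    #include <stdbool.h>

    /* Reconstructed from its use above: advance, wrapping at the limit. */
    #define NEXTQ(ptr, start, limit) ((ptr) + 1 < (limit) ? (ptr) + 1 : (start))

    struct amb_txq_model {
        int *in_ptr, *start, *limit;    /* txq->in.{ptr,start,limit} */
        unsigned pending, maximum, high;
    };

    /* Sketch of tx_give(): returns false when the queue is full. */
    static bool tx_give(struct amb_txq_model *txq, int tx)
    {
        if (txq->pending >= txq->maximum)
            return false;
        *txq->in_ptr = tx;              /* *txq->in.ptr = *tx */
        txq->pending++;
        txq->in_ptr = NEXTQ(txq->in_ptr, txq->start, txq->limit);
        /* wr_mem(... tx_address ..., virt_to_bus(txq->in.ptr)) kicks the card */
        if (txq->pending > txq->high)
            txq->high = txq->pending;   /* high-water statistic */
        return true;
    }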
/drivers/net/ethernet/chelsio/cxgb4/ |
D | sge.c |
    1234  netif_tx_stop_queue(q->txq);  in eth_txq_stop()
    1425  if (!q->in_use || !__netif_tx_trylock(eq->txq))  in t4_sge_eth_txq_egress_update()
    1441  if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {  in t4_sge_eth_txq_egress_update()
    1442  netif_tx_wake_queue(eq->txq);  in t4_sge_eth_txq_egress_update()
    1446  __netif_tx_unlock(eq->txq);  in t4_sge_eth_txq_egress_update()
    1832  struct sge_eth_txq *txq;  in cxgb4_vf_eth_xmit() local
    1856  txq = &adapter->sge.ethtxq[pi->first_qset + qidx];  in cxgb4_vf_eth_xmit()
    1861  reclaim_completed_tx(adapter, &txq->q, -1, true);  in cxgb4_vf_eth_xmit()
    1869  credits = txq_avail(&txq->q) - ndesc;  in cxgb4_vf_eth_xmit()
    1877  eth_txq_stop(txq);  in cxgb4_vf_eth_xmit()
    [all …]
|
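Taken together with the cxgb4vf hits above, these show the stop/wake hysteresis in the Chelsio TX path: xmit stops the queue when the credits left after the current packet fall under a threshold, while the egress update only wakes a stopped queue once hardware occupancy has dropped below half the ring, so the queue does not flap on every reclaimed descriptor. A sketch with illustrative thresholds; the real code works in hardware credits and takes the tx lock with __netif_tx_trylock():

    #include <stdbool.h>

    struct eth_txq_model {
        int size;    /* descriptors in the ring */
        int in_use;  /* descriptors the hardware still owns */
        bool stopped;
    };

    /* Stop side (t4vf_eth_xmit / eth_txq_stop): accept the packet, but
     * stop the queue when the credits remaining afterwards dip too low. */
    static void tx_submit(struct eth_txq_model *q, int ndesc, int stop_thres)
    {
        q->in_use += ndesc;
        if (q->size - q->in_use < stop_thres)
            q->stopped = true;    /* netif_tx_stop_queue() */
    }

    /* Wake side (t4_sge_eth_txq_egress_update): wake only once the ring
     * has drained below half occupancy, giving the stop/wake hysteresis. */
    static void egress_update(struct eth_txq_model *q, int reclaimed)
    {
        q->in_use -= reclaimed;
        if (q->stopped && q->in_use < q->size / 2)
            q->stopped = false;   /* netif_tx_wake_queue() */
    }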
/drivers/net/ethernet/brocade/bna/ |
D | bna_tx_rx.c |
    2870  struct bna_txq *txq;  in bna_tx_sm_started_entry() local
    2873  list_for_each_entry(txq, &tx->txq_q, qe) {  in bna_tx_sm_started_entry()
    2874  txq->tcb->priority = txq->priority;  in bna_tx_sm_started_entry()
    2876  bna_ib_start(tx->bna, &txq->ib, is_regular);  in bna_tx_sm_started_entry()
    3089  struct bna_txq *txq = NULL;  in bna_bfi_tx_enet_start() local
    3099  txq = txq ? list_next_entry(txq, qe)  in bna_bfi_tx_enet_start()
    3101  bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);  in bna_bfi_tx_enet_start()
    3102  cfg_req->q_cfg[i].q.priority = txq->priority;  in bna_bfi_tx_enet_start()
    3105  txq->ib.ib_seg_host_addr.lsb;  in bna_bfi_tx_enet_start()
    3107  txq->ib.ib_seg_host_addr.msb;  in bna_bfi_tx_enet_start()
    [all …]
|