/drivers/net/wireless/intel/iwlwifi/pcie/

D | tx.c
    155  struct iwl_txq *txq = (void *)data;  in iwl_pcie_txq_stuck_timer()  [local]
    156  struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;  in iwl_pcie_txq_stuck_timer()
    159  spin_lock(&txq->lock);  in iwl_pcie_txq_stuck_timer()
    161  if (txq->read_ptr == txq->write_ptr) {  in iwl_pcie_txq_stuck_timer()
    162  spin_unlock(&txq->lock);  in iwl_pcie_txq_stuck_timer()
    165  spin_unlock(&txq->lock);  in iwl_pcie_txq_stuck_timer()
    167  IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,  in iwl_pcie_txq_stuck_timer()
    168  jiffies_to_msecs(txq->wd_timeout));  in iwl_pcie_txq_stuck_timer()
    170  iwl_trans_pcie_log_scd_error(trans, txq);  in iwl_pcie_txq_stuck_timer()
    179  struct iwl_txq *txq, u16 byte_cnt,  in iwl_pcie_txq_update_byte_cnt_tbl()  [argument]
    [all …]

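The tx.c hits above come from the per-queue watchdog: when the timer fires, the driver checks the ring under the queue lock, bails out if the read and write pointers match (the queue drained in the meantime), and otherwise logs the stuck queue and dumps scheduler state. Below is a minimal standalone C sketch of that empty-ring check, not the iwlwifi code; the struct, field and function names are illustrative.

/* Standalone sketch of the stuck-queue watchdog pattern; illustrative names only. */
#include <stdio.h>

struct tx_ring {
	unsigned int read_ptr;   /* next descriptor the hardware will complete */
	unsigned int write_ptr;  /* next descriptor software will fill */
	int id;
};

static void stuck_timer_cb(struct tx_ring *q, unsigned int timeout_ms)
{
	/* In the driver this test runs under spin_lock(&txq->lock). */
	if (q->read_ptr == q->write_ptr)
		return;  /* queue drained since the timer was armed; nothing is stuck */

	fprintf(stderr, "Queue %d stuck for %u ms.\n", q->id, timeout_ms);
	/* ...dump descriptor/scheduler state here for debugging... */
}

int main(void)
{
	struct tx_ring q = { .read_ptr = 3, .write_ptr = 7, .id = 1 };

	stuck_timer_cb(&q, 2000);         /* pending work: prints the warning */
	q.read_ptr = q.write_ptr;
	stuck_timer_cb(&q, 2000);         /* empty queue: stays silent */
	return 0;
}
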
D | internal.h
    296  iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)  in iwl_pcie_get_first_tb_dma()  [argument]
    298  return txq->first_tb_dma +  in iwl_pcie_get_first_tb_dma()
    381  struct iwl_txq *txq;  [member]
    487  dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
    489  struct iwl_txq *txq);
    636  struct iwl_txq *txq)  in iwl_wake_queue()  [argument]
    640  if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {  in iwl_wake_queue()
    641  IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);  in iwl_wake_queue()
    642  iwl_op_mode_queue_not_full(trans->op_mode, txq->id);  in iwl_wake_queue()
    647  struct iwl_txq *txq)  in iwl_stop_queue()  [argument]
    [all …]

D | trans.c
    1948  struct iwl_txq *txq = &trans_pcie->txq[queue];  in iwl_trans_pcie_freeze_txq_timer()  [local]
    1951  spin_lock_bh(&txq->lock);  in iwl_trans_pcie_freeze_txq_timer()
    1955  if (txq->frozen == freeze)  in iwl_trans_pcie_freeze_txq_timer()
    1961  txq->frozen = freeze;  in iwl_trans_pcie_freeze_txq_timer()
    1963  if (txq->read_ptr == txq->write_ptr)  in iwl_trans_pcie_freeze_txq_timer()
    1968  txq->stuck_timer.expires))) {  in iwl_trans_pcie_freeze_txq_timer()
    1976  txq->frozen_expiry_remainder =  in iwl_trans_pcie_freeze_txq_timer()
    1977  txq->stuck_timer.expires - now;  in iwl_trans_pcie_freeze_txq_timer()
    1978  del_timer(&txq->stuck_timer);  in iwl_trans_pcie_freeze_txq_timer()
    1986  mod_timer(&txq->stuck_timer,  in iwl_trans_pcie_freeze_txq_timer()
    [all …]

/drivers/net/wireless/ath/ath9k/

D | xmit.c
    50  static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
    53  int tx_flags, struct ath_txq *txq,
    56  struct ath_txq *txq, struct list_head *bf_q,
    59  static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
    67  struct ath_txq *txq,
    98  void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)  in ath_txq_lock()  [argument]
    99  __acquires(&txq->axq_lock)  in ath_txq_lock()
    101  spin_lock_bh(&txq->axq_lock);  in ath_txq_lock()
    104  void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)  in ath_txq_unlock()  [argument]
    105  __releases(&txq->axq_lock)  in ath_txq_unlock()
    [all …]

/drivers/net/ethernet/marvell/

D | mv643xx_eth.c
    192  #define IS_TSO_HEADER(txq, addr) \  [argument]
    193  ((addr >= txq->tso_hdrs_dma) && \
    194  (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
    423  struct tx_queue txq[8];  [member]
    461  static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)  in txq_to_mp()  [argument]
    463  return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);  in txq_to_mp()
    482  static void txq_reset_hw_ptr(struct tx_queue *txq)  in txq_reset_hw_ptr()  [argument]
    484  struct mv643xx_eth_private *mp = txq_to_mp(txq);  in txq_reset_hw_ptr()
    487  addr = (u32)txq->tx_desc_dma;  in txq_reset_hw_ptr()
    488  addr += txq->tx_curr_desc * sizeof(struct tx_desc);  in txq_reset_hw_ptr()
    [all …]

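Two patterns are visible in the mv643xx_eth hits above: a range check that recognises DMA addresses inside the per-queue TSO header region, and a container_of()-style back pointer from a queue embedded in an array inside its parent private struct. The sketch below rebuilds both as standalone userspace C under assumed types and sizes; every name here is illustrative, not the driver's.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TSO_HEADER_SIZE 128

struct tx_queue {
	int index;
	uintptr_t tso_hdrs_dma;     /* base address of the TSO header area */
	unsigned int tx_ring_size;  /* one header slot per descriptor */
};

struct eth_private {
	int port_num;
	struct tx_queue txq[8];     /* queues embedded in the parent struct */
};

/* True when addr falls inside this queue's TSO header region. */
#define IS_TSO_HEADER(q, addr) \
	((addr) >= (q)->tso_hdrs_dma && \
	 (addr) < (q)->tso_hdrs_dma + (q)->tx_ring_size * TSO_HEADER_SIZE)

/* container_of() idea: subtract the offset of txq[index] to recover the parent. */
static struct eth_private *txq_to_priv(struct tx_queue *q)
{
	size_t off = offsetof(struct eth_private, txq) +
		     (size_t)q->index * sizeof(struct tx_queue);

	return (struct eth_private *)((char *)q - off);
}

int main(void)
{
	struct eth_private priv = { .port_num = 2 };
	struct tx_queue *q = &priv.txq[3];

	q->index = 3;
	q->tso_hdrs_dma = 0x1000;
	q->tx_ring_size = 64;           /* region covers [0x1000, 0x3000) */

	printf("parent port: %d\n", txq_to_priv(q)->port_num);
	printf("0x1040 is TSO header: %d\n", (int)IS_TSO_HEADER(q, (uintptr_t)0x1040));
	printf("0x9000 is TSO header: %d\n", (int)IS_TSO_HEADER(q, (uintptr_t)0x9000));
	return 0;
}
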
D | mvneta.c
    125  #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)  [argument]
    305  #define IS_TSO_HEADER(txq, addr) \  [argument]
    306  ((addr >= txq->tso_hdrs_phys) && \
    307  (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
    610  static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)  in mvneta_txq_inc_get()  [argument]
    612  txq->txq_get_index++;  in mvneta_txq_inc_get()
    613  if (txq->txq_get_index == txq->size)  in mvneta_txq_inc_get()
    614  txq->txq_get_index = 0;  in mvneta_txq_inc_get()
    618  static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)  in mvneta_txq_inc_put()  [argument]
    620  txq->txq_put_index++;  in mvneta_txq_inc_put()
    [all …]

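The mvneta get/put helpers above show the usual wrap-around index increment for a fixed-size ring: bump the index and snap it back to zero when it reaches the ring size. A standalone sketch of that arithmetic follows, with illustrative struct and function names rather than the driver's.

#include <stdio.h>

struct tx_ring {
	unsigned int size;       /* number of slots in the ring */
	unsigned int get_index;  /* next slot to reclaim */
	unsigned int put_index;  /* next slot to fill */
};

static void ring_inc_get(struct tx_ring *r)
{
	r->get_index++;
	if (r->get_index == r->size)
		r->get_index = 0;
}

static void ring_inc_put(struct tx_ring *r)
{
	r->put_index++;
	if (r->put_index == r->size)
		r->put_index = 0;
}

int main(void)
{
	struct tx_ring r = { .size = 4 };

	for (int i = 0; i < 6; i++) {
		ring_inc_put(&r);
		printf("after put %d: put_index=%u\n", i, r.put_index);  /* wraps 3 -> 0 */
	}
	ring_inc_get(&r);
	printf("get_index=%u\n", r.get_index);
	return 0;
}
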
D | mvpp2.c
    133  #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))  [argument]
    1007  static inline int mvpp2_txq_phys(int port, int txq)  in mvpp2_txq_phys()  [argument]
    1009  return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;  in mvpp2_txq_phys()
    4036  struct mvpp2_tx_queue *txq = port->txqs[queue];  in mvpp2_egress_enable()  [local]
    4038  if (txq->descs != NULL)  in mvpp2_egress_enable()
    4153  struct mvpp2_tx_queue *txq)  in mvpp2_txq_pend_desc_num_get()  [argument]
    4157  mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);  in mvpp2_txq_pend_desc_num_get()
    4165  mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)  in mvpp2_txq_next_desc_get()  [argument]
    4167  int tx_desc = txq->next_desc_to_proc;  in mvpp2_txq_next_desc_get()
    4169  txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);  in mvpp2_txq_next_desc_get()
    [all …]

/drivers/net/ethernet/freescale/

D | fec_main.c
    230  #define IS_TSO_HEADER(txq, addr) \  [argument]
    231  ((addr >= txq->tso_hdrs_dma) && \
    232  (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
    256  static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)  in fec_enet_get_free_txdesc_num()  [argument]
    260  entries = (((const char *)txq->dirty_tx -  in fec_enet_get_free_txdesc_num()
    261  (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;  in fec_enet_get_free_txdesc_num()
    263  return entries >= 0 ? entries : entries + txq->bd.ring_size;  in fec_enet_get_free_txdesc_num()
    289  struct fec_enet_priv_tx_q *txq;  in fec_dump()  [local]
    295  txq = fep->tx_queue[0];  in fec_dump()
    296  bdp = txq->bd.base;  in fec_dump()
    [all …]

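The fec_main.c free-descriptor helper above computes how many descriptors are still usable from the distance between the last cleaned descriptor and the producer, minus one reserved slot, folding the result back into the ring when the subtraction goes negative. The sketch below redoes that calculation with plain indexes instead of descriptor pointers; names, the ring size and the one-slot reservation convention are assumptions for illustration, not taken from the driver.

#include <stdio.h>

struct tx_ring {
	int ring_size;  /* total descriptors in the ring */
	int cur;        /* next descriptor software will fill (producer) */
	int dirty;      /* oldest descriptor not yet reclaimed (consumer) */
};

static int free_txdesc_num(const struct tx_ring *r)
{
	/* One slot stays unused so that full and empty are distinguishable. */
	int entries = r->dirty - r->cur - 1;

	return entries >= 0 ? entries : entries + r->ring_size;
}

int main(void)
{
	struct tx_ring r = { .ring_size = 8, .cur = 5, .dirty = 2 };

	/* consumer behind producer: 2 - 5 - 1 = -4, wrapped to 4 free slots */
	printf("free = %d\n", free_txdesc_num(&r));

	r.cur = 2;  /* cur == dirty: -1 wraps to 7, the whole ring minus the reserved slot */
	printf("free = %d\n", free_txdesc_num(&r));
	return 0;
}
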
/drivers/net/ethernet/chelsio/cxgb4vf/

D | sge.c
    1138  static void txq_stop(struct sge_eth_txq *txq)  in txq_stop()  [argument]
    1140  netif_tx_stop_queue(txq->txq);  in txq_stop()
    1141  txq->q.stops++;  in txq_stop()
    1169  struct sge_eth_txq *txq;  in t4vf_eth_xmit()  [local]
    1203  txq = &adapter->sge.ethtxq[pi->first_qset + qidx];  in t4vf_eth_xmit()
    1209  reclaim_completed_tx(adapter, &txq->q, true);  in t4vf_eth_xmit()
    1218  credits = txq_avail(&txq->q) - ndesc;  in t4vf_eth_xmit()
    1227  txq_stop(txq);  in t4vf_eth_xmit()
    1241  txq->mapping_err++;  in t4vf_eth_xmit()
    1256  txq_stop(txq);  in t4vf_eth_xmit()
    [all …]

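The cxgb4vf transmit path above follows a credit pattern: reclaim completed work, compute how many descriptors would remain after the current packet, and stop the software queue when the ring cannot take more, so the stack stops feeding it until completions free space. Here is a small standalone sketch of that flow control; it simplifies the real thresholding and uses illustrative names, so treat it as the idea rather than the driver's behaviour.

#include <stdbool.h>
#include <stdio.h>

struct eth_txq {
	int avail;     /* free hardware descriptors */
	int stops;     /* how often the queue had to be stopped */
	bool stopped;  /* stands in for netif_tx_stop_queue() state */
};

static void txq_stop(struct eth_txq *q)
{
	q->stopped = true;
	q->stops++;
}

/* Returns true if the packet was accepted, false if the ring had no room. */
static bool eth_xmit(struct eth_txq *q, int ndesc)
{
	int credits = q->avail - ndesc;

	if (credits < 0) {
		txq_stop(q);            /* not enough room even for this packet */
		return false;
	}

	q->avail = credits;
	if (credits == 0)
		txq_stop(q);            /* ring now full: stop before the next packet */
	return true;
}

int main(void)
{
	struct eth_txq q = { .avail = 3 };
	bool ok;

	ok = eth_xmit(&q, 2);
	printf("xmit 2 descs: %d (avail=%d)\n", ok, q.avail);
	ok = eth_xmit(&q, 2);
	printf("xmit 2 descs: %d (stopped=%d, stops=%d)\n", ok, q.stopped, q.stops);
	return 0;
}
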
/drivers/net/wireless/ath/ath5k/

D | base.c
    733  struct ath5k_txq *txq, int padsize,  in ath5k_txbuf_setup()  [argument]
    830  spin_lock_bh(&txq->lock);  in ath5k_txbuf_setup()
    831  list_add_tail(&bf->list, &txq->q);  in ath5k_txbuf_setup()
    832  txq->txq_len++;  in ath5k_txbuf_setup()
    833  if (txq->link == NULL) /* is this first packet? */  in ath5k_txbuf_setup()
    834  ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);  in ath5k_txbuf_setup()
    836  *txq->link = bf->daddr;  in ath5k_txbuf_setup()
    838  txq->link = &ds->ds_link;  in ath5k_txbuf_setup()
    839  ath5k_hw_start_tx_dma(ah, txq->qnum);  in ath5k_txbuf_setup()
    841  spin_unlock_bh(&txq->lock);  in ath5k_txbuf_setup()
    [all …]

/drivers/net/ethernet/atheros/alx/

D | main.c
    60  struct alx_buffer *txb = &alx->txq.bufs[entry];  in alx_free_txbuf()
    148  struct alx_tx_queue *txq = &alx->txq;  in alx_tpd_avail()  [local]
    150  if (txq->write_idx >= txq->read_idx)  in alx_tpd_avail()
    151  return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;  in alx_tpd_avail()
    152  return txq->read_idx - txq->write_idx - 1;  in alx_tpd_avail()
    157  struct alx_tx_queue *txq = &alx->txq;  in alx_clean_tx_irq()  [local]
    162  sw_read_idx = txq->read_idx;  in alx_clean_tx_irq()
    169  skb = txq->bufs[sw_read_idx].skb;  in alx_clean_tx_irq()
    181  txq->read_idx = sw_read_idx;  in alx_clean_tx_irq()
    442  alx->txq.read_idx = 0;  in alx_init_ring_ptrs()
    [all …]

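The alx_tpd_avail() hits above show the two-case availability formula for a producer/consumer ring: when the write index has not wrapped past the read index, the free space wraps around the end of the ring; otherwise it is simply the gap between the indexes, and either way one slot is kept empty. A standalone sketch of that arithmetic follows, with field names trimmed and the ring size chosen only for illustration.

#include <stdio.h>

struct tx_ring {
	unsigned int tx_ringsz;
	unsigned int write_idx;  /* producer */
	unsigned int read_idx;   /* consumer */
};

static unsigned int tpd_avail(const struct tx_ring *r)
{
	if (r->write_idx >= r->read_idx)
		return r->tx_ringsz + r->read_idx - r->write_idx - 1;
	return r->read_idx - r->write_idx - 1;
}

int main(void)
{
	struct tx_ring r = { .tx_ringsz = 8, .write_idx = 6, .read_idx = 2 };

	/* slots 2..5 are in flight: 8 + 2 - 6 - 1 = 3 usable descriptors */
	printf("avail = %u\n", tpd_avail(&r));

	r.write_idx = 1;
	r.read_idx = 5;   /* producer already wrapped: 5 - 1 - 1 = 3 */
	printf("avail = %u\n", tpd_avail(&r));
	return 0;
}
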
/drivers/net/ethernet/hisilicon/

D | hisi_femac.c
    132  struct hisi_femac_queue txq;  [member]
    159  dma_addr = priv->txq.dma_phys[pos];  in hisi_femac_tx_dma_unmap()
    167  struct hisi_femac_queue *txq = &priv->txq;  in hisi_femac_xmit_reclaim()  [local]
    175  skb = txq->skb[txq->tail];  in hisi_femac_xmit_reclaim()
    181  hisi_femac_tx_dma_unmap(priv, skb, txq->tail);  in hisi_femac_xmit_reclaim()
    189  txq->skb[txq->tail] = NULL;  in hisi_femac_xmit_reclaim()
    190  txq->tail = (txq->tail + 1) % txq->num;  in hisi_femac_xmit_reclaim()
    384  ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);  in hisi_femac_init_tx_and_rx_queues()
    399  struct hisi_femac_queue *txq = &priv->txq;  in hisi_femac_free_skb_rings()  [local]
    424  pos = txq->tail;  in hisi_femac_free_skb_rings()
    [all …]

/drivers/net/ethernet/chelsio/cxgb3/

D | sge.c
    179  return container_of(q, struct sge_qset, txq[qidx]);  in txq_to_qset()
    654  memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);  in t3_reset_qset()
    691  if (q->txq[i].desc) {  in t3_free_qset()
    693  t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);  in t3_free_qset()
    695  if (q->txq[i].sdesc) {  in t3_free_qset()
    696  free_tx_desc(adapter, &q->txq[i],  in t3_free_qset()
    697  q->txq[i].in_use);  in t3_free_qset()
    698  kfree(q->txq[i].sdesc);  in t3_free_qset()
    701  q->txq[i].size *  in t3_free_qset()
    703  q->txq[i].desc, q->txq[i].phys_addr);  in t3_free_qset()
    [all …]

/drivers/net/wireless/intel/iwlegacy/

D | common.c
    382  il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;  in il_send_cmd_sync()
    2726  il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)  in il_txq_update_write_ptr()  [argument]
    2729  int txq_id = txq->q.id;  in il_txq_update_write_ptr()
    2731  if (txq->need_update == 0)  in il_txq_update_write_ptr()
    2749  il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));  in il_txq_update_write_ptr()
    2757  _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));  in il_txq_update_write_ptr()
    2758  txq->need_update = 0;  in il_txq_update_write_ptr()
    2768  struct il_tx_queue *txq = &il->txq[txq_id];  in il_tx_queue_unmap()  [local]
    2769  struct il_queue *q = &txq->q;  in il_tx_queue_unmap()
    2775  il->ops->txq_free_tfd(il, txq);  in il_tx_queue_unmap()
    [all …]

D | 3945.c
    289  struct il_tx_queue *txq = &il->txq[txq_id];  in il3945_tx_queue_reclaim()  [local]
    290  struct il_queue *q = &txq->q;  in il3945_tx_queue_reclaim()
    298  skb = txq->skbs[txq->q.read_ptr];  in il3945_tx_queue_reclaim()
    300  txq->skbs[txq->q.read_ptr] = NULL;  in il3945_tx_queue_reclaim()
    301  il->ops->txq_free_tfd(il, txq);  in il3945_tx_queue_reclaim()
    306  il_wake_queue(il, txq);  in il3945_tx_queue_reclaim()
    319  struct il_tx_queue *txq = &il->txq[txq_id];  in il3945_hdl_tx()  [local]
    326  if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {  in il3945_hdl_tx()
    329  txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);  in il3945_hdl_tx()
    346  txq->time_stamp = jiffies;  in il3945_hdl_tx()
    [all …]

/drivers/net/ethernet/qlogic/qede/

D | qede_main.c
    284  struct qede_tx_queue *txq, int *len)  in qede_free_tx_pkt()  [argument]
    286  u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;  in qede_free_tx_pkt()
    287  struct sk_buff *skb = txq->sw_tx_ring[idx].skb;  in qede_free_tx_pkt()
    292  bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;  in qede_free_tx_pkt()
    298  idx, txq->sw_tx_cons, txq->sw_tx_prod);  in qede_free_tx_pkt()
    304  first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
    312  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
    322  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
    328  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
    332  txq->sw_tx_ring[idx].skb = NULL;  in qede_free_tx_pkt()
    [all …]

/drivers/atm/

D | ambassador.c
    628  amb_txq * txq = &dev->txq;  in tx_give()  [local]
    636  spin_lock_irqsave (&txq->lock, flags);  in tx_give()
    638  if (txq->pending < txq->maximum) {  in tx_give()
    639  PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);  in tx_give()
    641  *txq->in.ptr = *tx;  in tx_give()
    642  txq->pending++;  in tx_give()
    643  txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);  in tx_give()
    645  wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));  in tx_give()
    648  if (txq->pending > txq->high)  in tx_give()
    649  txq->high = txq->pending;  in tx_give()
    [all …]

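The ambassador tx_give() hits above show a bounded circular queue handed to the adapter: if fewer than the maximum requests are pending, the request is copied into the "in" slot, the in pointer advances circularly between start and limit, and a high-water mark records the deepest the queue ever got. The standalone sketch below reproduces that shape with assumed types and a made-up NEXTQ equivalent; it is an illustration, not the driver's code.

#include <stdio.h>

struct tx_req { int data; };

struct amb_txq {
	unsigned int pending, maximum, high;
	struct tx_req *in_ptr, *start, *limit;  /* limit is one past the last slot */
	struct tx_req slots[4];
};

/* Advance a slot pointer, wrapping from limit back to start. */
static struct tx_req *nextq(struct tx_req *p, struct tx_req *start,
			    struct tx_req *limit)
{
	return ++p == limit ? start : p;
}

static int tx_give(struct amb_txq *q, const struct tx_req *tx)
{
	if (q->pending >= q->maximum)
		return 0;                       /* adapter queue full */

	*q->in_ptr = *tx;                       /* hand the request to the ring */
	q->pending++;
	q->in_ptr = nextq(q->in_ptr, q->start, q->limit);

	if (q->pending > q->high)
		q->high = q->pending;           /* statistics: deepest the queue got */
	return 1;
}

int main(void)
{
	struct amb_txq q = { .maximum = 4 };
	struct tx_req tx = { .data = 42 };

	q.start = q.in_ptr = q.slots;
	q.limit = q.slots + 4;

	for (int i = 0; i < 5; i++) {
		int ok = tx_give(&q, &tx);      /* fifth give fails: queue is full */
		printf("give %d -> %d (pending=%u high=%u)\n", i, ok, q.pending, q.high);
	}
	return 0;
}
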
/drivers/net/ethernet/brocade/bna/

D | bna_tx_rx.c
    2878  struct bna_txq *txq;  in bna_tx_sm_started_entry()  [local]
    2881  list_for_each_entry(txq, &tx->txq_q, qe) {  in bna_tx_sm_started_entry()
    2882  txq->tcb->priority = txq->priority;  in bna_tx_sm_started_entry()
    2884  bna_ib_start(tx->bna, &txq->ib, is_regular);  in bna_tx_sm_started_entry()
    3097  struct bna_txq *txq = NULL;  in bna_bfi_tx_enet_start()  [local]
    3107  txq = txq ? list_next_entry(txq, qe)  in bna_bfi_tx_enet_start()
    3109  bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);  in bna_bfi_tx_enet_start()
    3110  cfg_req->q_cfg[i].q.priority = txq->priority;  in bna_bfi_tx_enet_start()
    3113  txq->ib.ib_seg_host_addr.lsb;  in bna_bfi_tx_enet_start()
    3115  txq->ib.ib_seg_host_addr.msb;  in bna_bfi_tx_enet_start()
    [all …]

/drivers/net/ethernet/mellanox/mlx5/core/

D | en_tx.c
    86  sq->db.txq.dma_fifo[i].addr = addr;  in mlx5e_dma_push()
    87  sq->db.txq.dma_fifo[i].size = size;  in mlx5e_dma_push()
    88  sq->db.txq.dma_fifo[i].type = map_type;  in mlx5e_dma_push()
    94  return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];  in mlx5e_dma_get()
    225  struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];  in mlx5e_sq_xmit()
    348  sq->db.txq.skb[pi] = skb;  in mlx5e_sq_xmit()
    353  netdev_tx_sent_queue(sq->txq, wi->num_bytes);  in mlx5e_sq_xmit()
    359  netif_tx_stop_queue(sq->txq);  in mlx5e_sq_xmit()
    364  if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {  in mlx5e_sq_xmit()
    376  sq->db.txq.skb[pi] = NULL;  in mlx5e_sq_xmit()
    [all …]

/drivers/net/ethernet/chelsio/cxgb4/

D | sge.c
    1114  netif_tx_stop_queue(q->txq);  in eth_txq_stop()
    1594  struct sge_txq *txq;  in service_ofldq()  [local]
    1644  txq = &q->q;  in service_ofldq()
    1649  left = (u8 *)end - (u8 *)txq->stat;  in service_ofldq()
    1650  end = (void *)txq->desc + left;  in service_ofldq()
    1657  if (pos == (u64 *)txq->stat) {  in service_ofldq()
    1658  left = (u8 *)end - (u8 *)txq->stat;  in service_ofldq()
    1659  end = (void *)txq->desc + left;  in service_ofldq()
    1660  pos = (void *)txq->desc;  in service_ofldq()
    2482  struct sge_ofld_txq *txq = s->egr_map[id];  in sge_tx_timer_cb()  [local]
    [all …]

/drivers/net/

D | ifb.c
    68  struct netdev_queue *txq;  in ifb_ri_tasklet()  [local]
    71  txq = netdev_get_tx_queue(txp->dev, txp->txqnum);  in ifb_ri_tasklet()
    74  if (!__netif_tx_trylock(txq))  in ifb_ri_tasklet()
    77  __netif_tx_unlock(txq);  in ifb_ri_tasklet()
    113  if (__netif_tx_trylock(txq)) {  in ifb_ri_tasklet()
    117  if (netif_tx_queue_stopped(txq))  in ifb_ri_tasklet()
    118  netif_tx_wake_queue(txq);  in ifb_ri_tasklet()
    120  __netif_tx_unlock(txq);  in ifb_ri_tasklet()
    123  __netif_tx_unlock(txq);  in ifb_ri_tasklet()

/drivers/bluetooth/

D | hci_h4.c
    52  struct sk_buff_head txq;  [member]
    66  skb_queue_head_init(&h4->txq);  in h4_open()
    79  skb_queue_purge(&h4->txq);  in h4_flush()
    93  skb_queue_purge(&h4->txq);  in h4_close()
    112  skb_queue_tail(&h4->txq, skb);  in h4_enqueue()
    146  return skb_dequeue(&h4->txq);  in h4_dequeue()

D | hci_ath.c
    49  struct sk_buff_head txq;  [member]
    108  skb_queue_head_init(&ath->txq);  in ath_open()
    124  skb_queue_purge(&ath->txq);  in ath_close()
    142  skb_queue_purge(&ath->txq);  in ath_flush()
    228  skb_queue_tail(&ath->txq, skb);  in ath_enqueue()
    240  return skb_dequeue(&ath->txq);  in ath_dequeue()

D | hci_ll.c
    83  struct sk_buff_head txq;  [member]
    115  skb_queue_tail(&ll->txq, skb);  in send_hcill_cmd()
    131  skb_queue_head_init(&ll->txq);  in ll_open()
    150  skb_queue_purge(&ll->txq);  in ll_flush()
    163  skb_queue_purge(&ll->txq);  in ll_close()
    186  skb_queue_tail(&ll->txq, skb);  in __ll_do_awake()
    319  skb_queue_tail(&ll->txq, skb);  in ll_enqueue()
    505  return skb_dequeue(&ll->txq);  in ll_dequeue()

/drivers/net/wireless/ath/ath6kl/

D | htc_pipe.c
    106  if (list_empty(&ep->txq))  in get_htc_packet_credit_based()
    110  packet = list_first_entry(&ep->txq, struct htc_packet, list);  in get_htc_packet_credit_based()
    114  __func__, packet, get_queue_depth(&ep->txq));  in get_htc_packet_credit_based()
    159  packet = list_first_entry(&ep->txq, struct htc_packet, list);  in get_htc_packet_credit_based()
    183  if (list_empty(&ep->txq))  in get_htc_packet()
    186  packet = list_first_entry(&ep->txq, struct htc_packet, list);  in get_htc_packet()
    191  __func__, packet, get_queue_depth(&ep->txq));  in get_htc_packet()
    303  struct list_head *txq)  in htc_try_send()  [argument]
    313  __func__, txq,  in htc_try_send()
    314  (txq == NULL) ? 0 : get_queue_depth(txq));  in htc_try_send()
    [all …]