/drivers/net/ethernet/microsoft/mana/

D  mana_bpf.c
      15  u16 txq_idx = skb_get_queue_mapping(skb); in mana_xdp_tx() local
      21  ndevtxq = netdev_get_tx_queue(ndev, txq_idx); in mana_xdp_tx()

D  mana_en.c
     225  u16 txq_idx = skb_get_queue_mapping(skb); in mana_start_xmit() local
     243  txq = &apc->tx_qp[txq_idx].txq; in mana_start_xmit()
     245  cq = &apc->tx_qp[txq_idx].tx_cq; in mana_start_xmit()
     385  net_txq = netdev_get_tx_queue(ndev, txq_idx); in mana_start_xmit()

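The two mana entries above show the common multi-queue xmit pattern: the core
has already picked a TX queue before ndo_start_xmit() runs, the driver reads
that choice back with skb_get_queue_mapping(), indexes its per-queue state
with it, and resolves the matching struct netdev_queue via
netdev_get_tx_queue() for flow control. A minimal sketch of the pattern
follows; struct my_priv and struct my_tx_ring are hypothetical stand-ins for
mana's apc->tx_qp[] state, not the driver's real types.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_tx_ring {
        /* per-queue hardware descriptor state would live here */
        unsigned int free_descs;
};

struct my_priv {
        struct my_tx_ring *tx_ring;     /* one entry per TX queue */
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct my_priv *priv = netdev_priv(ndev);
        u16 txq_idx = skb_get_queue_mapping(skb);       /* queue chosen by the core */
        struct my_tx_ring *ring = &priv->tx_ring[txq_idx];
        struct netdev_queue *net_txq = netdev_get_tx_queue(ndev, txq_idx);

        /* ... post the skb to 'ring' here ... */

        /* stop the software queue when the HW ring is nearly full */
        if (ring->free_descs < MAX_SKB_FRAGS + 1)
                netif_tx_stop_queue(net_txq);

        return NETDEV_TX_OK;
}
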
/drivers/target/iscsi/cxgbit/

D  cxgbit_cm.c
     631  cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx, in cxgbit_send_halfclose()
     672  cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx, in cxgbit_send_abort_req()
     943  csk->txq_idx = cxgb4_port_idx(ndev) * step; in cxgbit_offload_init()
     979  csk->txq_idx = (port_id * step) + in cxgbit_offload_init()
    1483  set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); in cxgbit_send_tx_flowc_wr()
    1771  cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx); in cxgbit_abort_req_rss()

D  cxgbit.h
     231  u16 txq_idx; member

D  cxgbit_target.c
     244  set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); in cxgbit_push_tx_frames()

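The cxgbit entries show how a Chelsio offload driver picks its egress queue:
cxgbit_offload_init() derives a per-connection queue index from the port
index, and every CPL work request is then steered to that queue with cxgb4's
set_wr_txq() helper. A minimal sketch follows, assuming the cxgb4 ULD headers
are on the include path; struct my_conn stands in for cxgbit_sock, and the
ntxq/nchan split is only one plausible way to size 'step'.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "cxgb4.h"        /* set_wr_txq() */
#include "cxgb4_uld.h"    /* struct cxgb4_lld_info, cxgb4_port_idx() */
#include "t4_msg.h"       /* CPL_PRIORITY_DATA */

struct my_conn {
        u16 txq_idx;      /* offload egress queue for this connection */
};

static void my_conn_pick_txq(struct my_conn *csk, struct net_device *ndev,
                             const struct cxgb4_lld_info *lldi)
{
        /* spread connections across the offload TX queues of this port;
         * the exact divisor varies between the Chelsio ULDs */
        unsigned int step = lldi->ntxq / lldi->nchan;

        csk->txq_idx = cxgb4_port_idx(ndev) * step;
}

static void my_conn_send_wr(struct my_conn *csk, struct sk_buff *skb)
{
        /* encode (queue, priority) into skb->queue_mapping; the cxgb4
         * send path decodes it to select the egress queue */
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        /* ... hand the skb to cxgb4, e.g. via cxgb4_ofld_send() ... */
}
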
/drivers/staging/qlge/

D  qlge.h
    1100  u32 txq_idx; member
    1120  u32 txq_idx; member
    1143  u32 txq_idx; member
    1165  u32 txq_idx; member

D  qlge_main.c
    1995  tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; in qlge_process_mac_tx_intr()
    2119  tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; in qlge_clean_outbound_rx_ring()
    2555  mac_iocb_ptr->txq_idx = tx_ring_idx; in qlge_send()

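In qlge the identifier is not an skb queue mapping at all: txq_idx is a field
in the outbound MAC IOCB (qlge_send() writes the ring index into it) that the
hardware echoes back in the completion, so the interrupt path can look up the
right tx_ring. A minimal sketch of that round trip; every struct and field
name here is illustrative rather than the driver's own layout.

#include <linux/kernel.h>
#include <linux/types.h>

struct my_tx_iocb {             /* request descriptor handed to the NIC */
        __le32 txq_idx;
        /* ... addresses, lengths, flags ... */
};

struct my_tx_done {             /* completion the NIC writes back */
        __le32 txq_idx;
};

struct my_tx_ring {
        u32 idx;
        /* ... producer/consumer indices, per-buffer bookkeeping ... */
};

struct my_adapter {
        struct my_tx_ring tx_ring[8];
};

static void my_send(struct my_tx_iocb *iocb, u32 tx_ring_idx)
{
        /* record where this request came from; it is echoed on completion */
        iocb->txq_idx = cpu_to_le32(tx_ring_idx);
}

static struct my_tx_ring *my_tx_complete(struct my_adapter *qdev,
                                         const struct my_tx_done *rsp)
{
        /* the completion tells us which ring to clean */
        return &qdev->tx_ring[le32_to_cpu(rsp->txq_idx)];
}
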
/drivers/net/ethernet/cavium/thunder/

D  nicvf_main.c
     858  unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx; in nicvf_cq_intr_handler() local
     921  txq_idx = nicvf_netdev_qidx(nic, cq_idx); in nicvf_cq_intr_handler()
     924  if (txq_idx < nic->pnicvf->xdp_tx_queues) { in nicvf_cq_intr_handler()
     929  txq_idx -= nic->pnicvf->xdp_tx_queues; in nicvf_cq_intr_handler()
     936  txq = netdev_get_tx_queue(netdev, txq_idx); in nicvf_cq_intr_handler()
     947  "Transmit queue wakeup SQ%d\n", txq_idx); in nicvf_cq_intr_handler()

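The nicvf hits are the completion side of the same bookkeeping: the CQ
interrupt handler converts a hardware send-queue index into a netdev TX queue
index, treats the leading XDP queues specially (they have no struct
netdev_queue), shifts the index past them, and wakes the queue once
descriptors have been reclaimed. A minimal sketch of that wake-up step, with
xdp_tx_queues passed in explicitly rather than read from nicvf's private
state.

#include <linux/netdevice.h>

static void my_tx_done(struct net_device *netdev, unsigned int txq_idx,
                       unsigned int xdp_tx_queues,
                       unsigned int tx_pkts, unsigned int tx_bytes)
{
        struct netdev_queue *txq;

        if (txq_idx < xdp_tx_queues)
                return;                 /* XDP queue: nothing to wake */

        txq_idx -= xdp_tx_queues;       /* translate to netdev queue numbering */
        txq = netdev_get_tx_queue(netdev, txq_idx);

        if (tx_pkts)
                netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

        /* restart the queue if the xmit path had stopped it for lack of space */
        if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev))
                netif_tx_wake_queue(txq);
}
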
/drivers/infiniband/hw/cxgb4/

D  cm.c
     642  set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_flowc()
     655  cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx, in send_halfclose()
     696  cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx, in send_abort_req()
     960  set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_mpa_req()
    1066  set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_mpa_reject()
    1118  set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_mpa_reject()
    1146  set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_mpa_reply()
    2113  ep->txq_idx = cxgb4_port_idx(pdev) * step; in import_ep()
    2132  ep->txq_idx = cxgb4_port_idx(pdev) * step; in import_ep()
    2235  ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, in c4iw_reconnect()
    [all …]

D  iw_cxgb4.h
     891  u16 txq_idx; member

D  qp.c
    1574  set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in post_terminate()
    1705  set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in rdma_fini()
    1769  set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in rdma_init()

/drivers/net/ethernet/chelsio/inline_crypto/chtls/

D  chtls_cm.c
     237  skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA); in chtls_send_abort()
    1233  csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx : in chtls_recv_sock()
    2061  queue = csk->txq_idx; in bl_abort_syn_rcv()
    2094  int queue = csk->txq_idx; in abort_syn_rcv()
    2112  int queue = csk->txq_idx; in chtls_abort_req_rss()

D  chtls.h
     298  u32 txq_idx; member

D  chtls_io.c
     101  skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA); in create_flowc_wr_skb()
     128  csk->txq_idx, in send_flowc_wr()
     661  skb_set_queue_mapping(skb, (csk->txq_idx << 1) | in chtls_push_frames()

D  chtls_hw.c
      81  skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA); in chtls_set_tcb_field()

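chtls steers its CPL skbs the same way as the other Chelsio ULDs, but
open-codes what set_wr_txq() does: the egress queue index goes in the upper
bits of skb->queue_mapping and the CPL priority in bit 0. A small sketch of
that encoding and the matching decode; MY_CPL_PRIORITY_DATA is a stand-in
constant, not the real CPL_PRIORITY_DATA definition.

#include <linux/skbuff.h>

#define MY_CPL_PRIORITY_DATA    0       /* stand-in for CPL_PRIORITY_DATA */

static void my_steer_skb(struct sk_buff *skb, u32 txq_idx)
{
        /* bit 0: priority, remaining bits: egress queue index */
        skb_set_queue_mapping(skb, (txq_idx << 1) | MY_CPL_PRIORITY_DATA);
}

static u16 my_skb_txq(const struct sk_buff *skb)
{
        return skb_get_queue_mapping(skb) >> 1; /* recover the queue index */
}

static u16 my_skb_prio(const struct sk_buff *skb)
{
        return skb_get_queue_mapping(skb) & 1;  /* recover the priority */
}
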
/drivers/net/wireless/intel/iwlwifi/pcie/

D  trans.c
    2393  static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) in iwl_trans_pcie_wait_txq_empty() argument
    2404  if (!test_bit(txq_idx, trans->txqs.queue_used)) in iwl_trans_pcie_wait_txq_empty()
    2407  IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); in iwl_trans_pcie_wait_txq_empty()
    2408  txq = trans->txqs.txq[txq_idx]; in iwl_trans_pcie_wait_txq_empty()
    2444  "fail to flush all tx fifo queues Q %d\n", txq_idx); in iwl_trans_pcie_wait_txq_empty()
    2449  IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); in iwl_trans_pcie_wait_txq_empty()

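The iwlwifi hit uses txq_idx differently again: it is the argument naming
which TX queue to drain, and iwl_trans_pcie_wait_txq_empty() first consults
the queue_used bitmap before polling the queue's pointers. A minimal sketch of
that guard, with struct my_trans as a hypothetical stand-in for the transport
and the polling loop elided.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_MAX_TXQ      512

struct my_txq {
        int read_ptr;
        int write_ptr;
};

struct my_trans {
        DECLARE_BITMAP(queue_used, MY_MAX_TXQ);
        struct my_txq *txq[MY_MAX_TXQ];
};

static int my_wait_txq_empty(struct my_trans *trans, int txq_idx)
{
        struct my_txq *txq;

        if (!test_bit(txq_idx, trans->queue_used))
                return -EINVAL;         /* queue was never brought up */

        txq = trans->txq[txq_idx];

        /* ... poll until read_ptr catches up with write_ptr, with a timeout ... */
        return (txq->read_ptr == txq->write_ptr) ? 0 : -ETIMEDOUT;
}
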
/drivers/scsi/cxgbi/

D  libcxgbi.h
     120  unsigned short txq_idx; member

/drivers/scsi/cxgbi/cxgb4i/

D  cxgb4i.c
    1864  csk->txq_idx = cxgb4_port_idx(ndev) * step; in init_act_open()
    1893  csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid, in init_act_open()