/drivers/infiniband/hw/bnxt_re/
D | qplib_fp.c
    158   struct bnxt_qplib_nq *nq = nq_work->nq;  in bnxt_qpn_cqn_sched_task() [local]
    160   if (cq && nq) {  in bnxt_qpn_cqn_sched_task()
    162   if (atomic_read(&cq->arm_state) && nq->cqn_handler) {  in bnxt_qpn_cqn_sched_task()
    163   dev_dbg(&nq->pdev->dev,  in bnxt_qpn_cqn_sched_task()
    165   __func__, cq, nq);  in bnxt_qpn_cqn_sched_task()
    166   nq->cqn_handler(nq, cq);  in bnxt_qpn_cqn_sched_task()
    234   static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)  in clean_nq() [argument]
    236   struct bnxt_qplib_hwq *hwq = &nq->hwq;  in clean_nq()
    238   int budget = nq->budget;  in clean_nq()
    294   clean_nq(cq->nq, cq);  in __wait_for_all_nqes()
    [all …]
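The qplib_fp.c hits sketch the notification-queue (NQ) fast path: the scheduled worker forwards a CQ event only when the CQ is armed and a handler was registered. A minimal reconstruction of that dispatch, with the structs abbreviated to the fields the snippets actually touch (everything else here is an assumption):

```c
#include <linux/atomic.h>
#include <linux/pci.h>

/* Sketch only: abbreviated stand-ins for the bnxt_re structures above. */
struct bnxt_qplib_cq;

struct bnxt_qplib_nq {
	struct pci_dev *pdev;
	int budget;
	/* Invoked from NQ processing when a CQ notification fires. */
	int (*cqn_handler)(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq);
};

static void nq_dispatch_cqn(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq,
			    atomic_t *arm_state)
{
	/* Deliver the event only if the consumer armed the CQ and a
	 * handler is registered; otherwise the notification is dropped. */
	if (atomic_read(arm_state) && nq->cqn_handler)
		nq->cqn_handler(nq, cq);
}
```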
D | qplib_fp.h
    404   struct bnxt_qplib_nq *nq;  [member]
    466   typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
    468   typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
    491   struct bnxt_qplib_nq *nq;  [member]
    495   void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
    496   void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
    497   int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
    499   int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
    540   void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
    541   int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
    [all …]
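Both callback typedefs at lines 466/468 are truncated in this listing, but the qplib_fp.c call site above (`nq->cqn_handler(nq, cq)`) fills in the missing second parameter for the CQ variant. A sketch of a conforming handler (the function name and body are illustrative); note also how the remaining prototypes pair off: alloc/free, enable/disable, and start_irq/stop_irq bracket an NQ's lifetime.

```c
struct bnxt_qplib_nq;
struct bnxt_qplib_cq;

/* The typedef at line 466 is cut off above; this shape is inferred from
 * the nq->cqn_handler(nq, cq) call site, not from the truncated header. */
static int my_cqn_handler(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	/* Hand the completion notification to the consumer, e.g. by
	 * scheduling CQ polling; 0 reports success to the NQ dispatcher. */
	return 0;
}
```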
D | main.c
    299   struct bnxt_qplib_nq *nq;  in bnxt_re_stop_irq() [local]
    303   nq = &rdev->nq[indx - 1];  in bnxt_re_stop_irq()
    304   bnxt_qplib_nq_stop_irq(nq, false);  in bnxt_re_stop_irq()
    315   struct bnxt_qplib_nq *nq;  in bnxt_re_start_irq() [local]
    341   nq = &rdev->nq[indx - 1];  in bnxt_re_start_irq()
    342   rc = bnxt_qplib_nq_start_irq(nq, indx - 1,  in bnxt_re_start_irq()
    892   static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,  in bnxt_re_srqn_handler() [argument]
    915   static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,  in bnxt_re_cqn_handler() [argument]
    944   bnxt_qplib_disable_nq(&rdev->nq[i - 1]);  in bnxt_re_cleanup_res()
    960   rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],  in bnxt_re_init_res()
    [all …]
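Every main.c access goes through `rdev->nq[indx - 1]`: the MSI-X vector index and the NQ array are offset by one, because the first vector is evidently used for something other than an NQ (the driver's control path, going by the rest of the sources). A small sketch of that mapping (the macro name is invented for illustration):

```c
/* Vector 0 is not an NQ (every access above subtracts one), so NQ i
 * lives on MSI-X vector i + 1. FIRST_NQ_VECTOR is an illustrative
 * name for that offset, not the driver's. */
#define FIRST_NQ_VECTOR 1

static struct bnxt_qplib_nq *vector_to_nq(struct bnxt_re_dev *rdev, int indx)
{
	if (indx < FIRST_NQ_VECTOR)
		return NULL; /* reserved vector, no NQ behind it */
	return &rdev->nq[indx - FIRST_NQ_VECTOR];
}
```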
D | ib_verbs.c
    835   scq_nq = qplib_qp->scq->nq;  in bnxt_re_destroy_qp()
    836   rcq_nq = qplib_qp->rcq->nq;  in bnxt_re_destroy_qp()
    1567  struct bnxt_qplib_nq *nq = NULL;  in bnxt_re_destroy_srq() [local]
    1570  nq = qplib_srq->cq->nq;  in bnxt_re_destroy_srq()
    1574  if (nq)  in bnxt_re_destroy_srq()
    1575  nq->budget--;  in bnxt_re_destroy_srq()
    1616  struct bnxt_qplib_nq *nq = NULL;  in bnxt_re_create_srq() [local]
    1656  srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;  in bnxt_re_create_srq()
    1657  nq = &rdev->nq[0];  in bnxt_re_create_srq()
    1683  if (nq)  in bnxt_re_create_srq()
    [all …]
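bnxt_re_destroy_srq() decrements nq->budget, and the `if (nq)` where bnxt_re_create_srq() is cut off suggests the matching increment. Read that way, budget is a per-NQ consumer count bounding how much work one NQ poll pass should do. A hedged sketch of the pairing (the helper names are invented; the increment is inferred, since the listing truncates before it):

```c
struct bnxt_qplib_nq;

static void nq_consumer_attached(struct bnxt_qplib_nq *nq)
{
	if (nq)
		nq->budget++;	/* one more CQ/SRQ feeding this NQ */
}

static void nq_consumer_detached(struct bnxt_qplib_nq *nq)
{
	if (nq)
		nq->budget--;
}
```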
D | bnxt_re.h
    164   struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];  [member]
/drivers/block/null_blk/
D | main.c
    624   static void put_tag(struct nullb_queue *nq, unsigned int tag)  in put_tag() [argument]
    626   clear_bit_unlock(tag, nq->tag_map);  in put_tag()
    628   if (waitqueue_active(&nq->wait))  in put_tag()
    629   wake_up(&nq->wait);  in put_tag()
    632   static unsigned int get_tag(struct nullb_queue *nq)  in get_tag() [argument]
    637   tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);  in get_tag()
    638   if (tag >= nq->queue_depth)  in get_tag()
    640   } while (test_and_set_bit_lock(tag, nq->tag_map));  in get_tag()
    647   put_tag(cmd->nq, cmd->tag);  in free_cmd()
    652   static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)  in __alloc_cmd() [argument]
    [all …]
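get_tag()/put_tag() form a small lock-free tag allocator: scan the bitmap for a clear bit, claim it with an atomic test-and-set (rescanning if another CPU won the race), and release with clear_bit_unlock plus a wakeup for sleepers. A sketch of the same technique; the struct fields mirror the snippets, and the retry loop is reconstructed around the fragments shown:

```c
#include <linux/bitops.h>
#include <linux/wait.h>

struct my_queue {		/* abbreviated stand-in for nullb_queue */
	unsigned long *tag_map;	/* one bit per tag */
	unsigned int queue_depth;
	wait_queue_head_t wait;	/* submitters sleeping on a free tag */
};

static unsigned int my_get_tag(struct my_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;	/* map full, caller must wait */
		/* Another CPU may grab the bit between the scan and here,
		 * so claim it atomically and rescan on failure. */
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void my_put_tag(struct my_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);	/* release semantics */
	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}
```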
D | zoned.c
    369   struct nullb_device *dev = cmd->nq->dev;  in null_zone_write()
    603   struct nullb_device *dev = cmd->nq->dev;  in null_zone_mgmt()
    672   dev = cmd->nq->dev;  in null_process_zoned_cmd()
D | null_blk.h
    23    struct nullb_queue *nq;  [member]
/drivers/net/ethernet/chelsio/cxgb4/
D | cxgb4_uld.c
    111   unsigned int nq = rxq_info->nrxq + rxq_info->nciq;  in alloc_uld_rxqs() [local]
    125   for (i = 0; i < nq; i++, q++) {  in alloc_uld_rxqs()
    403   int nq = txq_info->ntxq;  in free_sge_txq_uld() [local]
    406   for (i = 0; i < nq; i++) {  in free_sge_txq_uld()
    426   int nq = txq_info->ntxq;  in alloc_sge_txq_uld() [local]
    429   j = nq / adap->params.nports;  in alloc_sge_txq_uld()
    430   for (i = 0; i < nq; i++) {  in alloc_sge_txq_uld()
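In alloc_sge_txq_uld(), the division `j = nq / adap->params.nports` is the usual even split of a flat queue array across ports; on that reading, queue i belongs to port i / j. A one-liner sketch of the mapping (the use of j is truncated out of the listing, so the grouping-by-port interpretation is an assumption):

```c
/* Sketch: with nq TX queues split evenly over nports ports
 * (j = nq / nports above), flat queue index i maps to port i / j. */
static inline int uld_txq_to_port(int i, int nq, int nports)
{
	int j = nq / nports;

	return i / j;
}
```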
D | t4_hw.c
    5160  int nq = min(n, 32);  in t4_config_rss_range() [local]
    5163  cmd.niqid = cpu_to_be16(nq);  in t4_config_rss_range()
    5166  start += nq;  in t4_config_rss_range()
    5167  n -= nq;  in t4_config_rss_range()
    5169  while (nq > 0) {  in t4_config_rss_range()
    5183  nq -= 3;  in t4_config_rss_range()
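t4_config_rss_range() shows two levels of chunking: at most 32 ingress-queue IDs go into one firmware command, and within a command the IDs are packed three per 32-bit word (hence `nq -= 3`). A runnable arithmetic sketch of the outer loop, with the firmware write replaced by a printf (the constants' names are illustrative):

```c
#include <stdio.h>

#define IDS_PER_CMD  32	/* max ingress-queue ids per FW command */
#define IDS_PER_WORD 3	/* ids packed into each 32-bit data word */

/* Sketch: walk an id range the way the snippets step start/n/nq. */
static void config_rss_range(int start, int n)
{
	while (n > 0) {
		int nq = n < IDS_PER_CMD ? n : IDS_PER_CMD;
		int words = (nq + IDS_PER_WORD - 1) / IDS_PER_WORD;

		printf("cmd: niqid=%d, first id=%d, %d data words\n",
		       nq, start, words);
		start += nq;
		n -= nq;
	}
}
```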
/drivers/net/ethernet/marvell/
D | mvneta.c
    1833  struct netdev_queue *nq, bool napi)  in mvneta_txq_bufs_free() [argument]
    1871  netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);  in mvneta_txq_bufs_free()
    1878  struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);  in mvneta_txq_done() [local]
    1885  mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);  in mvneta_txq_done()
    1889  if (netif_tx_queue_stopped(nq)) {  in mvneta_txq_done()
    1891  netif_tx_wake_queue(nq);  in mvneta_txq_done()
    2097  struct netdev_queue *nq;  in mvneta_xdp_xmit_back() [local]
    2108  nq = netdev_get_tx_queue(pp->dev, txq->id);  in mvneta_xdp_xmit_back()
    2110  __netif_tx_lock(nq, cpu);  in mvneta_xdp_xmit_back()
    2125  __netif_tx_unlock(nq);  in mvneta_xdp_xmit_back()
    [all …]
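mvneta_xdp_xmit_back() shows the standard envelope for transmitting from an XDP path: look up the netdev_queue behind the ring, take its per-queue tx lock tagged with the current CPU, enqueue, unlock. A sketch of that locking pattern, with the actual frame enqueue left as a stub:

```c
#include <linux/netdevice.h>

/* Sketch: serialize an XDP transmit against the regular xmit path by
 * taking the per-queue tx lock, as the mvneta snippets above do. */
static int xdp_xmit_on_queue(struct net_device *dev, int queue_index)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, queue_index);
	int cpu = smp_processor_id();	/* stable: XDP runs in NAPI context */
	int ret;

	__netif_tx_lock(nq, cpu);
	ret = 0;	/* place the frame on the hardware ring here */
	__netif_tx_unlock(nq);

	return ret;
}
```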
D | mv643xx_eth.c
    495   struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);  in txq_maybe_wake() [local]
    497   if (netif_tx_queue_stopped(nq)) {  in txq_maybe_wake()
    498   __netif_tx_lock(nq, smp_processor_id());  in txq_maybe_wake()
    500   netif_tx_wake_queue(nq);  in txq_maybe_wake()
    501   __netif_tx_unlock(nq);  in txq_maybe_wake()
    997   struct netdev_queue *nq;  in mv643xx_eth_xmit() [local]
    1001  nq = netdev_get_tx_queue(dev, queue);  in mv643xx_eth_xmit()
    1020  netif_tx_stop_queue(nq);  in mv643xx_eth_xmit()
    1034  struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);  in txq_kick() [local]
    1038  __netif_tx_lock(nq, smp_processor_id());  in txq_kick()
    [all …]
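txq_maybe_wake() and mv643xx_eth_xmit() are the two halves of TX flow control: the xmit path stops the queue when the ring runs out of descriptors, and the reclaim path wakes it, under the tx lock and only if it was actually stopped, once space returns. A sketch of the wake half; the free-descriptor threshold is an assumption, since that part of the function is truncated above:

```c
#include <linux/netdevice.h>

/* Sketch of the reclaim-side wake, mirroring txq_maybe_wake() above.
 * MIN_FREE_DESC is an illustrative threshold, not the driver's value. */
#define MIN_FREE_DESC 16

static void txq_maybe_wake_sketch(struct net_device *dev, int index,
				  int free_desc)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (free_desc >= MIN_FREE_DESC)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}
```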
/drivers/net/ethernet/chelsio/cxgb4vf/
D | t4vf_hw.c
    1272  int nq = min(n, 32);  in t4vf_config_rss_range() [local]
    1279  cmd.niqid = cpu_to_be16(nq);  in t4vf_config_rss_range()
    1285  start += nq;  in t4vf_config_rss_range()
    1286  n -= nq;  in t4vf_config_rss_range()
    1293  while (nq > 0) {  in t4vf_config_rss_range()
    1302  int nqbuf = min(3, nq);  in t4vf_config_rss_range()
    1304  nq -= nqbuf;  in t4vf_config_rss_range()
/drivers/net/
D | tap.c
    190   struct tap_queue *nq;  in tap_disable_queue() [local]
    201   nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);  in tap_disable_queue()
    202   nq->queue_index = index;  in tap_disable_queue()
    204   rcu_assign_pointer(tap->taps[index], nq);  in tap_disable_queue()
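tap_disable_queue() compacts the active-queue array by moving the last element into the vacated slot instead of shifting everything down, with rcu_assign_pointer publishing the move to lockless readers. A sketch of that swap-with-last idiom; the count bookkeeping and NULLing of the old tail slot are reconstructed, since the listing does not show them:

```c
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>

struct tap_queue {		/* abbreviated: only the field used here */
	u16 queue_index;
};

/* Swap-with-last removal from an RCU-protected pointer array, as in
 * tap_disable_queue(): the last active queue fills the vacated slot,
 * so active queues stay packed at the front. Caller holds RTNL. */
static void disable_slot(struct tap_queue __rcu **taps, int *numvtaps,
			 int index)
{
	struct tap_queue *nq;

	nq = rtnl_dereference(taps[*numvtaps - 1]);
	nq->queue_index = index;
	rcu_assign_pointer(taps[index], nq);
	RCU_INIT_POINTER(taps[*numvtaps - 1], NULL);
	(*numvtaps)--;
}
```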
/drivers/net/ethernet/intel/igc/
D | igc_main.c
    2379  struct netdev_queue *nq;  in igc_xdp_xmit_back() [local]
    2387  nq = txring_txq(ring);  in igc_xdp_xmit_back()
    2389  __netif_tx_lock(nq, cpu);  in igc_xdp_xmit_back()
    2391  __netif_tx_unlock(nq);  in igc_xdp_xmit_back()
    2458  struct netdev_queue *nq;  in igc_finalize_xdp() [local]
    2463  nq = txring_txq(ring);  in igc_finalize_xdp()
    2465  __netif_tx_lock(nq, cpu);  in igc_finalize_xdp()
    2467  __netif_tx_unlock(nq);  in igc_finalize_xdp()
    2778  struct netdev_queue *nq = txring_txq(ring);  in igc_xdp_xmit_zc() [local]
    2787  __netif_tx_lock(nq, cpu);  in igc_xdp_xmit_zc()
    [all …]
/drivers/net/ethernet/freescale/
D | fec_main.c
    833   struct netdev_queue *nq;  in fec_enet_start_xmit() [local]
    838   nq = netdev_get_tx_queue(ndev, queue);  in fec_enet_start_xmit()
    849   netif_tx_stop_queue(nq);  in fec_enet_start_xmit()
    1286  struct netdev_queue *nq;  in fec_enet_tx_queue() [local]
    1294  nq = netdev_get_tx_queue(ndev, queue_id);  in fec_enet_tx_queue()
    1374  if (netif_tx_queue_stopped(nq)) {  in fec_enet_tx_queue()
    1377  netif_tx_wake_queue(nq);  in fec_enet_tx_queue()
/drivers/net/ethernet/stmicro/stmmac/
D | stmmac_main.c
    2411  struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);  in stmmac_xdp_xmit_zc() [local]
    2420  nq->trans_start = jiffies;  in stmmac_xdp_xmit_zc()
    4721  struct netdev_queue *nq;  in stmmac_xdp_xmit_back() [local]
    4729  nq = netdev_get_tx_queue(priv->dev, queue);  in stmmac_xdp_xmit_back()
    4731  __netif_tx_lock(nq, cpu);  in stmmac_xdp_xmit_back()
    4733  nq->trans_start = jiffies;  in stmmac_xdp_xmit_back()
    4739  __netif_tx_unlock(nq);  in stmmac_xdp_xmit_back()
    6362  struct netdev_queue *nq;  in stmmac_xdp_xmit() [local]
    6373  nq = netdev_get_tx_queue(priv->dev, queue);  in stmmac_xdp_xmit()
    6375  __netif_tx_lock(nq, cpu);  in stmmac_xdp_xmit()
    [all …]
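Unlike most of the drivers above, stmmac also refreshes nq->trans_start on its XDP paths. trans_start feeds the stack's TX watchdog; because XDP transmits bypass ndo_start_xmit(), where the core normally updates it, skipping this could make a busy XDP-only queue look hung. A sketch of the update inside the locked region (newer kernels wrap this in a txq_trans_* helper; assigning jiffies directly, as here, is the older form):

```c
#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* XDP bypasses ndo_start_xmit(), so refresh the watchdog timestamp by
 * hand under the queue lock, as the stmmac snippets above do. */
static void xdp_tx_touch_queue(struct netdev_queue *nq, int cpu)
{
	__netif_tx_lock(nq, cpu);
	nq->trans_start = jiffies;
	/* ... place frames on the ring here ... */
	__netif_tx_unlock(nq);
}
```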
/drivers/net/ethernet/freescale/dpaa2/
D | dpaa2-eth.c
    1105  struct netdev_queue *nq;  in __dpaa2_eth_tx() [local]
    1176  nq = netdev_get_tx_queue(net_dev, queue_mapping);  in __dpaa2_eth_tx()
    1177  netdev_tx_sent_queue(nq, fd_len);  in __dpaa2_eth_tx()
    1192  netdev_tx_completed_queue(nq, 1, fd_len);  in __dpaa2_eth_tx()
    1558  struct netdev_queue *nq;  in dpaa2_eth_poll() [local]
    1624  nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);  in dpaa2_eth_poll()
    1625  netdev_tx_completed_queue(nq, txc_fq->dq_frames,  in dpaa2_eth_poll()
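The sent/completed pair here is Byte Queue Limits (BQL) accounting: bytes are charged when a frame is handed to hardware and credited back on completion; the completed call at line 1192, immediately after the sent call, suggests the charge is unwound when the enqueue fails. A sketch of that pairing (hw_enqueue is an illustrative stub, not the driver's function):

```c
#include <linux/netdevice.h>

static int hw_enqueue(void)
{
	return 0;	/* stand-in for the device-specific frame submission */
}

/* BQL accounting around a hardware enqueue, as in __dpaa2_eth_tx():
 * charge bytes up front, unwind immediately on failure; on success the
 * completion path credits them back, as dpaa2_eth_poll() does above. */
static int tx_one_frame(struct netdev_queue *nq, unsigned int len)
{
	int err;

	netdev_tx_sent_queue(nq, len);
	err = hw_enqueue();
	if (err)
		netdev_tx_completed_queue(nq, 1, len);
	return err;
}
```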
/drivers/net/ethernet/marvell/mvpp2/
D | mvpp2_main.c
    2859  struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);  in mvpp2_txq_done() [local]
    2872  if (netif_tx_queue_stopped(nq))  in mvpp2_txq_done()
    2874  netif_tx_wake_queue(nq);  in mvpp2_txq_done()
    3372  static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)  in mvpp2_isr_handle_ptp_queue() [argument]
    3382  if (nq)  in mvpp2_isr_handle_ptp_queue()
    3385  queue = &port->tx_hwtstamp_queue[nq];  in mvpp2_isr_handle_ptp_queue()
    3642  struct netdev_queue *nq;  in mvpp2_xdp_finish_tx() [local]
    3646  nq = netdev_get_tx_queue(port->dev, txq_id);  in mvpp2_xdp_finish_tx()
    3658  netif_tx_stop_queue(nq);  in mvpp2_xdp_finish_tx()
    4426  struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);  in mvpp2_tx() [local]
    [all …]
/drivers/net/wireless/intel/iwlwifi/pcie/
D | tx.c
    297   int nq = trans->trans_cfg->base_params->num_of_queues;  in iwl_pcie_tx_start() [local]
    300   int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -  in iwl_pcie_tx_start()
/drivers/net/ethernet/intel/igb/
D | igb_main.c
    2920  struct netdev_queue *nq;  in igb_xdp_xmit_back() [local]
    2933  nq = txring_txq(tx_ring);  in igb_xdp_xmit_back()
    2934  __netif_tx_lock(nq, cpu);  in igb_xdp_xmit_back()
    2936  nq->trans_start = jiffies;  in igb_xdp_xmit_back()
    2938  __netif_tx_unlock(nq);  in igb_xdp_xmit_back()
    2949  struct netdev_queue *nq;  in igb_xdp_xmit() [local]
    2966  nq = txring_txq(tx_ring);  in igb_xdp_xmit()
    2967  __netif_tx_lock(nq, cpu);  in igb_xdp_xmit()
    2970  nq->trans_start = jiffies;  in igb_xdp_xmit()
    2982  __netif_tx_unlock(nq);  in igb_xdp_xmit()
/drivers/net/ethernet/broadcom/bnx2x/
D | bnx2x_cmn.c
    64    int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();  in bnx2x_calc_num_queues() [local]
    68    nq = 1;  in bnx2x_calc_num_queues()
    70    nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));  in bnx2x_calc_num_queues()
    71    return nq;  in bnx2x_calc_num_queues()
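bnx2x_calc_num_queues() is a compact example of queue-count selection: honor the module parameter if set, otherwise fall back to the stack's default RSS count, then clamp to what the hardware supports. A sketch of the same logic (hw_max stands in for BNX2X_MAX_QUEUES(bp); the forced single-queue special case at line 68 is omitted):

```c
#include <linux/minmax.h>
#include <linux/netdevice.h>

/* Pick a queue count: the user's request if non-zero, else the stack's
 * default RSS queue count, clamped to the device's supported range. */
static int calc_num_queues(int requested, int hw_max)
{
	int nq = requested ? : netif_get_num_default_rss_queues();

	return clamp(nq, 1, hw_max);
}
```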
/drivers/net/wireless/realtek/rtl8xxxu/
D | rtl8xxxu_core.c
    3849  u32 hq, lq, nq, eq, pubq;  in rtl8xxxu_init_queue_reserved_page() [local]
    3854  nq = 0;  in rtl8xxxu_init_queue_reserved_page()
    3863  nq = fops->page_num_norm;  in rtl8xxxu_init_queue_reserved_page()
    3865  val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT);  in rtl8xxxu_init_queue_reserved_page()
    3868  pubq = fops->total_page_num - hq - lq - nq - 1;  in rtl8xxxu_init_queue_reserved_page()
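The reserved-page setup is a fixed budget split: the high, low, and normal queues each take a page count from the chip config, one page stays reserved, and whatever is left becomes the shared public pool, with the per-queue counts packed into one register through field shifts. A runnable sketch of the arithmetic (the shift values and page counts are illustrative, not the chip's):

```c
#include <stdint.h>
#include <stdio.h>

#define NPQ_SHIFT 16	/* illustrative field offsets, not RQPN_NPQ_SHIFT */
#define EPQ_SHIFT 8

int main(void)
{
	uint32_t total = 256, hq = 32, lq = 32, nq = 32, eq = 0;
	/* Pages not reserved for a specific queue (minus one the chip
	 * keeps for itself) form the shared public pool. */
	uint32_t pubq = total - hq - lq - nq - 1;
	uint32_t val32 = (nq << NPQ_SHIFT) | (eq << EPQ_SHIFT);

	printf("pubq=%u reg=0x%08x\n", pubq, val32);
	return 0;
}
```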
/drivers/net/ethernet/broadcom/bnxt/
D | bnxt.c
    6282  int nq = bnxt_nq_rings_in_use(bp);  in bnxt_need_reserve_rings() [local]
    6310  hw_resc->resv_irqs != nq)  in bnxt_need_reserve_rings()