/kernel/linux/linux-5.10/drivers/net/ethernet/cavium/thunder/ |
D | nicvf_queues.c |
    31    static int nicvf_poll_reg(struct nicvf *nic, int qidx,                    in nicvf_poll_reg() argument
    42    reg_val = nicvf_queue_reg_read(nic, reg, qidx);                           in nicvf_poll_reg()
    504   struct snd_queue *sq, int q_len, int qidx)                                in nicvf_init_snd_queue() argument
    524   qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);                      in nicvf_init_snd_queue()
    525   if (qidx < nic->pnicvf->xdp_tx_queues) {                                  in nicvf_init_snd_queue()
    627   struct queue_set *qs, int qidx)                                           in nicvf_reclaim_snd_queue() argument
    630   nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);                 in nicvf_reclaim_snd_queue()
    632   if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))       in nicvf_reclaim_snd_queue()
    635   nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);    in nicvf_reclaim_snd_queue()
    639   struct queue_set *qs, int qidx)                                           in nicvf_reclaim_rcv_queue() argument
    [all …]
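The nicvf_poll_reg() references above follow a common poll-with-timeout shape: read a per-queue register, mask out a bit field (the (21, 1, 0x01)-style arguments give field position, width, and expected value), and retry until it matches or the budget runs out. A minimal user-space sketch of the same pattern, with a hypothetical read_reg() callback standing in for nicvf_queue_reg_read() and an assumed retry budget:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical register-read callback; stands in for a real MMIO read. */
typedef uint64_t (*reg_read_fn)(void *ctx, int qidx, uint64_t reg);

/*
 * Poll a bit field of a per-queue register until it holds @expected.
 * Returns true on match, false on timeout.
 */
static bool poll_reg(void *ctx, reg_read_fn read_reg, int qidx, uint64_t reg,
		     int bit_pos, int bits, uint64_t expected)
{
	uint64_t mask = ((1ULL << bits) - 1) << bit_pos;
	int timeout = 10;	/* retry budget, assumed for illustration */

	while (timeout--) {
		uint64_t val = read_reg(ctx, qidx, reg);

		if (((val & mask) >> bit_pos) == expected)
			return true;
		usleep(2000);	/* back off between reads */
	}
	return false;	/* caller treats this as e.g. a queue-reset failure */
}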
|
D | nicvf_main.c |
    75    static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)            in nicvf_netdev_qidx() argument
    78    return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);                in nicvf_netdev_qidx()
    80    return qidx;                                                              in nicvf_netdev_qidx()
    104   u64 qidx, u64 val)                                                        in nicvf_queue_reg_write() argument
    108   writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));                    in nicvf_queue_reg_write()
    111   u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)         in nicvf_queue_reg_read() argument
    115   return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));                   in nicvf_queue_reg_read()
    992   int qidx;                                                                 in nicvf_handle_qs_err() local
    998   for (qidx = 0; qidx < qs->cq_cnt; qidx++) {                               in nicvf_handle_qs_err()
    1000  qidx);                                                                    in nicvf_handle_qs_err()
    [all …]
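nicvf_queue_reg_read()/write() show the usual per-queue MMIO layout: each queue gets its own copy of a register block, strided by a power of two, so the address is base + offset + (qidx << shift). A self-contained sketch using an in-memory array in place of a real ioremap()ed BAR; the value of NIC_Q_NUM_SHIFT is not shown in this listing, so the stride below is purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define Q_NUM_SHIFT 3	/* illustrative stride: 8 bytes per queue slot */

/* Simulated register space; a real driver would ioremap() a BAR instead. */
static uint64_t regspace[64];

static void queue_reg_write(uint8_t *base, uint64_t offset, uint64_t qidx,
			    uint64_t val)
{
	/* Each queue's copy of a register lives at a fixed stride. */
	*(volatile uint64_t *)(base + offset + (qidx << Q_NUM_SHIFT)) = val;
}

static uint64_t queue_reg_read(uint8_t *base, uint64_t offset, uint64_t qidx)
{
	return *(volatile uint64_t *)(base + offset + (qidx << Q_NUM_SHIFT));
}

int main(void)
{
	queue_reg_write((uint8_t *)regspace, 0, 2, 0xabcd);
	printf("q2 reg = 0x%llx\n",
	       (unsigned long long)queue_reg_read((uint8_t *)regspace, 0, 2));
	return 0;
}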
|
D | nicvf_ethtool.c |
    213   int stats, qidx;                                                          in nicvf_get_qset_strings() local
    216   for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {                          in nicvf_get_qset_strings()
    218   sprintf(*data, "rxq%d: %s", qidx + start_qidx,                            in nicvf_get_qset_strings()
    224   for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {                          in nicvf_get_qset_strings()
    226   sprintf(*data, "txq%d: %s", qidx + start_qidx,                            in nicvf_get_qset_strings()
    300   int stat, qidx;                                                           in nicvf_get_qset_stats() local
    305   for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {                          in nicvf_get_qset_stats()
    306   nicvf_update_rq_stats(nic, qidx);                                         in nicvf_get_qset_stats()
    308   *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)                          in nicvf_get_qset_stats()
    312   for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {                          in nicvf_get_qset_stats()
    [all …]
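nicvf_get_qset_strings() builds the ethtool stat-name table by emitting "rxq<N>: <stat>" / "txq<N>: <stat>" for each queue and advancing the caller's cursor one fixed-width slot per name. A sketch of that cursor-advancing loop; the stat names and counts here are invented, and only the rx side is shown:

#include <stdio.h>

#define ETH_GSTRING_LEN 32	/* fixed-width name slots, as ethtool uses */
#define ARRAY_SIZE(a) (int)(sizeof(a) / sizeof((a)[0]))

static const char *rq_stat_names[] = { "bytes", "pkts" };

/*
 * Emit "rxq<N>: <stat>" for every receive queue, advancing the caller's
 * cursor one slot at a time the way the drivers above advance *data.
 */
static void get_rq_strings(char **data, int rq_cnt, int start_qidx)
{
	for (int qidx = 0; qidx < rq_cnt; qidx++) {
		for (int stat = 0; stat < ARRAY_SIZE(rq_stat_names); stat++) {
			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
				rq_stat_names[stat]);
			*data += ETH_GSTRING_LEN;
		}
	}
}

int main(void)
{
	char buf[4 * ETH_GSTRING_LEN];
	char *p = buf;

	get_rq_strings(&p, 2, 0);
	printf("%s ... %s\n", buf, buf + 2 * ETH_GSTRING_LEN);
	return 0;
}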
|
D | nicvf_queues.h |
    336   int qidx, bool enable);
    338   void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
    339   void nicvf_sq_disable(struct nicvf *nic, int qidx);
    342   struct snd_queue *sq, int qidx);
    365   u64 qidx, u64 val);
    367   u64 offset, u64 qidx);
|
/kernel/linux/linux-5.10/drivers/net/ethernet/marvell/octeontx2/nic/ |
D | otx2_common.c |
    20    struct otx2_nic *pfvf, int qidx)                                          in otx2_nix_rq_op_stats() argument
    22    u64 incr = (u64)qidx << 32;                                               in otx2_nix_rq_op_stats()
    33    struct otx2_nic *pfvf, int qidx)                                          in otx2_nix_sq_op_stats() argument
    35    u64 incr = (u64)qidx << 32;                                               in otx2_nix_sq_op_stats()
    63    int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)                 in otx2_update_rq_stats() argument
    65    struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];                         in otx2_update_rq_stats()
    70    otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);                             in otx2_update_rq_stats()
    74    int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)                 in otx2_update_sq_stats() argument
    76    struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];                         in otx2_update_sq_stats()
    81    otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);                             in otx2_update_sq_stats()
    [all …]
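otx2_nix_rq_op_stats() selects which queue a hardware statistics read targets by packing qidx into the upper 32 bits of the operand ((u64)qidx << 32) before the atomic register operation. A sketch of that pack/unpack convention; the field position matches the snippets above, everything else is illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pack the queue index into bits 63:32 of an op-register operand. */
static uint64_t make_stats_op(int qidx)
{
	return (uint64_t)qidx << 32;
}

/* What a device-side decoder would recover from the same operand. */
static int op_qidx(uint64_t incr)
{
	return (int)(incr >> 32);
}

int main(void)
{
	uint64_t incr = make_stats_op(5);

	printf("operand=0x%016" PRIx64 " qidx=%d\n", incr, op_qidx(incr));
	return 0;
}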
|
D | otx2_pf.c |
    1144  u64 qidx = 0;                                                             in otx2_q_intr_handler() local
    1147  for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {                          in otx2_q_intr_handler()
    1149  val = otx2_atomic64_add((qidx << 44), ptr);                               in otx2_q_intr_handler()
    1151  otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |                         in otx2_q_intr_handler()
    1158  qidx, otx2_read64(pf, NIX_LF_ERR_INT));                                   in otx2_q_intr_handler()
    1162  qidx);                                                                    in otx2_q_intr_handler()
    1165  qidx);                                                                    in otx2_q_intr_handler()
    1172  for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {                         in otx2_q_intr_handler()
    1174  val = otx2_atomic64_add((qidx << 44), ptr);                               in otx2_q_intr_handler()
    1175  otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |                         in otx2_q_intr_handler()
    [all …]
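otx2_q_intr_handler() walks every CQ and SQ, reads that queue's interrupt status through an op register whose operand carries qidx in bits beyond 44, and writes the reported bits back to acknowledge them. A sketch of that per-queue read-then-clear loop, with arrays simulating the op registers and the operand encoding as shown above:

#include <stdint.h>
#include <stdio.h>

#define OP_QIDX_SHIFT 44	/* queue-index position in the op operand */

/* Simulated per-queue interrupt-status registers. */
static uint64_t cq_op_int[8] = { 0, 0, 0x3, 0, 0, 0x1, 0, 0 };

/* Stand-in for otx2_atomic64_add(): read the status qidx<<44 selects. */
static uint64_t op_read(uint64_t operand)
{
	return cq_op_int[operand >> OP_QIDX_SHIFT];
}

/* Stand-in for otx2_write64(): write-1-to-clear the reported bits. */
static void op_clear(uint64_t operand, uint64_t bits)
{
	cq_op_int[operand >> OP_QIDX_SHIFT] &= ~bits;
}

int main(void)
{
	/* Walk every queue, report pending errors, then acknowledge them. */
	for (uint64_t qidx = 0; qidx < 8; qidx++) {
		uint64_t val = op_read(qidx << OP_QIDX_SHIFT);

		if (val)
			printf("CQ%llu: error bits 0x%llx\n",
			       (unsigned long long)qidx,
			       (unsigned long long)val);
		op_clear(qidx << OP_QIDX_SHIFT, val);
	}
	return 0;
}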
|
D | otx2_ethtool.c |
    81    int qidx, stats;                                                          in otx2_get_qset_strings() local
    83    for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {                       in otx2_get_qset_strings()
    85    sprintf(*data, "rxq%d: %s", qidx + start_qidx,                            in otx2_get_qset_strings()
    90    for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {                       in otx2_get_qset_strings()
    92    sprintf(*data, "txq%d: %s", qidx + start_qidx,                            in otx2_get_qset_strings()
    136   int stat, qidx;                                                           in otx2_get_qset_stats() local
    140   for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {                       in otx2_get_qset_stats()
    141   if (!otx2_update_rq_stats(pfvf, qidx)) {                                  in otx2_get_qset_stats()
    147   *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)                        in otx2_get_qset_stats()
    151   for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {                       in otx2_get_qset_stats()
    [all …]
|
D | otx2_txrx.c |
    189   int qidx)                                                                 in otx2_free_rcv_seg() argument
    202   otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);                      in otx2_free_rcv_seg()
    208   struct nix_cqe_rx_s *cqe, int qidx)                                       in otx2_check_rcv_errors() argument
    216   qidx, parse->errlev, parse->errcode);                                     in otx2_check_rcv_errors()
    268   otx2_free_rcv_seg(pfvf, cqe, qidx);                                       in otx2_check_rcv_errors()
    577   struct sk_buff *skb, u16 qidx)                                            in otx2_sqe_add_hdr() argument
    590   sqe_hdr->sq = qidx;                                                       in otx2_sqe_add_hdr()
    705   struct sk_buff *skb, u16 qidx)                                            in otx2_sq_append_tso() argument
    707   struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);       in otx2_sq_append_tso()
    736   otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);                           in otx2_sq_append_tso()
    [all …]
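otx2_sqe_add_hdr() stamps the submit-queue index into the send-queue-entry header (sqe_hdr->sq = qidx) so the hardware knows which queue the descriptor belongs to, and otx2_sq_append_tso() pairs that send queue with the matching netdev TX queue. A minimal sketch of tagging a descriptor with its queue; the struct layout here is invented for illustration, not the real bit-packed NIX SQE format:

#include <stdint.h>
#include <stdio.h>

/* Invented descriptor header; the real NIX SQE is bit-packed hardware state. */
struct sqe_hdr {
	uint16_t sq;	/* submit queue this entry belongs to */
	uint32_t total;	/* payload bytes described by the entry */
};

static void sqe_add_hdr(struct sqe_hdr *hdr, uint16_t qidx, uint32_t len)
{
	hdr->sq = qidx;	/* hardware routes the completion by this tag */
	hdr->total = len;
}

int main(void)
{
	struct sqe_hdr hdr;

	sqe_add_hdr(&hdr, 3, 1500);
	printf("SQE tagged: sq=%u total=%u\n", hdr.sq, hdr.total);
	return 0;
}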
|
D | otx2_vf.c |
    364   int qidx = skb_get_queue_mapping(skb);                                    in otx2vf_xmit() local
    368   sq = &vf->qset.sq[qidx];                                                  in otx2vf_xmit()
    369   txq = netdev_get_tx_queue(netdev, qidx);                                  in otx2vf_xmit()
    371   if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {                         in otx2vf_xmit()
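otx2vf_xmit() shows the standard transmit dispatch: the queue mapping the stack assigned to the skb selects both the driver's send queue and the matching netdev TX queue. A minimal sketch of the same selection step, with invented queue and packet types standing in for struct otx2_snd_queue and struct sk_buff:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_TXQ 4

/* Invented stand-ins for the driver's send queue and a packet. */
struct snd_queue { unsigned long pkts; };
struct packet { uint16_t queue_mapping; size_t len; };

static struct snd_queue sq_pool[NUM_TXQ];

/* Dispatch a packet to the send queue its queue mapping selects. */
static int xmit(struct packet *pkt)
{
	int qidx = pkt->queue_mapping;	/* set by the stack's queue selector */
	struct snd_queue *sq = &sq_pool[qidx];

	sq->pkts++;			/* stand-in for otx2_sq_append_skb() */
	return qidx;
}

int main(void)
{
	struct packet pkt = { .queue_mapping = 2, .len = 1500 };

	printf("sent %zu bytes on txq%d\n", pkt.len, xmit(&pkt));
	return 0;
}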
|
D | otx2_common.h |
    585   void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
    636   int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
    637   int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
|
D | otx2_txrx.h |
    158   struct sk_buff *skb, u16 qidx);
|
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/ |
D | chcr_ipsec.c |
    420   u32 qidx;                                                                 in copy_esn_pktxt() local
    428   qidx = skb->queue_mapping;                                                in copy_esn_pktxt()
    429   q = &adap->sge.ethtxq[qidx + pi->first_qset];                             in copy_esn_pktxt()
    470   u32 ctrl0, qidx;                                                          in copy_cpltx_pktxt() local
    476   qidx = skb->queue_mapping;                                                in copy_cpltx_pktxt()
    477   q = &adap->sge.ethtxq[qidx + pi->first_qset];                             in copy_cpltx_pktxt()
    515   unsigned int qidx;                                                        in copy_key_cpltx_pktxt() local
    519   qidx = skb->queue_mapping;                                                in copy_key_cpltx_pktxt()
    520   q = &adap->sge.ethtxq[qidx + pi->first_qset];                             in copy_key_cpltx_pktxt()
    575   int qidx = skb_get_queue_mapping(skb);                                    in ch_ipsec_crypto_wreq() local
    [all …]
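All three copy_* helpers above resolve the TX queue the same way: skb->queue_mapping is relative to the port, so the port's first_qset is added before indexing the adapter-wide ethtxq array that every port shares. A sketch of that per-port rebasing, with names invented for illustration:

#include <stdio.h>

#define TOTAL_TXQ 16

/* One adapter-wide queue array shared by every port, as in the sge above. */
static struct ethtxq { long wr_cnt; } ethtxq[TOTAL_TXQ];

/* Each port owns a contiguous slice of the shared array. */
struct port_info { int first_qset; int nqsets; };

static struct ethtxq *pick_txq(const struct port_info *pi,
			       unsigned int queue_mapping)
{
	/* queue_mapping is port-relative; rebase it into the shared array. */
	return &ethtxq[queue_mapping + pi->first_qset];
}

int main(void)
{
	struct port_info port1 = { .first_qset = 8, .nqsets = 8 };
	struct ethtxq *q = pick_txq(&port1, 2);

	printf("port-relative q2 -> global q%ld\n", (long)(q - ethtxq));
	return 0;
}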
|
/kernel/linux/linux-5.10/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_dcb.c |
    51    u8 qidx;                                                                  in bnxt_hwrm_queue_pri2cos_cfg() local
    56    qidx = bp->tc_to_qidx[ets->prio_tc[i]];                                   in bnxt_hwrm_queue_pri2cos_cfg()
    57    pri2cos[i] = bp->q_info[qidx].queue_id;                                   in bnxt_hwrm_queue_pri2cos_cfg()
    100   u8 qidx = bp->tc_to_qidx[i];                                              in bnxt_hwrm_queue_cos2bw_cfg() local
    104   qidx);                                                                    in bnxt_hwrm_queue_cos2bw_cfg()
    107   cos2bw.queue_id = bp->q_info[qidx].queue_id;                              in bnxt_hwrm_queue_cos2bw_cfg()
    123   data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);                       in bnxt_hwrm_queue_cos2bw_cfg()
    125   if (qidx == 0) {                                                          in bnxt_hwrm_queue_cos2bw_cfg()
    257   u8 qidx = bp->tc_to_qidx[i];                                              in bnxt_hwrm_queue_pfc_cfg() local
    259   if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {                          in bnxt_hwrm_queue_pfc_cfg()
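bnxt_hwrm_queue_pri2cos_cfg() walks a two-level indirection: a priority selects a traffic class (ets->prio_tc), the class selects a queue index (bp->tc_to_qidx), and the index finally yields the hardware queue ID (bp->q_info[qidx].queue_id). A sketch of that lookup chain; the table contents below are illustrative, not real DCB configuration:

#include <stdint.h>
#include <stdio.h>

#define MAX_PRIO 8

/* Illustrative tables; real contents come from DCB/ETS configuration. */
static const uint8_t prio_tc[MAX_PRIO] = { 0, 0, 1, 1, 2, 2, 3, 3 };
static const uint8_t tc_to_qidx[4]     = { 0, 1, 2, 3 };
static const struct { uint8_t queue_id; } q_info[4] =
	{ { 0x10 }, { 0x11 }, { 0x12 }, { 0x13 } };

int main(void)
{
	uint8_t pri2cos[MAX_PRIO];

	/* priority -> traffic class -> queue index -> hardware queue ID */
	for (int i = 0; i < MAX_PRIO; i++) {
		uint8_t qidx = tc_to_qidx[prio_tc[i]];

		pri2cos[i] = q_info[qidx].queue_id;
		printf("prio %d -> hw queue 0x%02x\n", i, pri2cos[i]);
	}
	return 0;
}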
|
/kernel/linux/linux-5.10/drivers/scsi/qla2xxx/ |
D | qla_nvme.c |
    88    unsigned int qidx, u16 qsize, void **handle)                              in qla_nvme_alloc_queue() argument
    95    if (qidx)                                                                 in qla_nvme_alloc_queue()
    96    qidx--;                                                                   in qla_nvme_alloc_queue()
    103   __func__, handle, qidx, qsize);                                           in qla_nvme_alloc_queue()
    105   if (qidx > qla_nvme_fc_transport.max_hw_queues) {                         in qla_nvme_alloc_queue()
    108   __func__, qidx, qla_nvme_fc_transport.max_hw_queues);                     in qla_nvme_alloc_queue()
    116   if (ha->queue_pair_map[qidx]) {                                           in qla_nvme_alloc_queue()
    117   *handle = ha->queue_pair_map[qidx];                                       in qla_nvme_alloc_queue()
    120   *handle, qidx);                                                           in qla_nvme_alloc_queue()
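qla_nvme_alloc_queue() treats qidx 0 as the FC-NVMe admin queue and decrements nonzero indices so I/O queues start at slot 0 of queue_pair_map, then range-checks against the transport's max_hw_queues before reusing an already-mapped queue pair. A sketch of that normalize-and-validate step; the map contents are invented, and a real driver would create the queue pair on a miss:

#include <stdio.h>

#define MAX_HW_QUEUES 8

/* Invented queue-pair map; slot i backs hardware queue i. */
static void *queue_pair_map[MAX_HW_QUEUES] = { (void *)0x1000 };

static int alloc_queue(unsigned int qidx, void **handle)
{
	/* qidx 0 is the admin queue; I/O queues are 1-based, map is 0-based. */
	if (qidx)
		qidx--;

	if (qidx >= MAX_HW_QUEUES) {
		fprintf(stderr, "qidx %u out of range (max %d)\n",
			qidx, MAX_HW_QUEUES);
		return -1;
	}

	/* Reuse an existing queue pair when one is already mapped. */
	if (queue_pair_map[qidx]) {
		*handle = queue_pair_map[qidx];
		return 0;
	}
	return -1;	/* a real driver would create the queue pair here */
}

int main(void)
{
	void *h;

	printf("admin queue: %s\n", alloc_queue(0, &h) ? "fail" : "ok");
	printf("io queue 1:  %s\n", alloc_queue(1, &h) ? "fail" : "ok");
	return 0;
}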
|
/kernel/linux/linux-5.10/drivers/scsi/csiostor/ |
D | csio_wr.c |
    745   csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)                     in csio_wr_cleanup_eq_stpg() argument
    747   struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];                       in csio_wr_cleanup_eq_stpg()
    762   csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)                      in csio_wr_cleanup_iq_ftr() argument
    765   struct csio_q *q = wrm->q_arr[qidx];                                      in csio_wr_cleanup_iq_ftr()
    862   csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,                  in csio_wr_get() argument
    866   struct csio_q *q = wrm->q_arr[qidx];                                      in csio_wr_get()
    877   CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));                   in csio_wr_get()
    981   csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)                    in csio_wr_issue() argument
    984   struct csio_q *q = wrm->q_arr[qidx];                                      in csio_wr_issue()
    986   CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));                   in csio_wr_issue()
    [all …]
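csio_wr_get() and csio_wr_issue() both assert qidx against wrm->free_qidx (the number of queues allocated so far) before dereferencing q_arr[qidx], catching stray indices in debug builds. A sketch of the same guard using plain assert() in place of CSIO_DB_ASSERT:

#include <assert.h>
#include <stdio.h>

#define MAX_QUEUES 16

struct queue { int id; };

static struct queue *q_arr[MAX_QUEUES];
static int free_qidx = 4;	/* queues allocated so far */

/* Look up a work-request queue, asserting the index is in range first. */
static struct queue *wr_get(int qidx)
{
	assert(qidx >= 0 && qidx < free_qidx);
	return q_arr[qidx];
}

int main(void)
{
	static struct queue q0 = { .id = 0 };

	q_arr[0] = &q0;
	printf("queue %d\n", wr_get(0)->id);
	return 0;
}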
|
/kernel/linux/linux-5.10/drivers/net/ethernet/marvell/octeontx2/af/ |
D | rvu_nix.c |
    663   if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)                    in rvu_nix_aq_enq_inst()
    667   if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)                    in rvu_nix_aq_enq_inst()
    671   if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)                    in rvu_nix_aq_enq_inst()
    678   (req->qidx >= (256UL << (cfg & 0xF))))                                    in rvu_nix_aq_enq_inst()
    685   (req->qidx >= (256UL << (cfg & 0xF))))                                    in rvu_nix_aq_enq_inst()
    713   inst.cindex = req->qidx;                                                  in rvu_nix_aq_enq_inst()
    784   __set_bit(req->qidx, pfvf->rq_bmap);                                      in rvu_nix_aq_enq_inst()
    786   __set_bit(req->qidx, pfvf->sq_bmap);                                      in rvu_nix_aq_enq_inst()
    788   __set_bit(req->qidx, pfvf->cq_bmap);                                      in rvu_nix_aq_enq_inst()
    794   (test_bit(req->qidx, pfvf->rq_bmap) &                                     in rvu_nix_aq_enq_inst()
    [all …]
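rvu_nix_aq_enq_inst() bounds-checks req->qidx before queuing an admin instruction: contexts backed by software state compare against the allocated qsize, while other tables derive the limit from a 4-bit log field in a config register as 256UL << (cfg & 0xF). A sketch of that size decoding, with the register reduced to just the field used here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * The hardware encodes a table size as a 4-bit log value in the low
 * nibble of a config register: entries = 256 << field.
 */
static uint64_t decode_qcount(uint64_t cfg)
{
	return 256UL << (cfg & 0xF);
}

static bool qidx_valid(uint64_t qidx, uint64_t cfg)
{
	return qidx < decode_qcount(cfg);
}

int main(void)
{
	uint64_t cfg = 0x3;	/* illustrative: 256 << 3 = 2048 entries */

	printf("table holds %llu entries\n",
	       (unsigned long long)decode_qcount(cfg));
	printf("qidx 2047 valid: %d\n", qidx_valid(2047, cfg));
	printf("qidx 2048 valid: %d\n", qidx_valid(2048, cfg));
	return 0;
}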
|
D | rvu_debugfs.c |
    1064  int qidx, rc, max_id = 0;                                                 in rvu_dbg_nix_queue_ctx_display() local
    1129  for (qidx = id; qidx < max_id; qidx++) {                                  in rvu_dbg_nix_queue_ctx_display()
    1130  aq_req.qidx = qidx;                                                       in rvu_dbg_nix_queue_ctx_display()
    1132  ctype_string, nixlf, aq_req.qidx);                                        in rvu_dbg_nix_queue_ctx_display()
|
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_amdkfd_gfx_v9.c |
    805   int qidx;                                                                 in kgd_gfx_v9_get_cu_occupancy() local
    846   for (qidx = 0; qidx < max_queue_cnt; qidx++) {                            in kgd_gfx_v9_get_cu_occupancy()
    851   if (!test_bit(qidx, cp_queue_bitmap))                                     in kgd_gfx_v9_get_cu_occupancy()
    854   if (!(queue_map & (1 << qidx)))                                           in kgd_gfx_v9_get_cu_occupancy()
    858   get_wave_count(adev, qidx, &wave_cnt, &vmid);                             in kgd_gfx_v9_get_cu_occupancy()
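kgd_gfx_v9_get_cu_occupancy() filters queues twice before counting waves: the index must be set in the driver's cp_queue_bitmap and also present in the hardware-reported queue_map. A sketch of that double-bitmap filter with invented bitmap values:

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUE_CNT 8

int main(void)
{
	uint32_t cp_queue_bitmap = 0xAF;	/* queues the driver manages */
	uint32_t queue_map = 0x2B;		/* queues active in hardware */

	for (int qidx = 0; qidx < MAX_QUEUE_CNT; qidx++) {
		/* Skip queues the driver does not own... */
		if (!(cp_queue_bitmap & (1u << qidx)))
			continue;
		/* ...and queues hardware reports as inactive. */
		if (!(queue_map & (1u << qidx)))
			continue;
		printf("count waves on queue %d\n", qidx);
	}
	return 0;
}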
|
/kernel/linux/linux-5.10/include/linux/ |
D | nvme-fc-driver.h |
    479   unsigned int qidx, u16 qsize,
    482   unsigned int qidx, void *handle);
|
/kernel/linux/linux-5.10/drivers/scsi/lpfc/ |
D | lpfc_debugfs.h |
    513   lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx)                   in lpfc_debug_dump_hba_eq() argument
    517   qp = phba->sli4_hba.hdwq[qidx].hba_eq;                                    in lpfc_debug_dump_hba_eq()
    519   pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);                        in lpfc_debug_dump_hba_eq()
|
D | lpfc_nvme.c |
    254   unsigned int qidx, u16 qsize,                                             in lpfc_nvme_create_queue() argument
    272   qhandle->qidx = qidx;                                                     in lpfc_nvme_create_queue()
    278   if (qidx) {                                                               in lpfc_nvme_create_queue()
    280   qhandle->index = ((qidx - 1) %                                            in lpfc_nvme_create_queue()
    284   qhandle->index = qidx;                                                    in lpfc_nvme_create_queue()
    290   qidx, qhandle->cpu_id, qhandle->index, qhandle);                          in lpfc_nvme_create_queue()
    311   unsigned int qidx,                                                        in lpfc_nvme_delete_queue() argument
    325   lport, qidx, handle);                                                     in lpfc_nvme_delete_queue()
    1712  if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {                     in lpfc_nvme_fcp_io_submit()
    1773  lpfc_ncmd->qidx = lpfc_queue_info->qidx;                                  in lpfc_nvme_fcp_io_submit()
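lpfc_nvme_create_queue() maps FC-NVMe queue indices onto the driver's hardware channels: qidx 0 (the admin queue) keeps index 0, while I/O queues are spread round-robin with (qidx - 1) modulo the channel count. A sketch of that mapping, with a hypothetical NUM_CHANNELS standing in for the driver's configured channel count:

#include <stdio.h>

#define NUM_CHANNELS 4	/* hypothetical hardware-queue count */

/* Map an FC-NVMe qidx to a driver channel the way lpfc spreads them. */
static unsigned int map_queue(unsigned int qidx)
{
	if (qidx)	/* I/O queues: 1-based, spread round-robin */
		return (qidx - 1) % NUM_CHANNELS;
	return 0;	/* admin queue rides channel 0 */
}

int main(void)
{
	for (unsigned int qidx = 0; qidx <= 6; qidx++)
		printf("qidx %u -> channel %u\n", qidx, map_queue(qidx));
	return 0;
}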
|
D | lpfc_init.c |
    9465  int qidx, uint32_t qtype)                                                 in lpfc_create_wq_cq() argument
    9473  ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);                              in lpfc_create_wq_cq()
    9483  qidx, (uint32_t)rc);                                                      in lpfc_create_wq_cq()
    9494  qidx, cq->queue_id, qidx, eq->queue_id);                                  in lpfc_create_wq_cq()
    9501  qidx, (uint32_t)rc);                                                      in lpfc_create_wq_cq()
    9513  qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);                   in lpfc_create_wq_cq()
    9544  int qidx;                                                                 in lpfc_setup_cq_lookup() local
    9549  for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {                      in lpfc_setup_cq_lookup()
    9551  eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;                                  in lpfc_setup_cq_lookup()
    9585  int qidx, cpu;                                                            in lpfc_sli4_queue_setup() local
    [all …]
|
D | lpfc_nvme.h |
    46    uint32_t qidx; /* queue index passed to create */                         member
|
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb3/ |
D | cxgb3_main.c |
    402   int i, j, err, qidx = 0;                                                  in request_msix_data_irqs() local
    408   err = request_irq(adap->msix_info[qidx + 1].vec,                          in request_msix_data_irqs()
    410   adap->sge.qs[qidx].                                                       in request_msix_data_irqs()
    412   adap->msix_info[qidx + 1].desc,                                           in request_msix_data_irqs()
    413   &adap->sge.qs[qidx]);                                                     in request_msix_data_irqs()
    415   while (--qidx >= 0)                                                       in request_msix_data_irqs()
    416   free_irq(adap->msix_info[qidx + 1].vec,                                   in request_msix_data_irqs()
    417   &adap->sge.qs[qidx]);                                                     in request_msix_data_irqs()
    420   qidx++;                                                                   in request_msix_data_irqs()
    941   static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,    in send_pktsched_cmd() argument
    [all …]
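request_msix_data_irqs() uses the classic unwind-on-failure shape: request one vector per queue set, and if request N fails, walk qidx back down releasing everything already granted. A self-contained sketch of the pattern, with a fake acquire/release pair in place of request_irq()/free_irq() and a deliberate failure on one slot to exercise the unwind:

#include <stdbool.h>
#include <stdio.h>

#define NQSETS 4

/* Fake resource acquisition; fails on slot 2 to exercise the unwind. */
static bool acquire(int qidx) { return qidx != 2; }
static void release(int qidx) { printf("released %d\n", qidx); }

static int request_all(void)
{
	int qidx;

	for (qidx = 0; qidx < NQSETS; qidx++) {
		if (!acquire(qidx)) {
			/* Unwind: free every vector already requested. */
			while (--qidx >= 0)
				release(qidx);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	printf("request_all: %d\n", request_all());
	return 0;
}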
|
/kernel/linux/linux-5.10/drivers/crypto/ccp/ |
D | ccp-dev-v5.c |
    216   n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;                      in ccp5_get_free_slots()
    238   mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx];                                in ccp5_do_cmd()
    243   cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;                     in ccp5_do_cmd()
    249   tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);         in ccp5_do_cmd()
    840   cmd_q->qidx = 0;                                                          in ccp5_init()
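ccp5_do_cmd() advances the command-queue write index with modular arithmetic, cmd_q->qidx = (qidx + 1) % COMMANDS_PER_QUEUE, and ccp5_get_free_slots() derives the free count from the distance between head and tail around the ring. A sketch of both calculations on a toy ring; one slot is kept empty so the full and empty states stay distinguishable:

#include <stdio.h>

#define COMMANDS_PER_QUEUE 8

struct cmd_queue {
	unsigned int head;	/* hardware consumer index */
	unsigned int qidx;	/* software producer (tail) index */
};

/* Slots still writable; one slot stays empty to mark the full state. */
static unsigned int free_slots(const struct cmd_queue *q)
{
	return (q->head + COMMANDS_PER_QUEUE - q->qidx - 1)
		% COMMANDS_PER_QUEUE;
}

/* Publish one descriptor and wrap the producer index. */
static void push(struct cmd_queue *q)
{
	q->qidx = (q->qidx + 1) % COMMANDS_PER_QUEUE;
}

int main(void)
{
	struct cmd_queue q = { .head = 0, .qidx = 0 };

	printf("free at start: %u\n", free_slots(&q));		/* 7 */
	push(&q);
	printf("free after one push: %u\n", free_slots(&q));	/* 6 */
	return 0;
}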
|