/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/
D | wr.c |
      78  static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq,  in mlx5_wq_overflow() argument
      85  if (likely(cur + nreq < wq->max_post))  in mlx5_wq_overflow()
      93  return cur + nreq >= wq->max_post;  in mlx5_wq_overflow()
     945  int *size, void **cur_edge, int nreq,  in __begin_wqe() argument
     948  if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))  in __begin_wqe()
     970  void **cur_edge, int nreq)  in begin_wqe() argument
     972  return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,  in begin_wqe()
     980  unsigned int idx, u64 wr_id, int nreq, u8 fence,  in finish_wqe() argument
     994  qp->sq.wqe_head[idx] = qp->sq.head + nreq;  in finish_wqe()
    1036  void **cur_edge, unsigned int *idx, int nreq,  in handle_psv() argument
    [all …]
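Every wr.c hit above revolves around the same ring-buffer accounting: a post may claim `nreq` more WQEs only while `head - tail + nreq` stays below `max_post`. A minimal user-space sketch of just that arithmetic follows; the struct is a stand-in for `mlx5_ib_wq` with only the three fields the snippets touch, and the real `mlx5_wq_overflow()` additionally refreshes the tail by polling the send CQ under its lock before declaring overflow.

```c
#include <stdbool.h>

/* Stand-in for the driver's work-queue bookkeeping. */
struct wq {
	unsigned int head;     /* next slot the producer will claim */
	unsigned int tail;     /* last slot the consumer released */
	unsigned int max_post; /* ring capacity in WQEs */
};

/* True when posting nreq more work requests would overflow the ring.
 * head and tail are free-running counters, so unsigned subtraction
 * still counts outstanding WQEs correctly across wrap-around. */
static bool wq_overflow(const struct wq *wq, int nreq)
{
	unsigned int cur = wq->head - wq->tail;

	return cur + nreq >= wq->max_post;
}
```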
|
D | srq.c |
     433  int nreq;  in mlx5_ib_post_srq_recv() local
     444  for (nreq = 0; wr; nreq++, wr = wr->next) {  in mlx5_ib_post_srq_recv()
     476  if (likely(nreq)) {  in mlx5_ib_post_srq_recv()
     477  srq->wqe_ctr += nreq;  in mlx5_ib_post_srq_recv()
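The srq.c hits show the posting loop that feeds the check above: walk the caller's chained work requests, then publish the whole batch with a single counter update. A hedged sketch, with illustrative types rather than the driver's:

```c
/* Illustrative work-request chain, mirroring ib_recv_wr's ->next link. */
struct recv_wr {
	struct recv_wr *next;
};

struct srq_ring {
	unsigned int wqe_ctr; /* counter the hardware observes */
};

static void post_srq_recv(struct srq_ring *srq, struct recv_wr *wr)
{
	int nreq;

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		/* ... validate and stage one WQE per chained request ... */
	}

	if (nreq)	/* one visible update for the entire chain */
		srq->wqe_ctr += nreq;
}
```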
|
/kernel/linux/linux-5.10/arch/ia64/include/asm/ |
D | perfmon.h |
      87  extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *re…
      88  extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *r…
      89  extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_reg…
      90  extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_reg…
|
/kernel/linux/linux-5.10/crypto/ |
D | echainiv.c |
      45  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);  in echainiv_encrypt()
      47  skcipher_request_set_sync_tfm(nreq, ctx->sknull);  in echainiv_encrypt()
      48  skcipher_request_set_callback(nreq, req->base.flags,  in echainiv_encrypt()
      50  skcipher_request_set_crypt(nreq, req->src, req->dst,  in echainiv_encrypt()
      54  err = crypto_skcipher_encrypt(nreq);  in echainiv_encrypt()
|
D | seqiv.c |
      69  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);  in seqiv_aead_encrypt()
      71  skcipher_request_set_sync_tfm(nreq, ctx->sknull);  in seqiv_aead_encrypt()
      72  skcipher_request_set_callback(nreq, req->base.flags,  in seqiv_aead_encrypt()
      74  skcipher_request_set_crypt(nreq, req->src, req->dst,  in seqiv_aead_encrypt()
      78  err = crypto_skcipher_encrypt(nreq);  in seqiv_aead_encrypt()
|
D | gcm.c |
     966  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);  in crypto_rfc4543_copy_src_to_dst()
     968  skcipher_request_set_sync_tfm(nreq, ctx->null);  in crypto_rfc4543_copy_src_to_dst()
     969  skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);  in crypto_rfc4543_copy_src_to_dst()
     970  skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);  in crypto_rfc4543_copy_src_to_dst()
     972  return crypto_skcipher_encrypt(nreq);  in crypto_rfc4543_copy_src_to_dst()
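echainiv, seqiv, and rfc4543 all lean on the same trick: "encrypting" with the ecb(cipher_null) skcipher is a cheap scatterlist-to-scatterlist copy. A kernel-style sketch of that shared helper shape, assuming `sknull` came from `crypto_get_default_null_skcipher()` as it does in these three files:

```c
#include <crypto/aead.h>
#include <crypto/null.h>
#include <crypto/skcipher.h>

/* Copy req->src to req->dst by running the null cipher over it. */
static int null_copy(struct aead_request *req,
		     struct crypto_sync_skcipher *sknull,
		     unsigned int nbytes)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, sknull);

	skcipher_request_set_sync_tfm(nreq, sknull);
	/* No completion callback needed: the null cipher is synchronous. */
	skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);

	return crypto_skcipher_encrypt(nreq);
}
```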
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
    1565  static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,  in mthca_wq_overflow() argument
    1572  if (likely(cur + nreq < wq->max))  in mthca_wq_overflow()
    1580  return cur + nreq >= wq->max;  in mthca_wq_overflow()
    1631  int nreq;  in mthca_tavor_post_send() local
    1652  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_tavor_post_send()
    1653  if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in mthca_tavor_post_send()
    1657  qp->sq.max, nreq);  in mthca_tavor_post_send()
    1786  cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |  in mthca_tavor_post_send()
    1790  if (!nreq) {  in mthca_tavor_post_send()
    1803  if (likely(nreq)) {  in mthca_tavor_post_send()
    [all …]
|
D | mthca_srq.c |
     493  int nreq;  in mthca_tavor_post_srq_recv() local
     502  for (nreq = 0; wr; wr = wr->next) {  in mthca_tavor_post_srq_recv()
     543  ++nreq;  in mthca_tavor_post_srq_recv()
     544  if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {  in mthca_tavor_post_srq_recv()
     545  nreq = 0;  in mthca_tavor_post_srq_recv()
     561  if (likely(nreq)) {  in mthca_tavor_post_srq_recv()
     568  mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,  in mthca_tavor_post_srq_recv()
     586  int nreq;  in mthca_arbel_post_srq_recv() local
     592  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_arbel_post_srq_recv()
     627  if (likely(nreq)) {  in mthca_arbel_post_srq_recv()
    [all …]
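Unlike the other post paths, the Tavor SRQ loop cannot defer its doorbell entirely to the end: the doorbell's request-count field is narrow, so lines 543-545 ring it and reset `nreq` whenever a batch fills up. A sketch of that batching rule; the limit of 256 is an assumed stand-in for `MTHCA_TAVOR_MAX_WQES_PER_RECV_DB`, and `ring_doorbell()` is a hypothetical placeholder for the `mthca_write64()` MMIO write:

```c
#define MAX_WQES_PER_DB 256	/* assumed stand-in for the mthca limit */

static void ring_doorbell(int first_wqe, int count)
{
	/* MMIO write telling the HCA about `count` WQEs from first_wqe on */
}

static void post_recv_batched(int total)
{
	int nreq = 0, first = 0, i;

	for (i = 0; i < total; i++) {
		/* ... build WQE i ... */
		++nreq;
		if (nreq == MAX_WQES_PER_DB) {	/* batch full: flush early */
			ring_doorbell(first, nreq);
			first = i + 1;
			nreq = 0;
		}
	}
	if (nreq)				/* flush the final partial batch */
		ring_doorbell(first, nreq);
}
```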
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/ |
D | srq.c |
     311  int nreq;  in mlx4_ib_post_srq_recv() local
     319  nreq = 0;  in mlx4_ib_post_srq_recv()
     323  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mlx4_ib_post_srq_recv()
     355  if (likely(nreq)) {  in mlx4_ib_post_srq_recv()
     356  srq->wqe_ctr += nreq;  in mlx4_ib_post_srq_recv()
|
D | qp.c |
    3241  static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)  in mlx4_wq_overflow() argument
    3247  if (likely(cur + nreq < wq->max_post))  in mlx4_wq_overflow()
    3255  return cur + nreq >= wq->max_post;  in mlx4_wq_overflow()
    3493  int nreq;  in _mlx4_ib_post_send() local
    3529  nreq = 0;  in _mlx4_ib_post_send()
    3535  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in _mlx4_ib_post_send()
    3539  if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in _mlx4_ib_post_send()
    3552  qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;  in _mlx4_ib_post_send()
    3778  if (likely(nreq)) {  in _mlx4_ib_post_send()
    3779  qp->sq.head += nreq;  in _mlx4_ib_post_send()
    [all …]
|
/kernel/linux/linux-5.10/fs/nfs/ |
D | pnfs_nfs.c |
     459  unsigned int nreq = 0;  in pnfs_bucket_alloc_ds_commits() local
     471  nreq++;  in pnfs_bucket_alloc_ds_commits()
     475  return nreq;  in pnfs_bucket_alloc_ds_commits()
     480  return nreq;  in pnfs_bucket_alloc_ds_commits()
     515  unsigned int nreq = 0;  in pnfs_generic_commit_pagelist() local
     526  nreq++;  in pnfs_generic_commit_pagelist()
     529  nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);  in pnfs_generic_commit_pagelist()
     530  if (nreq == 0)  in pnfs_generic_commit_pagelist()
|
/kernel/linux/linux-5.10/drivers/crypto/inside-secure/ |
D | safexcel.c |
     816  int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;  in safexcel_dequeue() local
     856  nreq++;  in safexcel_dequeue()
     867  if (!nreq)  in safexcel_dequeue()
     872  priv->ring[ring].requests += nreq;  in safexcel_dequeue()
    1019  int ret, i, nreq, ndesc, tot_descs, handled = 0;  in safexcel_handle_result_descriptor() local
    1025  nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);  in safexcel_handle_result_descriptor()
    1026  nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;  in safexcel_handle_result_descriptor()
    1027  nreq &= EIP197_xDR_PROC_xD_PKT_MASK;  in safexcel_handle_result_descriptor()
    1028  if (!nreq)  in safexcel_handle_result_descriptor()
    1031  for (i = 0; i < nreq; i++) {  in safexcel_handle_result_descriptor()
    [all …]
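In the interrupt path, safexcel learns how many requests completed by decoding a packet-count bitfield out of a single register read (lines 1025-1027). The shape is plain shift-and-mask; the offset and mask below are placeholders for illustration, not the EIP197's real register layout:

```c
#include <stdint.h>

#define PROC_PKT_OFFSET 24    /* placeholder bit position */
#define PROC_PKT_MASK   0x7f  /* placeholder field width */

/* Extract the processed-packet count from a raw status register. */
static unsigned int processed_packets(uint32_t reg)
{
	return (reg >> PROC_PKT_OFFSET) & PROC_PKT_MASK;
}
```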
|
/kernel/linux/linux-5.10/drivers/dma/ |
D | bcm-sba-raid.c |
     297  struct sba_request *nreq;  in sba_free_chained_requests() local
     303  list_for_each_entry(nreq, &req->next, next)  in sba_free_chained_requests()
     304  _sba_free_request(sba, nreq);  in sba_free_chained_requests()
     420  struct sba_request *nreq, *first = req->first;  in sba_process_received_request() local
     442  list_for_each_entry(nreq, &first->next, next)  in sba_process_received_request()
     443  _sba_free_request(sba, nreq);  in sba_process_received_request()
     528  struct sba_request *req, *nreq;  in sba_tx_submit() local
     540  list_for_each_entry(nreq, &req->next, next)  in sba_tx_submit()
     541  _sba_pending_request(sba, nreq);  in sba_tx_submit()
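All three sba hits use `nreq` as a cursor over the chain hanging off one request. A kernel-style sketch of that shape with stand-in names; note it iterates with the non-`_safe` walker, which is only sound if the per-entry helper leaves the chain links alone (contrast the isp1760 entry further down, which deletes as it walks and therefore uses `list_for_each_entry_safe()`):

```c
#include <linux/list.h>

/* Stand-in for the chained-request shape: one head request carries
 * follow-on requests on its ->next list. */
struct chained_req {
	struct list_head node;	/* links this request into a chain */
	struct list_head next;	/* head of the chain behind this one */
};

static void release_one(struct chained_req *req)
{
	/* return req to its pool; assumed not to unlink req->node */
}

/* Release a request plus everything chained behind it. */
static void release_chain(struct chained_req *req)
{
	struct chained_req *nreq;

	list_for_each_entry(nreq, &req->next, node)
		release_one(nreq);
	release_one(req);
}
```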
|
/kernel/linux/linux-5.10/drivers/crypto/qat/qat_common/ |
D | qat_algs.c |
    1118  struct skcipher_request *nreq = skcipher_request_ctx(req);  in qat_alg_skcipher_xts_encrypt() local
    1124  memcpy(nreq, req, sizeof(*req));  in qat_alg_skcipher_xts_encrypt()
    1125  skcipher_request_set_tfm(nreq, ctx->ftfm);  in qat_alg_skcipher_xts_encrypt()
    1126  return crypto_skcipher_encrypt(nreq);  in qat_alg_skcipher_xts_encrypt()
    1196  struct skcipher_request *nreq = skcipher_request_ctx(req);  in qat_alg_skcipher_xts_decrypt() local
    1202  memcpy(nreq, req, sizeof(*req));  in qat_alg_skcipher_xts_decrypt()
    1203  skcipher_request_set_tfm(nreq, ctx->ftfm);  in qat_alg_skcipher_xts_decrypt()
    1204  return crypto_skcipher_decrypt(nreq);  in qat_alg_skcipher_xts_decrypt()
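Both qat hits implement a software fallback: clone the request header into the request context, point it at the fallback tfm, and re-dispatch. The sketch below mirrors the listed lines directly; the function name is invented, and it assumes the context was sized to hold a full request at init time (via `crypto_skcipher_set_reqsize()`):

```c
#include <crypto/internal/skcipher.h>
#include <linux/string.h>

static int xts_fallback_encrypt(struct skcipher_request *req,
				struct crypto_skcipher *ftfm)
{
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	memcpy(nreq, req, sizeof(*req));	/* clone the request header */
	skcipher_request_set_tfm(nreq, ftfm);	/* retarget at the fallback */
	return crypto_skcipher_encrypt(nreq);
}
```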
|
/kernel/linux/linux-5.10/fs/nilfs2/ |
D | btree.c |
    1739  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_prepare_convert_and_insert() argument
    1766  if (nreq != NULL) {  in nilfs_btree_prepare_convert_and_insert()
    1767  nreq->bpr_ptr = dreq->bpr_ptr + 1;  in nilfs_btree_prepare_convert_and_insert()
    1768  ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
    1772  ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);  in nilfs_btree_prepare_convert_and_insert()
    1785  nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
    1799  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_commit_convert_and_insert() argument
    1817  if (nreq != NULL) {  in nilfs_btree_commit_convert_and_insert()
    1819  nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);  in nilfs_btree_commit_convert_and_insert()
    1835  tmpptr = nreq->bpr_ptr;  in nilfs_btree_commit_convert_and_insert()
    [all …]
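The nilfs hits trace a two-phase allocation: every pointer reservation is *prepared* first, the tree is mutated only once nothing can fail, and prepared reservations are *aborted* on error (line 1785). A minimal sketch of that discipline with hypothetical types and helpers:

```c
struct ptr_req { int reserved; };

static int  prepare_alloc(struct ptr_req *r) { r->reserved = 1; return 0; }
static void commit_alloc(struct ptr_req *r)  { /* make it permanent */ }
static void abort_alloc(struct ptr_req *r)   { r->reserved = 0; }

static int convert_and_insert(struct ptr_req *dreq, struct ptr_req *nreq)
{
	int ret = prepare_alloc(dreq);		/* phase one: reserve */
	if (ret)
		return ret;

	if (nreq) {				/* second pointer is optional */
		ret = prepare_alloc(nreq);
		if (ret) {
			abort_alloc(dreq);	/* unwind phase one */
			return ret;
		}
	}

	commit_alloc(dreq);			/* phase two: cannot fail */
	if (nreq)
		commit_alloc(nreq);
	return 0;
}
```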
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/ |
D | hns_roce_hw_v2.c |
     653  int nreq;  in hns_roce_v2_post_send() local
     661  nreq = 0;  in hns_roce_v2_post_send()
     667  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in hns_roce_v2_post_send()
     668  if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in hns_roce_v2_post_send()
     674  wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);  in hns_roce_v2_post_send()
     687  ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);  in hns_roce_v2_post_send()
     702  if (likely(nreq)) {  in hns_roce_v2_post_send()
     703  qp->sq.head += nreq;  in hns_roce_v2_post_send()
     738  int nreq;  in hns_roce_v2_post_recv() local
     747  nreq = 0;  in hns_roce_v2_post_recv()
    [all …]
|
D | hns_roce_hw_v1.c |
      80  int nreq;  in hns_roce_v1_post_send() local
      92  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in hns_roce_v1_post_send()
      93  if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in hns_roce_v1_post_send()
      99  wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);  in hns_roce_v1_post_send()
     315  if (likely(nreq)) {  in hns_roce_v1_post_send()
     316  qp->sq.head += nreq;  in hns_roce_v1_post_send()
     356  int nreq = 0;  in hns_roce_v1_post_recv() local
     362  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in hns_roce_v1_post_recv()
     363  if (hns_roce_wq_overflow(&hr_qp->rq, nreq,  in hns_roce_v1_post_recv()
     370  wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);  in hns_roce_v1_post_recv()
    [all …]
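Both hns_roce generations compute `wqe_idx` the same way: because `wqe_cnt` is a power of two, masking with `wqe_cnt - 1` is a cheap modulo, and in v2 (line 687) the bits shifted out above the mask feed an owner flag that flips on every wrap. A small sketch of both computations (the owner value here is masked down to a single bit, whereas the driver keeps the raw complement):

```c
/* Ring slot for the (head + nreq)-th WQE; wqe_cnt must be a power of two. */
static unsigned int wqe_index(unsigned int head, int nreq, unsigned int wqe_cnt)
{
	return (head + nreq) & (wqe_cnt - 1);
}

/* Owner bit: inverted parity of how many times the producer has
 * wrapped, telling hardware which pass of the ring a WQE belongs to. */
static unsigned int owner_bit(unsigned int head, int nreq, unsigned int log_wqe_cnt)
{
	return ~((head + nreq) >> log_wqe_cnt) & 0x1;
}
```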
|
D | hns_roce_qp.c |
    1275  bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,  in hns_roce_wq_overflow() argument
    1282  if (likely(cur + nreq < hr_wq->wqe_cnt))  in hns_roce_wq_overflow()
    1290  return cur + nreq >= hr_wq->wqe_cnt;  in hns_roce_wq_overflow()
|
D | hns_roce_device.h | 1252 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/ |
D | main.c |
    2943  int nreq = min3(dev->caps.num_ports *  in mlx4_enable_msi_x() local
    2949  nreq = min_t(int, nreq, msi_x);  in mlx4_enable_msi_x()
    2951  entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);  in mlx4_enable_msi_x()
    2955  for (i = 0; i < nreq; ++i)  in mlx4_enable_msi_x()
    2958  nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,  in mlx4_enable_msi_x()
    2959  nreq);  in mlx4_enable_msi_x()
    2961  if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {  in mlx4_enable_msi_x()
    2966  dev->caps.num_comp_vectors = nreq - 1;  in mlx4_enable_msi_x()
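Here `nreq` is the MSI-X vector budget: clamp a wish count, then let the PCI core grant anything between a floor of 2 and that count. A hedged sketch of the same dance; `nwant` and `enable_msix()` are stand-ins for the `min3()` computation and the surrounding driver code, while `pci_enable_msix_range()` really does return the granted count or a negative errno:

```c
#include <linux/pci.h>
#include <linux/slab.h>

static int enable_msix(struct pci_dev *pdev, int nwant)
{
	struct msix_entry *entries;
	int nreq, i;

	entries = kcalloc(nwant, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < nwant; ++i)
		entries[i].entry = i;

	/* Grants between 2 and nwant vectors, or returns -errno. */
	nreq = pci_enable_msix_range(pdev, entries, 2, nwant);

	/* On success entries[i].vector holds the IRQ numbers; a real
	 * driver saves them before freeing the array. */
	kfree(entries);
	return nreq;
}
```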
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
D | dr_send.c |
     225  u32 opcode, int nreq)  in dr_rdma_segments() argument
     257  if (nreq)  in dr_rdma_segments()
|
/kernel/linux/linux-5.10/drivers/usb/isp1760/ |
D | isp1760-udc.c |
     772  struct isp1760_request *req, *nreq;  in isp1760_ep_disable() local
     798  list_for_each_entry_safe(req, nreq, &req_list, queue) {  in isp1760_ep_disable()
|
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/ |
D | qp.c |
    2203  unsigned nreq = 0;  in rvt_post_send() local
    2230  nreq++;  in rvt_post_send()
    2234  if (nreq) {  in rvt_post_send()
    2239  if (nreq == 1 && call_send)  in rvt_post_send()
|
/kernel/linux/linux-5.10/drivers/net/ethernet/neterion/vxge/ |
D | vxge-config.c |
    2321  u32 nreq = 0, i;  in __vxge_hw_blockpool_blocks_add() local
    2325  nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;  in __vxge_hw_blockpool_blocks_add()
    2326  blockpool->req_out += nreq;  in __vxge_hw_blockpool_blocks_add()
    2329  for (i = 0; i < nreq; i++)  in __vxge_hw_blockpool_blocks_add()
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/ |
D | tid_rdma.c |
    5186  struct tid_rdma_request *req, *nreq;  in make_tid_rdma_ack() local
    5260  nreq = ack_to_tid_req(&qp->s_ack_queue[next]);  in make_tid_rdma_ack()
    5261  if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)  in make_tid_rdma_ack()
|