/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
    1566 | static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, | in mthca_wq_overflow() argument
    1573 | if (likely(cur + nreq < wq->max)) | in mthca_wq_overflow()
    1581 | return cur + nreq >= wq->max; | in mthca_wq_overflow()
    1632 | int nreq; | in mthca_tavor_post_send() local
    1653 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | in mthca_tavor_post_send()
    1654 | if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { | in mthca_tavor_post_send()
    1658 | qp->sq.max, nreq); | in mthca_tavor_post_send()
    1787 | cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | | in mthca_tavor_post_send()
    1791 | if (!nreq) { | in mthca_tavor_post_send()
    1804 | if (likely(nreq)) { | in mthca_tavor_post_send()
    [all …]
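The mthca hits above are the canonical work-queue overflow check that the mlx4, mlx5 and hns_roce entries further down repeat almost verbatim: head and tail are free-running counters, and their difference plus the number of requests about to be posted (nreq) is compared against the ring size. A minimal userspace sketch of that arithmetic follows; struct wq, wq_overflow() and the values in main() are illustrative stand-ins rather than the driver's types, and the completion-queue slow path is only described in a comment.

/*
 * Minimal userspace model of the wq_overflow() idiom above (struct wq,
 * wq_overflow() and the numbers in main() are illustrative stand-ins,
 * not the driver's types).  head and tail are free-running unsigned
 * counters, so "head - tail" stays correct even after either wraps.
 */
#include <stdbool.h>
#include <stdio.h>

struct wq {
        unsigned int head;      /* total WQEs posted so far */
        unsigned int tail;      /* total WQEs completed so far */
        unsigned int max;       /* ring capacity in WQEs */
};

/* true if posting nreq more WQEs would overflow the ring */
static bool wq_overflow(const struct wq *wq, unsigned int nreq)
{
        unsigned int cur = wq->head - wq->tail; /* outstanding WQEs */

        /*
         * The drivers test a cached tail first and, only if that fails,
         * re-read the tail under the completion-queue lock; the final
         * comparison is the same either way.
         */
        return cur + nreq >= wq->max;
}

int main(void)
{
        /* head has already wrapped past UINT_MAX, tail has not */
        struct wq wq = { .head = 2, .tail = 4294967294u, .max = 16 };

        /* 2 - 4294967294 wraps to 4 outstanding WQEs: room for 11 more */
        printf("%d\n", wq_overflow(&wq, 11));   /* 0: still fits */
        printf("%d\n", wq_overflow(&wq, 12));   /* 1: would overflow */
        return 0;
}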
|
D | mthca_srq.c |
    493 | int nreq; | in mthca_tavor_post_srq_recv() local
    502 | for (nreq = 0; wr; wr = wr->next) { | in mthca_tavor_post_srq_recv()
    543 | ++nreq; | in mthca_tavor_post_srq_recv()
    544 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | in mthca_tavor_post_srq_recv()
    545 | nreq = 0; | in mthca_tavor_post_srq_recv()
    561 | if (likely(nreq)) { | in mthca_tavor_post_srq_recv()
    568 | mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, | in mthca_tavor_post_srq_recv()
    586 | int nreq; | in mthca_arbel_post_srq_recv() local
    592 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | in mthca_arbel_post_srq_recv()
    627 | if (likely(nreq)) { | in mthca_arbel_post_srq_recv()
    [all …]
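mthca_tavor_post_srq_recv() above also caps how many receive WQEs a single doorbell may cover: nreq is reset every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB entries and the doorbell is rung once more at the end for the remainder. A small stand-alone sketch of that "flush every N, then flush what is left" shape, with BATCH, flush_batch() and post_items() as made-up names:

/*
 * Sketch of the batching in mthca_tavor_post_srq_recv(): count entries
 * in nreq, flush every BATCH entries, and flush whatever remains at the
 * end.  BATCH plays the role of MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; the
 * function names are invented.
 */
#include <stdio.h>

#define BATCH 4

static void flush_batch(int first, int count)
{
        /* stands in for the doorbell write covering 'count' entries */
        printf("doorbell: %d entries starting at %d\n", count, first);
}

static void post_items(int total)
{
        int nreq = 0;
        int first = 0;

        for (int i = 0; i < total; i++) {
                /* ... build one receive work request here ... */
                ++nreq;
                if (nreq == BATCH) {            /* hardware limit per doorbell */
                        flush_batch(first, nreq);
                        first = i + 1;
                        nreq = 0;
                }
        }
        if (nreq)                               /* leftover partial batch */
                flush_batch(first, nreq);
}

int main(void)
{
        post_items(10);         /* doorbells for 4, 4, then 2 entries */
        return 0;
}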
|
/drivers/infiniband/hw/mlx4/ |
D | srq.c |
    308 | int nreq; | in mlx4_ib_post_srq_recv() local
    316 | nreq = 0; | in mlx4_ib_post_srq_recv()
    320 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | in mlx4_ib_post_srq_recv()
    352 | if (likely(nreq)) { | in mlx4_ib_post_srq_recv()
    353 | srq->wqe_ctr += nreq; | in mlx4_ib_post_srq_recv()
|
D | qp.c |
    3281 | static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) | in mlx4_wq_overflow() argument
    3287 | if (likely(cur + nreq < wq->max_post)) | in mlx4_wq_overflow()
    3295 | return cur + nreq >= wq->max_post; | in mlx4_wq_overflow()
    3533 | int nreq; | in _mlx4_ib_post_send() local
    3569 | nreq = 0; | in _mlx4_ib_post_send()
    3575 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | in _mlx4_ib_post_send()
    3579 | if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { | in _mlx4_ib_post_send()
    3592 | qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; | in _mlx4_ib_post_send()
    3819 | if (likely(nreq)) { | in _mlx4_ib_post_send()
    3820 | qp->sq.head += nreq; | in _mlx4_ib_post_send()
    [all …]
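The _mlx4_ib_post_send() hits show the other half of the batching idiom: each request's cookie goes into slot (head + nreq) & (wqe_cnt - 1), which relies on wqe_cnt being a power of two, and head is advanced by nreq once, only after the whole chain has been written. A simplified, runnable model (struct sq, post_chain() and WQE_CNT are invented for the sketch; the real code of course also builds the WQE itself):

/*
 * Simplified model of the ring indexing in _mlx4_ib_post_send() above:
 * wqe_cnt is a power of two, so "(head + nreq) & (wqe_cnt - 1)" maps the
 * free-running head counter onto a slot, and head itself is advanced by
 * nreq only once, after the whole chain has been written.
 */
#include <stdint.h>
#include <stdio.h>

#define WQE_CNT 8                       /* must be a power of two */

struct sq {
        uint32_t head;                  /* free-running post counter */
        uint64_t wrid[WQE_CNT];         /* caller cookies, one per slot */
};

static void post_chain(struct sq *sq, const uint64_t *ids, int n)
{
        int nreq;

        for (nreq = 0; nreq < n; nreq++)
                sq->wrid[(sq->head + nreq) & (WQE_CNT - 1)] = ids[nreq];

        if (n)                          /* publish the whole batch at once */
                sq->head += n;
}

int main(void)
{
        struct sq sq = { .head = 6 };   /* near the end of the ring: wraps */
        uint64_t ids[] = { 100, 101, 102, 103 };

        post_chain(&sq, ids, 4);
        /* slots 6, 7, 0, 1 now hold 100..103; head is 10 */
        printf("head=%u slot0=%llu\n", (unsigned)sq.head,
               (unsigned long long)sq.wrid[0]);
        return 0;
}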
|
/drivers/infiniband/hw/mlx5/ |
D | srq.c |
    430 | int nreq; | in mlx5_ib_post_srq_recv() local
    441 | for (nreq = 0; wr; nreq++, wr = wr->next) { | in mlx5_ib_post_srq_recv()
    473 | if (likely(nreq)) { | in mlx5_ib_post_srq_recv()
    474 | srq->wqe_ctr += nreq; | in mlx5_ib_post_srq_recv()
|
D | qp.c |
    4072 | static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) | in mlx5_wq_overflow() argument
    4078 | if (likely(cur + nreq < wq->max_post)) | in mlx5_wq_overflow()
    4086 | return cur + nreq >= wq->max_post; | in mlx5_wq_overflow()
    4901 | int *size, void **cur_edge, int nreq, | in __begin_wqe() argument
    4904 | if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) | in __begin_wqe()
    4926 | int *size, void **cur_edge, int nreq) | in begin_wqe() argument
    4928 | return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq, | in begin_wqe()
    4936 | unsigned int idx, u64 wr_id, int nreq, u8 fence, | in finish_wqe() argument
    4950 | qp->sq.wqe_head[idx] = qp->sq.head + nreq; | in finish_wqe()
    4985 | int nreq; | in _mlx5_ib_post_send() local
    [all …]
|
/drivers/crypto/inside-secure/ |
D | safexcel.c |
    773 | int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; | in safexcel_dequeue() local
    813 | nreq++; | in safexcel_dequeue()
    824 | if (!nreq) | in safexcel_dequeue()
    829 | priv->ring[ring].requests += nreq; | in safexcel_dequeue()
    969 | int ret, i, nreq, ndesc, tot_descs, handled = 0; | in safexcel_handle_result_descriptor() local
    975 | nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); | in safexcel_handle_result_descriptor()
    976 | nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET; | in safexcel_handle_result_descriptor()
    977 | nreq &= EIP197_xDR_PROC_xD_PKT_MASK; | in safexcel_handle_result_descriptor()
    978 | if (!nreq) | in safexcel_handle_result_descriptor()
    981 | for (i = 0; i < nreq; i++) { | in safexcel_handle_result_descriptor()
    [all …]
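In safexcel_handle_result_descriptor(), nreq is not counted in software at all: it is read from the ring's PROC_COUNT register and the packet-count field is isolated with a shift and a mask before the result loop runs. The sketch below shows only that decode step; PKT_OFFSET and PKT_MASK are made-up values standing in for the EIP197_xDR_PROC_xD_PKT_* constants.

/*
 * Sketch of the register decode in safexcel_handle_result_descriptor():
 * read the ring's processed-count register once, then shift and mask out
 * the packet-count field before looping over that many results.  The
 * field position and width below are illustrative, not the real values.
 */
#include <stdint.h>
#include <stdio.h>

#define PKT_OFFSET      24              /* illustrative field position */
#define PKT_MASK        0x7f            /* illustrative field width */

static int processed_packets(uint32_t proc_count_reg)
{
        int nreq = proc_count_reg;

        nreq >>= PKT_OFFSET;            /* move the field down to bit 0 */
        nreq &= PKT_MASK;               /* drop the neighbouring fields */
        return nreq;
}

int main(void)
{
        uint32_t reg = (5u << PKT_OFFSET) | 0x00ABCDEF; /* 5 packets done */

        printf("%d packets to handle\n", processed_packets(reg));
        return 0;
}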
|
/drivers/dma/ |
D | bcm-sba-raid.c |
    297 | struct sba_request *nreq; | in sba_free_chained_requests() local
    303 | list_for_each_entry(nreq, &req->next, next) | in sba_free_chained_requests()
    304 | _sba_free_request(sba, nreq); | in sba_free_chained_requests()
    420 | struct sba_request *nreq, *first = req->first; | in sba_process_received_request() local
    442 | list_for_each_entry(nreq, &first->next, next) | in sba_process_received_request()
    443 | _sba_free_request(sba, nreq); | in sba_process_received_request()
    528 | struct sba_request *req, *nreq; | in sba_tx_submit() local
    540 | list_for_each_entry(nreq, &req->next, next) | in sba_tx_submit()
    541 | _sba_pending_request(sba, nreq); | in sba_tx_submit()
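Here nreq is a cursor rather than a count: the sba functions walk the list of requests chained behind the first one with list_for_each_entry() and free or queue each member. A userspace model of that walk, using a plain next pointer instead of struct list_head and reading the link before the element is freed:

/*
 * Userspace model of the chained-request walk in sba_free_chained_requests():
 * every request chained behind the first is visited and released.  The
 * driver uses struct list_head plus list_for_each_entry(); here the chain
 * is a plain next pointer and the struct is a simplified stand-in.
 */
#include <stdio.h>
#include <stdlib.h>

struct sba_request {                    /* simplified stand-in */
        int id;
        struct sba_request *next;       /* chained (dependent) requests */
};

static void free_request(struct sba_request *req)
{
        printf("freeing request %d\n", req->id);
        free(req);
}

/* free a request and every request chained behind it */
static void free_chained_requests(struct sba_request *req)
{
        struct sba_request *nreq, *tmp;

        for (nreq = req->next; nreq; nreq = tmp) {
                tmp = nreq->next;       /* read the link before freeing */
                free_request(nreq);
        }
        free_request(req);
}

int main(void)
{
        struct sba_request *first = calloc(1, sizeof(*first));
        struct sba_request *second = calloc(1, sizeof(*second));

        first->id = 1;
        second->id = 2;
        first->next = second;
        free_chained_requests(first);   /* frees 2, then 1 */
        return 0;
}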
|
/drivers/infiniband/hw/hns/ |
D | hns_roce_hw_v2.c |
    253 | int nreq; | in hns_roce_v2_post_send() local
    275 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | in hns_roce_v2_post_send()
    276 | if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { | in hns_roce_v2_post_send()
    291 | qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = | in hns_roce_v2_post_send()
    295 | ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); | in hns_roce_v2_post_send()
    571 | if (likely(nreq)) { | in hns_roce_v2_post_send()
    572 | qp->sq.head += nreq; | in hns_roce_v2_post_send()
    627 | int nreq; | in hns_roce_v2_post_recv() local
    640 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | in hns_roce_v2_post_recv()
    641 | if (hns_roce_wq_overflow(&hr_qp->rq, nreq, | in hns_roce_v2_post_recv()
    [all …]
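Besides the shared overflow check and the head += nreq batching, hns_roce_v2_post_send() derives an owner bit from head + nreq (line 295 above): shifting by ilog2(wqe_cnt) counts complete passes around the ring, so the bit flips on every wrap and lets the hardware tell freshly written WQEs from stale ones. A tiny sketch of that computation, with WQE_CNT and WQE_SHIFT as assumed constants:

/*
 * Sketch of the owner-bit computation from line 295 above: with a
 * power-of-two ring, ((head + nreq) >> ilog2(wqe_cnt)) & 1 is the parity
 * of the number of passes around the ring, so the bit flips on every
 * wrap.  ilog2() is replaced by a constant shift here.
 */
#include <stdio.h>

#define WQE_CNT         8               /* power of two */
#define WQE_SHIFT       3               /* log2(WQE_CNT) */

static unsigned int owner_bit(unsigned int head, unsigned int nreq)
{
        return ~((head + nreq) >> WQE_SHIFT) & 0x1;
}

int main(void)
{
        /* same pass through the ring: same owner bit */
        printf("%u %u\n", owner_bit(0, 3), owner_bit(0, 7));
        /* slot 8 starts the next pass: the bit flips */
        printf("%u\n", owner_bit(0, 8));
        return 0;
}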
|
D | hns_roce_hw_v1.c |
    77 | int nreq = 0; | in hns_roce_v1_post_send() local
    92 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | in hns_roce_v1_post_send()
    93 | if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { | in hns_roce_v1_post_send()
    108 | qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = | in hns_roce_v1_post_send()
    317 | if (likely(nreq)) { | in hns_roce_v1_post_send()
    318 | qp->sq.head += nreq; | in hns_roce_v1_post_send()
    352 | int nreq = 0; | in hns_roce_v1_post_recv() local
    368 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | in hns_roce_v1_post_recv()
    369 | if (hns_roce_wq_overflow(&hr_qp->rq, nreq, | in hns_roce_v1_post_recv()
    402 | if (likely(nreq)) { | in hns_roce_v1_post_recv()
    [all …]
|
D | hns_roce_qp.c |
    1287 | bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, | in hns_roce_wq_overflow() argument
    1294 | if (likely(cur + nreq < hr_wq->max_post)) | in hns_roce_wq_overflow()
    1302 | return cur + nreq >= hr_wq->max_post; | in hns_roce_wq_overflow()
|
D | hns_roce_device.h |
    1246 | bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
|
/drivers/net/ethernet/mellanox/mlx4/ |
D | main.c |
    2937 | int nreq = min3(dev->caps.num_ports * | in mlx4_enable_msi_x() local
    2943 | nreq = min_t(int, nreq, msi_x); | in mlx4_enable_msi_x()
    2945 | entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); | in mlx4_enable_msi_x()
    2949 | for (i = 0; i < nreq; ++i) | in mlx4_enable_msi_x()
    2952 | nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, | in mlx4_enable_msi_x()
    2953 | nreq); | in mlx4_enable_msi_x()
    2955 | if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { | in mlx4_enable_msi_x()
    2960 | dev->caps.num_comp_vectors = nreq - 1; | in mlx4_enable_msi_x()
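In mlx4_enable_msi_x(), nreq is the number of MSI-X vectors to request: it starts as a min3() of limits (the arguments are truncated in the listing), is clamped by the msi_x module parameter, and is passed to pci_enable_msix_range(), which returns how many vectors were actually granted or a negative error. One vector is kept for the async event queue, hence num_comp_vectors = nreq - 1. The sketch below models only that sizing logic; grant_vectors() and every number in it are invented for illustration, not the PCI API.

/*
 * Model of the MSI-X sizing in mlx4_enable_msi_x(): request a clamped
 * vector count, let the allocator grant anything between 2 and that
 * count (pci_enable_msix_range() in the real code), and keep one vector
 * for async events.  grant_vectors() is a pretend allocator.
 */
#include <stdio.h>

static int min3(int a, int b, int c)
{
        int m = a < b ? a : b;

        return m < c ? m : c;
}

/* pretend the platform can only hand out up to 'available' vectors */
static int grant_vectors(int minvec, int maxvec, int available)
{
        if (available < minvec)
                return -1;              /* range allocation failed */
        return available < maxvec ? available : maxvec;
}

int main(void)
{
        int ports = 2, eqs_per_port = 4, cpus = 16, hw_limit = 64;
        int msi_x_param = 32;           /* module parameter cap */
        int nreq = min3(ports * eqs_per_port, cpus + 1, hw_limit);

        if (nreq > msi_x_param)
                nreq = msi_x_param;     /* the min_t() step */

        int granted = grant_vectors(2, nreq, 6);

        if (granted < 0) {
                printf("falling back to legacy interrupts\n");
                return 0;
        }
        /* one vector is reserved for the async event queue */
        printf("requested %d, granted %d, completion vectors %d\n",
               nreq, granted, granted - 1);
        return 0;
}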
|
/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
D | dr_send.c |
    226 | u32 opcode, int nreq) | in dr_rdma_segments() argument
    258 | if (nreq) | in dr_rdma_segments()
|
/drivers/usb/isp1760/ |
D | isp1760-udc.c |
    772 | struct isp1760_request *req, *nreq; | in isp1760_ep_disable() local
    798 | list_for_each_entry_safe(req, nreq, &req_list, queue) { | in isp1760_ep_disable()
|
/drivers/infiniband/sw/rdmavt/ |
D | qp.c |
    2171 | unsigned nreq = 0; | in rvt_post_send() local
    2198 | nreq++; | in rvt_post_send()
    2202 | if (nreq) { | in rvt_post_send()
    2207 | if (nreq == 1 && call_send) | in rvt_post_send()
|
/drivers/net/ethernet/neterion/vxge/ |
D | vxge-config.c |
    2325 | u32 nreq = 0, i; | in __vxge_hw_blockpool_blocks_add() local
    2329 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | in __vxge_hw_blockpool_blocks_add()
    2330 | blockpool->req_out += nreq; | in __vxge_hw_blockpool_blocks_add()
    2333 | for (i = 0; i < nreq; i++) | in __vxge_hw_blockpool_blocks_add()
|
/drivers/infiniband/hw/hfi1/ |
D | tid_rdma.c |
    5185 | struct tid_rdma_request *req, *nreq; | in make_tid_rdma_ack() local
    5259 | nreq = ack_to_tid_req(&qp->s_ack_queue[next]); | in make_tid_rdma_ack()
    5260 | if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg) | in make_tid_rdma_ack()
|
/drivers/nvme/host/ |
D | fc.c |
    73 | struct nvme_request nreq; /* member
|