
Searched refs:nreq (Results 1 – 16 of 16) sorted by relevance

/drivers/infiniband/hw/mthca/
mthca_qp.c 1557 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, in mthca_wq_overflow() argument
1564 if (likely(cur + nreq < wq->max)) in mthca_wq_overflow()
1572 return cur + nreq >= wq->max; in mthca_wq_overflow()
1623 int nreq; in mthca_tavor_post_send() local
1644 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_tavor_post_send()
1645 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mthca_tavor_post_send()
1649 qp->sq.max, nreq); in mthca_tavor_post_send()
1778 cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | in mthca_tavor_post_send()
1782 if (!nreq) { in mthca_tavor_post_send()
1795 if (likely(nreq)) { in mthca_tavor_post_send()
[all …]
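
The mthca_qp.c hits above show the common RDMA post-path pattern: nreq counts the work requests built so far in one post call, and before writing another WQE the driver checks that head + nreq will not run past the ring size (locklessly on the fast path, rechecking under the completion-queue lock when the queue looks full). A minimal userspace sketch of that overflow test, with illustrative struct and field names rather than the driver's real types:

#include <stdbool.h>

struct wq_sketch {
	unsigned head;	/* index of the next WQE to be posted     */
	unsigned tail;	/* index of the last WQE known completed  */
	unsigned max;	/* number of slots in the work queue ring */
};

/* Would posting nreq more work requests overflow the queue? */
static bool wq_overflow(const struct wq_sketch *wq, int nreq)
{
	unsigned cur = wq->head - wq->tail;	/* WQEs currently in flight */

	return cur + nreq >= wq->max;
}
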
mthca_srq.c 485 int nreq; in mthca_tavor_post_srq_recv() local
494 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_srq_recv()
535 ++nreq; in mthca_tavor_post_srq_recv()
536 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { in mthca_tavor_post_srq_recv()
537 nreq = 0; in mthca_tavor_post_srq_recv()
553 if (likely(nreq)) { in mthca_tavor_post_srq_recv()
560 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, in mthca_tavor_post_srq_recv()
584 int nreq; in mthca_arbel_post_srq_recv() local
590 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_srq_recv()
625 if (likely(nreq)) { in mthca_arbel_post_srq_recv()
[all …]
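
In mthca_tavor_post_srq_recv() the counter is also used for doorbell batching: the driver rings the receive doorbell and restarts nreq every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests, then once more for any remainder. A hedged userspace sketch of that flush-every-N shape (ring_doorbell() and MAX_PER_DB stand in for mthca_write64() and the driver's limit):

#include <stdio.h>

#define MAX_PER_DB 256				/* illustrative batch limit */

static void ring_doorbell(int count)
{
	printf("doorbell: %d WQEs\n", count);	/* stand-in for the MMIO write */
}

static void post_recv_list(int total)
{
	int nreq = 0;

	for (int i = 0; i < total; i++) {
		/* ... build one receive WQE here ... */
		if (++nreq == MAX_PER_DB) {
			ring_doorbell(nreq);	/* flush a full batch */
			nreq = 0;
		}
	}
	if (nreq)
		ring_doorbell(nreq);		/* flush the remainder */
}
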
/drivers/infiniband/hw/mlx4/
srq.c 318 int nreq; in mlx4_ib_post_srq_recv() local
326 nreq = 0; in mlx4_ib_post_srq_recv()
330 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_srq_recv()
362 if (likely(nreq)) { in mlx4_ib_post_srq_recv()
363 srq->wqe_ctr += nreq; in mlx4_ib_post_srq_recv()
qp.c 3309 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) in mlx4_wq_overflow() argument
3315 if (likely(cur + nreq < wq->max_post)) in mlx4_wq_overflow()
3323 return cur + nreq >= wq->max_post; in mlx4_wq_overflow()
3560 int nreq; in mlx4_ib_post_send() local
3596 nreq = 0; in mlx4_ib_post_send()
3602 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_send()
3606 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mlx4_ib_post_send()
3619 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in mlx4_ib_post_send()
3854 if (likely(nreq)) { in mlx4_ib_post_send()
3855 qp->sq.head += nreq; in mlx4_ib_post_send()
[all …]
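
The mlx4_ib_post_send() hit at line 3619 shows the usual power-of-two ring indexing: the nreq-th request of this post call is stored at (head + nreq) masked with wqe_cnt - 1, and head is only advanced by nreq once the whole chain has been written. A small sketch of that indexing, with illustrative names and a fixed ring size:

#include <stdint.h>

#define WQE_CNT 8u			/* must be a power of two */

struct sq_sketch {
	uint64_t wrid[WQE_CNT];
	unsigned head;			/* total WQEs posted so far */
};

static void save_wrid(struct sq_sketch *sq, int nreq, uint64_t wr_id)
{
	/* "& (WQE_CNT - 1)" is a cheap modulo because WQE_CNT is a power of two */
	sq->wrid[(sq->head + nreq) & (WQE_CNT - 1)] = wr_id;
}

static void commit(struct sq_sketch *sq, int nreq)
{
	sq->head += nreq;		/* publish the whole batch at once */
}
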
/drivers/crypto/inside-secure/
safexcel.c 430 int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; in safexcel_dequeue() local
480 } while (nreq++ < EIP197_MAX_BATCH_SZ); in safexcel_dequeue()
483 if (nreq == EIP197_MAX_BATCH_SZ) in safexcel_dequeue()
485 else if (!nreq) in safexcel_dequeue()
492 EIP197_HIA_RDR_THRESH_PROC_PKT(nreq), in safexcel_dequeue()
599 int ret, i, nreq, ndesc = 0; in safexcel_handle_result_descriptor() local
602 nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT); in safexcel_handle_result_descriptor()
603 nreq >>= 24; in safexcel_handle_result_descriptor()
604 nreq &= GENMASK(6, 0); in safexcel_handle_result_descriptor()
605 if (!nreq) in safexcel_handle_result_descriptor()
[all …]
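
In safexcel_handle_result_descriptor() nreq is read back from hardware: the processed-packet count sits in a 7-bit field starting at bit 24 of the PROC_COUNT register, hence the shift by 24 followed by GENMASK(6, 0). A userspace sketch of that extraction (the kernel's GENMASK is replaced by an equivalent local macro, fine for small fields like this one):

#include <stdint.h>

#define GENMASK(h, l)	(((1u << ((h) - (l) + 1)) - 1) << (l))

static unsigned int proc_pkt_count(uint32_t proc_count_reg)
{
	unsigned int nreq = proc_count_reg >> 24;	/* move the field to bit 0 */

	return nreq & GENMASK(6, 0);			/* keep the low 7 bits     */
}
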
/drivers/infiniband/hw/mlx5/
srq.c 459 int nreq; in mlx5_ib_post_srq_recv() local
470 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_srq_recv()
502 if (likely(nreq)) { in mlx5_ib_post_srq_recv()
503 srq->wqe_ctr += nreq; in mlx5_ib_post_srq_recv()
qp.c 3068 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) in mlx5_wq_overflow() argument
3074 if (likely(cur + nreq < wq->max_post)) in mlx5_wq_overflow()
3082 return cur + nreq >= wq->max_post; in mlx5_wq_overflow()
3847 int *size, int nreq) in begin_wqe() argument
3849 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) in begin_wqe()
3872 int nreq, u8 fence, u32 mlx5_opcode) in finish_wqe() argument
3885 qp->sq.wqe_head[idx] = qp->sq.head + nreq; in finish_wqe()
3910 int nreq; in mlx5_ib_post_send() local
3927 nreq = 0; in mlx5_ib_post_send()
3931 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_send()
[all …]
/drivers/dma/
bcm-sba-raid.c 305 struct sba_request *nreq; in sba_free_chained_requests() local
311 list_for_each_entry(nreq, &req->next, next) in sba_free_chained_requests()
312 _sba_free_request(sba, nreq); in sba_free_chained_requests()
437 struct sba_request *nreq, *first = req->first; in sba_process_received_request() local
457 list_for_each_entry(nreq, &first->next, next) in sba_process_received_request()
458 _sba_free_request(sba, nreq); in sba_process_received_request()
543 struct sba_request *req, *nreq; in sba_tx_submit() local
555 list_for_each_entry(nreq, &req->next, next) in sba_tx_submit()
556 _sba_pending_request(sba, nreq); in sba_tx_submit()
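
bcm-sba-raid.c uses nreq as an iterator rather than a counter: chained requests hang off the first request's "next" list, so freeing or submitting the whole chain is a list_for_each_entry() walk. A kernel-style sketch of that walk (struct and helper names are illustrative; the per-entry helper must not unlink entries from the list being walked, which matches the driver, where requests are moved on a different list member):

#include <linux/list.h>

struct req_sketch {
	struct list_head next;	/* list head on the first request,
				 * list node on every chained one  */
};

static void release_one(struct req_sketch *req)
{
	/* return req to the driver's free pool; body omitted in this sketch */
	(void)req;
}

static void release_chain(struct req_sketch *first)
{
	struct req_sketch *nreq;

	/* visit every request chained behind the first one */
	list_for_each_entry(nreq, &first->next, next)
		release_one(nreq);

	release_one(first);
}
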
/drivers/infiniband/hw/hns/
hns_roce_qp.c 796 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, in hns_roce_wq_overflow() argument
803 if (likely(cur + nreq < hr_wq->max_post)) in hns_roce_wq_overflow()
811 return cur + nreq >= hr_wq->max_post; in hns_roce_wq_overflow()
hns_roce_hw_v1.c 74 int nreq = 0; in hns_roce_v1_post_send() local
89 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v1_post_send()
90 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in hns_roce_v1_post_send()
105 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = in hns_roce_v1_post_send()
289 if (likely(nreq)) { in hns_roce_v1_post_send()
290 qp->sq.head += nreq; in hns_roce_v1_post_send()
323 int nreq = 0; in hns_roce_v1_post_recv() local
339 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v1_post_recv()
340 if (hns_roce_wq_overflow(&hr_qp->rq, nreq, in hns_roce_v1_post_recv()
373 if (likely(nreq)) { in hns_roce_v1_post_recv()
[all …]
hns_roce_device.h 726 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
/drivers/net/ethernet/mellanox/mlx4/
main.c 2890 int nreq = min3(dev->caps.num_ports * in mlx4_enable_msi_x() local
2895 entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); in mlx4_enable_msi_x()
2899 for (i = 0; i < nreq; ++i) in mlx4_enable_msi_x()
2902 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, in mlx4_enable_msi_x()
2903 nreq); in mlx4_enable_msi_x()
2905 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { in mlx4_enable_msi_x()
2910 dev->caps.num_comp_vectors = nreq - 1; in mlx4_enable_msi_x()
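
mlx4_enable_msi_x() uses nreq first as the number of MSI-X vectors it would like (an upper bound computed with min3()) and then as the number actually granted by pci_enable_msix_range(), which accepts any count between its minimum and maximum arguments or returns a negative errno. A kernel-style sketch of that request/grant shape (simplified: the real driver keeps the entries[] array around to read back the assigned vector numbers):

#include <linux/pci.h>
#include <linux/slab.h>

static int enable_msix_sketch(struct pci_dev *pdev, int want)
{
	struct msix_entry *entries;
	int nreq, i;

	entries = kcalloc(want, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < want; ++i)
		entries[i].entry = i;		/* ask for vectors 0..want-1 */

	/* accept anything from 2 vectors (async + one completion) up to "want" */
	nreq = pci_enable_msix_range(pdev, entries, 2, want);
	kfree(entries);
	if (nreq < 0)
		return nreq;			/* caller falls back to MSI/INTx */

	return nreq - 1;			/* one vector stays reserved for async events */
}
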
/drivers/infiniband/sw/rdmavt/
qp.c 1902 unsigned nreq = 0; in rvt_post_send() local
1929 nreq++; in rvt_post_send()
1933 if (nreq) { in rvt_post_send()
/drivers/usb/isp1760/
isp1760-udc.c 775 struct isp1760_request *req, *nreq; in isp1760_ep_disable() local
801 list_for_each_entry_safe(req, nreq, &req_list, queue) { in isp1760_ep_disable()
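
In isp1760_ep_disable() nreq is the lookahead pointer of list_for_each_entry_safe(): each request is unlinked and completed while the list is walked, so the loop keeps a pointer to the following entry and deleting the current one cannot break the traversal. A kernel-style sketch with illustrative names:

#include <linux/list.h>

struct udc_req_sketch {
	struct list_head queue;
};

static void drain_requests(struct list_head *req_list)
{
	struct udc_req_sketch *req, *nreq;

	list_for_each_entry_safe(req, nreq, req_list, queue) {
		list_del(&req->queue);	/* safe: nreq already points past req */
		/* ... complete and free req here ... */
	}
}
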
/drivers/net/ethernet/neterion/vxge/
vxge-config.c 2336 u32 nreq = 0, i; in __vxge_hw_blockpool_blocks_add() local
2340 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; in __vxge_hw_blockpool_blocks_add()
2341 blockpool->req_out += nreq; in __vxge_hw_blockpool_blocks_add()
2344 for (i = 0; i < nreq; i++) in __vxge_hw_blockpool_blocks_add()
/drivers/nvme/host/
fc.c 96 struct nvme_request nreq; /* member