/drivers/infiniband/sw/rxe/

rxe_cq.c
   86  struct rxe_create_cq_resp __user *uresp)   in rxe_cq_from_init() argument
   97  err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,   in rxe_cq_from_init()
  105  if (uresp)   in rxe_cq_from_init()
  118  struct rxe_resize_cq_resp __user *uresp,   in rxe_cq_resize_queue() argument
  125  uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);   in rxe_cq_resize_queue()

rxe_srq.c
  103  struct rxe_create_srq_resp __user *uresp)   in rxe_srq_from_init() argument
  130  err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,   in rxe_srq_from_init()
  138  if (uresp) {   in rxe_srq_from_init()
  139  if (copy_to_user(&uresp->srq_num, &srq->srq_num,   in rxe_srq_from_init()
  140  sizeof(uresp->srq_num))) {   in rxe_srq_from_init()

rxe_verbs.c
  299  struct rxe_create_srq_resp __user *uresp = NULL;   in rxe_create_srq() local
  302  if (udata->outlen < sizeof(*uresp))   in rxe_create_srq()
  304  uresp = udata->outbuf;   in rxe_create_srq()
  318  err = rxe_srq_from_init(rxe, srq, init, udata, uresp);   in rxe_create_srq()
  419  struct rxe_create_qp_resp __user *uresp = NULL;   in rxe_create_qp() local
  422  if (udata->outlen < sizeof(*uresp))   in rxe_create_qp()
  424  uresp = udata->outbuf;   in rxe_create_qp()
  447  err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);   in rxe_create_qp()
  788  struct rxe_create_cq_resp __user *uresp = NULL;   in rxe_create_cq() local
  791  if (udata->outlen < sizeof(*uresp))   in rxe_create_cq()
  [all …]

rxe_qp.c
  221  struct rxe_create_qp_resp __user *uresp)   in rxe_qp_init_req() argument
  256  err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,   in rxe_qp_init_req()
  290  struct rxe_create_qp_resp __user *uresp)   in rxe_qp_init_resp() argument
  310  err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,   in rxe_qp_init_resp()
  338  struct rxe_create_qp_resp __user *uresp,   in rxe_qp_from_init() argument
  360  err = rxe_qp_init_req(rxe, qp, init, udata, uresp);   in rxe_qp_from_init()
  364  err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);   in rxe_qp_from_init()

rxe_loc.h
   57  struct rxe_create_cq_resp __user *uresp);
   60  struct rxe_resize_cq_resp __user *uresp,
  159  struct rxe_create_qp_resp __user *uresp,
  227  struct rxe_create_srq_resp __user *uresp);
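
The rxe hits above treat the userspace response as optional: the entry points in rxe_verbs.c only set uresp when udata->outlen is large enough, and the lower-level init helpers guard every use with "uresp ? &uresp->mi : NULL" or "if (uresp)", so kernel-internal callers can simply pass NULL. A minimal sketch of that convention, with invented names (example_resp, example_fill_resp) rather than the real rxe structures:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Illustrative only: example_resp and example_fill_resp() are made-up
 * names.  The NULL guard mirrors the rxe idiom above, where a NULL uresp
 * means "kernel caller, nothing to copy back to userspace".
 */
struct example_resp {
        __u32 srq_num;
        __u32 reserved;
};

static int example_fill_resp(struct example_resp __user *uresp, u32 srq_num)
{
        if (!uresp)
                return 0;

        if (copy_to_user(&uresp->srq_num, &srq_num, sizeof(uresp->srq_num)))
                return -EFAULT;

        return 0;
}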

/drivers/infiniband/hw/ocrdma/

ocrdma_verbs.c
  945  struct ocrdma_create_cq_uresp uresp;   in ocrdma_copy_cq_uresp() local
  951  memset(&uresp, 0, sizeof(uresp));   in ocrdma_copy_cq_uresp()
  952  uresp.cq_id = cq->id;   in ocrdma_copy_cq_uresp()
  953  uresp.page_size = PAGE_ALIGN(cq->len);   in ocrdma_copy_cq_uresp()
  954  uresp.num_pages = 1;   in ocrdma_copy_cq_uresp()
  955  uresp.max_hw_cqe = cq->max_hw_cqe;   in ocrdma_copy_cq_uresp()
  956  uresp.page_addr[0] = virt_to_phys(cq->va);   in ocrdma_copy_cq_uresp()
  957  uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);   in ocrdma_copy_cq_uresp()
  958  uresp.db_page_size = dev->nic_info.db_page_size;   in ocrdma_copy_cq_uresp()
  959  uresp.phase_change = cq->phase_change ? 1 : 0;   in ocrdma_copy_cq_uresp()
  [all …]

/drivers/infiniband/sw/siw/

siw_verbs.c
  101  struct siw_uresp_alloc_ctx uresp = {};   in siw_alloc_ucontext() local
  112  uresp.dev_id = sdev->vendor_part_id;   in siw_alloc_ucontext()
  114  if (udata->outlen < sizeof(uresp)) {   in siw_alloc_ucontext()
  118  rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));   in siw_alloc_ucontext()
  454  struct siw_uresp_create_qp uresp = {};   in siw_create_qp() local
  456  uresp.num_sqe = num_sqe;   in siw_create_qp()
  457  uresp.num_rqe = num_rqe;   in siw_create_qp()
  458  uresp.qp_id = qp_id(qp);   in siw_create_qp()
  475  uresp.sq_key = qp->xa_sq_index << PAGE_SHIFT;   in siw_create_qp()
  476  uresp.rq_key = qp->xa_rq_index << PAGE_SHIFT;   in siw_create_qp()
  [all …]
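
Most of the other hits in this listing follow the shape visible in siw_alloc_ucontext() and siw_create_qp(): a zero-initialized response struct on the stack, a check that userspace supplied a large enough output buffer, then a single ib_copy_to_udata() call. A hedged sketch of that shape; example_uresp and its fields are invented, while struct ib_udata and ib_copy_to_udata() are the core helpers the listing already uses:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Invented response layout; only the copy-out sequence is the point here. */
struct example_uresp {
        __u32 dev_id;
        __u32 num_qp;
};

static int example_copy_uresp(struct ib_udata *udata, u32 dev_id, u32 num_qp)
{
        struct example_uresp uresp = {};

        /* Old userspace with a too-small output buffer is rejected up front. */
        if (udata->outlen < sizeof(uresp))
                return -EINVAL;

        uresp.dev_id = dev_id;
        uresp.num_qp = num_qp;

        return ib_copy_to_udata(udata, &uresp, sizeof(uresp));
}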

/drivers/infiniband/hw/cxgb3/

iwch_provider.c
  113  struct iwch_create_cq_resp uresp;   in iwch_create_cq() local
  171  uresp.cqid = chp->cq.cqid;   in iwch_create_cq()
  172  uresp.size_log2 = chp->cq.size_log2;   in iwch_create_cq()
  174  uresp.key = ucontext->key;   in iwch_create_cq()
  177  mm->key = uresp.key;   in iwch_create_cq()
  179  if (udata->outlen < sizeof(uresp)) {   in iwch_create_cq()
  182  mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *   in iwch_create_cq()
  186  mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *   in iwch_create_cq()
  188  uresp.memsize = mm->len;   in iwch_create_cq()
  189  uresp.reserved = 0;   in iwch_create_cq()
  [all …]

/drivers/infiniband/hw/cxgb4/

provider.c
   81  struct c4iw_alloc_ucontext_resp uresp;   in c4iw_alloc_ucontext() local
   90  if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {   in c4iw_alloc_ucontext()
  100  uresp.status_page_size = PAGE_SIZE;   in c4iw_alloc_ucontext()
  103  uresp.status_page_key = context->key;   in c4iw_alloc_ucontext()
  107  ret = ib_copy_to_udata(udata, &uresp,   in c4iw_alloc_ucontext()
  108  sizeof(uresp) - sizeof(uresp.reserved));   in c4iw_alloc_ucontext()
  112  mm->key = uresp.status_page_key;   in c4iw_alloc_ucontext()
  223  struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};   in c4iw_allocate_pd() local
  225  if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {   in c4iw_allocate_pd()
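
The c4iw_alloc_ucontext() hits add an ABI-compatibility twist: the trailing reserved member is excluded from both the outlen check and the copy length, so userspace built against the older, shorter response layout keeps working. A sketch of that idiom under the assumption that reserved is the last member; the struct and field names are illustrative:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

struct example_ucontext_resp {
        __u64 status_page_key;
        __u32 status_page_size;
        __u32 reserved;   /* assumed newest member; must stay last */
};

static int example_copy_ucontext_resp(struct ib_udata *udata, u64 key,
                                      u32 page_size)
{
        struct example_ucontext_resp uresp = {};

        /* Accept buffers that only cover the pre-"reserved" layout. */
        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved))
                return -EINVAL;

        uresp.status_page_key = key;
        uresp.status_page_size = page_size;

        return ib_copy_to_udata(udata, &uresp,
                                sizeof(uresp) - sizeof(uresp.reserved));
}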

cq.c
   999  struct c4iw_create_cq_resp uresp;   in c4iw_create_cq() local
  1094  memset(&uresp, 0, sizeof(uresp));   in c4iw_create_cq()
  1095  uresp.qid_mask = rhp->rdev.cqmask;   in c4iw_create_cq()
  1096  uresp.cqid = chp->cq.cqid;   in c4iw_create_cq()
  1097  uresp.size = chp->cq.size;   in c4iw_create_cq()
  1098  uresp.memsize = chp->cq.memsize;   in c4iw_create_cq()
  1100  uresp.key = ucontext->key;   in c4iw_create_cq()
  1102  uresp.gts_key = ucontext->key;   in c4iw_create_cq()
  1107  uresp.flags |= C4IW_64B_CQE;   in c4iw_create_cq()
  1110  ret = ib_copy_to_udata(udata,   in c4iw_create_cq()
  [all …]

qp.c
  2119  struct c4iw_create_qp_resp uresp;   in c4iw_create_qp() local
  2250  memset(&uresp, 0, sizeof(uresp));   in c4iw_create_qp()
  2258  uresp.flags = C4IW_QPF_ONCHIP;   in c4iw_create_qp()
  2261  uresp.flags |= C4IW_QPF_WRITE_W_IMM;   in c4iw_create_qp()
  2262  uresp.qid_mask = rhp->rdev.qpmask;   in c4iw_create_qp()
  2263  uresp.sqid = qhp->wq.sq.qid;   in c4iw_create_qp()
  2264  uresp.sq_size = qhp->wq.sq.size;   in c4iw_create_qp()
  2265  uresp.sq_memsize = qhp->wq.sq.memsize;   in c4iw_create_qp()
  2267  uresp.rqid = qhp->wq.rq.qid;   in c4iw_create_qp()
  2268  uresp.rq_size = qhp->wq.rq.size;   in c4iw_create_qp()
  [all …]
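
The c4iw_create_cq() and c4iw_create_qp() hits also show capability bits being advertised through the response (uresp.flags |= C4IW_64B_CQE, uresp.flags = C4IW_QPF_ONCHIP), so the user library knows how to interpret the queue memory it is about to map. A small sketch of that flag idiom, using an invented flag and struct:

#include <rdma/ib_verbs.h>

#define EXAMPLE_F_64B_CQE (1u << 0)   /* invented capability bit */

struct example_cq_resp {
        __u32 cqid;
        __u32 flags;
};

static int example_copy_cq_resp(struct ib_udata *udata, u32 cqid,
                                bool has_64b_cqe)
{
        struct example_cq_resp uresp = {};

        uresp.cqid = cqid;
        if (has_64b_cqe)
                uresp.flags |= EXAMPLE_F_64B_CQE;

        return ib_copy_to_udata(udata, &uresp, sizeof(uresp));
}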

/drivers/infiniband/hw/hns/

hns_roce_pd.c
   75  struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn};   in hns_roce_alloc_pd() local
   77  if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {   in hns_roce_alloc_pd()

/drivers/infiniband/hw/qedr/

verbs.c
  318  struct qedr_alloc_ucontext_resp uresp = {};   in qedr_alloc_ucontext() local
  340  uresp.dpm_enabled = dev->user_dpm_enabled;   in qedr_alloc_ucontext()
  341  uresp.wids_enabled = 1;   in qedr_alloc_ucontext()
  342  uresp.wid_count = oparams.wid_count;   in qedr_alloc_ucontext()
  343  uresp.db_pa = ctx->dpi_phys_addr;   in qedr_alloc_ucontext()
  344  uresp.db_size = ctx->dpi_size;   in qedr_alloc_ucontext()
  345  uresp.max_send_wr = dev->attr.max_sqe;   in qedr_alloc_ucontext()
  346  uresp.max_recv_wr = dev->attr.max_rqe;   in qedr_alloc_ucontext()
  347  uresp.max_srq_wr = dev->attr.max_srq_wr;   in qedr_alloc_ucontext()
  348  uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;   in qedr_alloc_ucontext()
  [all …]

/drivers/infiniband/hw/i40iw/

i40iw_verbs.c
  125  struct i40iw_alloc_ucontext_resp uresp = {};   in i40iw_alloc_ucontext() local
  136  uresp.max_qps = iwdev->max_qp;   in i40iw_alloc_ucontext()
  137  uresp.max_pds = iwdev->max_pd;   in i40iw_alloc_ucontext()
  138  uresp.wq_size = iwdev->max_qp_wr * 2;   in i40iw_alloc_ucontext()
  139  uresp.kernel_ver = req.userspace_ver;   in i40iw_alloc_ucontext()
  144  if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))   in i40iw_alloc_ucontext()
  290  struct i40iw_alloc_pd_resp uresp;   in i40iw_alloc_pd() local
  311  memset(&uresp, 0, sizeof(uresp));   in i40iw_alloc_pd()
  312  uresp.pd_id = pd_id;   in i40iw_alloc_pd()
  313  if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {   in i40iw_alloc_pd()
  [all …]
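
The i40iw_alloc_ucontext() hits show one more variant: the ABI version requested by userspace is read from the input buffer and echoed back as uresp.kernel_ver, so both sides agree on the layout in use. A sketch of that handshake; the struct names, fields, and version bound are assumptions, not the driver's actual ABI:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

#define EXAMPLE_ABI_VER 6   /* assumed highest version this sketch knows */

struct example_alloc_req {
        __u32 userspace_ver;
};

struct example_alloc_resp {
        __u32 kernel_ver;
        __u32 max_qps;
};

static int example_negotiate_abi(struct ib_udata *udata, u32 max_qps)
{
        struct example_alloc_req req;
        struct example_alloc_resp uresp = {};
        int ret;

        if (udata->inlen < sizeof(req) || udata->outlen < sizeof(uresp))
                return -EINVAL;

        ret = ib_copy_from_udata(&req, udata, sizeof(req));
        if (ret)
                return ret;

        /* Refuse request layouts this sketch does not understand. */
        if (req.userspace_ver > EXAMPLE_ABI_VER)
                return -EOPNOTSUPP;

        uresp.kernel_ver = req.userspace_ver;
        uresp.max_qps = max_qps;

        return ib_copy_to_udata(udata, &uresp, sizeof(uresp));
}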

/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_verbs.c
  322  struct pvrdma_alloc_ucontext_resp uresp = {};   in pvrdma_alloc_ucontext() local
  350  uresp.qp_tab_size = vdev->dsr->caps.max_qp;   in pvrdma_alloc_ucontext()
  351  ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));   in pvrdma_alloc_ucontext()

/drivers/infiniband/hw/mthca/

mthca_provider.c
  308  struct mthca_alloc_ucontext_resp uresp = {};   in mthca_alloc_ucontext() local
  315  uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;   in mthca_alloc_ucontext()
  317  uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;   in mthca_alloc_ucontext()
  319  uresp.uarc_size = 0;   in mthca_alloc_ucontext()
  332  if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {   in mthca_alloc_ucontext()