/drivers/infiniband/hw/qedr/
  verbs.h
    36  struct ib_device_attr *attr, struct ib_udata *udata);
    45  int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
    50  int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
    51  int qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
    52  int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
    53  int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
    55  struct ib_udata *udata);
    56  int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
    59  struct ib_udata *udata);
    61  int attr_mask, struct ib_udata *udata);
    [all …]
/drivers/infiniband/sw/siw/
  siw_verbs.h
    37  int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata);
    44  struct ib_udata *udata);
    46  struct ib_udata *udata);
    51  int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
    52  int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
    54  struct ib_udata *udata);
    58  int attr_mask, struct ib_udata *udata);
    59  int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata);
    64  int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
    68  u64 rnic_va, int rights, struct ib_udata *udata);
    [all …]
  siw_verbs.c
    80  int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata) in siw_alloc_ucontext() argument
    95  if (udata->outlen < sizeof(uresp)) { in siw_alloc_ucontext()
    99  rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); in siw_alloc_ucontext()
    124  struct ib_udata *udata) in siw_query_device() argument
    128  if (udata->inlen || udata->outlen) in siw_query_device()
    226  int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) in siw_alloc_pd() argument
    239  int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) in siw_dealloc_pd() argument
    296  struct ib_udata *udata) in siw_create_qp() argument
    303  rdma_udata_to_drv_context(udata, struct siw_ucontext, in siw_create_qp()
    381  if (udata) in siw_create_qp()
    [all …]
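The siw hits above show the two most common shapes of udata handling: a verb that defines no driver-private ABI rejects any payload outright, and a response is only copied out once the caller is known to have room for it. A minimal sketch of both checks, using hypothetical foo_* names and a made-up response layout rather than siw's real ones:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

struct foo_uresp_alloc_ctx {
        __u32 dev_id;                   /* hypothetical response field */
};

static int foo_query_device(struct ib_device *ibdev,
                            struct ib_device_attr *attr,
                            struct ib_udata *udata)
{
        /* This verb defines no driver-private request or response. */
        if (udata->inlen || udata->outlen)
                return -EINVAL;

        memset(attr, 0, sizeof(*attr));
        /* ... fill *attr from device capabilities ... */
        return 0;
}

static int foo_alloc_ucontext(struct ib_ucontext *base_ctx,
                              struct ib_udata *udata)
{
        struct foo_uresp_alloc_ctx uresp = {};

        /* Refuse userspace that cannot receive the whole response. */
        if (udata->outlen < sizeof(uresp))
                return -EINVAL;

        /* ... fill uresp.dev_id from the device ... */
        return ib_copy_to_udata(udata, &uresp, sizeof(uresp));
}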
/drivers/infiniband/hw/mana/
  mana_ib.h
    103  struct ib_udata *udata);
    106  u32 wq_attr_mask, struct ib_udata *udata);
    108  int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata);
    112  struct ib_udata *udata);
    120  struct ib_udata *udata);
    122  int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
    125  struct ib_udata *udata);
    128  int attr_mask, struct ib_udata *udata);
    130  int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
    138  struct ib_udata *udata);
    [all …]
  qp.c
    97  struct ib_udata *udata) in mana_ib_create_qp_rss() argument
    121  if (!udata || udata->inlen < sizeof(ucmd)) in mana_ib_create_qp_rss()
    124  ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); in mana_ib_create_qp_rss()
    232  ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); in mana_ib_create_qp_rss()
    258  struct ib_udata *udata) in mana_ib_create_qp_raw() argument
    267  rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, in mana_ib_create_qp_raw()
    283  if (!mana_ucontext || udata->inlen < sizeof(ucmd)) in mana_ib_create_qp_raw()
    286  err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); in mana_ib_create_qp_raw()
    382  err = ib_copy_to_udata(udata, &resp, sizeof(resp)); in mana_ib_create_qp_raw()
    408  struct ib_udata *udata) in mana_ib_create_qp() argument
    [all …]
  wq.c
    10  struct ib_udata *udata) in mana_ib_create_wq() argument
    19  if (udata->inlen < sizeof(ucmd)) in mana_ib_create_wq()
    22  err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); in mana_ib_create_wq()
    75  u32 wq_attr_mask, struct ib_udata *udata) in mana_ib_modify_wq() argument
    81  int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata) in mana_ib_destroy_wq() argument
    99  struct ib_udata *udata) in mana_ib_create_rwq_ind_table() argument
  cq.c
    9  struct ib_udata *udata) in mana_ib_create_cq() argument
    19  if (udata->inlen < sizeof(ucmd)) in mana_ib_create_cq()
    22  err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); in mana_ib_create_cq()
    67  int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) in mana_ib_destroy_cq() argument
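Every mana create verb above parses its user command the same way: require at least the command size the kernel understands, copy no more bytes than userspace actually sent, and recover the driver ucontext that sits behind the udata. A sketch of that pattern with hypothetical foo_* types standing in for the driver's real ABI structures:

#include <linux/minmax.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

struct foo_ucontext {
        struct ib_ucontext ibucontext;
        /* ... per-process driver state ... */
};

struct foo_create_cq_ucmd {
        __u64 buf_addr;         /* hypothetical: userspace ring buffer */
};

static int foo_create_cq(struct ib_cq *ibcq,
                         const struct ib_cq_init_attr *attr,
                         struct ib_udata *udata)
{
        struct foo_ucontext *uctx =
                rdma_udata_to_drv_context(udata, struct foo_ucontext,
                                          ibucontext);
        struct foo_create_cq_ucmd ucmd = {};
        int err;

        if (!uctx || udata->inlen < sizeof(ucmd))
                return -EINVAL;

        err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
        if (err)
                return err;

        /* ... pin ucmd.buf_addr and program the hardware queue ... */
        return 0;
}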
/drivers/infiniband/hw/usnic/
  usnic_ib_verbs.h
    51  int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
    52  int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
    54  struct ib_udata *udata);
    55  int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
    57  int attr_mask, struct ib_udata *udata);
    59  struct ib_udata *udata);
    60  int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
    63  struct ib_udata *udata);
    64  int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
    65  int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
/drivers/infiniband/hw/bnxt_re/
  ib_verbs.h
    179  struct ib_udata *udata);
    193  int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
    194  int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
    196  struct ib_udata *udata);
    201  struct ib_udata *udata);
    204  struct ib_udata *udata);
    206  int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
    210  struct ib_udata *udata);
    212  int qp_attr_mask, struct ib_udata *udata);
    215  int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
    [all …]
/drivers/infiniband/hw/ocrdma/
  ocrdma_verbs.h
    64  int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
    69  int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
    70  int ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
    73  struct ib_udata *udata);
    75  int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
    78  struct ib_udata *udata);
    82  int attr_mask, struct ib_udata *udata);
    86  int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
    90  struct ib_udata *udata);
    94  int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
    [all …]
/drivers/infiniband/sw/rdmavt/
  srq.c
    35  struct ib_udata *udata) in rvt_create_srq() argument
    59  dev->dparms.node, udata)) { in rvt_create_srq()
    68  if (udata && udata->outlen >= sizeof(__u64)) { in rvt_create_srq()
    71  srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq); in rvt_create_srq()
    77  ret = ib_copy_to_udata(udata, &srq->ip->offset, in rvt_create_srq()
    126  struct ib_udata *udata) in rvt_modify_srq() argument
    148  udata)) in rvt_modify_srq()
    151  if (udata && udata->inlen >= sizeof(__u64)) { in rvt_modify_srq()
    155  ret = ib_copy_from_udata(&offset_addr, udata, in rvt_modify_srq()
    159  udata->outbuf = (void __user *) in rvt_modify_srq()
    [all …]
  cq.c
    159  struct ib_udata *udata) in rvt_create_cq() argument
    189  if (udata && udata->outlen >= sizeof(__u64)) { in rvt_create_cq()
    207  if (udata && udata->outlen >= sizeof(__u64)) { in rvt_create_cq()
    208  cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc); in rvt_create_cq()
    214  err = ib_copy_to_udata(udata, &cq->ip->offset, in rvt_create_cq()
    276  int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) in rvt_destroy_cq() argument
    338  int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in rvt_resize_cq() argument
    356  if (udata && udata->outlen >= sizeof(__u64)) { in rvt_resize_cq()
    370  if (udata && udata->outlen >= sizeof(__u64)) { in rvt_resize_cq()
    373  ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); in rvt_resize_cq()
    [all …]
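rdmavt gates its mmap plumbing on whether the caller can receive a 64-bit offset at all: rvt_create_mmap_info() and the ib_copy_to_udata() of the offset only run when udata is non-NULL and outlen is large enough, and kernel-internal callers (udata == NULL) skip the branch entirely. A sketch of that conditional-response shape with hypothetical foo_* names; the mmap bookkeeping helper here is a stub, not rdmavt's real one:

#include <rdma/ib_verbs.h>

/* Stub standing in for whatever records the ring for a later mmap(). */
static u64 foo_publish_cq_for_mmap(struct ib_cq *ibcq)
{
        return 0;
}

static int foo_create_cq(struct ib_cq *ibcq,
                         const struct ib_cq_init_attr *attr,
                         struct ib_udata *udata)
{
        __u64 offset;

        /* ... allocate the completion ring ... */

        if (udata && udata->outlen >= sizeof(__u64)) {
                offset = foo_publish_cq_for_mmap(ibcq);
                return ib_copy_to_udata(udata, &offset, sizeof(offset));
        }
        return 0;
}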
/drivers/infiniband/hw/hns/
  hns_roce_cq.c
    200  struct ib_udata *udata, unsigned long addr) in alloc_cq_buf() argument
    213  udata, addr); in alloc_cq_buf()
    226  struct ib_udata *udata, unsigned long addr, in alloc_cq_db() argument
    233  if (udata) { in alloc_cq_db()
    235  udata->outlen >= offsetofend(typeof(*resp), cap_flags)) { in alloc_cq_db()
    236  uctx = rdma_udata_to_drv_context(udata, in alloc_cq_db()
    261  struct ib_udata *udata) in free_cq_db() argument
    269  if (udata) { in free_cq_db()
    270  uctx = rdma_udata_to_drv_context(udata, in free_cq_db()
    299  static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata, in get_cq_ucmd() argument
    [all …]
  hns_roce_srq.c
    166  struct ib_udata *udata, unsigned long addr) in alloc_srq_idx() argument
    183  udata, addr); in alloc_srq_idx()
    190  if (!udata) { in alloc_srq_idx()
    220  struct ib_udata *udata, unsigned long addr) in alloc_srq_wqe_buf() argument
    238  udata, addr); in alloc_srq_wqe_buf()
    291  struct ib_udata *udata) in set_srq_basic_param() argument
    297  max_sge = proc_srq_sge(hr_dev, srq, !!udata); in set_srq_basic_param()
    329  struct ib_udata *udata) in set_srq_param() argument
    333  ret = set_srq_basic_param(srq, init_attr, udata); in set_srq_param()
    343  struct ib_udata *udata) in alloc_srq_buf() argument
    [all …]
  hns_roce_qp.c
    748  struct ib_udata *udata, unsigned long addr) in alloc_qp_buf() argument
    761  udata, addr); in alloc_qp_buf()
    784  struct ib_udata *udata, in user_qp_has_sdb() argument
    789  udata->outlen >= offsetofend(typeof(*resp), cap_flags) && in user_qp_has_sdb()
    791  udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); in user_qp_has_sdb()
    796  struct ib_udata *udata, in user_qp_has_rdb() argument
    800  udata->outlen >= offsetofend(typeof(*resp), cap_flags) && in user_qp_has_rdb()
    813  struct ib_udata *udata, in qp_mmap_entry() argument
    817  rdma_udata_to_drv_context(udata, in qp_mmap_entry()
    843  struct ib_udata *udata, in alloc_user_qp_db() argument
    [all …]
  hns_roce_pd.c
    45  int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) in hns_roce_alloc_pd() argument
    62  if (udata) { in hns_roce_alloc_pd()
    65  ret = ib_copy_to_udata(udata, &resp, in hns_roce_alloc_pd()
    66  min(udata->outlen, sizeof(resp))); in hns_roce_alloc_pd()
    76  int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) in hns_roce_dealloc_pd() argument
    146  int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata) in hns_roce_alloc_xrcd() argument
    162  int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata) in hns_roce_dealloc_xrcd() argument
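The hns entries lean on offsetofend() to probe how much of the request/response ABI a given userspace actually speaks, and hns_roce_alloc_pd() clamps its response copy to min(udata->outlen, sizeof(resp)). A sketch of that extensible-ABI check with a hypothetical response layout (the cap_flags field and its value are made up for illustration):

#include <linux/minmax.h>
#include <linux/stddef.h>
#include <rdma/ib_verbs.h>

struct foo_alloc_pd_resp {
        __u32 pdn;              /* original response */
        __u32 cap_flags;        /* hypothetical field added in a later ABI rev */
};

static int foo_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct foo_alloc_pd_resp resp = {};

        /* ... allocate the hardware PD and fill resp.pdn ... */

        if (udata) {
                /* Only new userspace reserved space for cap_flags. */
                if (udata->outlen >= offsetofend(typeof(resp), cap_flags))
                        resp.cap_flags = 1;     /* hypothetical capability bit */

                return ib_copy_to_udata(udata, &resp,
                                        min(udata->outlen, sizeof(resp)));
        }
        return 0;
}

Because the copy length is clamped, older userspace with a short output buffer simply never sees the new field, while newer userspace gets it automatically.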
/drivers/infiniband/hw/efa/
  efa.h
    136  struct ib_udata *udata);
    146  int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
    147  int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
    148  int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
    150  struct ib_udata *udata);
    151  int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
    153  struct ib_udata *udata);
    156  struct ib_udata *udata);
    160  struct ib_udata *udata);
    161  int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
    [all …]
  efa_verbs.c
    208  struct ib_udata *udata) in efa_query_device() argument
    215  if (udata && udata->inlen && in efa_query_device()
    216  !ib_is_udata_cleared(udata, 0, udata->inlen)) { in efa_query_device()
    243  if (udata && udata->outlen) { in efa_query_device()
    266  err = ib_copy_to_udata(udata, &resp, in efa_query_device()
    267  min(sizeof(resp), udata->outlen)); in efa_query_device()
    379  int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) in efa_alloc_pd() argument
    387  if (udata->inlen && in efa_alloc_pd()
    388  !ib_is_udata_cleared(udata, 0, udata->inlen)) { in efa_alloc_pd()
    402  if (udata->outlen) { in efa_alloc_pd()
    [all …]
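efa_query_device() and efa_alloc_pd() accept a longer-than-expected request only if the portion the kernel does not understand is all zeroes (ib_is_udata_cleared()), and they trim the response to whatever the caller can hold. A sketch of that forward-compatibility convention with hypothetical foo_* names:

#include <linux/minmax.h>
#include <rdma/ib_verbs.h>

struct foo_query_device_resp {
        __u32 max_foo;          /* hypothetical capability field */
        __u32 reserved;
};

static int foo_query_device(struct ib_device *ibdev,
                            struct ib_device_attr *attr,
                            struct ib_udata *udata)
{
        struct foo_query_device_resp resp = {};

        /* Unknown input is tolerated only if it is all zero. */
        if (udata && udata->inlen &&
            !ib_is_udata_cleared(udata, 0, udata->inlen))
                return -EINVAL;

        /* ... fill *attr and resp from device capabilities ... */

        if (udata && udata->outlen)
                return ib_copy_to_udata(udata, &resp,
                                        min(sizeof(resp), udata->outlen));
        return 0;
}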
/drivers/infiniband/hw/mthca/
  mthca_provider.c
    287  struct ib_udata *udata) in mthca_alloc_ucontext() argument
    314  if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { in mthca_alloc_ucontext()
    348  static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) in mthca_alloc_pd() argument
    354  err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd); in mthca_alloc_pd()
    358  if (udata) { in mthca_alloc_pd()
    359  if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { in mthca_alloc_pd()
    368  static int mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) in mthca_dealloc_pd()
    376  struct ib_udata *udata) in mthca_ah_create() argument
    393  struct ib_udata *udata) in mthca_create_srq() argument
    397  udata, struct mthca_ucontext, ibucontext); in mthca_create_srq()
    [all …]
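mthca_alloc_pd() uses the udata pointer itself as the kernel-vs-user switch: !udata selects the privileged in-kernel path, and the PD number is only copied back when a userspace caller exists. A sketch of that split with hypothetical foo_* names; the hardware PD helpers are stubs standing in for the driver's firmware calls:

#include <rdma/ib_verbs.h>

struct foo_pd {
        struct ib_pd ibpd;
        u32 pdn;
};

/* Stubs standing in for the hardware/firmware PD bookkeeping. */
static int foo_hw_pd_alloc(struct ib_device *ibdev, bool privileged, u32 *pdn)
{
        *pdn = 0;
        return 0;
}

static void foo_hw_pd_free(struct ib_device *ibdev, u32 pdn)
{
}

static int foo_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct foo_pd *pd = container_of(ibpd, struct foo_pd, ibpd);
        int err;

        /* udata == NULL means an in-kernel ULP, i.e. a privileged PD. */
        err = foo_hw_pd_alloc(ibpd->device, !udata, &pd->pdn);
        if (err)
                return err;

        if (udata &&
            ib_copy_to_udata(udata, &pd->pdn, sizeof(pd->pdn))) {
                foo_hw_pd_free(ibpd->device, pd->pdn);
                return -EFAULT;
        }
        return 0;
}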
/drivers/infiniband/hw/mlx5/
  srq.c
    45  struct ib_udata *udata, int buf_size) in create_srq_user() argument
    50  udata, struct mlx5_ib_ucontext, ibucontext); in create_srq_user()
    55  ucmdlen = min(udata->inlen, sizeof(ucmd)); in create_srq_user()
    57  if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { in create_srq_user()
    65  if (udata->inlen > sizeof(ucmd) && in create_srq_user()
    66  !ib_is_udata_cleared(udata, sizeof(ucmd), in create_srq_user()
    67  udata->inlen - sizeof(ucmd))) in create_srq_user()
    71  err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx); in create_srq_user()
    172  struct ib_udata *udata) in destroy_srq_user()
    176  udata, in destroy_srq_user()
    [all …]
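create_srq_user() in mlx5 copies at most the command size the kernel knows about and, if userspace sent a larger command, requires the unknown tail to be cleared so new request bits are never silently dropped. A sketch of that parse step with a hypothetical command layout:

#include <linux/minmax.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

struct foo_create_srq_ucmd {
        __u64 buf_addr;
        __u64 db_addr;
        __u32 flags;
        __u32 reserved;
};

static int foo_parse_srq_cmd(struct ib_udata *udata,
                             struct foo_create_srq_ucmd *ucmd)
{
        size_t ucmdlen = min(udata->inlen, sizeof(*ucmd));

        memset(ucmd, 0, sizeof(*ucmd));
        if (ib_copy_from_udata(ucmd, udata, ucmdlen))
                return -EFAULT;

        /* A longer command is fine only if the extra tail is all zero. */
        if (udata->inlen > sizeof(*ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(*ucmd),
                                 udata->inlen - sizeof(*ucmd)))
                return -EINVAL;

        return 0;
}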
/drivers/infiniband/hw/vmw_pvrdma/
  pvrdma_verbs.h
    350  struct ib_udata *udata);
    364  int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
    366  int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
    367  int pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
    371  struct ib_udata *udata);
    372  int pvrdma_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
    378  struct ib_udata *udata);
    379  int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
    383  struct ib_udata *udata);
    387  struct ib_udata *udata);
    [all …]
/drivers/infiniband/sw/rxe/
  rxe_verbs.c
    20  struct ib_udata *udata) in rxe_query_device() argument
    25  if (udata->inlen || udata->outlen) { in rxe_query_device()
    205  static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata) in rxe_alloc_ucontext() argument
    229  static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) in rxe_alloc_pd() argument
    248  static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) in rxe_dealloc_pd() argument
    263  struct ib_udata *udata) in rxe_create_ah() argument
    270  if (udata) { in rxe_create_ah()
    272  if (udata->outlen >= sizeof(*uresp)) in rxe_create_ah()
    273  uresp = udata->outbuf; in rxe_create_ah()
    368  struct ib_udata *udata) in rxe_create_srq() argument
    [all …]
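rxe_create_ah() captures the __user response pointer out of udata->outbuf up front, but only when outlen shows there is room for it, and writes it later with copy_to_user() once the AH number is known. A sketch of that deferred-response shape with hypothetical foo_* names and response layout:

#include <linux/uaccess.h>
#include <rdma/ib_verbs.h>

struct foo_create_ah_resp {
        __u32 ah_num;
        __u32 reserved;
};

static int foo_create_ah(struct ib_ah *ibah,
                         struct rdma_ah_init_attr *init_attr,
                         struct ib_udata *udata)
{
        struct foo_create_ah_resp __user *uresp = NULL;
        u32 ah_num = 0;

        /* Remember where the response goes, if the caller left room. */
        if (udata && udata->outlen >= sizeof(*uresp))
                uresp = udata->outbuf;

        /* ... allocate the AH, which yields ah_num ... */

        if (uresp && copy_to_user(&uresp->ah_num, &ah_num, sizeof(ah_num)))
                return -EFAULT;

        return 0;
}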
/drivers/infiniband/hw/mlx4/
  srq.c
    74  struct ib_udata *udata) in mlx4_ib_create_srq() argument
    78  udata, struct mlx4_ib_ucontext, ibucontext); in mlx4_ib_create_srq()
    111  if (udata) { in mlx4_ib_create_srq()
    114  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) in mlx4_ib_create_srq()
    132  err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db); in mlx4_ib_create_srq()
    193  if (udata) in mlx4_ib_create_srq()
    194  if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) { in mlx4_ib_create_srq()
    204  if (udata) in mlx4_ib_create_srq()
    218  if (!udata) in mlx4_ib_create_srq()
    225  enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) in mlx4_ib_modify_srq() argument
    [all …]
/drivers/xen/
  privcmd.c
    72  static long privcmd_ioctl_hypercall(struct file *file, void __user *udata) in privcmd_ioctl_hypercall() argument
    82  if (copy_from_user(&hypercall, udata, sizeof(hypercall))) in privcmd_ioctl_hypercall()
    255  static long privcmd_ioctl_mmap(struct file *file, void __user *udata) in privcmd_ioctl_mmap() argument
    269  if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd))) in privcmd_ioctl_mmap()
    448  struct file *file, void __user *udata, int version) in privcmd_ioctl_mmap_batch() argument
    461  if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch))) in privcmd_ioctl_mmap_batch()
    469  if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2))) in privcmd_ioctl_mmap_batch()
    623  static long privcmd_ioctl_dm_op(struct file *file, void __user *udata) in privcmd_ioctl_dm_op()
    635  if (copy_from_user(&kdata, udata, sizeof(kdata))) in privcmd_ioctl_dm_op()
    709  static long privcmd_ioctl_restrict(struct file *file, void __user *udata) in privcmd_ioctl_restrict()
    [all …]
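In privcmd.c the name udata means something different from the RDMA entries: it is simply the raw void __user * argument of an ioctl handler, consumed with copy_from_user() into a kernel copy of the command. A minimal sketch of that shape with a hypothetical command struct:

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo_ioctl_cmd {
        __u64 addr;
        __u32 len;
        __u32 flags;
};

static long foo_ioctl_do_cmd(struct file *file, void __user *udata)
{
        struct foo_ioctl_cmd cmd;

        /* Pull the whole command into kernel memory before touching it. */
        if (copy_from_user(&cmd, udata, sizeof(cmd)))
                return -EFAULT;

        /* ... validate and act on cmd ... */
        return 0;
}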
/drivers/infiniband/hw/irdma/
  verbs.c
    13  struct ib_udata *udata) in irdma_query_device() argument
    20  if (udata->inlen || udata->outlen) in irdma_query_device()
    271  struct ib_udata *udata) in irdma_alloc_ucontext() argument
    282  if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || in irdma_alloc_ucontext()
    283  udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) in irdma_alloc_ucontext()
    286  if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) in irdma_alloc_ucontext()
    299  if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { in irdma_alloc_ucontext()
    308  if (ib_copy_to_udata(udata, &uresp, in irdma_alloc_ucontext()
    309  min(sizeof(uresp), udata->outlen))) in irdma_alloc_ucontext()
    335  if (ib_copy_to_udata(udata, &uresp, in irdma_alloc_ucontext()
    [all …]
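irdma_alloc_ucontext() pins down explicit minimum request and response lengths before touching the payload, then clamps both copies to what the caller actually provided. A sketch with hypothetical FOO_* minimums and structs rather than irdma's real ABI:

#include <linux/minmax.h>
#include <rdma/ib_verbs.h>

#define FOO_ALLOC_UCTX_MIN_REQ_LEN      8       /* hypothetical v1 sizes */
#define FOO_ALLOC_UCTX_MIN_RESP_LEN     16

struct foo_alloc_ucontext_req {
        __u32 userspace_ver;
        __u32 reserved;
};

struct foo_alloc_ucontext_resp {
        __u32 max_qps;
        __u32 max_cqs;
        __u64 db_mmap_key;
};

static int foo_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
        struct foo_alloc_ucontext_req req = {};
        struct foo_alloc_ucontext_resp uresp = {};

        /* Refuse callers that speak less than the v1 ABI. */
        if (udata->inlen < FOO_ALLOC_UCTX_MIN_REQ_LEN ||
            udata->outlen < FOO_ALLOC_UCTX_MIN_RESP_LEN)
                return -EINVAL;

        if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
                return -EFAULT;

        /* ... negotiate from req.userspace_ver and fill uresp ... */

        if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
                return -EFAULT;

        return 0;
}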