/drivers/infiniband/hw/mthca/
D | mthca_provider.c
    410  struct mthca_create_srq ucmd;  in mthca_create_srq() local
    420  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))  in mthca_create_srq()
    424  context->db_tab, ucmd.db_index,  in mthca_create_srq()
    425  ucmd.db_page);  in mthca_create_srq()
    430  srq->mr.ibmr.lkey = ucmd.lkey;  in mthca_create_srq()
    431  srq->db_index = ucmd.db_index;  in mthca_create_srq()
    439  context->db_tab, ucmd.db_index);  in mthca_create_srq()
    474  struct mthca_create_qp ucmd;  in mthca_create_qp() local
    491  if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {  in mthca_create_qp()
    498  ucmd.sq_db_index, ucmd.sq_db_page);  in mthca_create_qp()
    [all …]
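The mthca hits above show the baseline pattern that almost every driver in this listing follows: a driver-private ABI struct is declared on the stack and filled by one fixed-size ib_copy_from_udata() call before any field is trusted. A minimal sketch of that shape; the struct below is invented for illustration (mthca's real layout lives in include/uapi/rdma/mthca-abi.h):

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Invented stand-in for a driver ABI struct such as mthca_create_srq. */
struct demo_create_srq {
        __u32 lkey;
        __u32 db_index;
        __u64 db_page;
};

static int demo_create_srq(struct ib_udata *udata)
{
        struct demo_create_srq ucmd;

        /* One fixed-size copy of the whole command; -EFAULT if the
         * user buffer cannot be read. */
        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
                return -EFAULT;

        /* Only now are ucmd.lkey / ucmd.db_index / ucmd.db_page used. */
        return 0;
}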
/drivers/infiniband/hw/mlx5/
D | qp.c
    331  int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)  in set_rq_size() argument
    347  if (ucmd) {  in set_rq_size()
    348  qp->rq.wqe_cnt = ucmd->rq_wqe_count;  in set_rq_size()
    349  if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))  in set_rq_size()
    351  qp->rq.wqe_shift = ucmd->rq_wqe_shift;  in set_rq_size()
    523  struct mlx5_ib_create_qp *ucmd,  in set_user_buf_size() argument
    535  if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {  in set_user_buf_size()
    537  ucmd->sq_wqe_count);  in set_user_buf_size()
    541  qp->sq.wqe_cnt = ucmd->sq_wqe_count;  in set_user_buf_size()
    796  struct mlx5_ib_create_wq *ucmd)  in create_user_rq() argument
    [all …]
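set_rq_size() and set_user_buf_size() illustrate what must happen right after the copy: every count and shift that arrived from user space is bounded before it sizes a queue. The check at line 349 caps the shift at the bit width of its own field, and the power-of-two check keeps ring-index masking valid. A condensed sketch with an invented device cap:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/types.h>

static int demo_set_sizes(u32 wqe_count, u8 wqe_shift, u32 max_wqes)
{
        /* Cap the shift before anyone computes 1 << wqe_shift. */
        if (wqe_shift >= BITS_PER_TYPE(u32))
                return -EINVAL;

        /* Ring sizes must be powers of two so index masking works. */
        if (wqe_count && !is_power_of_2(wqe_count))
                return -EINVAL;

        if (wqe_count > max_wqes)       /* invented cap, cf. device caps */
                return -EINVAL;

        return 0;
}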
D | cq.c
    685  struct mlx5_ib_create_cq ucmd = {};  in create_cq_user() local
    696  ucmdlen = udata->inlen < sizeof(ucmd) ?  in create_cq_user()
    697  (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);  in create_cq_user()
    699  if (ib_copy_from_udata(&ucmd, udata, ucmdlen))  in create_cq_user()
    702  if (ucmdlen == sizeof(ucmd) &&  in create_cq_user()
    703  (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))  in create_cq_user()
    706  if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)  in create_cq_user()
    709  *cqe_size = ucmd.cqe_size;  in create_cq_user()
    712  ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,  in create_cq_user()
    719  err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);  in create_cq_user()
    [all …]
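create_cq_user() handles a ucmd that grew over time: when udata->inlen is smaller than the current struct, the copy is truncated to the layout that predates the flags field, and flags are validated only when the caller actually sent them. A sketch of the same dance with an invented struct and flag:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

#define DEMO_CQ_FLAG_PAD 0x1    /* invented, cf. MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD */

struct demo_create_cq {
        __u64 buf_addr;
        __u64 db_addr;
        __u32 cqe_size;
        __u32 flags;            /* appended by a newer ABI revision */
};

static int demo_create_cq_user(struct ib_udata *udata, int *cqe_size)
{
        struct demo_create_cq ucmd = {};  /* zeroed: absent fields read as 0 */
        size_t ucmdlen;

        /* Old callers pass the struct without its trailing flags field. */
        ucmdlen = udata->inlen < sizeof(ucmd) ?
                  sizeof(ucmd) - sizeof(ucmd.flags) : sizeof(ucmd);

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
                return -EFAULT;

        /* Validate flags only when the caller actually sent them. */
        if (ucmdlen == sizeof(ucmd) && (ucmd.flags & ~DEMO_CQ_FLAG_PAD))
                return -EINVAL;

        if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
                return -EINVAL;

        *cqe_size = ucmd.cqe_size;
        return 0;
}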
D | srq.c
    49   struct mlx5_ib_create_srq ucmd = {};  in create_srq_user() local
    60   ucmdlen = min(udata->inlen, sizeof(ucmd));  in create_srq_user()
    62   if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {  in create_srq_user()
    67   if (ucmd.reserved0 || ucmd.reserved1)  in create_srq_user()
    70   if (udata->inlen > sizeof(ucmd) &&  in create_srq_user()
    71   !ib_is_udata_cleared(udata, sizeof(ucmd),  in create_srq_user()
    72   udata->inlen - sizeof(ucmd)))  in create_srq_user()
    76   err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);  in create_srq_user()
    81   srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);  in create_srq_user()
    83   srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);  in create_srq_user()
    [all …]
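create_srq_user() goes one step further than the CQ path: it clamps the copy with min(), insists that reserved fields are zero, and accepts a longer-than-known command only if ib_is_udata_cleared() confirms the unknown tail is all zeroes, which is what lets those bytes become new fields later. Sketch, struct invented:

#include <linux/errno.h>
#include <linux/minmax.h>
#include <rdma/ib_verbs.h>

struct demo_create_srq {
        __u64 buf_addr;
        __u64 db_addr;
        __u32 flags;
        __u32 reserved0;
};

static int demo_create_srq_user(struct ib_udata *udata)
{
        struct demo_create_srq ucmd = {};
        size_t ucmdlen = min(udata->inlen, sizeof(ucmd));

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
                return -EFAULT;

        /* Reserved fields must be zero so they can gain meaning later. */
        if (ucmd.reserved0)
                return -EINVAL;

        /* A longer command is fine only if the tail we don't understand
         * is all zeroes; ib_is_udata_cleared() checks exactly that. */
        if (udata->inlen > sizeof(ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(ucmd),
                                 udata->inlen - sizeof(ucmd)))
                return -EINVAL;

        return 0;
}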
D | mlx5_ib.h
    1424  struct mlx5_ib_create_qp *ucmd,  in get_qp_user_index() argument
    1431  !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))  in get_qp_user_index()
    1438  return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);  in get_qp_user_index()
    1442  struct mlx5_ib_create_srq *ucmd,  in get_srq_user_index() argument
    1449  !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))  in get_srq_user_index()
    1456  return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);  in get_srq_user_index()
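These two inline helpers apply one rule to both QP and SRQ creation: on the old ABI (no CQE versioning) the uidx field must still hold its default value, while on the new ABI the supplied index is range-checked before use. A hedged reimplementation of that logic; the mask and default below are assumptions modeled on mlx5_ib.h, not the real constants:

#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_DEFAULT_UIDX 0xffffff      /* assumed, cf. MLX5_IB_DEFAULT_UIDX */
#define DEMO_UIDX_MASK    0xffffff      /* assumed 24-bit user-index range */

static int demo_get_user_index(u8 cqe_version, u32 uidx, u32 *user_index)
{
        if (!cqe_version) {
                /* Old ABI: the field must be left at the default. */
                if (uidx != DEMO_DEFAULT_UIDX)
                        return -EINVAL;
                *user_index = DEMO_DEFAULT_UIDX;
                return 0;
        }

        /* New ABI: accept only indices inside the assumed range. */
        if (uidx & ~DEMO_UIDX_MASK)
                return -EINVAL;

        *user_index = uidx;
        return 0;
}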
D | main.c
    3416  struct mlx5_ib_create_flow *ucmd)  in flow_counters_set_data() argument
    3424  if (ucmd && ucmd->ncounters_data != 0) {  in flow_counters_set_data()
    3425  cntrs_data = ucmd->data;  in flow_counters_set_data()
    3522  struct mlx5_ib_create_flow *ucmd)  in _create_flow_rule() argument
    3594  err = flow_counters_set_data(flow_act.counters, ucmd);  in _create_flow_rule()
    3797  struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;  in mlx5_ib_create_flow() local
    3824  ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);  in mlx5_ib_create_flow()
    3825  if (!ucmd)  in mlx5_ib_create_flow()
    3828  err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);  in mlx5_ib_create_flow()
    3894  dst, underlay_qpn, ucmd);  in mlx5_ib_create_flow()
    [all …]
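mlx5_ib_create_flow() is the one variable-length command in this listing: a fixed header (ucmd_hdr) announces how many counter descriptors follow, the kernel computes required_ucmd_sz from it, allocates a heap buffer, and copies the full command. A sketch of that two-phase read under invented names and an invented bound (the real code derives its sizes from the mlx5 ABI structs):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

#define DEMO_MAX_COUNTERS 64            /* invented sanity bound */

/* Invented variable-length command: fixed header plus trailing data. */
struct demo_create_flow {
        __u32 ncounters_data;
        __u32 reserved;
        __u64 data[];                   /* ncounters_data entries follow */
};

static int demo_parse_flow_ucmd(struct ib_udata *udata)
{
        struct demo_create_flow hdr, *ucmd;
        size_t required_sz;
        int err;

        if (udata->inlen < sizeof(hdr))
                return -EINVAL;

        /* Phase 1: read just the fixed header. */
        if (ib_copy_from_udata(&hdr, udata, sizeof(hdr)))
                return -EFAULT;

        /* Bound the announced payload before trusting it in an allocation. */
        if (hdr.ncounters_data > DEMO_MAX_COUNTERS)
                return -EINVAL;

        required_sz = struct_size(ucmd, data, hdr.ncounters_data);
        if (udata->inlen < required_sz)
                return -EINVAL;

        ucmd = kzalloc(required_sz, GFP_KERNEL);
        if (!ucmd)
                return -ENOMEM;

        /* Phase 2: copy header plus payload in one go. */
        err = ib_copy_from_udata(ucmd, udata, required_sz);
        if (!err) {
                /* ... hand ucmd->data to the flow-counter setup ... */
        }

        kfree(ucmd);
        return err;
}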
/drivers/infiniband/hw/mlx4/
D | qp.c
    439  struct mlx4_ib_create_qp *ucmd)  in set_user_sq_size() argument
    442  if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||  in set_user_sq_size()
    443  ucmd->log_sq_stride >  in set_user_sq_size()
    445  ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)  in set_user_sq_size()
    448  qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;  in set_user_sq_size()
    449  qp->sq.wqe_shift = ucmd->log_sq_stride;  in set_user_sq_size()
    541  struct mlx4_ib_create_qp_rss *ucmd)  in set_qp_rss() argument
    546  if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&  in set_qp_rss()
    548  memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,  in set_qp_rss()
    555  if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |  in set_qp_rss()
    [all …]
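set_user_sq_size() is the mlx4 counterpart of the mlx5 checks above, but here the user passes log2 quantities, so the driver bounds both the resulting entry count and the stride range before shifting. A sketch with invented limits (mlx4's real bounds come from dev->caps and MLX4_IB_MIN_SQ_STRIDE), plus an explicit guard against an oversized shift:

#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_MAX_WQES      16384        /* invented device cap */
#define DEMO_MAX_SQ_STRIDE 7            /* invented stride bounds */
#define DEMO_MIN_SQ_STRIDE 6

static int demo_set_user_sq_size(u8 log_sq_bb_count, u8 log_sq_stride,
                                 u32 *wqe_cnt, u8 *wqe_shift)
{
        if (log_sq_bb_count >= 32 ||            /* 1 << count must fit u32 */
            (1U << log_sq_bb_count) > DEMO_MAX_WQES ||
            log_sq_stride > DEMO_MAX_SQ_STRIDE ||
            log_sq_stride < DEMO_MIN_SQ_STRIDE)
                return -EINVAL;

        *wqe_cnt = 1U << log_sq_bb_count;
        *wqe_shift = log_sq_stride;
        return 0;
}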
D | srq.c
    108  struct mlx4_ib_create_srq ucmd;  in mlx4_ib_create_srq() local
    110  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))  in mlx4_ib_create_srq()
    113  srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);  in mlx4_ib_create_srq()
    126  err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);  in mlx4_ib_create_srq()
D | cq.c
    206  struct mlx4_ib_create_cq ucmd;  in mlx4_ib_create_cq() local
    208  if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {  in mlx4_ib_create_cq()
    213  buf_addr = (void *)(unsigned long)ucmd.buf_addr;  in mlx4_ib_create_cq()
    215  ucmd.buf_addr, entries);  in mlx4_ib_create_cq()
    219  err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);  in mlx4_ib_create_cq()
    318  struct mlx4_ib_resize_cq ucmd;  in mlx4_alloc_resize_umem() local
    324  if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))  in mlx4_alloc_resize_umem()
    332  &cq->resize_umem, ucmd.buf_addr, entries);  in mlx4_alloc_resize_umem()
/drivers/infiniband/hw/vmw_pvrdma/
D | pvrdma_srq.c
    109  struct pvrdma_create_srq ucmd;  in pvrdma_create_srq() local
    144  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {  in pvrdma_create_srq()
    149  srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0, 0);  in pvrdma_create_srq()
D | pvrdma_cq.c
    116  struct pvrdma_create_cq ucmd;  in pvrdma_create_cq() local
    133  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {  in pvrdma_create_cq()
    138  cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,  in pvrdma_create_cq()
D | pvrdma_qp.c
    198  struct pvrdma_create_qp ucmd;  in pvrdma_create_qp() local
    258  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {  in pvrdma_create_qp()
    265  qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,  in pvrdma_create_qp()
    266  ucmd.rbuf_size, 0, 0);  in pvrdma_create_qp()
    277  qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,  in pvrdma_create_qp()
    278  ucmd.sbuf_size, 0, 0);  in pvrdma_create_qp()
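pvrdma_create_qp() pins two rings that user space described in the ucmd, the receive and send buffers, and has to unwind the first pin when the second fails. A sketch against the five-argument ib_umem_get() shown throughout this listing (kernels of roughly the v5.4 era; later kernels dropped the udata and dmasync parameters):

#include <linux/err.h>
#include <rdma/ib_umem.h>

static int demo_pin_qp_buffers(struct ib_udata *udata,
                               u64 rbuf_addr, u32 rbuf_size,
                               u64 sbuf_addr, u32 sbuf_size,
                               struct ib_umem **rumem,
                               struct ib_umem **sumem)
{
        *rumem = ib_umem_get(udata, rbuf_addr, rbuf_size, 0, 0);
        if (IS_ERR(*rumem))
                return PTR_ERR(*rumem);

        *sumem = ib_umem_get(udata, sbuf_addr, sbuf_size, 0, 0);
        if (IS_ERR(*sumem)) {
                /* Unwind the receive-ring pin on failure. */
                ib_umem_release(*rumem);
                return PTR_ERR(*sumem);
        }
        return 0;
}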
/drivers/infiniband/hw/hns/
D | hns_roce_qp.c
    329  struct hns_roce_ib_create_qp *ucmd)  in check_sq_size_with_integrity() argument
    335  if (ucmd->log_sq_stride > max_sq_stride ||  in check_sq_size_with_integrity()
    336  ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {  in check_sq_size_with_integrity()
    353  struct hns_roce_ib_create_qp *ucmd)  in hns_roce_set_user_sq_size() argument
    360  if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||  in hns_roce_set_user_sq_size()
    364  ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);  in hns_roce_set_user_sq_size()
    370  hr_qp->sq.wqe_shift = ucmd->log_sq_stride;  in hns_roce_set_user_sq_size()
    692  struct hns_roce_ib_create_qp ucmd;  in hns_roce_create_qp_common() local
    734  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {  in hns_roce_create_qp_common()
    741  &ucmd);  in hns_roce_create_qp_common()
    [all …]
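hns_roce_set_user_sq_size() makes the overflow hazard from the mlx4 sketch explicit: instead of shifting first and checking later, it lets check_shl_overflow() from <linux/overflow.h> report when 1 << log_sq_bb_count no longer fits the destination type. A minimal sketch; the cap is invented (the real one comes from hr_dev capabilities):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int demo_log_to_wqe_cnt(u8 log_sq_bb_count, u32 max_wqes,
                               u32 *wqe_cnt)
{
        /* check_shl_overflow() stores 1 << log_sq_bb_count into *wqe_cnt
         * and returns true if the shift overflowed a u32. */
        if (check_shl_overflow(1, log_sq_bb_count, wqe_cnt))
                return -EINVAL;

        if (*wqe_cnt > max_wqes)        /* invented device cap */
                return -EINVAL;

        return 0;
}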
D | hns_roce_srq.c
    182  struct hns_roce_ib_create_srq ucmd;  in create_user_srq() local
    186  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))  in create_user_srq()
    189  srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);  in create_user_srq()
    208  srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,  in create_user_srq()
D | hns_roce_cq.c
    305  struct hns_roce_ib_create_cq ucmd;  in create_user_cq() local
    311  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {  in create_user_cq()
    318  &hr_cq->umem, ucmd.buf_addr,  in create_user_cq()
    327  ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,  in create_user_cq()
/drivers/infiniband/sw/rxe/
D | rxe_srq.c
    151  struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)  in rxe_srq_from_attr() argument
    162  mi = u64_to_user_ptr(ucmd->mmap_info_addr);  in rxe_srq_from_attr()
D | rxe_verbs.c
    338  struct rxe_modify_srq_cmd ucmd = {};  in rxe_modify_srq() local
    341  if (udata->inlen < sizeof(ucmd))  in rxe_modify_srq()
    344  err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));  in rxe_modify_srq()
    353  err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);  in rxe_modify_srq()
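rxe_modify_srq() adds a guard some of the fixed-size copies above lack: it rejects an input area smaller than the command before calling ib_copy_from_udata(), rather than reading past the caller's buffer; the cxgb4 entry near the end of this listing does the same. Sketch with an invented single-field command (the real rxe_modify_srq_cmd carries mmap_info_addr, decoded with u64_to_user_ptr() as in rxe_srq.c above):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct demo_modify_srq_cmd {
        __u64 mmap_info_addr;   /* user pointer carried as a u64 */
};

static int demo_modify_srq(struct ib_udata *udata)
{
        struct demo_modify_srq_cmd ucmd = {};
        void __user *mi;

        /* Refuse short input outright instead of truncating. */
        if (udata->inlen < sizeof(ucmd))
                return -EINVAL;

        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
                return -EFAULT;

        /* u64 fields keep user pointers the same width on 32- and
         * 64-bit ABIs; decode only after the copy succeeded. */
        mi = u64_to_user_ptr(ucmd.mmap_info_addr);

        return mi ? 0 : -EINVAL;        /* stand-in for the real mmap step */
}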
D | rxe_loc.h
    231  struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
/drivers/infiniband/hw/qib/
D | qib_file_ops.c
    2036  const struct qib_cmd __user *ucmd;  in qib_write() local
    2055  ucmd = (const struct qib_cmd __user *) data;  in qib_write()
    2057  if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {  in qib_write()
    2069  src = &ucmd->cmd.user_info;  in qib_write()
    2075  src = &ucmd->cmd.recv_ctrl;  in qib_write()
    2081  src = &ucmd->cmd.ctxt_info;  in qib_write()
    2088  src = &ucmd->cmd.tid_info;  in qib_write()
    2094  src = &ucmd->cmd.part_key;  in qib_write()
    2107  src = &ucmd->cmd.poll_type;  in qib_write()
    2113  src = &ucmd->cmd.armlaunch_ctrl;  in qib_write()
    [all …]
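qib_write() is not a uverbs entry point; it implements write() on a character device, so it does its own two-phase copy: peek at the type field, then copy only the union member that the type selects, since each member has a different size. A compressed sketch with an invented command set:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Invented layout; the real one is struct qib_cmd. The __u64 type
 * field keeps the union aligned right after it. */
struct demo_cmd {
        __u64 type;
        union {
                __u64 user_info;
                __u32 recv_ctrl;
                __u16 part_key;
        } cmd;
};

static ssize_t demo_write(const char __user *data, size_t count)
{
        const struct demo_cmd __user *ucmd =
                (const struct demo_cmd __user *)data;
        const void __user *src;
        struct demo_cmd cmd;
        size_t consumed, copy;

        if (count < sizeof(cmd.type))
                return -EINVAL;
        if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type)))
                return -EFAULT;

        consumed = sizeof(cmd.type);
        switch (cmd.type) {
        case 1:                         /* invented: set user info */
                src = &ucmd->cmd.user_info;
                copy = sizeof(cmd.cmd.user_info);
                break;
        case 2:                         /* invented: receive control */
                src = &ucmd->cmd.recv_ctrl;
                copy = sizeof(cmd.cmd.recv_ctrl);
                break;
        default:
                return -EINVAL;
        }

        /* The caller must have supplied the member it asked us to read. */
        if (count < consumed + copy)
                return -EINVAL;
        if (copy_from_user(&cmd.cmd, src, copy))
                return -EFAULT;

        return count;
}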
/drivers/block/rsxx/
D | cregs.c
    664  struct rsxx_reg_access __user *ucmd,  in rsxx_reg_access() argument
    670  st = copy_from_user(&cmd, ucmd, sizeof(cmd));  in rsxx_reg_access()
    681  st = put_user(cmd.stat, &ucmd->stat);  in rsxx_reg_access()
    686  st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);  in rsxx_reg_access()
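The rsxx entries show the full round trip of an ioctl-style register-access command: copy the command in, perform the access, then write the status and payload back through the same user pointer. A compressed sketch with an invented layout:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Invented, reduced stand-in for struct rsxx_reg_access. */
struct demo_reg_access {
        __u32 stat;
        __u32 cnt;
        __u8  data[256];
};

static int demo_reg_access(struct demo_reg_access __user *ucmd)
{
        struct demo_reg_access cmd;

        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;

        /* The user-supplied count bounds the copy-back below. */
        if (cmd.cnt > sizeof(cmd.data))
                return -EINVAL;

        cmd.stat = 0;   /* pretend the hardware access succeeded */

        /* Write the status back into one field of the user struct... */
        if (put_user(cmd.stat, &ucmd->stat))
                return -EFAULT;

        /* ...then the payload, only as many bytes as were requested. */
        if (copy_to_user(ucmd->data, cmd.data, cmd.cnt))
                return -EFAULT;

        return 0;
}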
D | rsxx_priv.h
    413  struct rsxx_reg_access __user *ucmd,
/drivers/scsi/sym53c8xx_2/
D | sym_glue.c
    129  struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);  in sym_xpt_done() local
    132  if (ucmd->eh_done)  in sym_xpt_done()
    133  complete(ucmd->eh_done);  in sym_xpt_done()
    580  struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);  in sym_eh_handler() local
    660  ucmd->eh_done = &eh_done;  in sym_eh_handler()
    663  ucmd->eh_done = NULL;  in sym_eh_handler()
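This ucmd is the odd one out: sym_ucmd is not copied from user space at all but is the driver's per-scsi_cmnd scratch area (SYM_UCMD_PTR()), and the eh_done field lets the error handler block until sym_xpt_done() sees the aborted command complete. A stripped-down sketch of that handshake, with invented names and timeout:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_ucmd {
        struct completion *eh_done;     /* set only while eh is waiting */
};

/* Called from the command-completion path. */
static void demo_xpt_done(struct demo_ucmd *ucmd)
{
        if (ucmd->eh_done)              /* error handler is waiting */
                complete(ucmd->eh_done);
}

/* Called from the SCSI error-handler path. */
static int demo_eh_handler(struct demo_ucmd *ucmd)
{
        DECLARE_COMPLETION_ONSTACK(eh_done);
        int rc;

        ucmd->eh_done = &eh_done;
        /* ... issue the abort/reset to the hardware here ... */
        rc = wait_for_completion_timeout(&eh_done,
                                         msecs_to_jiffies(5000)) ?
             0 : -ETIMEDOUT;
        ucmd->eh_done = NULL;
        return rc;
}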
/drivers/infiniband/hw/cxgb4/
D | cq.c
    998   struct c4iw_create_cq ucmd;  in c4iw_create_cq() local
    1014  if (udata->inlen < sizeof(ucmd))  in c4iw_create_cq()
/drivers/nvme/host/
D | core.c
    1379  struct nvme_passthru_cmd __user *ucmd)  in nvme_user_cmd() argument
    1390  if (copy_from_user(&cmd, ucmd, sizeof(cmd)))  in nvme_user_cmd()
    1419  if (put_user(result, &ucmd->result))  in nvme_user_cmd()
    1427  struct nvme_passthru_cmd64 __user *ucmd)  in nvme_user_cmd64() argument
    1437  if (copy_from_user(&cmd, ucmd, sizeof(cmd)))  in nvme_user_cmd64()
    1466  if (put_user(cmd.result, &ucmd->result))  in nvme_user_cmd64()
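nvme_user_cmd() and its 64-bit variant close the listing with the classic passthrough shape: snapshot the whole command, reject flags the kernel does not know, execute, and store the completion result back into a single field of the user's struct with put_user(). A reduced sketch under an invented struct (the real layouts are nvme_passthru_cmd and nvme_passthru_cmd64 in include/uapi/linux/nvme_ioctl.h):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Invented, reduced passthru command for illustration only. */
struct demo_passthru_cmd {
        __u8  opcode;
        __u8  flags;
        __u16 rsvd;
        __u64 addr;
        __u32 data_len;
        __u32 result;
};

static int demo_user_cmd(struct demo_passthru_cmd __user *ucmd)
{
        struct demo_passthru_cmd cmd;
        u32 result;

        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;

        /* Unknown flags are rejected, not ignored, mirroring
         * nvme_user_cmd(). */
        if (cmd.flags)
                return -EINVAL;

        result = 0;     /* would come back from command execution */

        /* Write the result into just one field of the user struct. */
        if (put_user(result, &ucmd->result))
                return -EFAULT;

        return 0;
}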