Searched refs:ucmd (Results 1 – 25 of 28) sorted by relevance

/drivers/infiniband/hw/mthca/
mthca_provider.c
403 struct mthca_create_srq ucmd; in mthca_create_srq() local
413 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) in mthca_create_srq()
417 context->db_tab, ucmd.db_index, in mthca_create_srq()
418 ucmd.db_page); in mthca_create_srq()
423 srq->mr.ibmr.lkey = ucmd.lkey; in mthca_create_srq()
424 srq->db_index = ucmd.db_index; in mthca_create_srq()
432 context->db_tab, ucmd.db_index); in mthca_create_srq()
468 struct mthca_create_qp ucmd; in mthca_create_qp() local
485 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { in mthca_create_qp()
492 ucmd.sq_db_index, ucmd.sq_db_page); in mthca_create_qp()
[all …]
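
The mthca hits show the oldest variant of the ucmd pattern: declare the ABI struct on the stack and copy exactly sizeof(ucmd) bytes, with no provision for shorter or longer user buffers. A minimal sketch of that shape, using a hypothetical struct my_create_srq rather than the real mthca ABI:

    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    struct my_create_srq {            /* hypothetical ABI struct */
            __u32 lkey;
            __u32 db_index;
            __u64 db_page;
    };

    static int my_parse_create_srq(struct ib_udata *udata,
                                   struct my_create_srq *ucmd)
    {
            /* Copies exactly sizeof(*ucmd) bytes from the user buffer;
             * returns -EFAULT if the copy faults. */
            if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd)))
                    return -EFAULT;
            /* ucmd->lkey, ucmd->db_index, ucmd->db_page are now safe
             * kernel-side copies and can be validated and used. */
            return 0;
    }
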
/drivers/infiniband/hw/mlx5/
qp.c
352 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) in set_rq_size() argument
370 if (ucmd) { in set_rq_size()
371 qp->rq.wqe_cnt = ucmd->rq_wqe_count; in set_rq_size()
372 if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift)) in set_rq_size()
374 qp->rq.wqe_shift = ucmd->rq_wqe_shift; in set_rq_size()
556 struct mlx5_ib_create_qp *ucmd, in set_user_buf_size() argument
568 if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) { in set_user_buf_size()
570 ucmd->sq_wqe_count); in set_user_buf_size()
574 qp->sq.wqe_cnt = ucmd->sq_wqe_count; in set_user_buf_size()
832 struct mlx5_ib_create_wq *ucmd) in create_user_rq() argument
[all …]
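
set_rq_size() and set_user_buf_size() above illustrate the validation that follows the copy: user-supplied ring sizes must be powers of two, and user-supplied shifts must be bounded before they feed a shift operation. A sketch of those two checks, with hypothetical field names:

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/log2.h>
    #include <linux/types.h>

    static int my_validate_wq(u32 wqe_count, u8 wqe_shift)
    {
            /* Hardware work queues are power-of-two sized. */
            if (wqe_count && !is_power_of_2(wqe_count))
                    return -EINVAL;
            /* Reject a shift wider than the field that carries it,
             * mirroring the BITS_PER_BYTE * sizeof() bound above. */
            if (wqe_shift > BITS_PER_BYTE * sizeof(wqe_shift))
                    return -EINVAL;
            return 0;
    }
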
cq.c
709 struct mlx5_ib_create_cq ucmd = {}; in create_cq_user() local
720 ucmdlen = min(udata->inlen, sizeof(ucmd)); in create_cq_user()
724 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) in create_cq_user()
727 if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD | in create_cq_user()
731 if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) || in create_cq_user()
732 ucmd.reserved0 || ucmd.reserved1) in create_cq_user()
735 *cqe_size = ucmd.cqe_size; in create_cq_user()
738 ib_umem_get(&dev->ib_dev, ucmd.buf_addr, in create_cq_user()
739 entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE); in create_cq_user()
745 err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db); in create_cq_user()
[all …]
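
create_cq_user() demonstrates the forward-compatible copy: zero-initialize the kernel struct, clamp the copy length to min(udata->inlen, sizeof(ucmd)), then reject unknown flag bits and nonzero reserved fields. A condensed sketch, with hypothetical struct and flag names:

    #include <linux/string.h>
    #include <rdma/ib_verbs.h>

    struct my_create_cq {             /* hypothetical ABI struct */
            __u64 buf_addr;
            __u64 db_addr;
            __u32 cqe_size;
            __u32 flags;
            __u32 reserved;
    };
    #define MY_CQ_KNOWN_FLAGS 0x3     /* hypothetical flag mask */

    static int my_parse_create_cq(struct ib_udata *udata,
                                  struct my_create_cq *ucmd)
    {
            size_t ucmdlen = min(udata->inlen, sizeof(*ucmd));

            memset(ucmd, 0, sizeof(*ucmd)); /* tail stays zero on short copies */
            if (ib_copy_from_udata(ucmd, udata, ucmdlen))
                    return -EFAULT;
            if (ucmd->flags & ~MY_CQ_KNOWN_FLAGS) /* unknown future flags */
                    return -EINVAL;
            if ((ucmd->cqe_size != 64 && ucmd->cqe_size != 128) ||
                ucmd->reserved)
                    return -EINVAL;
            return 0;
    }
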
srq.c
49 struct mlx5_ib_create_srq ucmd = {}; in create_srq_user() local
60 ucmdlen = min(udata->inlen, sizeof(ucmd)); in create_srq_user()
62 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { in create_srq_user()
67 if (ucmd.reserved0 || ucmd.reserved1) in create_srq_user()
70 if (udata->inlen > sizeof(ucmd) && in create_srq_user()
71 !ib_is_udata_cleared(udata, sizeof(ucmd), in create_srq_user()
72 udata->inlen - sizeof(ucmd))) in create_srq_user()
76 err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx); in create_srq_user()
81 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); in create_srq_user()
83 srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0); in create_srq_user()
[all …]
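
create_srq_user() adds the complementary check for the opposite mismatch: when userspace hands in more bytes than the kernel-side struct holds, ib_is_udata_cleared() insists the unknown tail is all zeroes, so a newer ABI that actually uses those bytes fails loudly instead of being silently misread. The fragment, essentially as it appears above:

    if (udata->inlen > sizeof(ucmd) &&
        !ib_is_udata_cleared(udata, sizeof(ucmd),
                             udata->inlen - sizeof(ucmd)))
            return -EINVAL;
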
mlx5_ib.h
1414 struct mlx5_ib_create_qp *ucmd, in get_qp_user_index() argument
1420 if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version && in get_qp_user_index()
1421 (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) in get_qp_user_index()
1424 if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version) in get_qp_user_index()
1427 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index); in get_qp_user_index()
1431 struct mlx5_ib_create_srq *ucmd, in get_srq_user_index() argument
1437 if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version && in get_srq_user_index()
1438 (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) in get_srq_user_index()
1441 if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version) in get_srq_user_index()
1444 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index); in get_srq_user_index()
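
get_qp_user_index() and get_srq_user_index() use offsetofend() to ask whether the copy from userspace was long enough to include a trailing field at all, which is how optional late-ABI fields such as uidx are detected. A sketch with a hypothetical struct:

    #include <linux/stddef.h>
    #include <linux/types.h>

    struct my_create_qp {             /* hypothetical ABI struct */
            __u64 buf_addr;
            __u32 uidx;               /* field added in a later ABI rev */
            __u32 reserved;
    };

    static bool my_user_gave_uidx(size_t inlen)
    {
            /* True only if the user buffer extended past .uidx,
             * i.e. the field holds real data rather than zero fill. */
            return offsetofend(struct my_create_qp, uidx) <= inlen;
    }
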
counters.h
15 struct mlx5_ib_create_flow *ucmd);
counters.c
589 struct mlx5_ib_create_flow *ucmd) in mlx5_ib_flow_counters_set_data() argument
597 if (ucmd && ucmd->ncounters_data != 0) { in mlx5_ib_flow_counters_set_data()
598 cntrs_data = ucmd->data; in mlx5_ib_flow_counters_set_data()
fs.c
907 struct mlx5_ib_create_flow *ucmd) in _create_flow_rule() argument
979 err = mlx5_ib_flow_counters_set_data(flow_act.counters, ucmd); in _create_flow_rule()
1157 struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr; in mlx5_ib_create_flow() local
1183 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL); in mlx5_ib_create_flow()
1184 if (!ucmd) in mlx5_ib_create_flow()
1187 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz); in mlx5_ib_create_flow()
1250 underlay_qpn, ucmd); in mlx5_ib_create_flow()
1272 kfree(ucmd); in mlx5_ib_create_flow()
1284 kfree(ucmd); in mlx5_ib_create_flow()
/drivers/infiniband/hw/hns/
hns_roce_cq.c
228 struct hns_roce_ib_create_cq *ucmd) in set_cqe_size() argument
233 if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) in set_cqe_size()
234 hr_cq->cqe_size = ucmd->cqe_size; in set_cqe_size()
249 struct hns_roce_ib_create_cq ucmd = {}; in hns_roce_create_cq() local
276 ret = ib_copy_from_udata(&ucmd, udata, in hns_roce_create_cq()
277 min(udata->inlen, sizeof(ucmd))); in hns_roce_create_cq()
285 set_cqe_size(hr_cq, udata, &ucmd); in hns_roce_create_cq()
287 ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr); in hns_roce_create_cq()
293 ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp); in hns_roce_create_cq()
hns_roce_qp.c
444 struct hns_roce_ib_create_qp *ucmd) in check_sq_size_with_integrity() argument
450 if (ucmd->log_sq_stride > max_sq_stride || in check_sq_size_with_integrity()
451 ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { in check_sq_size_with_integrity()
467 struct hns_roce_ib_create_qp *ucmd) in set_user_sq_size() argument
473 if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || in set_user_sq_size()
477 ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); in set_user_sq_size()
488 hr_qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
694 struct hns_roce_ib_create_qp *ucmd) in user_qp_has_sdb() argument
699 udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); in user_qp_has_sdb()
722 struct hns_roce_ib_create_qp *ucmd, in alloc_qp_db() argument
[all …]
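
set_user_sq_size() in both hns (above) and mlx4 (below) runs the user's log2 count through check_shl_overflow(), so 1 << log_sq_bb_count can never wrap to zero or past the destination type. A sketch, hypothetical names:

    #include <linux/errno.h>
    #include <linux/overflow.h>
    #include <linux/types.h>

    static int my_sq_wqe_cnt(u8 log_sq_bb_count, u32 *cnt)
    {
            /* check_shl_overflow() returns true when the shifted value
             * does not fit in *cnt; *cnt is valid only on false. */
            if (check_shl_overflow(1, log_sq_bb_count, cnt))
                    return -EINVAL;
            return 0;
    }
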
hns_roce_srq.c
291 struct hns_roce_ib_create_srq ucmd = {}; in hns_roce_create_srq() local
307 ret = ib_copy_from_udata(&ucmd, udata, in hns_roce_create_srq()
308 min(udata->inlen, sizeof(ucmd))); in hns_roce_create_srq()
316 ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr); in hns_roce_create_srq()
323 ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr); in hns_roce_create_srq()
/drivers/infiniband/hw/mlx4/
qp.c
413 struct mlx4_ib_create_qp *ucmd) in set_user_sq_size() argument
418 if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || in set_user_sq_size()
421 if (ucmd->log_sq_stride > in set_user_sq_size()
423 ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) in set_user_sq_size()
426 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
427 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
519 struct mlx4_ib_create_qp_rss *ucmd) in set_qp_rss() argument
524 if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) && in set_qp_rss()
526 memcpy(rss_ctx->rss_key, ucmd->rx_hash_key, in set_qp_rss()
533 if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 | in set_qp_rss()
[all …]
srq.c
108 struct mlx4_ib_create_srq ucmd; in mlx4_ib_create_srq() local
110 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) in mlx4_ib_create_srq()
114 ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0); in mlx4_ib_create_srq()
128 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db); in mlx4_ib_create_srq()
cq.c
205 struct mlx4_ib_create_cq ucmd; in mlx4_ib_create_cq() local
207 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { in mlx4_ib_create_cq()
212 buf_addr = (void *)(unsigned long)ucmd.buf_addr; in mlx4_ib_create_cq()
214 ucmd.buf_addr, entries); in mlx4_ib_create_cq()
218 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db); in mlx4_ib_create_cq()
317 struct mlx4_ib_resize_cq ucmd; in mlx4_alloc_resize_umem() local
323 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) in mlx4_alloc_resize_umem()
331 &cq->resize_umem, ucmd.buf_addr, entries); in mlx4_alloc_resize_umem()
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_srq.c
109 struct pvrdma_create_srq ucmd; in pvrdma_create_srq() local
144 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { in pvrdma_create_srq()
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
pvrdma_cq.c
116 struct pvrdma_create_cq ucmd; in pvrdma_create_cq() local
133 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { in pvrdma_create_cq()
138 cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size, in pvrdma_create_cq()
pvrdma_qp.c
202 struct pvrdma_create_qp ucmd; in pvrdma_create_qp() local
262 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { in pvrdma_create_qp()
279 ib_umem_get(pd->device, ucmd.rbuf_addr, in pvrdma_create_qp()
280 ucmd.rbuf_size, 0); in pvrdma_create_qp()
291 qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr, in pvrdma_create_qp()
292 ucmd.sbuf_size, 0); in pvrdma_create_qp()
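
pvrdma_create_qp() pins the send and receive rings separately with ib_umem_get(), passing the address and size straight from the copied ucmd; the call both validates the user range and takes the page references. A fragment sketch against the ib_umem_get() signature used in these excerpts (it varies across kernel versions):

    struct ib_umem *sumem;

    sumem = ib_umem_get(pd->device, ucmd.sbuf_addr, ucmd.sbuf_size, 0);
    if (IS_ERR(sumem))
            return PTR_ERR(sumem);
    /* ... map sumem into the device; ib_umem_release() on teardown ... */
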
/drivers/dma-buf/
dma-heap.c
170 static long dma_heap_ioctl(struct file *file, unsigned int ucmd, in dma_heap_ioctl() argument
177 int nr = _IOC_NR(ucmd); in dma_heap_ioctl()
189 out_size = _IOC_SIZE(ucmd); in dma_heap_ioctl()
191 if ((ucmd & kcmd & IOC_IN) == 0) in dma_heap_ioctl()
193 if ((ucmd & kcmd & IOC_OUT) == 0) in dma_heap_ioctl()
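
dma_heap_ioctl() is the one non-RDMA style of ucmd in this list: the variable is the raw ioctl command word, and the _IOC_* macros cross-check the user-passed number, size, and direction bits against the kernel's table before anything is copied. A simplified sketch of that cross-check, with a hypothetical helper name:

    #include <linux/errno.h>
    #include <linux/ioctl.h>

    static int my_check_ioctl(unsigned int ucmd, unsigned int kcmd)
    {
            if (_IOC_NR(ucmd) != _IOC_NR(kcmd))
                    return -EINVAL;
            if (_IOC_SIZE(ucmd) > _IOC_SIZE(kcmd))
                    return -EINVAL;
            /* A direction the kernel relies on must also be set in
             * the user's command word. */
            if ((kcmd & IOC_IN) && !(ucmd & IOC_IN))
                    return -EINVAL;
            if ((kcmd & IOC_OUT) && !(ucmd & IOC_OUT))
                    return -EINVAL;
            return 0;
    }
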
/drivers/infiniband/sw/rxe/
rxe_srq.c
124 struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata) in rxe_srq_from_attr() argument
135 mi = u64_to_user_ptr(ucmd->mmap_info_addr); in rxe_srq_from_attr()
rxe_verbs.c
305 struct rxe_modify_srq_cmd ucmd = {}; in rxe_modify_srq() local
308 if (udata->inlen < sizeof(ucmd)) in rxe_modify_srq()
311 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); in rxe_modify_srq()
320 err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata); in rxe_modify_srq()
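
rxe_modify_srq() takes the strictest of the copy disciplines here: a user buffer shorter than the command struct is rejected outright before any copy is attempted. The fragment, essentially as above:

    if (udata->inlen < sizeof(ucmd))
            return -EINVAL;
    if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
            return -EFAULT;
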
rxe_loc.h
200 struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
/drivers/infiniband/hw/qib/
qib_file_ops.c
2036 const struct qib_cmd __user *ucmd; in qib_write() local
2055 ucmd = (const struct qib_cmd __user *) data; in qib_write()
2057 if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) { in qib_write()
2069 src = &ucmd->cmd.user_info; in qib_write()
2075 src = &ucmd->cmd.recv_ctrl; in qib_write()
2081 src = &ucmd->cmd.ctxt_info; in qib_write()
2088 src = &ucmd->cmd.tid_info; in qib_write()
2094 src = &ucmd->cmd.part_key; in qib_write()
2107 src = &ucmd->cmd.poll_type; in qib_write()
2113 src = &ucmd->cmd.armlaunch_ctrl; in qib_write()
[all …]
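
qib_write() copies in two stages: first only the type discriminator, then just the union member that type selects, so a short write carrying a small sub-command is still valid. A self-contained sketch with a hypothetical command union (not the real qib_cmd layout):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    struct my_cmd {
            __u32 type;
            union {
                    __u64 user_info;
                    __u16 part_key;
            } cmd;
    };

    static ssize_t my_write(const char __user *data)
    {
            const struct my_cmd __user *ucmd =
                    (const struct my_cmd __user *)data;
            struct my_cmd cmd;

            if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type)))
                    return -EFAULT;
            switch (cmd.type) {
            case 1:                   /* hypothetical sub-command id */
                    if (copy_from_user(&cmd.cmd.user_info,
                                       &ucmd->cmd.user_info,
                                       sizeof(cmd.cmd.user_info)))
                            return -EFAULT;
                    break;
            default:
                    return -EINVAL;
            }
            return 0;
    }
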
/drivers/block/rsxx/
cregs.c
664 struct rsxx_reg_access __user *ucmd, in rsxx_reg_access() argument
670 st = copy_from_user(&cmd, ucmd, sizeof(cmd)); in rsxx_reg_access()
681 st = put_user(cmd.stat, &ucmd->stat); in rsxx_reg_access()
686 st = copy_to_user(ucmd->data, cmd.data, cmd.cnt); in rsxx_reg_access()
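
rsxx_reg_access() shows the full in/out round trip on one __user pointer: copy the control struct in, perform the access, then put_user() the status word and copy_to_user() the payload back through the same pointer. A sketch with a hypothetical struct and an added bound check on the user-supplied count:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    struct my_reg_access {            /* hypothetical ABI struct */
            __u32 stat;
            __u32 cnt;
            __u8  data[256];
    };

    static int my_reg_access(struct my_reg_access __user *ucmd)
    {
            struct my_reg_access cmd;

            if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                    return -EFAULT;
            if (cmd.cnt > sizeof(cmd.data)) /* bound the user's count */
                    return -EINVAL;
            /* ... do the register access, fill cmd.stat / cmd.data ... */
            if (put_user(cmd.stat, &ucmd->stat))
                    return -EFAULT;
            if (copy_to_user(ucmd->data, cmd.data, cmd.cnt))
                    return -EFAULT;
            return 0;
    }
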
rsxx_priv.h
413 struct rsxx_reg_access __user *ucmd,
/drivers/scsi/sym53c8xx_2/
sym_glue.c
129 struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); in sym_xpt_done() local
132 if (ucmd->eh_done) in sym_xpt_done()
133 complete(ucmd->eh_done); in sym_xpt_done()
576 struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); in sym_eh_handler() local
656 ucmd->eh_done = &eh_done; in sym_eh_handler()
659 ucmd->eh_done = NULL; in sym_eh_handler()
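
sym53c8xx's ucmd is unrelated to udata copying: here it is the per-command private area, and eh_done is the usual on-stack completion handshake between the error handler and the interrupt-side done path. A minimal generic sketch (locking against the completion racing teardown is elided; names are hypothetical):

    #include <linux/completion.h>

    struct my_ucmd {
            struct completion *eh_done;   /* armed only during recovery */
    };

    static void my_eh_wait(struct my_ucmd *ucmd)
    {
            DECLARE_COMPLETION_ONSTACK(eh_done);

            ucmd->eh_done = &eh_done;
            /* ... trigger abort/reset; the command completes soon ... */
            wait_for_completion(&eh_done);
            ucmd->eh_done = NULL;
    }

    static void my_xpt_done(struct my_ucmd *ucmd)
    {
            if (ucmd->eh_done)            /* wake a waiting error handler */
                    complete(ucmd->eh_done);
    }
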
