
Searched refs:umem (Results 1 – 25 of 36) sorted by relevance


/drivers/infiniband/core/
umem.c
46 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) in __ib_umem_release() argument
52 if (umem->nmap > 0) in __ib_umem_release()
53 ib_dma_unmap_sg(dev, umem->sg_head.sgl, in __ib_umem_release()
54 umem->nmap, in __ib_umem_release()
57 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { in __ib_umem_release()
60 if (umem->writable && dirty) in __ib_umem_release()
65 sg_free_table(&umem->sg_head); in __ib_umem_release()
81 struct ib_umem *umem; in ib_umem_get() local
111 umem = kzalloc(sizeof *umem, GFP_KERNEL); in ib_umem_get()
112 if (!umem) in ib_umem_get()
[all …]
Makefile
13 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
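
The core umem.c helpers above define the lifecycle that every driver entry below follows: ib_umem_get() pins the user buffer and DMA-maps it into umem->sg_head, the driver walks the mapped scatterlist, and ib_umem_release() undoes the mapping and pinning. A minimal sketch of that lifecycle, assuming the five-argument ib_umem_get() used in this tree; hw_program_region() is a hypothetical per-driver helper, not an API from this listing:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical device-specific helper: program one DMA region. */
int hw_program_region(struct ib_device *dev, u64 dma, unsigned int len);

static struct ib_umem *example_pin_and_map(struct ib_pd *pd, u64 start,
					   u64 length, int access_flags)
{
	struct ib_umem *umem;
	struct scatterlist *sg;
	int i, err;

	/* Pin the user pages and DMA-map them (last argument: dmasync). */
	umem = ib_umem_get(pd->uobject->context, start, length,
			   access_flags, 0);
	if (IS_ERR(umem))
		return umem;

	/* umem->nmap is the number of DMA-mapped scatterlist entries. */
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		err = hw_program_region(pd->device, sg_dma_address(sg),
					sg_dma_len(sg));
		if (err) {
			/* Undo the pinning and mapping on failure. */
			ib_umem_release(umem);
			return ERR_PTR(err);
		}
	}

	/*
	 * On success the caller keeps the umem (typically in its MR
	 * object) and calls ib_umem_release() at deregistration time.
	 */
	return umem;
}

A reg_user_mr implementation would store the returned umem in its MR object and release it from its dereg path, as the mlx4, mlx5, cxgb and mthca entries below do.
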
/drivers/infiniband/hw/mlx4/
mr.c
76 mr->umem = NULL; in mlx4_ib_get_dma_mr()
90 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
105 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in mlx4_ib_umem_write_mtt()
109 umem->page_size * k; in mlx4_ib_umem_write_mtt()
149 mr->umem = ib_umem_get(pd->uobject->context, start, length, in mlx4_ib_reg_user_mr()
151 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
152 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
156 n = ib_umem_page_count(mr->umem); in mlx4_ib_reg_user_mr()
157 shift = ilog2(mr->umem->page_size); in mlx4_ib_reg_user_mr()
164 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); in mlx4_ib_reg_user_mr()
[all …]
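
mlx4_ib_umem_write_mtt() above shows the page-list construction shared by the cxgb3/cxgb4, mthca and amso1100 entries further down: walk the DMA-mapped scatterlist and emit one address per umem page, stepping by umem->page_size inside each element. A sketch of that loop under the same assumptions; write_one_mtt_entry() is a made-up stand-in for the device-specific translation-table write:

#include <linux/log2.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Hypothetical stand-in for the device-specific MTT write. */
int write_one_mtt_entry(u64 dma_addr);

static int example_write_page_list(struct ib_umem *umem)
{
	struct scatterlist *sg;
	u64 base;
	int entry, k, pages, err;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		base  = sg_dma_address(sg);
		pages = sg_dma_len(sg) >> ilog2(umem->page_size);

		/* One translation entry per umem page in this element. */
		for (k = 0; k < pages; ++k) {
			err = write_one_mtt_entry(base + umem->page_size * k);
			if (err)
				return err;
		}
	}
	return 0;
}
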
doorbell.c
39 struct ib_umem *umem; member
64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx4_ib_db_map_user()
66 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
67 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
75 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx4_ib_db_map_user()
91 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
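
The doorbell helpers here (and the nearly identical mlx5 copy later in this listing) pin only the single page containing the user doorbell record: ib_umem_get() is called on virt & PAGE_MASK for PAGE_SIZE bytes, and the record's DMA address is the page's sg_dma_address() plus virt & ~PAGE_MASK. A condensed sketch of that mapping step; struct my_db_page is an illustrative stand-in for mlx4_ib_user_db_page, and the per-context list and refcounting of already-mapped pages are omitted:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Illustrative stand-in for mlx4_ib_user_db_page / mlx5_ib_user_db_page. */
struct my_db_page {
	struct ib_umem *umem;
	unsigned long	user_virt;
};

static int example_db_map_user(struct ib_ucontext *context,
			       unsigned long virt, dma_addr_t *dma)
{
	struct my_db_page *page;
	int err;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	page->user_virt = virt & PAGE_MASK;

	/* Pin just the one page holding the doorbell record. */
	page->umem = ib_umem_get(context, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
	if (IS_ERR(page->umem)) {
		err = PTR_ERR(page->umem);
		kfree(page);
		return err;
	}

	/* DMA address of the record = page address + offset within page. */
	*dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);

	/*
	 * The real code also links the page into a per-ucontext list so
	 * it can be found and released in the db_unmap_user() path.
	 */
	return 0;
}
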
srq.c
116 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in mlx4_ib_create_srq()
118 if (IS_ERR(srq->umem)) { in mlx4_ib_create_srq()
119 err = PTR_ERR(srq->umem); in mlx4_ib_create_srq()
123 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem), in mlx4_ib_create_srq()
124 ilog2(srq->umem->page_size), &srq->mtt); in mlx4_ib_create_srq()
128 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); in mlx4_ib_create_srq()
214 ib_umem_release(srq->umem); in mlx4_ib_create_srq()
282 ib_umem_release(msrq->umem); in mlx4_ib_destroy_srq()
cq.c
138 struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, in mlx4_ib_get_cq_umem() argument
144 *umem = ib_umem_get(context, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem()
146 if (IS_ERR(*umem)) in mlx4_ib_get_cq_umem()
147 return PTR_ERR(*umem); in mlx4_ib_get_cq_umem()
149 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem), in mlx4_ib_get_cq_umem()
150 ilog2((*umem)->page_size), &buf->mtt); in mlx4_ib_get_cq_umem()
154 err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem); in mlx4_ib_get_cq_umem()
164 ib_umem_release(*umem); in mlx4_ib_get_cq_umem()
200 err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem, in mlx4_ib_create_cq()
255 ib_umem_release(cq->umem); in mlx4_ib_create_cq()
[all …]
mlx4_ib.h
111 struct ib_umem *umem; member
118 struct ib_umem *umem; member
277 struct ib_umem *umem; member
308 struct ib_umem *umem; member
634 struct ib_umem *umem);
/drivers/infiniband/hw/ipath/
ipath_mr.c
153 mr->umem = NULL; in ipath_reg_phys_mr()
190 struct ib_umem *umem; in ipath_reg_user_mr() local
200 umem = ib_umem_get(pd->uobject->context, start, length, in ipath_reg_user_mr()
202 if (IS_ERR(umem)) in ipath_reg_user_mr()
203 return (void *) umem; in ipath_reg_user_mr()
205 n = umem->nmap; in ipath_reg_user_mr()
209 ib_umem_release(umem); in ipath_reg_user_mr()
217 mr->mr.offset = umem->offset; in ipath_reg_user_mr()
220 mr->umem = umem; in ipath_reg_user_mr()
224 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in ipath_reg_user_mr()
[all …]
/drivers/infiniband/hw/qib/
qib_mr.c
234 struct ib_umem *umem; in qib_reg_user_mr() local
244 umem = ib_umem_get(pd->uobject->context, start, length, in qib_reg_user_mr()
246 if (IS_ERR(umem)) in qib_reg_user_mr()
247 return (void *) umem; in qib_reg_user_mr()
249 n = umem->nmap; in qib_reg_user_mr()
254 ib_umem_release(umem); in qib_reg_user_mr()
261 mr->mr.offset = umem->offset; in qib_reg_user_mr()
263 mr->umem = umem; in qib_reg_user_mr()
265 if (is_power_of_2(umem->page_size)) in qib_reg_user_mr()
266 mr->mr.page_shift = ilog2(umem->page_size); in qib_reg_user_mr()
[all …]
/drivers/infiniband/hw/mlx5/
mem.c
44 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, in mlx5_ib_cont_pages() argument
58 unsigned long page_shift = ilog2(umem->page_size); in mlx5_ib_cont_pages()
66 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in mlx5_ib_cont_pages()
111 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, in mlx5_ib_populate_pas() argument
114 unsigned long umem_page_shift = ilog2(umem->page_size); in mlx5_ib_populate_pas()
125 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in mlx5_ib_populate_pas()
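
mlx5 splits the work into two helpers: mlx5_ib_cont_pages() scans the scatterlist to pick the largest page shift the buffer's layout allows, and mlx5_ib_populate_pas() fills the PAS array with one address per 2^page_shift block. A rough sketch of the populate step only, writing into a plain u64 array instead of the firmware command layout and assuming the chosen shift is compatible with every scatterlist element:

#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

static void example_populate_pas(struct ib_umem *umem, int page_shift,
				 u64 *pas)
{
	struct scatterlist *sg;
	u64 base;
	unsigned int len;
	int entry, k, i = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		base = sg_dma_address(sg);
		len  = sg_dma_len(sg);

		/* One PAS entry per page_shift-sized block of this
		 * element; the real helper also byte-swaps for the HCA. */
		for (k = 0; k < len >> page_shift; ++k)
			pas[i++] = base + ((u64)k << page_shift);
	}
}
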
doorbell.c
41 struct ib_umem *umem; member
66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx5_ib_db_map_user()
68 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
69 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
77 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx5_ib_db_map_user()
93 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
mr.c
643 mr->umem = NULL; in mlx5_ib_get_dma_mr()
731 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, in reg_umr() argument
767 mlx5_ib_populate_pas(dev, umem, page_shift, in reg_umr()
816 u64 length, struct ib_umem *umem, in reg_create() argument
836 mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0); in reg_create()
855 mr->umem = umem; in reg_create()
877 struct ib_umem *umem; in mlx5_ib_reg_user_mr() local
886 umem = ib_umem_get(pd->uobject->context, start, length, access_flags, in mlx5_ib_reg_user_mr()
888 if (IS_ERR(umem)) { in mlx5_ib_reg_user_mr()
889 mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); in mlx5_ib_reg_user_mr()
[all …]
mlx5_ib.h
165 struct ib_umem *umem; member
199 struct ib_umem *umem; member
211 struct ib_umem *umem; member
244 struct ib_umem *umem; member
259 struct ib_umem *umem; member
534 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
536 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
542 int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
cq.c
629 cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, in create_cq_user()
632 if (IS_ERR(cq->buf.umem)) { in create_cq_user()
633 err = PTR_ERR(cq->buf.umem); in create_cq_user()
642 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, in create_cq_user()
653 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); in create_cq_user()
664 ib_umem_release(cq->buf.umem); in create_cq_user()
671 ib_umem_release(cq->buf.umem); in destroy_cq_user()
948 struct ib_umem *umem; in resize_user() local
951 struct ib_ucontext *context = cq->buf.umem->context; in resize_user()
960 umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, in resize_user()
[all …]
srq.c
105 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, in create_srq_user()
107 if (IS_ERR(srq->umem)) { in create_srq_user()
109 err = PTR_ERR(srq->umem); in create_srq_user()
113 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, in create_srq_user()
129 mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0); in create_srq_user()
147 ib_umem_release(srq->umem); in create_srq_user()
225 ib_umem_release(srq->umem); in destroy_srq_user()
405 ib_umem_release(msrq->umem); in mlx5_ib_destroy_srq()
/drivers/infiniband/hw/amso1100/
c2_provider.c
395 mr->umem = NULL; in c2_reg_phys_mr()
447 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); in c2_reg_user_mr()
448 if (IS_ERR(c2mr->umem)) { in c2_reg_user_mr()
449 err = PTR_ERR(c2mr->umem); in c2_reg_user_mr()
454 shift = ffs(c2mr->umem->page_size) - 1; in c2_reg_user_mr()
455 n = c2mr->umem->nmap; in c2_reg_user_mr()
464 for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) { in c2_reg_user_mr()
469 (c2mr->umem->page_size * k); in c2_reg_user_mr()
476 c2mr->umem->page_size, in c2_reg_user_mr()
479 c2mr->umem->offset, in c2_reg_user_mr()
[all …]
c2_provider.h
76 struct ib_umem *umem; member
/drivers/infiniband/hw/usnic/
usnic_uiom.c
58 struct usnic_uiom_reg *umem = container_of(work, in usnic_uiom_reg_account() local
61 down_write(&umem->mm->mmap_sem); in usnic_uiom_reg_account()
62 umem->mm->locked_vm -= umem->diff; in usnic_uiom_reg_account()
63 up_write(&umem->mm->mmap_sem); in usnic_uiom_reg_account()
64 mmput(umem->mm); in usnic_uiom_reg_account()
65 kfree(umem); in usnic_uiom_reg_account()
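
usnic's uiom layer does the locked-memory unaccounting asynchronously: when a registration is torn down in a context that cannot take mmap_sem, the locked_vm decrement is deferred to a work item that takes the mm's write lock, subtracts the pinned page count, and drops the mm reference. A sketch of that shape; struct my_uiom_reg is a stand-in for usnic_uiom_reg with only the fields the handler touches:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Stand-in for usnic_uiom_reg, reduced to the accounting fields. */
struct my_uiom_reg {
	struct work_struct	work;
	struct mm_struct	*mm;	/* mm that was charged */
	unsigned long		diff;	/* pages to uncharge */
};

static void example_uiom_reg_account(struct work_struct *work)
{
	struct my_uiom_reg *reg = container_of(work, struct my_uiom_reg,
					       work);

	/* Uncharge the pinned pages under the mm's write lock ... */
	down_write(&reg->mm->mmap_sem);
	reg->mm->locked_vm -= reg->diff;
	up_write(&reg->mm->mmap_sem);

	/* ... then drop the mm reference taken at registration time. */
	mmput(reg->mm);
	kfree(reg);
}

A teardown path that cannot sleep on mmap_sem would INIT_WORK() this handler and queue it, matching the mmput()/kfree() visible in the snippet above.
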
usnic_ib_verbs.c
608 mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, in usnic_ib_reg_mr()
610 if (IS_ERR_OR_NULL(mr->umem)) { in usnic_ib_reg_mr()
611 err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT; in usnic_ib_reg_mr()
627 usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length); in usnic_ib_dereg_mr()
629 usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing); in usnic_ib_dereg_mr()
usnic_ib.h
51 struct usnic_uiom_reg *umem; member
/drivers/infiniband/hw/cxgb4/
mem.c
708 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); in c4iw_reg_user_mr()
709 if (IS_ERR(mhp->umem)) { in c4iw_reg_user_mr()
710 err = PTR_ERR(mhp->umem); in c4iw_reg_user_mr()
715 shift = ffs(mhp->umem->page_size) - 1; in c4iw_reg_user_mr()
717 n = mhp->umem->nmap; in c4iw_reg_user_mr()
730 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) { in c4iw_reg_user_mr()
734 mhp->umem->page_size * k); in c4iw_reg_user_mr()
774 ib_umem_release(mhp->umem); in c4iw_reg_user_mr()
950 if (mhp->umem) in c4iw_dereg_mr()
951 ib_umem_release(mhp->umem); in c4iw_dereg_mr()
/drivers/infiniband/hw/mthca/
mthca_provider.c
888 mr->umem = NULL; in mthca_get_dma_mr()
971 mr->umem = NULL; in mthca_reg_phys_mr()
1004 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, in mthca_reg_user_mr()
1007 if (IS_ERR(mr->umem)) { in mthca_reg_user_mr()
1008 err = PTR_ERR(mr->umem); in mthca_reg_user_mr()
1012 shift = ffs(mr->umem->page_size) - 1; in mthca_reg_user_mr()
1013 n = mr->umem->nmap; in mthca_reg_user_mr()
1031 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) { in mthca_reg_user_mr()
1035 mr->umem->page_size * k; in mthca_reg_user_mr()
1069 ib_umem_release(mr->umem); in mthca_reg_user_mr()
[all …]
/drivers/infiniband/hw/cxgb3/
iwch_provider.c
465 if (mhp->umem) in iwch_dereg_mr()
466 ib_umem_release(mhp->umem); in iwch_dereg_mr()
638 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); in iwch_reg_user_mr()
639 if (IS_ERR(mhp->umem)) { in iwch_reg_user_mr()
640 err = PTR_ERR(mhp->umem); in iwch_reg_user_mr()
645 shift = ffs(mhp->umem->page_size) - 1; in iwch_reg_user_mr()
647 n = mhp->umem->nmap; in iwch_reg_user_mr()
661 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) { in iwch_reg_user_mr()
665 mhp->umem->page_size * k); in iwch_reg_user_mr()
714 ib_umem_release(mhp->umem); in iwch_reg_user_mr()
/drivers/infiniband/hw/ehca/
ehca_mrmw.c
361 e_mr->umem = ib_umem_get(pd->uobject->context, start, length, in ehca_reg_user_mr()
363 if (IS_ERR(e_mr->umem)) { in ehca_reg_user_mr()
364 ib_mr = (void *)e_mr->umem; in ehca_reg_user_mr()
368 if (e_mr->umem->page_size != PAGE_SIZE) { in ehca_reg_user_mr()
370 "e_mr->umem->page_size=%x", e_mr->umem->page_size); in ehca_reg_user_mr()
379 if (e_mr->umem->hugetlb) { in ehca_reg_user_mr()
401 pginfo.u.usr.region = e_mr->umem; in ehca_reg_user_mr()
402 pginfo.next_hwpage = e_mr->umem->offset / hwpage_size; in ehca_reg_user_mr()
428 ib_umem_release(e_mr->umem); in ehca_reg_user_mr()
674 if (e_mr->umem) in ehca_dereg_mr()
[all …]
/drivers/block/
Makefile
29 obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
