/drivers/infiniband/core/

umem.c
    50  static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)   in __ib_umem_release() argument
    55  list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {   in __ib_umem_release()
    61  if (umem->writable && dirty)   in __ib_umem_release()
    81  struct ib_umem *umem;   in ib_umem_get() local
   100  umem = kmalloc(sizeof *umem, GFP_KERNEL);   in ib_umem_get()
   101  if (!umem)   in ib_umem_get()
   104  umem->context = context;   in ib_umem_get()
   105  umem->length = size;   in ib_umem_get()
   106  umem->offset = addr & ~PAGE_MASK;   in ib_umem_get()
   107  umem->page_size = PAGE_SIZE;   in ib_umem_get()
   [all …]

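Every driver hit below walks the same pre-3.12 representation of pinned user memory that umem.c builds here: a list of chunks, each holding an array of DMA-mapped scatterlist entries. A simplified sketch of that layout follows; the field names match the hits, but bookkeeping members (the mm/work fields used for deferred unpinning) are omitted, so treat it as an approximation of that era's include/rdma/ib_umem.h rather than a verbatim copy.

#include <linux/list.h>
#include <linux/scatterlist.h>

struct ib_umem {
	struct ib_ucontext *context;    /* owning user context */
	size_t              length;     /* bytes requested by the caller */
	int                 offset;     /* addr & ~PAGE_MASK: offset into first page */
	int                 page_size;  /* PAGE_SIZE on these kernels */
	int                 writable;   /* pages may be dirtied on release */
	int                 hugetlb;    /* region is backed by huge pages */
	struct list_head    chunk_list; /* list of struct ib_umem_chunk */
};

struct ib_umem_chunk {
	struct list_head   list;        /* linked into ib_umem.chunk_list */
	int                nents;       /* entries in page_list[] */
	int                nmap;        /* entries actually DMA-mapped */
	struct scatterlist page_list[0];
};

ib_umem_get() fills this in after pinning the pages, and ib_umem_release() undoes it, dirtying the pages first when the umem is writable (hit 61).
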
Makefile
    12  ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o

/drivers/infiniband/hw/ipath/

ipath_mr.c
   153  mr->umem = NULL;   in ipath_reg_phys_mr()
   190  struct ib_umem *umem;   in ipath_reg_user_mr() local
   200  umem = ib_umem_get(pd->uobject->context, start, length,   in ipath_reg_user_mr()
   202  if (IS_ERR(umem))   in ipath_reg_user_mr()
   203  return (void *) umem;   in ipath_reg_user_mr()
   206  list_for_each_entry(chunk, &umem->chunk_list, list)   in ipath_reg_user_mr()
   212  ib_umem_release(umem);   in ipath_reg_user_mr()
   220  mr->mr.offset = umem->offset;   in ipath_reg_user_mr()
   223  mr->umem = umem;   in ipath_reg_user_mr()
   227  list_for_each_entry(chunk, &umem->chunk_list, list) {   in ipath_reg_user_mr()
   [all …]

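The ipath hits (and the near-identical qib ones further down) show the common reg_user_mr flow: pin the range, count scatterlist entries to size the driver's page table, and unpin on every error path. A condensed sketch of that flow under the old chunk-based API; struct example_mr and its kzalloc are hypothetical stand-ins for the driver-specific MR bookkeeping.

#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical MR bookkeeping, only to make the flow concrete. */
struct example_mr {
	struct ib_mr	ibmr;
	struct ib_umem *umem;
	int		npages;
};

static struct ib_mr *example_reg_user_mr(struct ib_pd *pd, u64 start,
					 u64 length, int access)
{
	struct ib_umem_chunk *chunk;
	struct example_mr *mr;
	struct ib_umem *umem;
	int n = 0;

	/* Pin and DMA-map the user range (last argument: no dmasync). */
	umem = ib_umem_get(pd->uobject->context, start, length, access, 0);
	if (IS_ERR(umem))
		return (void *) umem;		/* propagate the ERR_PTR, as the hits do */

	/* Size the driver's page table from the scatterlist entries. */
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ib_umem_release(umem);		/* unpin on any error path */
		return ERR_PTR(-ENOMEM);
	}

	mr->umem = umem;
	mr->npages = n;
	/* The per-page walk that fills the table is the chunk walk
	 * sketched under mlx4/mr.c below. */
	return &mr->ibmr;
}

Hit 220 (mr->mr.offset = umem->offset) is the other detail worth noting: ib_umem_get() pins whole pages, so the byte offset into the first page has to be carried into the driver's MR separately.
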
ipath_verbs.h
   270  struct ib_umem *umem;   member

/drivers/infiniband/hw/mlx4/

doorbell.c
    39  struct ib_umem *umem;   member
    65  page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,   in mlx4_ib_db_map_user()
    67  if (IS_ERR(page->umem)) {   in mlx4_ib_db_map_user()
    68  err = PTR_ERR(page->umem);   in mlx4_ib_db_map_user()
    76  chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);   in mlx4_ib_db_map_user()
    93  ib_umem_release(db->u.user_page->umem);   in mlx4_ib_db_unmap_user()

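mlx4_ib_db_map_user() pins a single page and takes the doorbell's DMA address straight from the first scatterlist entry of the first (and only) chunk. A minimal sketch of just that step; the page sharing and refcounting the real function does, and the mlx4_ib_ucontext/mlx4_db plumbing around it, are left out.

#include <linux/err.h>
#include <rdma/ib_umem.h>

/* Condensed, hypothetical helper: map one user doorbell page. */
static int example_map_user_db(struct ib_ucontext *ibucontext,
			       unsigned long virt, dma_addr_t *dma,
			       struct ib_umem **umem)
{
	struct ib_umem_chunk *chunk;

	/* Pin exactly the page containing the doorbell record. */
	*umem = ib_umem_get(ibucontext, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	/* One page -> one chunk -> one scatterlist entry. */
	chunk = list_entry((*umem)->chunk_list.next,
			   struct ib_umem_chunk, list);
	*dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
	return 0;
}

Unmapping (hit 93) is just ib_umem_release() on the stored umem once the last user of that page goes away.
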
mr.c
    76  mr->umem = NULL;   in mlx4_ib_get_dma_mr()
    90  struct ib_umem *umem)   in mlx4_ib_umem_write_mtt() argument
   105  list_for_each_entry(chunk, &umem->chunk_list, list)   in mlx4_ib_umem_write_mtt()
   110  umem->page_size * k;   in mlx4_ib_umem_write_mtt()
   148  mr->umem = ib_umem_get(pd->uobject->context, start, length,   in mlx4_ib_reg_user_mr()
   150  if (IS_ERR(mr->umem)) {   in mlx4_ib_reg_user_mr()
   151  err = PTR_ERR(mr->umem);   in mlx4_ib_reg_user_mr()
   155  n = ib_umem_page_count(mr->umem);   in mlx4_ib_reg_user_mr()
   156  shift = ilog2(mr->umem->page_size);   in mlx4_ib_reg_user_mr()
   163  err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);   in mlx4_ib_reg_user_mr()
   [all …]

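The mlx4_ib_umem_write_mtt() hits outline the translation step every mlx4 object in this listing (MR, SRQ, CQ, QP) depends on: walk each chunk's DMA-mapped entries, split each entry into page_size pieces, and record one address per page. A simplified sketch of that walk; the real function streams its array to the mlx4 core in batches (via mlx4_write_mtt()) rather than collecting everything into one caller-supplied buffer.

#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Collect one DMA address per umem page; returns the count or -EINVAL. */
static int example_umem_to_pages(struct ib_umem *umem, u64 *pages, int max)
{
	struct ib_umem_chunk *chunk;
	int i = 0, j, k, len;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			/* A DMA-mapped entry may cover several pages. */
			len = sg_dma_len(&chunk->page_list[j]) / umem->page_size;
			for (k = 0; k < len; ++k) {
				if (i >= max)
					return -EINVAL;
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					     umem->page_size * k;
			}
		}

	return i;
}

mlx4_ib_reg_user_mr() sizes the MTT first from ib_umem_page_count() and ilog2(page_size) (hits 155-156), so the destination always has room for exactly this many entries.
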
srq.c
   116  srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,   in mlx4_ib_create_srq()
   118  if (IS_ERR(srq->umem)) {   in mlx4_ib_create_srq()
   119  err = PTR_ERR(srq->umem);   in mlx4_ib_create_srq()
   123  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),   in mlx4_ib_create_srq()
   124  ilog2(srq->umem->page_size), &srq->mtt);   in mlx4_ib_create_srq()
   128  err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);   in mlx4_ib_create_srq()
   213  ib_umem_release(srq->umem);   in mlx4_ib_create_srq()
   281  ib_umem_release(msrq->umem);   in mlx4_ib_destroy_srq()

cq.c
   138  struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,   in mlx4_ib_get_cq_umem() argument
   144  *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,   in mlx4_ib_get_cq_umem()
   146  if (IS_ERR(*umem))   in mlx4_ib_get_cq_umem()
   147  return PTR_ERR(*umem);   in mlx4_ib_get_cq_umem()
   149  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),   in mlx4_ib_get_cq_umem()
   150  ilog2((*umem)->page_size), &buf->mtt);   in mlx4_ib_get_cq_umem()
   154  err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);   in mlx4_ib_get_cq_umem()
   164  ib_umem_release(*umem);   in mlx4_ib_get_cq_umem()
   200  err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,   in mlx4_ib_create_cq()
   255  ib_umem_release(cq->umem);   in mlx4_ib_create_cq()
   [all …]

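mlx4_ib_get_cq_umem() is the clearest statement of the shared create path for user-space CQ, SRQ and QP buffers: pin the buffer, initialize an MTT sized by ib_umem_page_count() and the page shift, write the page addresses into it, and unwind in reverse order on failure. The sketch below is reconstructed from the hits above; the access flags, the dmasync argument, the exact parameter list and the mlx4_mtt_cleanup() unwind step are assumptions, and it is assumed to live inside the mlx4 driver where the mlx4_ib types are in scope.

#include <linux/err.h>
#include <linux/log2.h>
#include <rdma/ib_umem.h>

static int example_get_cq_umem(struct mlx4_ib_dev *dev,
			       struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf,
			       struct ib_umem **umem,
			       u64 buf_addr, int cqe, int cqe_size)
{
	int err;

	/* Pin the user-space CQE array. */
	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	/* One MTT entry per pinned page. */
	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
err_umem:
	ib_umem_release(*umem);
	return err;
}

srq.c above and qp.c below follow the same three steps inline rather than through a helper.
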
mlx4_ib.h
   109  struct ib_umem *umem;   member
   116  struct ib_umem *umem;   member
   251  struct ib_umem *umem;   member
   280  struct ib_umem *umem;   member
   589  struct ib_umem *umem);

qp.c
   688  qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,   in create_qp_common()
   690  if (IS_ERR(qp->umem)) {   in create_qp_common()
   691  err = PTR_ERR(qp->umem);   in create_qp_common()
   695  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),   in create_qp_common()
   696  ilog2(qp->umem->page_size), &qp->mtt);   in create_qp_common()
   700  err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);   in create_qp_common()
   812  ib_umem_release(qp->umem);   in create_qp_common()
   944  ib_umem_release(qp->umem);   in destroy_qp_common()

/drivers/infiniband/hw/qib/

qib_mr.c
   234  struct ib_umem *umem;   in qib_reg_user_mr() local
   244  umem = ib_umem_get(pd->uobject->context, start, length,   in qib_reg_user_mr()
   246  if (IS_ERR(umem))   in qib_reg_user_mr()
   247  return (void *) umem;   in qib_reg_user_mr()
   250  list_for_each_entry(chunk, &umem->chunk_list, list)   in qib_reg_user_mr()
   256  ib_umem_release(umem);   in qib_reg_user_mr()
   263  mr->mr.offset = umem->offset;   in qib_reg_user_mr()
   265  mr->umem = umem;   in qib_reg_user_mr()
   267  if (is_power_of_2(umem->page_size))   in qib_reg_user_mr()
   268  mr->mr.page_shift = ilog2(umem->page_size);   in qib_reg_user_mr()
   [all …]

qib_verbs.h
   329  struct ib_umem *umem;   member

/drivers/infiniband/hw/amso1100/

c2_provider.c
   395  mr->umem = NULL;   in c2_reg_phys_mr()
   447  c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);   in c2_reg_user_mr()
   448  if (IS_ERR(c2mr->umem)) {   in c2_reg_user_mr()
   449  err = PTR_ERR(c2mr->umem);   in c2_reg_user_mr()
   454  shift = ffs(c2mr->umem->page_size) - 1;   in c2_reg_user_mr()
   457  list_for_each_entry(chunk, &c2mr->umem->chunk_list, list)   in c2_reg_user_mr()
   467  list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) {   in c2_reg_user_mr()
   473  (c2mr->umem->page_size * k);   in c2_reg_user_mr()
   481  c2mr->umem->page_size,   in c2_reg_user_mr()
   484  c2mr->umem->offset,   in c2_reg_user_mr()
   [all …]

c2_provider.h
    76  struct ib_umem *umem;   member

/drivers/infiniband/hw/cxgb4/

mem.c
   704  mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);   in c4iw_reg_user_mr()
   705  if (IS_ERR(mhp->umem)) {   in c4iw_reg_user_mr()
   706  err = PTR_ERR(mhp->umem);   in c4iw_reg_user_mr()
   711  shift = ffs(mhp->umem->page_size) - 1;   in c4iw_reg_user_mr()
   714  list_for_each_entry(chunk, &mhp->umem->chunk_list, list)   in c4iw_reg_user_mr()
   729  list_for_each_entry(chunk, &mhp->umem->chunk_list, list)   in c4iw_reg_user_mr()
   735  mhp->umem->page_size * k);   in c4iw_reg_user_mr()
   775  ib_umem_release(mhp->umem);   in c4iw_reg_user_mr()
   943  if (mhp->umem)   in c4iw_dereg_mr()
   944  ib_umem_release(mhp->umem);   in c4iw_dereg_mr()

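The c4iw_dereg_mr() hits (943-944), like iwch_dereg_mr() further down, show the teardown side: hardware state goes first, and ib_umem_release() runs only when an umem exists, since DMA and phys MRs never pinned anything. A minimal sketch of that ordering; struct example_mhp and hw_free_stag() are hypothetical stand-ins for the driver's MR object and its STag/TPT teardown.

#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical driver MR object, for illustration only. */
struct example_mhp {
	struct ib_mr	ibmr;
	struct ib_umem *umem;	/* NULL unless reg_user_mr pinned pages */
};

/* Stand-in for the driver-specific STag/TPT teardown (hypothetical). */
static int hw_free_stag(struct example_mhp *mhp)
{
	return 0;
}

static int example_dereg_mr(struct example_mhp *mhp)
{
	int err;

	/* Hardware must stop referencing the pages before they are unpinned. */
	err = hw_free_stag(mhp);
	if (err)
		return err;

	if (mhp->umem)
		ib_umem_release(mhp->umem);

	kfree(mhp);
	return 0;
}
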
/drivers/infiniband/hw/mthca/

mthca_provider.c
   887  mr->umem = NULL;   in mthca_get_dma_mr()
   970  mr->umem = NULL;   in mthca_reg_phys_mr()
  1003  mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,   in mthca_reg_user_mr()
  1006  if (IS_ERR(mr->umem)) {   in mthca_reg_user_mr()
  1007  err = PTR_ERR(mr->umem);   in mthca_reg_user_mr()
  1011  shift = ffs(mr->umem->page_size) - 1;   in mthca_reg_user_mr()
  1014  list_for_each_entry(chunk, &mr->umem->chunk_list, list)   in mthca_reg_user_mr()
  1033  list_for_each_entry(chunk, &mr->umem->chunk_list, list)   in mthca_reg_user_mr()
  1038  mr->umem->page_size * k;   in mthca_reg_user_mr()
  1072  ib_umem_release(mr->umem);   in mthca_reg_user_mr()
   [all …]

mthca_provider.h
    75  struct ib_umem *umem;   member

/drivers/infiniband/hw/cxgb3/

iwch_provider.c
   464  if (mhp->umem)   in iwch_dereg_mr()
   465  ib_umem_release(mhp->umem);   in iwch_dereg_mr()
   638  mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);   in iwch_reg_user_mr()
   639  if (IS_ERR(mhp->umem)) {   in iwch_reg_user_mr()
   640  err = PTR_ERR(mhp->umem);   in iwch_reg_user_mr()
   645  shift = ffs(mhp->umem->page_size) - 1;   in iwch_reg_user_mr()
   648  list_for_each_entry(chunk, &mhp->umem->chunk_list, list)   in iwch_reg_user_mr()
   663  list_for_each_entry(chunk, &mhp->umem->chunk_list, list)   in iwch_reg_user_mr()
   669  mhp->umem->page_size * k);   in iwch_reg_user_mr()
   718  ib_umem_release(mhp->umem);   in iwch_reg_user_mr()

iwch_provider.h
    76  struct ib_umem *umem;   member

/drivers/infiniband/hw/ehca/

ehca_mrmw.c
   361  e_mr->umem = ib_umem_get(pd->uobject->context, start, length,   in ehca_reg_user_mr()
   363  if (IS_ERR(e_mr->umem)) {   in ehca_reg_user_mr()
   364  ib_mr = (void *)e_mr->umem;   in ehca_reg_user_mr()
   368  if (e_mr->umem->page_size != PAGE_SIZE) {   in ehca_reg_user_mr()
   370  "e_mr->umem->page_size=%x", e_mr->umem->page_size);   in ehca_reg_user_mr()
   379  if (e_mr->umem->hugetlb) {   in ehca_reg_user_mr()
   401  pginfo.u.usr.region = e_mr->umem;   in ehca_reg_user_mr()
   402  pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;   in ehca_reg_user_mr()
   404  (&e_mr->umem->chunk_list),   in ehca_reg_user_mr()
   431  ib_umem_release(e_mr->umem);   in ehca_reg_user_mr()
   [all …]

ehca_classes.h
   271  struct ib_umem *umem;   member

/drivers/infiniband/hw/ocrdma/

ocrdma_verbs.c
   573  struct ib_umem *umem = mr->umem;   in build_user_pbes() local
   582  shift = ilog2(umem->page_size);   in build_user_pbes()
   584  list_for_each_entry(chunk, &umem->chunk_list, list) {   in build_user_pbes()
   593  (umem->page_size * pg_cnt));   in build_user_pbes()
   598  umem->page_size * pg_cnt)));   in build_user_pbes()
   640  mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);   in ocrdma_reg_user_mr()
   641  if (IS_ERR(mr->umem)) {   in ocrdma_reg_user_mr()
   645  num_pbes = ib_umem_page_count(mr->umem);   in ocrdma_reg_user_mr()
   650  mr->hwmr.pbe_size = mr->umem->page_size;   in ocrdma_reg_user_mr()
   651  mr->hwmr.fbo = mr->umem->offset;   in ocrdma_reg_user_mr()
   [all …]

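build_user_pbes() is the same chunk walk yet again, but the destination is an array of physical buffer entries with each DMA address split into little-endian low/high halves; ocrdma_reg_user_mr() also carries umem->page_size and umem->offset into the hardware MR as pbe_size and fbo (hits 650-651). A hedged sketch of the inner loop, using a simplified PBE layout rather than the real ocrdma structure and ignoring the driver's multi-page PBL handling:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Simplified stand-in for the hardware PBE; the real layout differs. */
struct example_pbe {
	__le32 pa_lo;
	__le32 pa_hi;
};

static void example_build_pbes(struct ib_umem *umem,
			       struct example_pbe *pbe, int num_pbes)
{
	struct ib_umem_chunk *chunk;
	int total = 0, i, pg_cnt, pages;
	u64 pg_addr;

	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nmap; i++) {
			pages = sg_dma_len(&chunk->page_list[i]) / umem->page_size;
			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
				pg_addr = sg_dma_address(&chunk->page_list[i]) +
					  umem->page_size * pg_cnt;
				pbe->pa_lo = cpu_to_le32(lower_32_bits(pg_addr));
				pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
				pbe++;
				if (++total == num_pbes)
					return;
			}
		}
	}
}
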
ocrdma.h
   326  struct ib_umem *umem;   member

/drivers/block/

Makefile
    28  obj-$(CONFIG_BLK_DEV_UMEM) += umem.o

Kconfig
   164  <http://www.umem.com/>
   170  module will be called umem.
   172  The umem driver has not yet been allocated a MAJOR number, so