Searched refs:fmr (Results 1 – 17 of 17) sorted by relevance

/drivers/infiniband/core/
fmr_pool.c
120 struct ib_pool_fmr *fmr; in ib_fmr_cache_lookup() local
127 hlist_for_each_entry(fmr, bucket, cache_node) in ib_fmr_cache_lookup()
128 if (io_virtual_address == fmr->io_virtual_address && in ib_fmr_cache_lookup()
129 page_list_len == fmr->page_list_len && in ib_fmr_cache_lookup()
130 !memcmp(page_list, fmr->page_list, in ib_fmr_cache_lookup()
132 return fmr; in ib_fmr_cache_lookup()
140 struct ib_pool_fmr *fmr; in ib_fmr_batch_release() local
146 list_for_each_entry(fmr, &pool->dirty_list, list) { in ib_fmr_batch_release()
147 hlist_del_init(&fmr->cache_node); in ib_fmr_batch_release()
148 fmr->remap_count = 0; in ib_fmr_batch_release()
[all …]
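
The two excerpts above are the FMR pool's internal cache lookup and batch-release paths. For orientation, here is a minimal sketch of how a consumer created such a pool through the legacy ib_fmr_pool API (since removed from the kernel); the PD is assumed to exist and the sizing values are illustrative, not prescriptive:

    #include <rdma/ib_fmr_pool.h>

    /* Sketch only: create a cached FMR pool on an existing PD.
     * Pool sizes and access flags are illustrative assumptions. */
    static struct ib_fmr_pool *demo_create_pool(struct ib_pd *pd)
    {
            struct ib_fmr_pool_param params = {
                    .max_pages_per_fmr = 64,
                    .page_shift        = PAGE_SHIFT,
                    .access            = IB_ACCESS_LOCAL_WRITE |
                                         IB_ACCESS_REMOTE_READ,
                    .pool_size         = 1024,
                    .dirty_watermark   = 32,
                    .cache             = 1, /* enables the hash cache that
                                             * ib_fmr_cache_lookup() probes */
            };

            return ib_create_fmr_pool(pd, &params);
    }
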
verbs.c
1143 struct ib_fmr *fmr; in ib_alloc_fmr() local
1148 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); in ib_alloc_fmr()
1149 if (!IS_ERR(fmr)) { in ib_alloc_fmr()
1150 fmr->device = pd->device; in ib_alloc_fmr()
1151 fmr->pd = pd; in ib_alloc_fmr()
1155 return fmr; in ib_alloc_fmr()
1161 struct ib_fmr *fmr; in ib_unmap_fmr() local
1166 fmr = list_entry(fmr_list->next, struct ib_fmr, list); in ib_unmap_fmr()
1167 return fmr->device->unmap_fmr(fmr_list); in ib_unmap_fmr()
1171 int ib_dealloc_fmr(struct ib_fmr *fmr) in ib_dealloc_fmr() argument
[all …]
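
verbs.c holds the core entry points; the full legacy lifecycle is alloc, map, unmap, dealloc. A hedged sketch of one pass through it, assuming the caller already holds a PD and a DMA-mapped page list (max_maps and the access flag are illustrative choices):

    #include <rdma/ib_verbs.h>

    /* Sketch of the raw FMR verb lifecycle (legacy API, since removed). */
    static int demo_fmr_cycle(struct ib_pd *pd, u64 *page_list,
                              int npages, u64 iova)
    {
            struct ib_fmr_attr attr = {
                    .max_pages  = npages,
                    .max_maps   = 32,
                    .page_shift = PAGE_SHIFT,
            };
            struct ib_fmr *fmr;
            LIST_HEAD(fmr_list);
            int err;

            fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
            if (IS_ERR(fmr))
                    return PTR_ERR(fmr);

            err = ib_map_phys_fmr(fmr, page_list, npages, iova);
            if (!err) {
                    /* ... post work requests using fmr->lkey/fmr->rkey ... */
                    list_add_tail(&fmr->list, &fmr_list);
                    ib_unmap_fmr(&fmr_list); /* batch invalidate */
            }

            return ib_dealloc_fmr(fmr);
    }
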
/drivers/infiniband/hw/ipath/
ipath_mr.c
290 struct ipath_fmr *fmr; in ipath_alloc_fmr() local
296 fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); in ipath_alloc_fmr()
297 if (!fmr) in ipath_alloc_fmr()
302 fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0], in ipath_alloc_fmr()
304 if (!fmr->mr.map[i]) in ipath_alloc_fmr()
307 fmr->mr.mapsz = m; in ipath_alloc_fmr()
313 if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) in ipath_alloc_fmr()
315 fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey; in ipath_alloc_fmr()
320 fmr->mr.pd = pd; in ipath_alloc_fmr()
321 fmr->mr.user_base = 0; in ipath_alloc_fmr()
[all …]
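
The kmalloc at line 296 sizes one allocation to hold the fixed struct plus m trailing map pointers (sizeof *fmr + m * sizeof fmr->mr.map[0]); qib does the same below. A standalone illustration of the idiom, with made-up types:

    #include <stdlib.h>

    /* Illustration only: one allocation covers the struct and its
     * trailing array, mirroring the ipath/qib allocation above. */
    struct demo_map;

    struct demo_region {
            size_t mapsz;           /* entries in map[] */
            struct demo_map *map[]; /* flexible array member */
    };

    static struct demo_region *demo_alloc(size_t m)
    {
            struct demo_region *r =
                    malloc(sizeof(*r) + m * sizeof(r->map[0]));

            if (r)
                    r->mapsz = m;
            return r;
    }
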
/drivers/infiniband/hw/qib/
qib_mr.c
386 struct qib_fmr *fmr; in qib_alloc_fmr() local
393 fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); in qib_alloc_fmr()
394 if (!fmr) in qib_alloc_fmr()
397 rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages); in qib_alloc_fmr()
405 rval = qib_alloc_lkey(&fmr->mr, 0); in qib_alloc_fmr()
408 fmr->ibfmr.rkey = fmr->mr.lkey; in qib_alloc_fmr()
409 fmr->ibfmr.lkey = fmr->mr.lkey; in qib_alloc_fmr()
414 fmr->mr.access_flags = mr_access_flags; in qib_alloc_fmr()
415 fmr->mr.max_segs = fmr_attr->max_pages; in qib_alloc_fmr()
416 fmr->mr.page_shift = fmr_attr->page_shift; in qib_alloc_fmr()
[all …]
/drivers/net/ethernet/mellanox/mlx4/
mr.c
814 static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, in mlx4_check_fmr() argument
819 if (npages > fmr->max_pages) in mlx4_check_fmr()
822 page_mask = (1 << fmr->page_shift) - 1; in mlx4_check_fmr()
835 if (fmr->maps >= fmr->max_maps) in mlx4_check_fmr()
841 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, in mlx4_map_phys_fmr() argument
847 err = mlx4_check_fmr(fmr, page_list, npages, iova); in mlx4_map_phys_fmr()
851 ++fmr->maps; in mlx4_map_phys_fmr()
853 key = key_to_hw_index(fmr->mr.key); in mlx4_map_phys_fmr()
855 *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); in mlx4_map_phys_fmr()
857 *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; in mlx4_map_phys_fmr()
[all …]
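
mlx4_check_fmr() (and mthca_check_fmr() in the next result) gate every map request the same way: bound the page count, require a page-aligned iova, and enforce the remap budget. A condensed sketch of that check; the parameter names here are illustrative, not the drivers' own:

    #include <errno.h>
    #include <stdint.h>

    /* Condensed sketch of mlx4_check_fmr()/mthca_check_fmr(). */
    static int demo_check_fmr(int npages, int max_pages,
                              int maps, int max_maps,
                              int page_shift, uint64_t iova)
    {
            uint64_t page_mask = ((uint64_t)1 << page_shift) - 1;

            if (npages > max_pages)
                    return -EINVAL; /* request exceeds FMR capacity */
            if (iova & page_mask)
                    return -EINVAL; /* iova must be page-aligned */
            if (maps >= max_maps)
                    return -EINVAL; /* remap budget exhausted */
            return 0;
    }
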
/drivers/infiniband/hw/mthca/
mthca_mr.c
678 int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr) in mthca_free_fmr() argument
680 if (fmr->maps) in mthca_free_fmr()
683 mthca_free_region(dev, fmr->ibmr.lkey); in mthca_free_fmr()
684 mthca_free_mtt(dev, fmr->mtt); in mthca_free_fmr()
689 static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, in mthca_check_fmr() argument
694 if (list_len > fmr->attr.max_pages) in mthca_check_fmr()
697 page_mask = (1 << fmr->attr.page_shift) - 1; in mthca_check_fmr()
710 if (fmr->maps >= fmr->attr.max_maps) in mthca_check_fmr()
720 struct mthca_fmr *fmr = to_mfmr(ibfmr); in mthca_tavor_map_phys_fmr() local
726 err = mthca_check_fmr(fmr, page_list, list_len, iova); in mthca_tavor_map_phys_fmr()
[all …]
mthca_provider.c
1094 struct mthca_fmr *fmr; in mthca_alloc_fmr() local
1097 fmr = kmalloc(sizeof *fmr, GFP_KERNEL); in mthca_alloc_fmr()
1098 if (!fmr) in mthca_alloc_fmr()
1101 memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr); in mthca_alloc_fmr()
1103 convert_access(mr_access_flags), fmr); in mthca_alloc_fmr()
1106 kfree(fmr); in mthca_alloc_fmr()
1110 return &fmr->ibmr; in mthca_alloc_fmr()
1113 static int mthca_dealloc_fmr(struct ib_fmr *fmr) in mthca_dealloc_fmr() argument
1115 struct mthca_fmr *mfmr = to_mfmr(fmr); in mthca_dealloc_fmr()
1118 err = mthca_free_fmr(to_mdev(fmr->device), mfmr); in mthca_dealloc_fmr()
[all …]
mthca_dev.h
482 u32 access, struct mthca_fmr *fmr);
485 void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
488 void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
489 int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr);
/drivers/infiniband/hw/mlx4/
mr.c
349 struct mlx4_ib_fmr *fmr; in mlx4_ib_fmr_alloc() local
352 fmr = kmalloc(sizeof *fmr, GFP_KERNEL); in mlx4_ib_fmr_alloc()
353 if (!fmr) in mlx4_ib_fmr_alloc()
358 fmr_attr->page_shift, &fmr->mfmr); in mlx4_ib_fmr_alloc()
362 err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr); in mlx4_ib_fmr_alloc()
366 fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key; in mlx4_ib_fmr_alloc()
368 return &fmr->ibfmr; in mlx4_ib_fmr_alloc()
371 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr); in mlx4_ib_fmr_alloc()
374 kfree(fmr); in mlx4_ib_fmr_alloc()
mlx4_ib.h
657 int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
/drivers/infiniband/hw/ehca/
ehca_mrmw.c
858 int ehca_map_phys_fmr(struct ib_fmr *fmr, in ehca_map_phys_fmr() argument
865 container_of(fmr->device, struct ehca_shca, ib_device); in ehca_map_phys_fmr()
866 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); in ehca_map_phys_fmr()
867 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); in ehca_map_phys_fmr()
872 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x", in ehca_map_phys_fmr()
882 ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x", in ehca_map_phys_fmr()
889 ehca_info(fmr->device, "map limit exceeded, fmr=%p " in ehca_map_phys_fmr()
891 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps); in ehca_map_phys_fmr()
900 pginfo.u.fmr.page_list = page_list; in ehca_map_phys_fmr()
903 pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size; in ehca_map_phys_fmr()
[all …]
ehca_iverbs.h
109 int ehca_map_phys_fmr(struct ib_fmr *fmr,
114 int ehca_dealloc_fmr(struct ib_fmr *fmr);
ehca_classes.h
332 } fmr; member
/drivers/mtd/nand/
fsl_elbc_nand.c
59 unsigned int fmr; /* FCM Flash Mode Register value */ member
205 out_be32(&lbc->fmr, priv->fmr | 3); in fsl_elbc_run_command()
211 in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr)); in fsl_elbc_run_command()
635 priv->fmr |= al << FMR_AL_SHIFT; in fsl_elbc_chip_init_tail()
685 chip->ecc.layout = (priv->fmr & FMR_ECCM) ? in fsl_elbc_chip_init_tail()
742 priv->fmr = 15 << FMR_CWTO_SHIFT; in fsl_elbc_chip_init()
744 priv->fmr |= FMR_ECCM; in fsl_elbc_chip_init()
772 chip->ecc.layout = (priv->fmr & FMR_ECCM) ? in fsl_elbc_chip_init()
/drivers/infiniband/hw/ocrdma/
ocrdma_hw.h
98 int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);
/drivers/infiniband/ulp/srp/
ib_srp.c
779 struct ib_pool_fmr *fmr; in srp_map_finish_fmr() local
792 fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages, in srp_map_finish_fmr()
794 if (IS_ERR(fmr)) in srp_map_finish_fmr()
795 return PTR_ERR(fmr); in srp_map_finish_fmr()
797 *state->next_fmr++ = fmr; in srp_map_finish_fmr()
800 srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey); in srp_map_finish_fmr()
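
srp_map_finish_fmr() is the typical consumer view of the pool layer: map a page list, use the returned rkey, release the mapping. A minimal hedged sketch of that round trip, assuming the pool and page list already exist:

    #include <rdma/ib_fmr_pool.h>

    /* Sketch of the pool round trip done by srp_map_finish_fmr(). */
    static int demo_pool_map(struct ib_fmr_pool *pool, u64 *pages,
                             int npages, u64 iova)
    {
            struct ib_pool_fmr *pfmr;

            pfmr = ib_fmr_pool_map_phys(pool, pages, npages, iova);
            if (IS_ERR(pfmr))
                    return PTR_ERR(pfmr);

            /* ... describe the region with pfmr->fmr->rkey, as
             * srp_map_desc() does above ... */

            return ib_fmr_pool_unmap(pfmr); /* return FMR to the pool */
    }
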
/drivers/infiniband/ulp/iser/
iser_verbs.c
708 mem_reg->lkey = mem->fmr->lkey; in iser_reg_page_vec()
709 mem_reg->rkey = mem->fmr->rkey; in iser_reg_page_vec()