/drivers/infiniband/hw/hns/ |
D | hns_roce_alloc.c |
    171  dma_free_coherent(dev, 1 << buf->page_shift,  in hns_roce_buf_free()
    179  struct hns_roce_buf *buf, u32 page_shift)  in hns_roce_buf_alloc() argument
    184  u32 page_size = 1 << page_shift;  in hns_roce_buf_alloc()
    192  if (order <= page_shift - PAGE_SHIFT)  in hns_roce_buf_alloc()
    195  order -= page_shift - PAGE_SHIFT;  in hns_roce_buf_alloc()
    197  buf->page_shift = page_shift;  in hns_roce_buf_alloc()
    206  while (t & ((1 << buf->page_shift) - 1)) {  in hns_roce_buf_alloc()
    207  --buf->page_shift;  in hns_roce_buf_alloc()
    213  buf->page_shift = page_shift;  in hns_roce_buf_alloc()
    258  ((dma_addr_t)i << buf->page_shift);  in hns_roce_get_kmem_bufs()
    [all …]
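
The hns_roce_buf_alloc() hits above (lines 206-207) show the direct-buffer alignment trick: start from the requested page_shift and decrement it until the buffer's DMA address is aligned to 1 << page_shift. A minimal user-space sketch of that loop follows; the starting shift and the DMA address are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone illustration (not the kernel code): shrink page_shift until the
 * buffer's bus address is aligned to 1 << page_shift, mirroring the loop at
 * hns_roce_buf_alloc() lines 206-207 above. The address value is made up.
 */
static unsigned int fit_page_shift(uint64_t dma_addr, unsigned int page_shift)
{
	/* Each set low-order bit in the address halves the usable page size. */
	while (dma_addr & ((1ULL << page_shift) - 1))
		--page_shift;
	return page_shift;
}

int main(void)
{
	uint64_t addr = 0x1f6000;	/* hypothetical DMA address, 8 KiB aligned */
	unsigned int shift = fit_page_shift(addr, 16);

	printf("requested shift 16, usable shift %u (%llu-byte pages)\n",
	       shift, 1ULL << shift);
	return 0;
}

The point of the loop is that a buffer can only be described to the hardware in pages of a given size if its bus address is a multiple of that size.
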
|
D | hns_roce_srq.c |
    197  buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;  in create_user_srq()
    198  ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,  in create_user_srq()
    219  buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;  in create_user_srq()
    220  ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,  in create_user_srq()
    253  u32 page_shift)  in hns_roce_create_idx_que() argument
    264  if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,  in hns_roce_create_idx_que()
    265  &idx_que->idx_buf, page_shift)) {  in hns_roce_create_idx_que()
    276  u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;  in create_kernel_srq() local
    279  if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,  in create_kernel_srq()
    280  &srq->buf, page_shift))  in create_kernel_srq()
    [all …]
|
D | hns_roce_cq.c |
    213  u32 page_shift;  in hns_roce_ib_get_cq_umem() local
    230  page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;  in hns_roce_ib_get_cq_umem()
    231  ret = hns_roce_mtt_init(hr_dev, npages, page_shift,  in hns_roce_ib_get_cq_umem()
    258  u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;  in hns_roce_ib_alloc_cq_buf() local
    261  (1 << page_shift) * 2, &buf->hr_buf,  in hns_roce_ib_alloc_cq_buf()
    262  page_shift);  in hns_roce_ib_alloc_cq_buf()
    272  buf->hr_buf.page_shift, &buf->hr_mtt);  in hns_roce_ib_alloc_cq_buf()
|
D | hns_roce_qp.c |
    439  int region_max, int page_shift)  in split_wqe_buf_region() argument
    441  int page_size = 1 << page_shift;  in split_wqe_buf_region()
    698  u32 page_shift;  in hns_roce_create_qp_common() local
    732  page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;  in hns_roce_create_qp_common()
    756  page_shift);  in hns_roce_create_qp_common()
    768  hr_qp->umem, page_shift);  in hns_roce_create_qp_common()
    849  (1 << page_shift) * 2,  in hns_roce_create_qp_common()
    850  &hr_qp->hr_buf, page_shift)) {  in hns_roce_create_qp_common()
    857  page_shift);  in hns_roce_create_qp_common()
    910  page_shift);  in hns_roce_create_qp_common()
|
D | hns_roce_device.h |
    344  int page_shift;  member
    454  int page_shift;  member
    1113  u32 page_size = 1 << buf->page_shift;  in hns_roce_buf_offset()
    1118  return (char *)(buf->page_list[offset >> buf->page_shift].buf) +  in hns_roce_buf_offset()
    1134  int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
    1213  struct hns_roce_buf *buf, u32 page_shift);
    1228  int page_shift);
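
hns_roce_buf_offset() (lines 1113 and 1118 above) splits a byte offset into a page_list index and an offset within that page. A standalone sketch of the same arithmetic, with invented values:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the indexing used by hns_roce_buf_offset() above: a byte offset
 * into a multi-page buffer splits into a page_list index (offset >> page_shift)
 * and an offset within that page. The shift and offset here are invented.
 */
int main(void)
{
	unsigned int page_shift = 12;			/* 4 KiB pages */
	uint32_t page_size = 1U << page_shift;
	uint32_t offset = 0x5234;			/* arbitrary byte offset */

	uint32_t page_index  = offset >> page_shift;		/* which page_list entry */
	uint32_t page_offset = offset & (page_size - 1);	/* offset inside that page */

	printf("offset 0x%x -> page %u, offset 0x%x within the page\n",
	       offset, page_index, page_offset);
	return 0;
}
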
|
D | hns_roce_mr.c |
    221  int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,  in hns_roce_mtt_init() argument
    230  mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;  in hns_roce_mtt_init()
    235  mtt->page_shift = page_shift;  in hns_roce_mtt_init()
    903  page_list[i] = buf->direct.map + (i << buf->page_shift);  in hns_roce_buf_write_mtt()
    1062  if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {  in hns_roce_ib_umem_write_mtt()
    1063  if (page_addr & ((1 << mtt->page_shift) - 1)) {  in hns_roce_ib_umem_write_mtt()
    1066  page_addr, mtt->page_shift);  in hns_roce_ib_umem_write_mtt()
|
/drivers/pci/endpoint/ |
D | pci-epc-mem.c |
    26  unsigned int page_shift = ilog2(mem->page_size);  in pci_epc_mem_get_order() local
    29  size >>= page_shift;  in pci_epc_mem_get_order()
    54  unsigned int page_shift;  in __pci_epc_mem_init() local
    61  page_shift = ilog2(page_size);  in __pci_epc_mem_init()
    62  pages = size >> page_shift;  in __pci_epc_mem_init()
    127  unsigned int page_shift = ilog2(mem->page_size);  in pci_epc_mem_alloc_addr() local
    137  *phys_addr = mem->phys_base + (pageno << page_shift);  in pci_epc_mem_alloc_addr()
    160  unsigned int page_shift = ilog2(mem->page_size);  in pci_epc_mem_free_addr() local
    164  pageno = (phys_addr - mem->phys_base) >> page_shift;  in pci_epc_mem_free_addr()
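
The pci-epc-mem.c hits show the endpoint window allocator deriving page_shift with ilog2() and converting between a bitmap page number and a physical address by shifting. A user-space sketch of that round trip; the window base, page size, and page number are invented, and ilog2_u32() stands in for the kernel's ilog2():

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the window arithmetic listed for pci-epc-mem.c: page_shift is
 * derived from the window's page size, and a page number is converted to and
 * from a physical address by shifting. Base and sizes are made up.
 */
static unsigned int ilog2_u32(uint32_t v)	/* stand-in for the kernel's ilog2() */
{
	unsigned int shift = 0;

	while (v >>= 1)
		shift++;
	return shift;
}

int main(void)
{
	uint64_t phys_base = 0x90000000ULL;	/* hypothetical outbound window base */
	uint32_t page_size = 0x1000;		/* 4 KiB allocation granule */
	unsigned int page_shift = ilog2_u32(page_size);

	unsigned long pageno = 5;		/* page picked by the bitmap allocator */
	uint64_t phys_addr = phys_base + ((uint64_t)pageno << page_shift);

	/* Freeing goes the other way: address back to a page number. */
	unsigned long back = (phys_addr - phys_base) >> page_shift;

	printf("page %lu <-> phys 0x%llx (round trip %lu)\n",
	       pageno, (unsigned long long)phys_addr, back);
	return 0;
}
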
|
/drivers/infiniband/core/ |
D | umem_odp.c |
    215  size_t page_size = 1UL << umem_odp->page_shift;  in ib_init_umem_odp()
    231  umem_odp->page_shift;  in ib_init_umem_odp()
    319  umem_odp->page_shift = PAGE_SHIFT;  in ib_umem_odp_alloc_implicit()
    362  odp_data->page_shift = PAGE_SHIFT;  in ib_umem_odp_alloc_child()
    415  umem_odp->page_shift = PAGE_SHIFT;  in ib_umem_odp_get()
    428  umem_odp->page_shift = huge_page_shift(h);  in ib_umem_odp_get()
    525  ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift),  in ib_umem_odp_map_dma_single_page()
    552  (page_index << umem_odp->page_shift),  in ib_umem_odp_map_dma_single_page()
    554  ((page_index + 1) << umem_odp->page_shift));  in ib_umem_odp_map_dma_single_page()
    596  unsigned int flags = 0, page_shift;  in ib_umem_odp_map_dma_pages() local
    [all …]
|
/drivers/net/ethernet/mellanox/mlx4/ |
D | mr.c |
    194  int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,  in mlx4_mtt_init() argument
    201  mtt->page_shift = MLX4_ICM_PAGE_SHIFT;  in mlx4_mtt_init()
    204  mtt->page_shift = page_shift;  in mlx4_mtt_init()
    419  int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc_reserved() argument
    428  return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_alloc_reserved()
    528  int npages, int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc() argument
    538  access, npages, page_shift, mr);  in mlx4_mr_alloc()
    591  int page_shift, struct mlx4_mpt_entry *mpt_entry)  in mlx4_mr_rereg_mem_write() argument
    595  err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_rereg_mem_write()
    601  mpt_entry->entity_size = cpu_to_be32(page_shift);  in mlx4_mr_rereg_mem_write()
    [all …]
|
D | alloc.c |
    584  buf->page_shift = get_order(size) + PAGE_SHIFT;  in mlx4_buf_direct_alloc()
    593  while (t & ((1 << buf->page_shift) - 1)) {  in mlx4_buf_direct_alloc()
    594  --buf->page_shift;  in mlx4_buf_direct_alloc()
    618  buf->page_shift = PAGE_SHIFT;  in mlx4_buf_alloc()
    789  err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,  in mlx4_alloc_hwq_res()
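
mlx4_buf_direct_alloc() (line 584 above) seeds page_shift with get_order(size) + PAGE_SHIFT, i.e. the shift of the smallest page-aligned power of two covering the request, before shrinking it with the same alignment loop seen in the hns and mlx5 allocators. A sketch of that starting value, assuming a 4 KiB PAGE_SIZE and using get_order_sketch() as a stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the starting value used in mlx4_buf_direct_alloc() above:
 * get_order(size) + PAGE_SHIFT is the shift of the smallest page-aligned
 * power of two that covers the requested size. PAGE_SHIFT is assumed to be
 * 12 here; get_order_sketch() stands in for the kernel helper.
 */
#define PAGE_SHIFT_SKETCH 12

static unsigned int get_order_sketch(size_t size)
{
	unsigned int order = 0;
	size_t block = 1UL << PAGE_SHIFT_SKETCH;

	while (block < size) {
		block <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	size_t size = 20 * 1024;	/* 20 KiB request */
	unsigned int page_shift = get_order_sketch(size) + PAGE_SHIFT_SKETCH;

	printf("size %zu -> order %u -> page_shift %u (block %lu bytes)\n",
	       size, get_order_sketch(size), page_shift, 1UL << page_shift);
	return 0;
}
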
|
D | cq.c |
    205  cq_context->log_page_size = mtt->page_shift - 12;  in mlx4_cq_resize()
    338  1UL << buf->page_shift);  in mlx4_init_kernel_cqes()
    384  cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;  in mlx4_cq_alloc()
|
/drivers/infiniband/hw/mlx5/ |
D | mem.c |
    129  int page_shift, size_t offset, size_t num_pages,  in __mlx5_ib_populate_pas() argument
    132  int shift = page_shift - PAGE_SHIFT;  in __mlx5_ib_populate_pas()
    193  int page_shift, __be64 *pas, int access_flags)  in mlx5_ib_populate_pas() argument
    195  return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,  in mlx5_ib_populate_pas()
    199  int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)  in mlx5_ib_get_buf_offset() argument
    207  page_size = (u64)1 << page_shift;  in mlx5_ib_get_buf_offset()
|
D | mr.c |
    737  static int get_octo_len(u64 addr, u64 len, int page_shift)  in get_octo_len() argument
    739  u64 page_size = 1ULL << page_shift;  in get_octo_len()
    744  npages = ALIGN(len + offset, page_size) >> page_shift;  in get_octo_len()
    757  struct ib_umem **umem, int *npages, int *page_shift,  in mr_umem_get() argument
    776  *page_shift = odp->page_shift;  in mr_umem_get()
    778  *npages = *ncont << (*page_shift - PAGE_SHIFT);  in mr_umem_get()
    789  page_shift, ncont, order);  in mr_umem_get()
    801  *npages, *ncont, *order, *page_shift);  in mr_umem_get()
    852  int page_shift, int order, int access_flags)  in alloc_mr_from_cache() argument
    886  void *xlt, int page_shift, size_t size,  in populate_xlt() argument
    [all …]
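
Line 744 above (get_octo_len()) counts the pages spanned by a user buffer: the start offset within the first page is added to the length before rounding up to whole pages. A standalone sketch with invented address and length; the real helper then converts that page count into the octoword units its name refers to, which is omitted here.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the page count computed at get_octo_len() line 744 above: the
 * buffer may start mid-page, so the start offset is added to the length
 * before rounding up to whole pages. Addresses and lengths are invented.
 */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	unsigned int page_shift = 12;		/* 4 KiB pages */
	uint64_t page_size = 1ULL << page_shift;
	uint64_t addr = 0x10000f00;		/* buffer starts mid-page */
	uint64_t len  = 0x2200;			/* roughly 8.5 KiB of data */

	uint64_t offset = addr & (page_size - 1);
	uint64_t npages = ALIGN_UP(len + offset, page_size) >> page_shift;

	printf("addr 0x%llx len 0x%llx -> %llu pages\n",
	       (unsigned long long)addr, (unsigned long long)len,
	       (unsigned long long)npages);
	return 0;
}
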
|
D | srq.c |
    55  int page_shift;  in create_srq_user() local
    91  &page_shift, &ncont, NULL);  in create_srq_user()
    92  err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,  in create_srq_user()
    105  mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);  in create_srq_user()
    113  in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;  in create_srq_user()
    164  mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);  in create_srq_kernel()
    179  in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;  in create_srq_kernel()
|
D | cq.c |
    687  int page_shift;  in create_cq_user() local
    723  mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,  in create_cq_user()
    726  ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);  in create_cq_user()
    737  mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);  in create_cq_user()
    741  page_shift - MLX5_ADAPTER_PAGE_SHIFT);  in create_cq_user()
    858  cq->buf.frag_buf.page_shift -  in create_cq_kernel()
    1094  int *page_shift, int *cqe_size)  in resize_user() argument
    1120  mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,  in resize_user()
    1215  int page_shift;  in mlx5_ib_resize_cq() local
    1242  err = resize_user(dev, cq, entries, udata, &npas, &page_shift,  in mlx5_ib_resize_cq()
    [all …]
|
D | odp.c |
    280  for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {  in mlx5_ib_invalidate_range()
    281  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;  in mlx5_ib_invalidate_range()
    615  int npages = 0, current_seq, page_shift, ret, np;  in pagefault_mr() local
    637  page_shift = odp->page_shift;  in pagefault_mr()
    638  page_mask = ~(BIT(page_shift) - 1);  in pagefault_mr()
    639  start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;  in pagefault_mr()
    676  page_shift, MLX5_IB_UPD_XLT_ATOMIC);  in pagefault_mr()
    689  u32 new_mappings = (np << page_shift) -  in pagefault_mr()
    690  (io_virt - round_down(io_virt, 1 << page_shift));  in pagefault_mr()
    694  npages += np << (page_shift - PAGE_SHIFT);  in pagefault_mr()
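
pagefault_mr() (lines 638-639 above) rounds the MR's iova down to a page boundary with page_mask and turns the faulting virtual address into a page index relative to that base. A standalone sketch with invented addresses:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the fault-index arithmetic listed for mlx5 odp.c: page_mask
 * rounds the MR's iova down to a page boundary, and the faulting virtual
 * address becomes a page index relative to that base. Addresses are invented.
 */
int main(void)
{
	unsigned int page_shift = 12;
	uint64_t page_mask = ~((1ULL << page_shift) - 1);

	uint64_t iova    = 0x7f0000001200ULL;	/* MR start, not page aligned */
	uint64_t io_virt = 0x7f0000004a00ULL;	/* faulting address */

	uint64_t start_idx = (io_virt - (iova & page_mask)) >> page_shift;

	printf("fault at 0x%llx -> page index %llu within the MR\n",
	       (unsigned long long)io_virt, (unsigned long long)start_idx);
	return 0;
}
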
|
D | mlx5_ib.h |
    312  unsigned int page_shift;  member
    480  unsigned int page_shift;  member
    1160  int page_shift, int flags);
    1188  int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
    1215  int page_shift, size_t offset, size_t num_pages,
    1218  int page_shift, __be64 *pas, int access_flags);
|
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | alloc.c |
    81  buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;  in mlx5_buf_alloc_node()
    94  while (t & ((1 << buf->page_shift) - 1)) {  in mlx5_buf_alloc_node()
    95  --buf->page_shift;  in mlx5_buf_alloc_node()
    128  buf->page_shift = PAGE_SHIFT;  in mlx5_frag_buf_alloc_node()
    142  if (frag->map & ((1 << buf->page_shift) - 1)) {  in mlx5_frag_buf_alloc_node()
    146  &frag->map, buf->page_shift);  in mlx5_frag_buf_alloc_node()
    295  addr = buf->frags->map + (i << buf->page_shift);  in mlx5_fill_page_array()
|
/drivers/infiniband/sw/rdmavt/ |
D | mr.c |
    412  mr->mr.page_shift = PAGE_SHIFT;  in rvt_reg_user_mr()
    604  u32 ps = 1 << mr->mr.page_shift;  in rvt_set_page()
    605  u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;  in rvt_set_page()
    639  mr->mr.page_shift = PAGE_SHIFT;  in rvt_map_mr_sg()
    758  fmr->mr.page_shift = fmr_attr->page_shift;  in rvt_alloc_fmr()
    806  ps = 1 << fmr->mr.page_shift;  in rvt_map_phys_fmr()
    970  if (mr->page_shift) {  in rvt_lkey_ok()
    978  entries_spanned_by_off = off >> mr->page_shift;  in rvt_lkey_ok()
    979  off -= (entries_spanned_by_off << mr->page_shift);  in rvt_lkey_ok()
    1077  if (mr->page_shift) {  in rvt_rkey_ok()
    [all …]
|
/drivers/mtd/nand/raw/ |
D | nand_bbt.c |
    180  from = ((loff_t)page) << this->page_shift;  in read_bbt()
    396  scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,  in read_abs_bbts()
    405  scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,  in read_abs_bbts()
    532  int blocktopage = this->bbt_erase_shift - this->page_shift;  in search_bbt()
    634  (this->bbt_erase_shift - this->page_shift);  in get_bbt_block()
    662  page = block << (this->bbt_erase_shift - this->page_shift);  in get_bbt_block()
    765  page = block << (this->bbt_erase_shift - this->page_shift);  in write_bbt()
    786  to = ((loff_t)page) << this->page_shift;  in write_bbt()
    802  ops.ooblen = (len >> this->page_shift) * mtd->oobsize;  in write_bbt()
    809  pageoffs = page - (int)(to >> this->page_shift);  in write_bbt()
    [all …]
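
The nand_bbt.c hits convert between erase blocks, pages, and byte offsets purely by shifting: a block's first page is block << (bbt_erase_shift - page_shift), and a page's byte offset is page << page_shift. A sketch assuming a hypothetical NAND geometry of 2 KiB pages and 128 KiB erase blocks:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the conversions listed for nand_bbt.c: a page number becomes a
 * byte offset via page_shift, and an erase block becomes its first page via
 * the difference between the erase-block shift and the page shift. The shift
 * values model an assumed NAND with 2 KiB pages and 128 KiB erase blocks.
 */
int main(void)
{
	unsigned int page_shift = 11;		/* 2 KiB pages */
	unsigned int bbt_erase_shift = 17;	/* 128 KiB erase blocks */

	unsigned int block = 3;
	unsigned int page = block << (bbt_erase_shift - page_shift);	/* first page of block */
	uint64_t from = (uint64_t)page << page_shift;			/* byte offset of that page */

	printf("block %u -> page %u -> offset 0x%llx\n",
	       block, page, (unsigned long long)from);
	return 0;
}
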
|
/drivers/infiniband/sw/rxe/ |
D | rxe_mr.c |
    192  mem->page_shift = PAGE_SHIFT;  in rxe_mem_init_user()
    275  if (likely(mem->page_shift)) {  in lookup_iova()
    277  offset >>= mem->page_shift;  in lookup_iova()
    607  page_size = 1 << mem->page_shift;  in rxe_mem_map_pages()
    626  mem->length = num_pages << mem->page_shift;  in rxe_mem_map_pages()
|
/drivers/vfio/ |
D | vfio_iommu_spapr_tce.c |
    193  return page_shift(compound_head(page)) >= it_page_shift;  in tce_page_is_contained()
    609  __u32 page_shift,  in tce_iommu_create_table() argument
    616  table_size = table_group->ops->get_table_size(page_shift, window_size,  in tce_iommu_create_table()
    626  page_shift, window_size, levels, ptbl);  in tce_iommu_create_table()
    644  __u32 page_shift, __u64 window_size, __u32 levels,  in tce_iommu_create_window() argument
    663  if (!(table_group->pgsizes & (1ULL << page_shift)))  in tce_iommu_create_window()
    673  page_shift, window_size, levels, &tbl);  in tce_iommu_create_window()
    1087  create.page_shift,  in tce_iommu_ioctl()
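
Line 663 above validates a requested TCE page size against the group's pgsizes bitmap, in which bit N set means 2^N-byte pages are supported. A standalone sketch with an invented pgsizes value:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the support check listed for vfio_iommu_spapr_tce.c: pgsizes is a
 * bitmap in which bit N set means the IOMMU group supports 2^N-byte pages, so
 * a requested page_shift is validated by testing that bit. The pgsizes value
 * below is invented.
 */
int main(void)
{
	uint64_t pgsizes = (1ULL << 12) | (1ULL << 16) | (1ULL << 24);	/* 4K, 64K, 16M */
	uint32_t page_shift = 16;	/* caller asks for 64 KiB TCE pages */

	if (!(pgsizes & (1ULL << page_shift)))
		printf("page_shift %u not supported\n", page_shift);
	else
		printf("page_shift %u supported\n", page_shift);
	return 0;
}
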
|
/drivers/misc/mic/host/ |
D | mic_x100.c |
    515  dma_addr >> mdev->smpt->info.page_shift);  in mic_x100_smpt_set()
    532  info->page_shift = 34;  in mic_x100_smpt_hw_init()
    533  info->page_size = (1ULL << info->page_shift);  in mic_x100_smpt_hw_init()
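
Lines 515 and 532-533 above show a hardware-fixed page_shift: the X100 SMPT uses 34, i.e. 16 GiB entries, and a host DMA address selects its SMPT slot by a right shift. A sketch with an invented address:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the SMPT arithmetic listed for mic_x100.c: the hardware uses a
 * fixed page_shift of 34 (16 GiB per SMPT entry), and a DMA address maps to
 * its SMPT slot by shifting right by page_shift. The address is invented.
 */
int main(void)
{
	uint8_t page_shift = 34;
	uint64_t page_size = 1ULL << page_shift;	/* 16 GiB per SMPT entry */
	uint64_t dma_addr = 0x9c0000000ULL;		/* hypothetical host DMA address */

	printf("SMPT page size %llu bytes, addr 0x%llx -> entry %llu\n",
	       (unsigned long long)page_size, (unsigned long long)dma_addr,
	       (unsigned long long)(dma_addr >> page_shift));
	return 0;
}
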
|
D | mic_smpt.h |
    40  u8 page_shift;  member
|
/drivers/infiniband/hw/mthca/ |
D | mthca_mr.c |
    578  if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)  in mthca_fmr_alloc()
    636  mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);  in mthca_fmr_alloc()
    697  page_mask = (1 << fmr->attr.page_shift) - 1;  in mthca_check_fmr()
    745  mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));  in mthca_tavor_map_phys_fmr()
    795  fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));  in mthca_arbel_map_phys_fmr()
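
mthca_fmr_alloc() (line 578 above) bounds the FMR page_shift to the range the MPT entry can encode, and the map helpers compute the region length as list_len << page_shift. A standalone sketch with invented values:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the FMR checks listed for mthca_mr.c: the page shift must fall in
 * the range shown at line 578 (12..31), and the mapped length is the page
 * count times 1 << page_shift. The shift and list length are invented.
 */
int main(void)
{
	int page_shift = 16;
	int list_len = 8;	/* number of pages mapped by the FMR */

	if (page_shift < 12 || page_shift >= 32) {
		printf("page_shift %d out of range\n", page_shift);
		return 1;
	}

	uint64_t page_mask = (1ULL << page_shift) - 1;
	uint64_t length = (uint64_t)list_len << page_shift;

	printf("page_mask 0x%llx, mapped length %llu bytes\n",
	       (unsigned long long)page_mask, (unsigned long long)length);
	return 0;
}
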
|