/drivers/gpu/drm/i915/selftests/ |
D | scatterlist.c |
     53  unsigned int npages = npages_fn(n, pt->st.nents, rnd);   in expect_pfn_sg() local
     61  if (sg->length != npages * PAGE_SIZE) {   in expect_pfn_sg()
     63  __func__, who, npages * PAGE_SIZE, sg->length);   in expect_pfn_sg()
     70  pfn += npages;   in expect_pfn_sg()
    209  unsigned long npages)   in page_contiguous() argument
    211  return first + npages == last;   in page_contiguous()
    242  unsigned long npages = npages_fn(n, count, rnd);   in alloc_table() local
    246  pfn_to_page(pfn + npages),   in alloc_table()
    247  npages)) {   in alloc_table()
    254  sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);   in alloc_table()
    [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | pagealloc.c |
     52  s32 npages;   member
    199  s32 *npages, int boot)   in mlx5_cmd_query_pages() argument
    215  *npages = MLX5_GET(query_pages_out, out, num_pages);   in mlx5_cmd_query_pages()
    353  static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,   in give_pages() argument
    366  inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);   in give_pages()
    374  for (i = 0; i < npages; i++) {   in give_pages()
    381  dev->priv.fw_pages_alloc_failed += (npages - i);   in give_pages()
    393  MLX5_SET(manage_pages_in, in, input_num_entries, npages);   in give_pages()
    408  func_id, npages, err);   in give_pages()
    413  dev->priv.page_counters[func_type] += npages;   in give_pages()
    [all …]
|
/drivers/infiniband/hw/hfi1/ |
D | pin_system.c |
     20  unsigned int npages;   member
     55  static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)   in sdma_cache_evict() argument
     60  evict_data.target = npages;   in sdma_cache_evict()
     66  unsigned int start, unsigned int npages)   in unpin_vector_pages() argument
     68  hfi1_release_user_pages(mm, pages + start, npages, false);   in unpin_vector_pages()
     79  if (node->npages) {   in free_system_node()
     81  node->npages);   in free_system_node()
     82  atomic_sub(node->npages, &node->pq->n_locked);   in free_system_node()
    116  struct sdma_mmu_node *node, int npages)   in pin_system_pages() argument
    122  pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);   in pin_system_pages()
    [all …]
|
D | user_pages.c |
     30  u32 nlocked, u32 npages)   in hfi1_can_pin_pages() argument
     47  if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)   in hfi1_can_pin_pages()
     66  if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))   in hfi1_can_pin_pages()
     74  if (nlocked + npages > cache_limit_pages)   in hfi1_can_pin_pages()
     80  int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,   in hfi1_acquire_user_pages() argument
     86  ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);   in hfi1_acquire_user_pages()
     96  size_t npages, bool dirty)   in hfi1_release_user_pages() argument
     98  unpin_user_pages_dirty_lock(p, npages, dirty);   in hfi1_release_user_pages()
    101  atomic64_sub(npages, &mm->pinned_vm);   in hfi1_release_user_pages()
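The user_pages.c hits above show the usual acquire/release pairing: pin_user_pages_fast() on the way in, unpin_user_pages_dirty_lock() plus an mm->pinned_vm adjustment on the way out. Below is a minimal sketch of that pattern using only core MM APIs; the helper names are illustrative and the hfi1-specific limit checks from hfi1_can_pin_pages() are omitted.

```c
#include <linux/mm.h>
#include <linux/mm_types.h>

/* Pin npages of user memory at vaddr and charge them to mm->pinned_vm. */
static int pin_and_account(struct mm_struct *mm, unsigned long vaddr,
			   size_t npages, bool writable, struct page **pages)
{
	unsigned int gup_flags = writable ? FOLL_WRITE : 0;
	int pinned;

	pinned = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
	if (pinned < 0)
		return pinned;

	/* Charge only what was actually pinned; a short pin is reported, not hidden. */
	atomic64_add(pinned, &mm->pinned_vm);
	return pinned;
}

/* Mirror of the above: optionally dirty the pages, unpin, drop the charge. */
static void unpin_and_unaccount(struct mm_struct *mm, struct page **pages,
				size_t npages, bool dirty)
{
	unpin_user_pages_dirty_lock(pages, npages, dirty);
	atomic64_sub(npages, &mm->pinned_vm);
}
```

The release side must subtract exactly what the acquire side added, otherwise mm->pinned_vm drifts and later limit checks misfire.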
|
D | user_exp_rcv.c |
     16  static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
     20  u16 pageidx, unsigned int npages);
    136  unsigned int npages,   in unpin_rcv_pages() argument
    145  node->npages * PAGE_SIZE, DMA_FROM_DEVICE);   in unpin_rcv_pages()
    152  hfi1_release_user_pages(mm, pages, npages, mapped);   in unpin_rcv_pages()
    153  fd->tid_n_pinned -= npages;   in unpin_rcv_pages()
    162  unsigned int npages = tidbuf->npages;   in pin_rcv_pages() local
    167  if (npages > fd->uctxt->expected_count) {   in pin_rcv_pages()
    173  pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);   in pin_rcv_pages()
    182  if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {   in pin_rcv_pages()
    [all …]
|
/drivers/gpu/drm/i915/gem/selftests/ |
D | mock_dmabuf.c |
     21  err = sg_alloc_table(st, mock->npages, GFP_KERNEL);   in mock_map_dma_buf()
     26  for (i = 0; i < mock->npages; i++) {   in mock_map_dma_buf()
     58  for (i = 0; i < mock->npages; i++)   in mock_dmabuf_release()
     69  vaddr = vm_map_ram(mock->pages, mock->npages, 0);   in mock_dmabuf_vmap()
     81  vm_unmap_ram(map->vaddr, mock->npages);   in mock_dmabuf_vunmap()
     98  static struct dma_buf *mock_dmabuf(int npages)   in mock_dmabuf() argument
    105  mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),   in mock_dmabuf()
    110  mock->npages = npages;   in mock_dmabuf()
    111  for (i = 0; i < npages; i++) {   in mock_dmabuf()
    118  exp_info.size = npages * PAGE_SIZE;   in mock_dmabuf()
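mock_dmabuf_vmap() above maps its whole page array into one contiguous kernel virtual range with vm_map_ram() and tears it down with vm_unmap_ram() over the same npages count. A small sketch of that pair, assuming a hypothetical page_array container:

```c
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>

/* Hypothetical container: npages struct page pointers plus the mapping. */
struct page_array {
	struct page **pages;
	unsigned int npages;
	void *vaddr;
};

static int page_array_vmap(struct page_array *pa)
{
	/* NUMA_NO_NODE: let the allocator pick the node for the vmap area. */
	pa->vaddr = vm_map_ram(pa->pages, pa->npages, NUMA_NO_NODE);
	return pa->vaddr ? 0 : -ENOMEM;
}

static void page_array_vunmap(struct page_array *pa)
{
	/* The count passed here must match the count used for vm_map_ram(). */
	vm_unmap_ram(pa->vaddr, pa->npages);
	pa->vaddr = NULL;
}
```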
|
D | huge_gem_object.c |
     32  unsigned int npages; /* restricted by sg_alloc_table */   in huge_get_pages() local
     37  if (overflows_type(obj->base.size / PAGE_SIZE, npages))   in huge_get_pages()
     40  npages = obj->base.size / PAGE_SIZE;   in huge_get_pages()
     45  if (sg_alloc_table(pages, npages, GFP)) {   in huge_get_pages()
     63  if (nreal < npages) {   in huge_get_pages()
|
/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_migrate.c |
     49  svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,   in svm_migrate_gart_map() argument
     65  num_bytes = npages * 8;   in svm_migrate_gart_map()
     93  amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);   in svm_migrate_gart_map()
    125  uint64_t *vram, uint64_t npages,   in svm_migrate_copy_memory_gart() argument
    138  while (npages) {   in svm_migrate_copy_memory_gart()
    139  size = min(GTT_MAX_PAGES, npages);   in svm_migrate_copy_memory_gart()
    164  npages -= size;   in svm_migrate_copy_memory_gart()
    165  if (npages) {   in svm_migrate_copy_memory_gart()
    268  for (i = 0; i < migrate->npages; i++) {   in svm_migrate_successful_pages()
    281  for (i = 0; i < migrate->npages; i++) {   in svm_migrate_unsuccessful_pages()
    [all …]
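svm_migrate_copy_memory_gart() above caps each pass at GTT_MAX_PAGES and loops until npages is consumed. A generic sketch of that chunking loop; process_in_chunks(), MAX_CHUNK_PAGES, and the step callback are illustrative names, not amdkfd API.

```c
#include <linux/minmax.h>
#include <linux/types.h>

#define MAX_CHUNK_PAGES 512ULL	/* illustrative per-pass limit */

/* Walk npages in bounded chunks, calling step() once per chunk. */
static int process_in_chunks(u64 npages,
			     int (*step)(u64 first_page, u64 count, void *ctx),
			     void *ctx)
{
	u64 done = 0;

	while (npages) {
		u64 count = min_t(u64, npages, MAX_CHUNK_PAGES);
		int ret = step(done, count, ctx);

		if (ret)
			return ret;
		done += count;
		npages -= count;
	}
	return 0;
}
```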
|
/drivers/vfio/ |
D | iova_bitmap.c |
     45  unsigned long npages;   member
    164  unsigned long npages;   in iova_bitmap_get() local
    174  npages = DIV_ROUND_UP((bitmap->mapped_total_index -   in iova_bitmap_get()
    188  npages = min(npages + !!offset_in_page(addr),   in iova_bitmap_get()
    191  ret = pin_user_pages_fast((unsigned long)addr, npages,   in iova_bitmap_get()
    196  mapped->npages = (unsigned long)ret;   in iova_bitmap_get()
    218  if (mapped->npages) {   in iova_bitmap_put()
    219  unpin_user_pages(mapped->pages, mapped->npages);   in iova_bitmap_put()
    220  mapped->npages = 0;   in iova_bitmap_put()
    302  bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff;   in iova_bitmap_mapped_remaining()
    [all …]
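iova_bitmap_get() above turns a byte range at an arbitrary (possibly unaligned) user address into a page count before calling pin_user_pages_fast(). The arithmetic, as a one-line helper; span_npages() is a made-up name:

```c
#include <linux/math.h>
#include <linux/mm.h>

/* Pages needed to cover `length` bytes starting at an unaligned address. */
static unsigned long span_npages(unsigned long addr, size_t length)
{
	return DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
}
```

With 4 KiB pages, addr = 0x1ff0 and length = 32 spans two pages even though length is far smaller than PAGE_SIZE.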
|
/drivers/infiniband/hw/mthca/ |
D | mthca_memfree.c |
     69  dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,   in mthca_free_icm_pages()
     72  for (i = 0; i < chunk->npages; ++i)   in mthca_free_icm_pages()
     81  for (i = 0; i < chunk->npages; ++i) {   in mthca_free_icm_coherent()
    137  struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,   in mthca_alloc_icm() argument
    157  while (npages > 0) {   in mthca_alloc_icm()
    165  chunk->npages = 0;   in mthca_alloc_icm()
    170  while (1 << cur_order > npages)   in mthca_alloc_icm()
    175  &chunk->mem[chunk->npages],   in mthca_alloc_icm()
    178  ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],   in mthca_alloc_icm()
    182  ++chunk->npages;   in mthca_alloc_icm()
    [all …]
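mthca_alloc_icm() above (and mlx4_alloc_icm() further down) fills each chunk with high-order allocations, first shrinking cur_order so 1 << cur_order does not overshoot the pages still needed, then falling back to smaller orders when allocation fails. A simplified sketch of that loop; alloc_chunk_pages() and MAX_ORDER_HINT are illustrative.

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

#define MAX_ORDER_HINT 3	/* start by trying 8-page (32 KiB) allocations */

/* Fill one scatterlist entry with up to npages pages; returns pages allocated. */
static int alloc_chunk_pages(struct scatterlist *sg, int npages, gfp_t gfp)
{
	int cur_order = MAX_ORDER_HINT;
	struct page *page;

	if (npages <= 0)
		return -EINVAL;

	/* Shrink the order until it no longer overshoots what is left. */
	while ((1 << cur_order) > npages)
		--cur_order;

	while (cur_order >= 0) {
		page = alloc_pages(gfp | __GFP_NOWARN, cur_order);
		if (page) {
			sg_set_page(sg, page, PAGE_SIZE << cur_order, 0);
			return 1 << cur_order;
		}
		--cur_order;	/* allocation failed: try a smaller order */
	}
	return -ENOMEM;
}
```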
|
D | mthca_allocator.c |
    195  int npages, shift;   in mthca_buf_alloc() local
    202  npages = 1;   in mthca_buf_alloc()
    214  npages *= 2;   in mthca_buf_alloc()
    217  dma_list = kmalloc_array(npages, sizeof(*dma_list),   in mthca_buf_alloc()
    222  for (i = 0; i < npages; ++i)   in mthca_buf_alloc()
    226  npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;   in mthca_buf_alloc()
    229  dma_list = kmalloc_array(npages, sizeof(*dma_list),   in mthca_buf_alloc()
    234  buf->page_list = kmalloc_array(npages,   in mthca_buf_alloc()
    240  for (i = 0; i < npages; ++i)   in mthca_buf_alloc()
    243  for (i = 0; i < npages; ++i) {   in mthca_buf_alloc()
    [all …]
|
/drivers/fpga/ |
D | dfl-afu-dma-region.c |
     37  int npages = region->length >> PAGE_SHIFT;   in afu_dma_pin_pages() local
     41  ret = account_locked_vm(current->mm, npages, true);   in afu_dma_pin_pages()
     45  region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);   in afu_dma_pin_pages()
     51  pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,   in afu_dma_pin_pages()
     56  } else if (pinned != npages) {   in afu_dma_pin_pages()
     70  account_locked_vm(current->mm, npages, false);   in afu_dma_pin_pages()
     85  long npages = region->length >> PAGE_SHIFT;   in afu_dma_unpin_pages() local
     88  unpin_user_pages(region->pages, npages);   in afu_dma_unpin_pages()
     90  account_locked_vm(current->mm, npages, false);   in afu_dma_unpin_pages()
     92  dev_dbg(dev, "%ld pages unpinned\n", npages);   in afu_dma_unpin_pages()
    [all …]
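afu_dma_pin_pages() above charges the region against the caller's locked-memory limit with account_locked_vm() before pinning, and treats a partial pin as a failure. A condensed sketch of that sequence; pin_region() is a made-up name and the length is assumed page aligned, as in the DFL driver.

```c
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>

static int pin_region(unsigned long user_addr, unsigned long length,
		      struct page ***pagesp)
{
	int npages = length >> PAGE_SHIFT;	/* length assumed page aligned */
	struct page **pages;
	int pinned, ret;

	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto unaccount;
	}

	pinned = pin_user_pages_fast(user_addr, npages, FOLL_WRITE, pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != npages) {
		/* Partial pin: release what was pinned and report failure. */
		unpin_user_pages(pages, pinned);
		ret = -EFAULT;
		goto free_pages;
	}

	*pagesp = pages;
	return 0;

free_pages:
	kfree(pages);
unaccount:
	account_locked_vm(current->mm, npages, false);
	return ret;
}
```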
|
/drivers/infiniband/core/ |
D | ib_core_uverbs.c |
    141  pgoff, entry->npages);   in rdma_user_mmap_entry_get_pgoff()
    171  if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {   in rdma_user_mmap_entry_get()
    191  for (i = 0; i < entry->npages; i++)   in rdma_user_mmap_entry_free()
    196  entry->start_pgoff, entry->npages);   in rdma_user_mmap_entry_free()
    269  u32 xa_first, xa_last, npages;   in rdma_user_mmap_entry_insert_range() local
    290  npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);   in rdma_user_mmap_entry_insert_range()
    291  entry->npages = npages;   in rdma_user_mmap_entry_insert_range()
    301  if (check_add_overflow(xa_first, npages, &xa_last))   in rdma_user_mmap_entry_insert_range()
    328  entry->start_pgoff, npages);   in rdma_user_mmap_entry_insert_range()
|
D | umem.c |
    152  unsigned long npages;   in ib_umem_get() local
    191  npages = ib_umem_num_pages(umem);   in ib_umem_get()
    192  if (npages == 0 || npages > UINT_MAX) {   in ib_umem_get()
    199  new_pinned = atomic64_add_return(npages, &mm->pinned_vm);   in ib_umem_get()
    201  atomic64_sub(npages, &mm->pinned_vm);   in ib_umem_get()
    211  while (npages) {   in ib_umem_get()
    214  min_t(unsigned long, npages,   in ib_umem_get()
    224  npages -= pinned;   in ib_umem_get()
    228  npages, GFP_KERNEL);   in ib_umem_get()
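ib_umem_get() above charges mm->pinned_vm optimistically with atomic64_add_return() and backs the charge out when the lock limit would be exceeded. A minimal sketch of just that check; charge_pinned_vm() is a hypothetical helper.

```c
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>

static int charge_pinned_vm(struct mm_struct *mm, unsigned long npages)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	u64 new_pinned;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		/* Over the limit and not privileged: undo the charge. */
		atomic64_sub(npages, &mm->pinned_vm);
		return -ENOMEM;
	}
	return 0;
}
```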
|
/drivers/net/ethernet/mellanox/mlx4/ |
D | icm.c |
     60  dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,   in mlx4_free_icm_pages()
     63  for (i = 0; i < chunk->npages; ++i)   in mlx4_free_icm_pages()
     72  for (i = 0; i < chunk->npages; ++i)   in mlx4_free_icm_coherent()
    132  struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,   in mlx4_alloc_icm() argument
    159  while (npages > 0) {   in mlx4_alloc_icm()
    179  while (1 << cur_order > npages)   in mlx4_alloc_icm()
    188  &chunk->buf[chunk->npages],   in mlx4_alloc_icm()
    191  ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],   in mlx4_alloc_icm()
    202  ++chunk->npages;   in mlx4_alloc_icm()
    206  else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {   in mlx4_alloc_icm()
    [all …]
|
D | mr.c |
    194  int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,   in mlx4_mtt_init() argument
    199  if (!npages) {   in mlx4_mtt_init()
    206  for (mtt->order = 0, i = 1; i < npages; i <<= 1)   in mlx4_mtt_init()
    418  u64 iova, u64 size, u32 access, int npages,   in mlx4_mr_alloc_reserved() argument
    428  return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);   in mlx4_mr_alloc_reserved()
    528  int npages, int page_shift, struct mlx4_mr *mr)   in mlx4_mr_alloc() argument
    538  access, npages, page_shift, mr);   in mlx4_mr_alloc()
    590  u64 iova, u64 size, int npages,   in mlx4_mr_rereg_mem_write() argument
    595  err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);   in mlx4_mr_rereg_mem_write()
    693  int start_index, int npages, u64 *page_list)   in mlx4_write_mtt_chunk() argument
    [all …]
|
/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_misc.c |
     53  u64 npages, bool alloc_pages)   in pvrdma_page_dir_init() argument
     57  if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)   in pvrdma_page_dir_init()
     67  pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;   in pvrdma_page_dir_init()
     81  pdir->npages = npages;   in pvrdma_page_dir_init()
     84  pdir->pages = kcalloc(npages, sizeof(*pdir->pages),   in pvrdma_page_dir_init()
     89  for (i = 0; i < pdir->npages; i++) {   in pvrdma_page_dir_init()
    127  for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {   in pvrdma_page_dir_cleanup_pages()
    173  if (idx >= pdir->npages)   in pvrdma_page_dir_insert_dma()
    189  if (offset >= pdir->npages)   in pvrdma_page_dir_insert_umem()
    212  if (num_pages > pdir->npages)   in pvrdma_page_dir_insert_page_list()
|
D | pvrdma_mr.c |
    122  int ret, npages;   in pvrdma_reg_user_mr() local
    136  npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);   in pvrdma_reg_user_mr()
    137  if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {   in pvrdma_reg_user_mr()
    139  npages);   in pvrdma_reg_user_mr()
    154  ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);   in pvrdma_reg_user_mr()
    171  cmd->nchunks = npages;   in pvrdma_reg_user_mr()
    306  if (mr->npages == mr->max_pages)   in pvrdma_set_page()
    309  mr->pages[mr->npages++] = addr;   in pvrdma_set_page()
    320  mr->npages = 0;   in pvrdma_map_mr_sg()
|
/drivers/iommu/iommufd/ |
D | pages.c |
    163  static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)   in iopt_pages_add_npinned() argument
    167  rc = check_add_overflow(pages->npinned, npages, &pages->npinned);   in iopt_pages_add_npinned()
    169  WARN_ON(rc || pages->npinned > pages->npages);   in iopt_pages_add_npinned()
    172  static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)   in iopt_pages_sub_npinned() argument
    176  rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);   in iopt_pages_sub_npinned()
    178  WARN_ON(rc || pages->npinned > pages->npages);   in iopt_pages_sub_npinned()
    186  unsigned long npages = last_index - start_index + 1;   in iopt_pages_err_unpin() local
    188  unpin_user_pages(page_list, npages);   in iopt_pages_err_unpin()
    189  iopt_pages_sub_npinned(pages, npages);   in iopt_pages_err_unpin()
    616  size_t npages)   in batch_from_pages() argument
    [all …]
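iopt_pages_add_npinned() and iopt_pages_sub_npinned() above keep the pin counter overflow- and underflow-safe with check_add_overflow()/check_sub_overflow(), and warn if it ever exceeds the number of pages backing the object. A standalone sketch of the same idea; struct pin_stats and its helpers are illustrative.

```c
#include <linux/bug.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct pin_stats {
	size_t npinned;	/* pages currently pinned */
	size_t npages;	/* total pages backing the object */
};

static void pin_stats_add(struct pin_stats *s, size_t npages)
{
	/* check_add_overflow() returns true if the sum wrapped. */
	bool rc = check_add_overflow(s->npinned, npages, &s->npinned);

	WARN_ON(rc || s->npinned > s->npages);
}

static void pin_stats_sub(struct pin_stats *s, size_t npages)
{
	/* check_sub_overflow() returns true if the subtraction underflowed. */
	bool rc = check_sub_overflow(s->npinned, npages, &s->npinned);

	WARN_ON(rc || s->npinned > s->npages);
}
```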
|
/drivers/gpu/drm/i915/gem/ |
D | i915_gem_internal.c |
     38  unsigned int npages; /* restricted by sg_alloc_table */   in i915_gem_object_get_pages_internal() local
     43  if (overflows_type(obj->base.size >> PAGE_SHIFT, npages))   in i915_gem_object_get_pages_internal()
     46  npages = obj->base.size >> PAGE_SHIFT;   in i915_gem_object_get_pages_internal()
     62  if (sg_alloc_table(st, npages, GFP_KERNEL)) {   in i915_gem_object_get_pages_internal()
     71  int order = min(fls(npages) - 1, max_order);   in i915_gem_object_get_pages_internal()
     89  npages -= 1 << order;   in i915_gem_object_get_pages_internal()
     90  if (!npages) {   in i915_gem_object_get_pages_internal()
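Both huge_get_pages() (listed earlier) and i915_gem_object_get_pages_internal() guard the size-to-page-count conversion with overflows_type() before handing the result to sg_alloc_table(), which takes an unsigned int. A minimal sketch of that guard; size_to_npages() is a made-up helper, and overflows_type() is assumed to come from <linux/overflow.h> as in recent kernels.

```c
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int size_to_npages(u64 size, unsigned int *out)
{
	unsigned int npages;

	/* Reject object sizes whose page count does not fit in unsigned int. */
	if (overflows_type(size >> PAGE_SHIFT, npages))
		return -E2BIG;

	npages = size >> PAGE_SHIFT;
	*out = npages;
	return 0;
}
```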
|
/drivers/gpu/drm/etnaviv/ |
D | etnaviv_gem_prime.c |
     20  int npages = obj->size >> PAGE_SHIFT;   in etnaviv_gem_prime_get_sg_table() local
     25  return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);   in etnaviv_gem_prime_get_sg_table()
    117  int ret, npages;   in etnaviv_gem_prime_import_sg_table() local
    126  npages = size / PAGE_SIZE;   in etnaviv_gem_prime_import_sg_table()
    129  etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);   in etnaviv_gem_prime_import_sg_table()
    135  ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);   in etnaviv_gem_prime_import_sg_table()
|
/drivers/infiniband/hw/usnic/ |
D | usnic_uiom.c |
     95  unsigned long npages;   in usnic_uiom_get_pages() local
    121  npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;   in usnic_uiom_get_pages()
    126  locked = atomic64_add_return(npages, &current->mm->pinned_vm);   in usnic_uiom_get_pages()
    139  while (npages) {   in usnic_uiom_get_pages()
    141  min_t(unsigned long, npages,   in usnic_uiom_get_pages()
    148  npages -= ret;   in usnic_uiom_get_pages()
    181  atomic64_sub(npages, &current->mm->pinned_vm);   in usnic_uiom_get_pages()
    213  int npages;   in __usnic_uiom_reg_release() local
    219  npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;   in __usnic_uiom_reg_release()
    221  vpn_last = vpn_start + npages - 1;   in __usnic_uiom_reg_release()
    [all …]
|
/drivers/infiniband/hw/mlx4/ |
D | mr.c |
     98  int *start_index, int *npages)   in mlx4_ib_umem_write_mtt_block() argument
    127  pages[*npages] = cur_start_addr + (mtt_size * k);   in mlx4_ib_umem_write_mtt_block()
    128  (*npages)++;   in mlx4_ib_umem_write_mtt_block()
    133  if (*npages == PAGE_SIZE / sizeof(u64)) {   in mlx4_ib_umem_write_mtt_block()
    135  *npages, pages);   in mlx4_ib_umem_write_mtt_block()
    139  (*start_index) += *npages;   in mlx4_ib_umem_write_mtt_block()
    140  *npages = 0;   in mlx4_ib_umem_write_mtt_block()
    192  int npages = 0;   in mlx4_ib_umem_write_mtt() local
    218  &npages);   in mlx4_ib_umem_write_mtt()
    235  &start_index, &npages);   in mlx4_ib_umem_write_mtt()
    [all …]
|
/drivers/gpu/drm/nouveau/ |
D | nouveau_dmem.c |
     60  typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
    376  unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;   in nouveau_dmem_evict_chunk() local
    381  src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);   in nouveau_dmem_evict_chunk()
    382  dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);   in nouveau_dmem_evict_chunk()
    383  dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);   in nouveau_dmem_evict_chunk()
    386  npages);   in nouveau_dmem_evict_chunk()
    388  for (i = 0; i < npages; i++) {   in nouveau_dmem_evict_chunk()
    406  migrate_device_pages(src_pfns, dst_pfns, npages);   in nouveau_dmem_evict_chunk()
    408  migrate_device_finalize(src_pfns, dst_pfns, npages);   in nouveau_dmem_evict_chunk()
    411  for (i = 0; i < npages; i++)   in nouveau_dmem_evict_chunk()
    [all …]
|
/drivers/vfio/pci/pds/ |
D | lm.c |
     18  unsigned long long npages;   in pds_vfio_get_lm_file() local
     44  npages = DIV_ROUND_UP_ULL(size, PAGE_SIZE);   in pds_vfio_get_lm_file()
     45  pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);   in pds_vfio_get_lm_file()
     54  for (unsigned long long i = 0; i < npages; i++) {   in pds_vfio_get_lm_file()
     66  if (sg_alloc_table_from_pages(&lm_file->sg_table, pages, npages, 0,   in pds_vfio_get_lm_file()
     72  lm_file->npages = npages;   in pds_vfio_get_lm_file()
     74  lm_file->alloc_size = npages * PAGE_SIZE;   in pds_vfio_get_lm_file()
    269  lm_file->size, lm_file->alloc_size, lm_file->npages);   in pds_vfio_get_save_file()
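pds_vfio_get_lm_file() above allocates npages backing pages and wraps them in a scatter-gather table with sg_alloc_table_from_pages(). A self-contained sketch of that sequence; alloc_backing_sgt() is an illustrative name and, unlike the driver, it drops the page-pointer array once the table records the pages.

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/math.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int alloc_backing_sgt(struct sg_table *sgt, unsigned long long size)
{
	unsigned long long npages = DIV_ROUND_UP_ULL(size, PAGE_SIZE);
	struct page **pages;
	unsigned long long i;
	int ret;

	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto err_free;
		}
	}

	ret = sg_alloc_table_from_pages(sgt, pages, npages, 0,
					npages * PAGE_SIZE, GFP_KERNEL);
	if (ret)
		goto err_free;

	kfree(pages);	/* the sg_table entries now point at the pages */
	return 0;

err_free:
	while (i--)
		__free_page(pages[i]);
	kfree(pages);
	return ret;
}
```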
|