/kernel/linux/linux-5.10/tools/testing/selftests/vm/
  D  hmm-tests.c
     123  unsigned long npages)  in hmm_dmirror_cmd() argument
     131  cmd.npages = npages;  in hmm_dmirror_cmd()
     223  unsigned long npages;  in TEST_F() local
     230  npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;  in TEST_F()
     231  ASSERT_NE(npages, 0);  in TEST_F()
     232  size = npages << self->page_shift;  in TEST_F()
     265  ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);  in TEST_F()
     267  ASSERT_EQ(buffer->cpages, npages);  in TEST_F()
     287  unsigned long npages;  in TEST_F() local
     293  npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;  in TEST_F()
     [all …]

/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
  D  scatterlist.c
     53  unsigned int npages = npages_fn(n, pt->st.nents, rnd);  in expect_pfn_sg() local
     61  if (sg->length != npages * PAGE_SIZE) {  in expect_pfn_sg()
     63  __func__, who, npages * PAGE_SIZE, sg->length);  in expect_pfn_sg()
     70  pfn += npages;  in expect_pfn_sg()
     209  unsigned long npages)  in page_contiguous() argument
     211  return first + npages == last;  in page_contiguous()
     238  unsigned long npages = npages_fn(n, count, rnd);  in alloc_table() local
     242  pfn_to_page(pfn + npages),  in alloc_table()
     243  npages)) {  in alloc_table()
     250  sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);  in alloc_table()
     [all …]

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/
  D  pagealloc.c
     52  s32 npages;  member
     180  s32 *npages, int boot)  in mlx5_cmd_query_pages() argument
     196  *npages = MLX5_GET(query_pages_out, out, num_pages);  in mlx5_cmd_query_pages()
     333  static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,  in give_pages() argument
     344  inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);  in give_pages()
     352  for (i = 0; i < npages; i++) {  in give_pages()
     369  MLX5_SET(manage_pages_in, in, input_num_entries, npages);  in give_pages()
     375  func_id, npages, err);  in give_pages()
     379  dev->priv.fw_pages += npages;  in give_pages()
     381  dev->priv.vfs_pages += npages;  in give_pages()
     [all …]

/kernel/linux/linux-5.10/arch/sparc/kernel/
  D  iommu.c
     158  unsigned long npages)  in alloc_npages() argument
     162  entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,  in alloc_npages()
     204  int npages, nid;  in dma_4u_alloc_coherent() local
     233  npages = size >> IO_PAGE_SHIFT;  in dma_4u_alloc_coherent()
     235  while (npages--) {  in dma_4u_alloc_coherent()
     251  unsigned long order, npages;  in dma_4u_free_coherent() local
     253  npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;  in dma_4u_free_coherent()
     256  iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);  in dma_4u_free_coherent()
     271  unsigned long flags, npages, oaddr;  in dma_4u_map_page() local
     283  npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);  in dma_4u_map_page()
     [all …]
  D  pci_sun4v.c
     60  unsigned long npages; /* Number of pages in list. */  member
     74  p->npages = 0;  in iommu_batch_start()
     91  unsigned long npages = p->npages;  in iommu_batch_flush() local
     100  while (npages != 0) {  in iommu_batch_flush()
     104  npages,  in iommu_batch_flush()
     112  npages, prot, __pa(pglist),  in iommu_batch_flush()
     117  index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),  in iommu_batch_flush()
     135  npages -= num;  in iommu_batch_flush()
     140  p->npages = 0;  in iommu_batch_flush()
     149  if (p->entry + p->npages == entry)  in iommu_batch_new_entry()
     [all …]

/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/selftests/
  D  mock_dmabuf.c
     21  err = sg_alloc_table(st, mock->npages, GFP_KERNEL);  in mock_map_dma_buf()
     26  for (i = 0; i < mock->npages; i++) {  in mock_map_dma_buf()
     58  for (i = 0; i < mock->npages; i++)  in mock_dmabuf_release()
     68  return vm_map_ram(mock->pages, mock->npages, 0);  in mock_dmabuf_vmap()
     75  vm_unmap_ram(vaddr, mock->npages);  in mock_dmabuf_vunmap()
     92  static struct dma_buf *mock_dmabuf(int npages)  in mock_dmabuf() argument
     99  mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),  in mock_dmabuf()
     104  mock->npages = npages;  in mock_dmabuf()
     105  for (i = 0; i < npages; i++) {  in mock_dmabuf()
     112  exp_info.size = npages * PAGE_SIZE;  in mock_dmabuf()

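The mock_dmabuf.c hits above follow a common pattern: keep an npages count next to a page array, build a scatterlist with one entry per page, and map the whole set contiguously with vm_map_ram(). Below is a minimal sketch of that pattern, not the mock_dmabuf.c code itself; the names my_buf, my_buf_map and my_buf_vmap are illustrative, while sg_alloc_table(), sg_set_page(), for_each_sg(), vm_map_ram() and vm_unmap_ram() are the real APIs the snippets use.

```c
/*
 * Illustrative sketch only: cover npages individual pages with an
 * sg_table and map them into a contiguous kernel virtual range.
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct my_buf {
	unsigned int npages;
	struct page *pages[];		/* npages entries */
};

static struct sg_table *my_buf_map(struct my_buf *buf)
{
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int i;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, buf->npages, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	/* one scatterlist entry per page, each PAGE_SIZE long */
	for_each_sg(st->sgl, sg, buf->npages, i)
		sg_set_page(sg, buf->pages[i], PAGE_SIZE, 0);

	return st;
}

static void *my_buf_vmap(struct my_buf *buf)
{
	/* third argument is the NUMA node hint; the mock above passes 0 */
	return vm_map_ram(buf->pages, buf->npages, 0);
}

static void my_buf_vunmap(struct my_buf *buf, void *vaddr)
{
	vm_unmap_ram(vaddr, buf->npages);
}
```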
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
  D  user_pages.c
     72  u32 nlocked, u32 npages)  in hfi1_can_pin_pages() argument
     97  if (pinned + npages >= ulimit && !can_lock)  in hfi1_can_pin_pages()
     100  return ((nlocked + npages) <= size) || can_lock;  in hfi1_can_pin_pages()
     103  int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,  in hfi1_acquire_user_pages() argument
     109  ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);  in hfi1_acquire_user_pages()
     119  size_t npages, bool dirty)  in hfi1_release_user_pages() argument
     121  unpin_user_pages_dirty_lock(p, npages, dirty);  in hfi1_release_user_pages()
     124  atomic64_sub(npages, &mm->pinned_vm);  in hfi1_release_user_pages()
  D  user_exp_rcv.c
     58  static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
     62  u16 pageidx, unsigned int npages);
     172  unsigned int npages,  in unpin_rcv_pages() argument
     181  node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);  in unpin_rcv_pages()
     188  hfi1_release_user_pages(mm, pages, npages, mapped);  in unpin_rcv_pages()
     189  fd->tid_n_pinned -= npages;  in unpin_rcv_pages()
     198  unsigned int npages;  in pin_rcv_pages() local
     204  npages = num_user_pages(vaddr, tidbuf->length);  in pin_rcv_pages()
     205  if (!npages)  in pin_rcv_pages()
     208  if (npages > fd->uctxt->expected_count) {  in pin_rcv_pages()
     [all …]

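The user_pages.c hits above are the canonical pin/unpin pairing: pin_user_pages_fast() takes the pages, mm->pinned_vm (an atomic64_t) accounts for them, and unpin_user_pages_dirty_lock() drops them on release, optionally marking them dirty. The sketch below shows that pairing in isolation; the demo_* wrappers are hypothetical, only the gup and atomic calls are the kernel APIs visible in the hits.

```c
/* Hedged sketch of the pin + pinned_vm accounting pattern. */
#include <linux/mm.h>
#include <linux/sched/mm.h>

static int demo_pin_user_range(struct mm_struct *mm, unsigned long vaddr,
			       size_t npages, bool writable,
			       struct page **pages)
{
	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
	int ret;

	ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
	if (ret < 0)
		return ret;

	/* account only for what was actually pinned */
	atomic64_add(ret, &mm->pinned_vm);
	return ret;
}

static void demo_unpin_user_range(struct mm_struct *mm, struct page **pages,
				  size_t npages, bool dirty)
{
	unpin_user_pages_dirty_lock(pages, npages, dirty);
	if (mm)
		atomic64_sub(npages, &mm->pinned_vm);
}
```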
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/
  D  ttm_page_alloc.c
     74  unsigned npages;  member
     247  static void ttm_pages_put(struct page *pages[], unsigned npages,  in ttm_pages_put() argument
     253  if (ttm_set_pages_array_wb(pages, npages))  in ttm_pages_put()
     254  pr_err("Failed to set %d pages to wb!\n", npages);  in ttm_pages_put()
     257  for (i = 0; i < npages; ++i) {  in ttm_pages_put()
     269  pool->npages -= freed_pages;  in ttm_pool_update_free_locked()
     423  count += (pool->npages << pool->order);  in ttm_pool_shrink_count()
     495  unsigned npages = 1 << order;  in ttm_alloc_new_pages() local
     537  for (j = 0; j < npages; ++j) {  in ttm_alloc_new_pages()
     591  && count > pool->npages) {  in ttm_page_pool_fill_locked()
     [all …]

/kernel/linux/linux-5.10/arch/powerpc/kernel/
  D  iommu.c
     165  unsigned long npages,  in iommu_range_alloc() argument
     172  int largealloc = npages > 15;  in iommu_range_alloc()
     184  if (unlikely(npages == 0)) {  in iommu_range_alloc()
     238  n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,  in iommu_range_alloc()
     265  end = n + npages;  in iommu_range_alloc()
     287  void *page, unsigned int npages,  in iommu_alloc() argument
     296  entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);  in iommu_alloc()
     305  build_fail = tbl->it_ops->set(tbl, entry, npages,  in iommu_alloc()
     315  __iommu_free(tbl, ret, npages);  in iommu_alloc()
     330  unsigned int npages)  in iommu_free_check() argument
     [all …]

/kernel/linux/linux-5.10/drivers/fpga/
  D  dfl-afu-dma-region.c
     37  int npages = region->length >> PAGE_SHIFT;  in afu_dma_pin_pages() local
     41  ret = account_locked_vm(current->mm, npages, true);  in afu_dma_pin_pages()
     45  region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);  in afu_dma_pin_pages()
     51  pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,  in afu_dma_pin_pages()
     56  } else if (pinned != npages) {  in afu_dma_pin_pages()
     70  account_locked_vm(current->mm, npages, false);  in afu_dma_pin_pages()
     85  long npages = region->length >> PAGE_SHIFT;  in afu_dma_unpin_pages() local
     88  unpin_user_pages(region->pages, npages);  in afu_dma_unpin_pages()
     90  account_locked_vm(current->mm, npages, false);  in afu_dma_unpin_pages()
     92  dev_dbg(dev, "%ld pages unpinned\n", npages);  in afu_dma_unpin_pages()
     [all …]

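dfl-afu-dma-region.c shows the other accounting style in this list: instead of adjusting mm->pinned_vm directly, it charges npages against the caller's locked-vm limit with account_locked_vm() before pinning, and reverses the charge on any failure or on unpin. The sketch below traces that account-then-pin flow under the assumption of a FOLL_WRITE pin of a page-aligned region; demo_pin_region is a made-up name, while account_locked_vm(), pin_user_pages_fast() and unpin_user_pages() are the real 5.10 APIs used above.

```c
/* Sketch of the account-then-pin pattern, including the partial-pin path. */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>

static int demo_pin_region(unsigned long user_addr, size_t length,
			   struct page ***pages_out)
{
	int npages = length >> PAGE_SHIFT;
	struct page **pages;
	int pinned, ret;

	/* charge the pages against RLIMIT_MEMLOCK up front */
	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto unaccount;
	}

	pinned = pin_user_pages_fast(user_addr, npages, FOLL_WRITE, pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != npages) {
		/* partial pin: release what was pinned and bail out */
		unpin_user_pages(pages, pinned);
		ret = -EFAULT;
		goto free_pages;
	}

	*pages_out = pages;
	return 0;

free_pages:
	kfree(pages);
unaccount:
	account_locked_vm(current->mm, npages, false);
	return ret;
}
```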
/kernel/linux/linux-5.10/arch/x86/mm/
  D  cpu_entry_area.c
     106  unsigned int npages;  in percpu_setup_debug_store() local
     113  npages = sizeof(struct debug_store) / PAGE_SIZE;  in percpu_setup_debug_store()
     115  cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,  in percpu_setup_debug_store()
     123  npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;  in percpu_setup_debug_store()
     124  for (; npages; npages--, cea += PAGE_SIZE)  in percpu_setup_debug_store()
     132  npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
     134  estacks->name## _stack, npages, PAGE_KERNEL); \
     141  unsigned int npages;  in percpu_setup_exception_stacks() local

/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/
  D  mthca_memfree.c
     69  pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,  in mthca_free_icm_pages()
     72  for (i = 0; i < chunk->npages; ++i)  in mthca_free_icm_pages()
     81  for (i = 0; i < chunk->npages; ++i) {  in mthca_free_icm_coherent()
     137  struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,  in mthca_alloc_icm() argument
     157  while (npages > 0) {  in mthca_alloc_icm()
     165  chunk->npages = 0;  in mthca_alloc_icm()
     170  while (1 << cur_order > npages)  in mthca_alloc_icm()
     175  &chunk->mem[chunk->npages],  in mthca_alloc_icm()
     178  ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],  in mthca_alloc_icm()
     182  ++chunk->npages;  in mthca_alloc_icm()
     [all …]
  D  mthca_allocator.c
     200  int npages, shift;  in mthca_buf_alloc() local
     207  npages = 1;  in mthca_buf_alloc()
     219  npages *= 2;  in mthca_buf_alloc()
     222  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
     227  for (i = 0; i < npages; ++i)  in mthca_buf_alloc()
     231  npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;  in mthca_buf_alloc()
     234  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
     239  buf->page_list = kmalloc_array(npages,  in mthca_buf_alloc()
     245  for (i = 0; i < npages; ++i)  in mthca_buf_alloc()
     248  for (i = 0; i < npages; ++i) {  in mthca_buf_alloc()
     [all …]

/kernel/linux/linux-5.10/drivers/infiniband/core/
  D  ib_core_uverbs.c
     141  pgoff, entry->npages);  in rdma_user_mmap_entry_get_pgoff()
     171  if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {  in rdma_user_mmap_entry_get()
     191  for (i = 0; i < entry->npages; i++)  in rdma_user_mmap_entry_free()
     196  entry->start_pgoff, entry->npages);  in rdma_user_mmap_entry_free()
     269  u32 xa_first, xa_last, npages;  in rdma_user_mmap_entry_insert_range() local
     290  npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);  in rdma_user_mmap_entry_insert_range()
     291  entry->npages = npages;  in rdma_user_mmap_entry_insert_range()
     301  if (check_add_overflow(xa_first, npages, &xa_last))  in rdma_user_mmap_entry_insert_range()
     328  entry->start_pgoff, npages);  in rdma_user_mmap_entry_insert_range()
  D  umem.c
     151  unsigned long npages;  in ib_umem_get() local
     191  npages = ib_umem_num_pages(umem);  in ib_umem_get()
     192  if (npages == 0 || npages > UINT_MAX) {  in ib_umem_get()
     199  new_pinned = atomic64_add_return(npages, &mm->pinned_vm);  in ib_umem_get()
     201  atomic64_sub(npages, &mm->pinned_vm);  in ib_umem_get()
     211  while (npages) {  in ib_umem_get()
     214  min_t(unsigned long, npages,  in ib_umem_get()
     222  npages -= ret;  in ib_umem_get()
     225  ib_dma_max_seg_size(device), sg, npages,  in ib_umem_get()

/kernel/linux/linux-5.10/drivers/infiniband/hw/vmw_pvrdma/
  D  pvrdma_misc.c
     53  u64 npages, bool alloc_pages)  in pvrdma_page_dir_init() argument
     57  if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)  in pvrdma_page_dir_init()
     67  pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;  in pvrdma_page_dir_init()
     81  pdir->npages = npages;  in pvrdma_page_dir_init()
     84  pdir->pages = kcalloc(npages, sizeof(*pdir->pages),  in pvrdma_page_dir_init()
     89  for (i = 0; i < pdir->npages; i++) {  in pvrdma_page_dir_init()
     127  for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {  in pvrdma_page_dir_cleanup_pages()
     173  if (idx >= pdir->npages)  in pvrdma_page_dir_insert_dma()
     189  if (offset >= pdir->npages)  in pvrdma_page_dir_insert_umem()
     212  if (num_pages > pdir->npages)  in pvrdma_page_dir_insert_page_list()
  D  pvrdma_mr.c
     122  int ret, npages;  in pvrdma_reg_user_mr() local
     136  npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);  in pvrdma_reg_user_mr()
     137  if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {  in pvrdma_reg_user_mr()
     139  npages);  in pvrdma_reg_user_mr()
     154  ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);  in pvrdma_reg_user_mr()
     171  cmd->nchunks = npages;  in pvrdma_reg_user_mr()
     306  if (mr->npages == mr->max_pages)  in pvrdma_set_page()
     309  mr->pages[mr->npages++] = addr;  in pvrdma_set_page()
     320  mr->npages = 0;  in pvrdma_map_mr_sg()

/kernel/linux/linux-5.10/mm/
  D  hmm.c
     120  const unsigned long hmm_pfns[], unsigned long npages,  in hmm_range_need_fault() argument
     136  for (i = 0; i < npages; ++i) {  in hmm_range_need_fault()
     151  unsigned long i, npages;  in hmm_vma_walk_hole() local
     155  npages = (end - addr) >> PAGE_SHIFT;  in hmm_vma_walk_hole()
     158  hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);  in hmm_vma_walk_hole()
     191  unsigned long pfn, npages, i;  in hmm_vma_handle_pmd() local
     195  npages = (end - addr) >> PAGE_SHIFT;  in hmm_vma_handle_pmd()
     198  hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);  in hmm_vma_handle_pmd()
     328  unsigned long npages = (end - start) >> PAGE_SHIFT;  in hmm_vma_walk_pmd() local
     339  if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {  in hmm_vma_walk_pmd()
     [all …]

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/
  D  icm.c
     60  dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,  in mlx4_free_icm_pages()
     63  for (i = 0; i < chunk->npages; ++i)  in mlx4_free_icm_pages()
     72  for (i = 0; i < chunk->npages; ++i)  in mlx4_free_icm_coherent()
     132  struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,  in mlx4_alloc_icm() argument
     159  while (npages > 0) {  in mlx4_alloc_icm()
     179  while (1 << cur_order > npages)  in mlx4_alloc_icm()
     188  &chunk->buf[chunk->npages],  in mlx4_alloc_icm()
     191  ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],  in mlx4_alloc_icm()
     202  ++chunk->npages;  in mlx4_alloc_icm()
     206  else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {  in mlx4_alloc_icm()
     [all …]
  D  mr.c
     194  int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,  in mlx4_mtt_init() argument
     199  if (!npages) {  in mlx4_mtt_init()
     206  for (mtt->order = 0, i = 1; i < npages; i <<= 1)  in mlx4_mtt_init()
     418  u64 iova, u64 size, u32 access, int npages,  in mlx4_mr_alloc_reserved() argument
     428  return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_alloc_reserved()
     528  int npages, int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc() argument
     538  access, npages, page_shift, mr);  in mlx4_mr_alloc()
     590  u64 iova, u64 size, int npages,  in mlx4_mr_rereg_mem_write() argument
     595  err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_rereg_mem_write()
     693  int start_index, int npages, u64 *page_list)  in mlx4_write_mtt_chunk() argument
     [all …]

/kernel/linux/linux-5.10/drivers/gpu/drm/etnaviv/
  D  etnaviv_gem_prime.c
     17  int npages = obj->size >> PAGE_SHIFT;  in etnaviv_gem_prime_get_sg_table() local
     22  return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);  in etnaviv_gem_prime_get_sg_table()
     111  int ret, npages;  in etnaviv_gem_prime_import_sg_table() local
     120  npages = size / PAGE_SIZE;  in etnaviv_gem_prime_import_sg_table()
     123  etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);  in etnaviv_gem_prime_import_sg_table()
     130  NULL, npages);  in etnaviv_gem_prime_import_sg_table()

/kernel/linux/linux-5.10/arch/powerpc/sysdev/
  D  dart_iommu.c
     173  long npages, unsigned long uaddr,  in dart_build() argument
     181  DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);  in dart_build()
     188  l = npages;  in dart_build()
     196  dart_cache_sync(orig_dp, npages);  in dart_build()
     200  while (npages--)  in dart_build()
     209  static void dart_free(struct iommu_table *tbl, long index, long npages)  in dart_free() argument
     212  long orig_npages = npages;  in dart_free()
     219  DBG("dart: free at: %lx, %lx\n", index, npages);  in dart_free()
     223  while (npages--)  in dart_free()

/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/
  D  i915_gem_internal.c
     39  unsigned int npages;  in i915_gem_object_get_pages_internal() local
     69  npages = obj->base.size / PAGE_SIZE;  in i915_gem_object_get_pages_internal()
     70  if (sg_alloc_table(st, npages, GFP_KERNEL)) {  in i915_gem_object_get_pages_internal()
     80  int order = min(fls(npages) - 1, max_order);  in i915_gem_object_get_pages_internal()
     99  npages -= 1 << order;  in i915_gem_object_get_pages_internal()
     100  if (!npages) {  in i915_gem_object_get_pages_internal()

/kernel/linux/linux-5.10/arch/x86/kvm/svm/
  D  sev.c
     32  unsigned long npages;  member
     329  unsigned long npages, size;  in sev_pin_memory() local
     344  npages = (last - first + 1);  in sev_pin_memory()
     346  locked = sev->pages_locked + npages;  in sev_pin_memory()
     353  if (WARN_ON_ONCE(npages > INT_MAX))  in sev_pin_memory()
     357  size = npages * sizeof(struct page *);  in sev_pin_memory()
     367  npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);  in sev_pin_memory()
     368  if (npinned != npages) {  in sev_pin_memory()
     369  pr_err("SEV: Failure locking %lu pages.\n", npages);  in sev_pin_memory()
     374  *n = npages;  in sev_pin_memory()
     [all …]

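The sev.c hits show npages being derived from an arbitrarily aligned user range (first and last page frame of the span) and checked against RLIMIT_MEMLOCK before any pinning happens. Below is a small hedged sketch of just that computation and check; demo_check_pin_limit is a hypothetical helper, while rlimit(), RLIMIT_MEMLOCK and capable(CAP_IPC_LOCK) are the real kernel interfaces the snippets suggest.

```c
/* Sketch: page count of an unaligned user range plus a memlock-limit check. */
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>	/* rlimit() */

static int demo_check_pin_limit(unsigned long uaddr, unsigned long len,
				unsigned long already_locked,
				unsigned long *npages_out)
{
	unsigned long first, last, npages, lock_limit;

	if (!len)
		return -EINVAL;

	first = uaddr >> PAGE_SHIFT;
	last = (uaddr + len - 1) >> PAGE_SHIFT;
	npages = last - first + 1;	/* covers partially used first/last pages */

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (already_locked + npages > lock_limit && !capable(CAP_IPC_LOCK))
		return -ENOMEM;

	*npages_out = npages;
	return 0;
}
```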