Searched refs:npages (Results 1 – 7 of 7) sorted by relevance
/mm/
hmm.c
  122  const unsigned long hmm_pfns[], unsigned long npages,            in hmm_range_need_fault() argument
  138  for (i = 0; i < npages; ++i) {                                    in hmm_range_need_fault()
  153  unsigned long i, npages;                                          in hmm_vma_walk_hole() local
  157  npages = (end - addr) >> PAGE_SHIFT;                              in hmm_vma_walk_hole()
  160  hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);          in hmm_vma_walk_hole()
  193  unsigned long pfn, npages, i;                                     in hmm_vma_handle_pmd() local
  197  npages = (end - addr) >> PAGE_SHIFT;                              in hmm_vma_handle_pmd()
  200  hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);  in hmm_vma_handle_pmd()
  329  unsigned long npages = (end - start) >> PAGE_SHIFT;               in hmm_vma_walk_pmd() local
  340  if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {    in hmm_vma_walk_pmd()
  [all …]
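The recurring idiom in these hmm.c hits is converting a byte range [addr, end) into a page count with PAGE_SHIFT and then scanning one flag word per page. A minimal userspace sketch of that shape (not the kernel code): PAGE_SHIFT is assumed to be 12 (4 KiB pages), and PFN_REQ_FAULT plus range_need_fault() are hypothetical stand-ins for the HMM request bits and hmm_range_need_fault().

#include <stdio.h>

#define PAGE_SHIFT 12

/* Hypothetical stand-in for an HMM_PFN_REQ_FAULT-style request bit. */
#define PFN_REQ_FAULT 0x1UL

/* Return nonzero if any page in the range still requests a fault
 * that the already-present cpu_flags do not satisfy. */
static unsigned long range_need_fault(const unsigned long *pfns,
                                      unsigned long npages,
                                      unsigned long cpu_flags)
{
    unsigned long i, required = 0;

    for (i = 0; i < npages; ++i)
        required |= pfns[i] & PFN_REQ_FAULT & ~cpu_flags;
    return required;
}

int main(void)
{
    unsigned long addr = 0x1000, end = 0x5000;
    unsigned long npages = (end - addr) >> PAGE_SHIFT; /* 4 pages */
    unsigned long pfns[4] = { 0, PFN_REQ_FAULT, 0, 0 };

    printf("npages = %lu, need fault = %lu\n",
           npages, range_need_fault(pfns, npages, 0));
    return 0;
}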
migrate.c
  2245  migrate->dst[migrate->npages] = 0;                                in migrate_vma_collect_skip()
  2246  migrate->src[migrate->npages++] = 0;                              in migrate_vma_collect_skip()
  2265  migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;              in migrate_vma_collect_hole()
  2266  migrate->dst[migrate->npages] = 0;                                in migrate_vma_collect_hole()
  2267  migrate->npages++;                                                in migrate_vma_collect_hole()
  2445  migrate->dst[migrate->npages] = 0;                                in migrate_vma_collect_pmd()
  2446  migrate->src[migrate->npages++] = mpfn;                           in migrate_vma_collect_pmd()
  2490  migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);  in migrate_vma_collect()
  2557  const unsigned long npages = migrate->npages;                     in migrate_vma_prepare() local
  2564  for (i = 0; (i < npages) && migrate->cpages; i++) {               in migrate_vma_prepare()
  [all …]
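Here npages is a running cursor: each visited slot appends one entry to the parallel src/dst arrays and bumps the counter, and the walked range length falls out as npages << PAGE_SHIFT. A rough sketch of that bookkeeping, where struct demo_migrate is a simplified stand-in for struct migrate_vma and the MIGRATE_PFN_MIGRATE bit value is illustrative:

#include <stdio.h>

#define PAGE_SHIFT 12
#define MIGRATE_PFN_MIGRATE (1UL << 1) /* illustrative bit for the demo */

struct demo_migrate {
    unsigned long start, end;
    unsigned long src[8], dst[8];
    unsigned long npages;
};

/* Record a hole: mark the slot migratable with no backing page yet. */
static void collect_hole(struct demo_migrate *m)
{
    m->src[m->npages] = MIGRATE_PFN_MIGRATE;
    m->dst[m->npages] = 0;
    m->npages++;
}

/* Skip an unmigratable page: both slots stay zero. */
static void collect_skip(struct demo_migrate *m)
{
    m->dst[m->npages] = 0;
    m->src[m->npages++] = 0;
}

int main(void)
{
    struct demo_migrate m = { .start = 0x10000 };

    collect_hole(&m);
    collect_skip(&m);
    collect_hole(&m);
    m.end = m.start + (m.npages << PAGE_SHIFT);
    printf("collected %lu slots, end = 0x%lx\n", m.npages, m.end);
    return 0;
}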
gup.c
  257  static inline void compound_range_next(unsigned long i, unsigned long npages,  in compound_range_next() argument
  264  if (i >= npages)                                                                in compound_range_next()
  271  page + compound_nr(page) - next, npages - i);                                   in compound_range_next()
  283  static inline void compound_next(unsigned long i, unsigned long npages,        in compound_next() argument
  290  if (i >= npages)                                                                in compound_next()
  294  for (nr = i + 1; nr < npages; nr++) {                                           in compound_next()
  331  void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,     in unpin_user_pages_dirty_lock() argument
  339  unpin_user_pages(pages, npages);                                                in unpin_user_pages_dirty_lock()
  343  for_each_compound_head(index, pages, npages, head, ntails) {                    in unpin_user_pages_dirty_lock()
  392  void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,  in unpin_user_page_range_dirty_lock() argument
  [all …]
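The gup.c helpers step through npages in chunks, clamping each chunk with a min() so it never crosses out of the current compound (multi-page) unit: that is what the expression at line 271, compound_nr(page) - next versus npages - i, computes. A sketch of just that stepping logic, with compound_nr() faked as a fixed 4-page size (the kernel reads the real order from the head page):

#include <stdio.h>

#define DEMO_COMPOUND_NR 4 /* pretend every compound page spans 4 pages */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned long npages = 10, i = 0, first_page = 2;

    /* Walk [first_page, first_page + npages) chunk by chunk, the way
     * the unpin helpers batch a head page plus its tails. */
    while (i < npages) {
        unsigned long page = first_page + i;
        unsigned long head = page - (page % DEMO_COMPOUND_NR);
        unsigned long ntails = min_ul(head + DEMO_COMPOUND_NR - page,
                                      npages - i);

        printf("chunk: head=%lu ntails=%lu\n", head, ntails);
        i += ntails;
    }
    return 0;
}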
rmap.c
  2308  long npages = (end - start) >> PAGE_SHIFT;             in make_device_exclusive_range() local
  2311  npages = get_user_pages_remote(mm, start, npages,      in make_device_exclusive_range()
  2314  if (npages < 0)                                        in make_device_exclusive_range()
  2315  return npages;                                         in make_device_exclusive_range()
  2317  for (i = 0; i < npages; i++, start += PAGE_SIZE) {     in make_device_exclusive_range()
  2331  return npages;                                         in make_device_exclusive_range()
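Note the calling convention visible in make_device_exclusive_range(): npages is declared as a signed long, first holds the requested page count, then is reused for the GUP return value, which is either a negative errno or the count actually pinned (possibly fewer than requested). A sketch of that pattern, where demo_gup() is a hypothetical stand-in for get_user_pages_remote():

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static long demo_gup(unsigned long start, long npages)
{
    if (start & (PAGE_SIZE - 1))
        return -EINVAL;   /* reject a misaligned start, errno-style */
    return npages;        /* pretend every page was pinned */
}

int main(void)
{
    unsigned long start = 0x7f0000000000UL;
    unsigned long end = start + 3 * PAGE_SIZE;
    long i, npages = (end - start) >> PAGE_SHIFT;

    npages = demo_gup(start, npages);
    if (npages < 0)
        return (int)-npages; /* propagate the errno, as rmap.c does */

    /* Only iterate over the pages that were actually pinned. */
    for (i = 0; i < npages; i++, start += PAGE_SIZE)
        printf("page %ld at 0x%lx\n", i, start);
    return 0;
}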
mmap.c
  3346  bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)  in may_expand_vm() argument
  3348  if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)                      in may_expand_vm()
  3352  mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {                       in may_expand_vm()
  3355  mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)                    in may_expand_vm()
  3360  (mm->data_vm + npages) << PAGE_SHIFT,                                             in may_expand_vm()
  3371  void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)         in vm_stat_account() argument
  3373  mm->total_vm += npages;                                                           in vm_stat_account()
  3376  mm->exec_vm += npages;                                                            in vm_stat_account()
  3378  mm->stack_vm += npages;                                                           in vm_stat_account()
  3380  mm->data_vm += npages;                                                            in vm_stat_account()
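The unit handling in may_expand_vm() is worth calling out: rlimits are stored in bytes while the mm counters are in pages, so the limit is shifted right by PAGE_SHIFT before comparing (and shifted left again when a byte value is needed, as at line 3360). A minimal sketch of that comparison with made-up numbers, checking only the RLIMIT_AS case:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct demo_mm {
    unsigned long total_vm; /* pages */
    unsigned long data_vm;  /* pages */
};

/* May the address space grow by npages without exceeding the
 * byte-denominated limit? Convert bytes to pages first. */
static bool demo_may_expand_vm(const struct demo_mm *mm,
                               unsigned long rlimit_as_bytes,
                               unsigned long npages)
{
    return mm->total_vm + npages <= (rlimit_as_bytes >> PAGE_SHIFT);
}

int main(void)
{
    struct demo_mm mm = { .total_vm = 1000, .data_vm = 200 };
    unsigned long rlimit_as = 8UL << 20; /* 8 MiB = 2048 pages */

    printf("grow by 512 pages:  %s\n",
           demo_may_expand_vm(&mm, rlimit_as, 512) ? "ok" : "denied");
    printf("grow by 2000 pages: %s\n",
           demo_may_expand_vm(&mm, rlimit_as, 2000) ? "ok" : "denied");
    return 0;
}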
nommu.c
  1351  unsigned long npages;                                  in split_vma() local
  1375  npages = (addr - vma->vm_start) >> PAGE_SHIFT;         in split_vma()
  1381  region->vm_pgoff = new->vm_pgoff += npages;            in split_vma()
  1392  vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;    in split_vma()
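In split_vma() the count is offset arithmetic: whichever half of the mapping ends up starting at the split address must advance its file offset (vm_pgoff, measured in pages) by the number of pages below the split point. A sketch of one case of that arithmetic, with struct demo_vma as a simplified stand-in:

#include <stdio.h>

#define PAGE_SHIFT 12

struct demo_vma {
    unsigned long vm_start, vm_end;
    unsigned long vm_pgoff; /* file offset in pages */
};

int main(void)
{
    struct demo_vma vma = { 0x10000, 0x20000, 4 };
    unsigned long addr = 0x14000; /* page-aligned split point */
    unsigned long npages = (addr - vma.vm_start) >> PAGE_SHIFT;

    /* The upper half starts at addr, so its pgoff advances by npages. */
    struct demo_vma new_vma = { addr, vma.vm_end, vma.vm_pgoff + npages };
    vma.vm_end = addr; /* the original keeps the lower half */

    printf("low:  [0x%lx, 0x%lx) pgoff=%lu\n",
           vma.vm_start, vma.vm_end, vma.vm_pgoff);
    printf("high: [0x%lx, 0x%lx) pgoff=%lu\n",
           new_vma.vm_start, new_vma.vm_end, new_vma.vm_pgoff);
    return 0;
}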
hugetlb.c
  4299  unsigned long npages = pages_per_huge_page(h);    in copy_hugetlb_page_range() local
  4400  npages);                                          in copy_hugetlb_page_range()
  4435  hugetlb_count_add(npages, dst);                   in copy_hugetlb_page_range()
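Here npages is fetched once per hstate because every copied huge PTE accounts for that many base pages at a time. A tiny sketch of the arithmetic, with the 2 MiB huge page and 4 KiB base page sizes assumed for the demo (the kernel derives them from the hstate):

#include <stdio.h>

#define PAGE_SHIFT       12
#define DEMO_HPAGE_SHIFT 21 /* assume 2 MiB huge pages */

int main(void)
{
    unsigned long npages = 1UL << (DEMO_HPAGE_SHIFT - PAGE_SHIFT); /* 512 */
    unsigned long hugetlb_count = 0;
    int copied_huge_ptes = 3;

    /* Analogue of calling hugetlb_count_add(npages, dst) per huge PTE. */
    for (int i = 0; i < copied_huge_ptes; i++)
        hugetlb_count += npages;

    printf("pages per huge page: %lu, total counted: %lu\n",
           npages, hugetlb_count);
    return 0;
}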