Searched refs: npages  (Results 1 – 5 of 5, sorted by relevance)
Directory: /mm/
hmm.c
    358  const uint64_t *pfns, unsigned long npages,                    in hmm_range_need_fault() argument
    370  for (i = 0; i < npages; ++i) {                                 in hmm_range_need_fault()
    384  unsigned long i, npages;                                       in hmm_vma_walk_hole() local
    388  npages = (end - addr) >> PAGE_SHIFT;                           in hmm_vma_walk_hole()
    390  hmm_range_need_fault(hmm_vma_walk, pfns, npages,               in hmm_vma_walk_hole()
    410  unsigned long pfn, npages, i;                                  in hmm_vma_handle_pmd() local
    414  npages = (end - addr) >> PAGE_SHIFT;                           in hmm_vma_handle_pmd()
    416  hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,    in hmm_vma_handle_pmd()
    571  unsigned long npages;                                          in hmm_vma_walk_pmd() local
    575  npages = (end - addr) >> PAGE_SHIFT;                           in hmm_vma_walk_pmd()
    [all …]
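All of the hmm.c hits share one idiom: the walker is handed a page-aligned, half-open [addr, end) range and turns it into a page count with (end - addr) >> PAGE_SHIFT before touching the pfns array. A minimal user-space sketch of just that arithmetic (PAGE_SHIFT is hard-coded to 12, i.e. 4 KiB pages, purely for illustration; the real value is architecture-dependent):

    /*
     * Sketch only, not kernel code: mirrors the (end - addr) >> PAGE_SHIFT
     * pattern seen in hmm_vma_walk_hole(), hmm_vma_handle_pmd() and
     * hmm_vma_walk_pmd(). PAGE_SHIFT of 12 (4 KiB pages) is assumed.
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12UL
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static unsigned long range_npages(unsigned long addr, unsigned long end)
    {
            /* addr and end are assumed page-aligned half-open bounds. */
            return (end - addr) >> PAGE_SHIFT;
    }

    int main(void)
    {
            unsigned long addr = 0x7f0000000000UL;
            unsigned long end  = addr + 16 * PAGE_SIZE;

            printf("npages = %lu\n", range_npages(addr, end)); /* prints 16 */
            return 0;
    }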
migrate.c
   2143  migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;           in migrate_vma_collect_hole()
   2144  migrate->dst[migrate->npages] = 0;                             in migrate_vma_collect_hole()
   2145  migrate->npages++;                                             in migrate_vma_collect_hole()
   2160  migrate->dst[migrate->npages] = 0;                             in migrate_vma_collect_skip()
   2161  migrate->src[migrate->npages++] = 0;                           in migrate_vma_collect_skip()
   2318  migrate->dst[migrate->npages] = 0;                             in migrate_vma_collect_pmd()
   2319  migrate->src[migrate->npages++] = mpfn;                        in migrate_vma_collect_pmd()
   2356  migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);  in migrate_vma_collect()
   2423  const unsigned long npages = migrate->npages;                  in migrate_vma_prepare() local
   2430  for (i = 0; (i < npages) && migrate->cpages; i++) {            in migrate_vma_prepare()
    [all …]
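The migrate.c hits show how migrate->npages grows: each collect callback fills one src/dst slot and bumps npages, and migrate_vma_collect() later recomputes end as start + (npages << PAGE_SHIFT). A stripped-down sketch of that bookkeeping, with an invented collect_state struct standing in for struct migrate_vma (only the fields visible above are modelled, and PAGE_SHIFT of 12 is assumed):

    /*
     * Illustrative sketch, not the kernel's struct migrate_vma: only start,
     * end, npages, src and dst are mirrored here.
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12UL
    #define NPAGES_MAX 8UL

    struct collect_state {
            unsigned long start;
            unsigned long end;
            unsigned long npages;              /* entries appended so far */
            unsigned long src[NPAGES_MAX];
            unsigned long dst[NPAGES_MAX];
    };

    /* Mimics the collect_hole/collect_skip/collect_pmd pattern: fill one
     * src/dst slot per page, then post-increment npages. */
    static void collect_one(struct collect_state *c, unsigned long src_pfn)
    {
            c->dst[c->npages] = 0;
            c->src[c->npages++] = src_pfn;
    }

    int main(void)
    {
            struct collect_state c = { .start = 0x100000 };

            collect_one(&c, 0);                /* a skipped page          */
            collect_one(&c, 42);               /* a collected pfn         */

            /* migrate_vma_collect() recomputes end the same way: */
            c.end = c.start + (c.npages << PAGE_SHIFT);
            printf("npages=%lu end=%#lx\n", c.npages, c.end);
            return 0;
    }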
mmap.c
   3300  bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)  in may_expand_vm() argument
   3302  if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)   in may_expand_vm()
   3306  mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {    in may_expand_vm()
   3309  mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) in may_expand_vm()
   3314  (mm->data_vm + npages) << PAGE_SHIFT,                          in may_expand_vm()
   3325  void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)  in vm_stat_account() argument
   3327  mm->total_vm += npages;                                        in vm_stat_account()
   3330  mm->exec_vm += npages;                                         in vm_stat_account()
   3332  mm->stack_vm += npages;                                        in vm_stat_account()
   3334  mm->data_vm += npages;                                         in vm_stat_account()
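may_expand_vm() and vm_stat_account() both treat npages as a delta that is checked or charged against per-mm counters, with the rlimits (expressed in bytes) shifted down to pages for the comparison. The same arithmetic can be sketched in user space with the POSIX getrlimit() interface; the helper below is illustrative only and its names are invented for the example:

    /*
     * User-space analogue of the may_expand_vm() check above: compare a page
     * count against RLIMIT_AS converted from bytes to pages. Not kernel code.
     */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/resource.h>

    static int may_expand(unsigned long current_pages, unsigned long npages)
    {
            struct rlimit rl;
            long page_size = sysconf(_SC_PAGESIZE);

            if (getrlimit(RLIMIT_AS, &rl) != 0)
                    return 0;
            if (rl.rlim_cur == RLIM_INFINITY)
                    return 1;

            /* kernel form: mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT */
            return current_pages + npages <= rl.rlim_cur / (unsigned long)page_size;
    }

    int main(void)
    {
            printf("can map 256 more pages: %s\n",
                   may_expand(1024, 256) ? "yes" : "no");
            return 0;
    }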
gup.c
     54  void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,  in put_user_pages_dirty_lock() argument
     66  put_user_pages(pages, npages);                                 in put_user_pages_dirty_lock()
     70  for (index = 0; index < npages; index++) {                     in put_user_pages_dirty_lock()
    108  void put_user_pages(struct page **pages, unsigned long npages) in put_user_pages() argument
    117  for (index = 0; index < npages; index++)                       in put_user_pages()
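put_user_pages() walks the pages array once, releasing each of the npages entries, while put_user_pages_dirty_lock() either falls through to that same call or does its own pass so it can dirty each page first. A user-space analogue of the release loop, with an invented refcounted fake_page type standing in for struct page:

    /*
     * Illustrative analogue only: fake_page is made up for this example and
     * is not the kernel's struct page.
     */
    #include <stdio.h>

    struct fake_page {
            int refcount;
    };

    static void put_page_ref(struct fake_page *p)
    {
            if (--p->refcount == 0)
                    printf("page %p released\n", (void *)p);
    }

    /* Mirrors the shape of put_user_pages(struct page **pages, unsigned long npages). */
    static void put_pages(struct fake_page **pages, unsigned long npages)
    {
            unsigned long index;

            for (index = 0; index < npages; index++)
                    put_page_ref(pages[index]);
    }

    int main(void)
    {
            struct fake_page a = { .refcount = 1 }, b = { .refcount = 1 };
            struct fake_page *pages[] = { &a, &b };

            put_pages(pages, 2);
            return 0;
    }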
nommu.c
   1384  unsigned long npages;                                          in split_vma() local
   1408  npages = (addr - vma->vm_start) >> PAGE_SHIFT;                 in split_vma()
   1414  region->vm_pgoff = new->vm_pgoff += npages;                    in split_vma()
   1425  vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;            in split_vma()
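In the nommu split_vma() hits, npages is the size of the head part in pages, and whichever mapping ends up as the tail has its vm_pgoff advanced by that amount so it still points at the right place in the backing object. A small sketch of just that offset arithmetic, using a made-up mapping struct rather than vm_area_struct (PAGE_SHIFT of 12 assumed):

    /*
     * Sketch of the split_vma() page-offset arithmetic above; only the
     * fields needed for the pgoff adjustment are modelled.
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12UL

    struct mapping {
            unsigned long vm_start;
            unsigned long vm_end;
            unsigned long vm_pgoff;    /* offset into the backing file, in pages */
    };

    /* Split 'old' at page-aligned 'addr'; 'tail' becomes the second half. */
    static void split_mapping(struct mapping *old, struct mapping *tail,
                              unsigned long addr)
    {
            unsigned long npages = (addr - old->vm_start) >> PAGE_SHIFT;

            *tail = *old;
            tail->vm_start = addr;
            tail->vm_pgoff += npages;  /* tail starts npages further into the file */
            old->vm_end = addr;
    }

    int main(void)
    {
            struct mapping old = { .vm_start = 0x10000, .vm_end = 0x20000,
                                   .vm_pgoff = 0 };
            struct mapping tail;

            split_mapping(&old, &tail, 0x14000);
            printf("tail pgoff = %lu\n", tail.vm_pgoff); /* 4 pages in */
            return 0;
    }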