Home
last modified time | relevance | path

Searched refs: npages (Results 1 – 5 of 5) sorted by relevance

/mm/
Dhmm.c 120 const unsigned long hmm_pfns[], unsigned long npages, in hmm_range_need_fault() argument
136 for (i = 0; i < npages; ++i) { in hmm_range_need_fault()
151 unsigned long i, npages; in hmm_vma_walk_hole() local
155 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_walk_hole()
158 hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0); in hmm_vma_walk_hole()
191 unsigned long pfn, npages, i; in hmm_vma_handle_pmd() local
195 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_handle_pmd()
198 hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags); in hmm_vma_handle_pmd()
328 unsigned long npages = (end - start) >> PAGE_SHIFT; in hmm_vma_walk_pmd() local
339 if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) { in hmm_vma_walk_pmd()
[all …]
Dmigrate.c 2261 migrate->src[migrate->npages] = 0; in migrate_vma_collect_hole()
2262 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_hole()
2263 migrate->npages++; in migrate_vma_collect_hole()
2269 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; in migrate_vma_collect_hole()
2270 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_hole()
2271 migrate->npages++; in migrate_vma_collect_hole()
2286 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_skip()
2287 migrate->src[migrate->npages++] = 0; in migrate_vma_collect_skip()
2460 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_pmd()
2461 migrate->src[migrate->npages++] = mpfn; in migrate_vma_collect_pmd()
[all …]
Dmmap.c 3512 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) in may_expand_vm() argument
3514 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) in may_expand_vm()
3518 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { in may_expand_vm()
3521 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) in may_expand_vm()
3526 (mm->data_vm + npages) << PAGE_SHIFT, in may_expand_vm()
3537 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) in vm_stat_account() argument
3539 mm->total_vm += npages; in vm_stat_account()
3542 mm->exec_vm += npages; in vm_stat_account()
3544 mm->stack_vm += npages; in vm_stat_account()
3546 mm->data_vm += npages; in vm_stat_account()
Dgup.c 298 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
310 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
314 for (index = 0; index < npages; index++) { in unpin_user_pages_dirty_lock()
352 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
361 if (WARN_ON(IS_ERR_VALUE(npages))) in unpin_user_pages()
368 for (index = 0; index < npages; index++) in unpin_user_pages()
Dnommu.c 1359 unsigned long npages; in split_vma() local
1383 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1389 region->vm_pgoff = new->vm_pgoff += npages; in split_vma()
1400 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()