/mm/
D | mmap.c |
     76  struct vm_area_struct *vma, struct vm_area_struct *prev,
    112  void vma_set_page_prot(struct vm_area_struct *vma)   in vma_set_page_prot() argument
    114  unsigned long vm_flags = vma->vm_flags;   in vma_set_page_prot()
    117  vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);   in vma_set_page_prot()
    118  if (vma_wants_writenotify(vma, vm_page_prot)) {   in vma_set_page_prot()
    123  WRITE_ONCE(vma->vm_page_prot, vm_page_prot);   in vma_set_page_prot()
    129  static void __remove_shared_vm_struct(struct vm_area_struct *vma,   in __remove_shared_vm_struct() argument
    132  if (vma->vm_flags & VM_DENYWRITE)   in __remove_shared_vm_struct()
    134  if (vma->vm_flags & VM_SHARED)   in __remove_shared_vm_struct()
    138  vma_interval_tree_remove(vma, &mapping->i_mmap);   in __remove_shared_vm_struct()
         [all …]
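The paths indexed above (vma_set_page_prot(), __remove_shared_vm_struct()) run when a mapping is set up or torn down. As a minimal userspace sketch — illustrative only, not code from mm/mmap.c, assuming a Linux system — the following program creates and removes one anonymous VMA via mmap(2)/munmap(2):

/* Sketch: create and destroy a single anonymous private mapping. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4 * 4096;

    /* MAP_PRIVATE | MAP_ANONYMOUS creates a new anonymous VMA. */
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    memset(p, 0x5a, len);                  /* fault the pages in */
    printf("mapped %zu bytes at %p\n", len, p);

    if (munmap(p, len))                    /* tears the VMA back down */
        perror("munmap");
    return 0;
}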
|
D | nommu.c |
     99  struct vm_area_struct *vma;   in kobjsize() local
    101  vma = find_vma(current->mm, (unsigned long)objp);   in kobjsize()
    102  if (vma)   in kobjsize()
    103  return vma->vm_end - vma->vm_start;   in kobjsize()
    118  struct vm_area_struct *vma;   in __get_user_pages() local
    131  vma = find_vma(mm, start);   in __get_user_pages()
    132  if (!vma)   in __get_user_pages()
    136  if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||   in __get_user_pages()
    137  !(vm_flags & vma->vm_flags))   in __get_user_pages()
    146  vmas[i] = vma;   in __get_user_pages()
         [all …]
|
D | mremap.c |
     59  static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,   in alloc_new_pmd() argument
     84  static void take_rmap_locks(struct vm_area_struct *vma)   in take_rmap_locks() argument
     86  if (vma->vm_file)   in take_rmap_locks()
     87  i_mmap_lock_write(vma->vm_file->f_mapping);   in take_rmap_locks()
     88  if (vma->anon_vma)   in take_rmap_locks()
     89  anon_vma_lock_write(vma->anon_vma);   in take_rmap_locks()
     92  static void drop_rmap_locks(struct vm_area_struct *vma)   in drop_rmap_locks() argument
     94  if (vma->anon_vma)   in drop_rmap_locks()
     95  anon_vma_unlock_write(vma->anon_vma);   in drop_rmap_locks()
     96  if (vma->vm_file)   in drop_rmap_locks()
         [all …]
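mremap.c implements mremap(2); take_rmap_locks()/drop_rmap_locks() above show the rmap locks held while page-table entries are moved. A minimal userspace sketch that exercises this path (illustrative, not taken from the indexed source, assuming Linux and glibc's mremap()):

#define _GNU_SOURCE                /* for mremap() and MREMAP_MAYMOVE */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
    size_t old_len = 4096, new_len = 16 * 4096;

    void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    /* Grow the mapping; the kernel may move the VMA to a new address. */
    void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
    if (q == MAP_FAILED) {
        perror("mremap");
        return EXIT_FAILURE;
    }

    printf("resized: %p (%zu bytes) -> %p (%zu bytes)\n", p, old_len, q, new_len);
    munmap(q, new_len);
    return 0;
}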
|
D | madvise.c |
     55  static long madvise_behavior(struct vm_area_struct *vma,   in madvise_behavior() argument
     59  struct mm_struct *mm = vma->vm_mm;   in madvise_behavior()
     62  unsigned long new_flags = vma->vm_flags;   in madvise_behavior()
     78  if (vma->vm_flags & VM_IO) {   in madvise_behavior()
     86  if (vma->vm_file || vma->vm_flags & VM_SHARED) {   in madvise_behavior()
     99  if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {   in madvise_behavior()
    107  error = ksm_madvise(vma, start, end, behavior, &new_flags);   in madvise_behavior()
    120  error = hugepage_madvise(vma, &new_flags, behavior);   in madvise_behavior()
    133  if (new_flags == vma->vm_flags) {   in madvise_behavior()
    134  *prev = vma;   in madvise_behavior()
         [all …]
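madvise_behavior() above adjusts vma->vm_flags for the madvise(2) hints. A short userspace sketch exercising MADV_DONTNEED on an anonymous mapping (illustrative only, assuming Linux):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 64 * 4096;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    memset(p, 1, len);                       /* populate the range */

    /* Drop the pages; the next read faults in fresh zero pages. */
    if (madvise(p, len, MADV_DONTNEED)) {
        perror("madvise");
        return EXIT_FAILURE;
    }

    printf("after MADV_DONTNEED, first byte = %d\n", p[0]);   /* prints 0 */
    munmap(p, len);
    return 0;
}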
|
D | memory.c |
    618  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,   in free_pgtables() argument
    621  while (vma) {   in free_pgtables()
    622  struct vm_area_struct *next = vma->vm_next;   in free_pgtables()
    623  unsigned long addr = vma->vm_start;   in free_pgtables()
    629  unlink_anon_vmas(vma);   in free_pgtables()
    630  unlink_file_vma(vma);   in free_pgtables()
    632  if (is_vm_hugetlb_page(vma)) {   in free_pgtables()
    633  hugetlb_free_pgd_range(tlb, addr, vma->vm_end,   in free_pgtables()
    639  while (next && next->vm_start <= vma->vm_end + PMD_SIZE   in free_pgtables()
    641  vma = next;   in free_pgtables()
         [all …]
|
D | mprotect.c |
     37  static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,   in change_pte_range() argument
     41  struct mm_struct *mm = vma->vm_mm;   in change_pte_range()
     61  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);   in change_pte_range()
     64  if (prot_numa && !(vma->vm_flags & VM_SHARED) &&   in change_pte_range()
     65  atomic_read(&vma->vm_mm->mm_users) == 1)   in change_pte_range()
     68  flush_tlb_batched_pending(vma->vm_mm);   in change_pte_range()
     83  page = vm_normal_page(vma, addr, oldpte);   in change_pte_range()
    107  !(vma->vm_flags & VM_SOFTDIRTY))) {   in change_pte_range()
    176  static inline unsigned long change_pmd_range(struct vm_area_struct *vma,   in change_pmd_range() argument
    181  struct mm_struct *mm = vma->vm_mm;   in change_pmd_range()
         [all …]
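change_pte_range()/change_pmd_range() are the mprotect(2) back end that rewrites the page-table entries covering a VMA. Illustrative userspace sketch (not from mm/mprotect.c):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }
    p[0] = 42;

    /* Revoke write permission; the kernel updates the PTEs for this VMA. */
    if (mprotect(p, len, PROT_READ)) {
        perror("mprotect");
        return EXIT_FAILURE;
    }

    printf("read-only now, p[0] = %d\n", p[0]);   /* a write here would SIGSEGV */
    munmap(p, len);
    return 0;
}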
|
D | huge_memory.c |
    470  pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)   in maybe_pmd_mkwrite() argument
    472  if (likely(vma->vm_flags & VM_WRITE))   in maybe_pmd_mkwrite()
    553  struct vm_area_struct *vma = vmf->vma;   in __do_huge_pmd_anonymous_page() local
    561  if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,   in __do_huge_pmd_anonymous_page()
    568  pgtable = pte_alloc_one(vma->vm_mm, haddr);   in __do_huge_pmd_anonymous_page()
    582  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);   in __do_huge_pmd_anonymous_page()
    588  ret = check_stable_address_space(vma->vm_mm);   in __do_huge_pmd_anonymous_page()
    593  if (userfaultfd_missing(vma)) {   in __do_huge_pmd_anonymous_page()
    599  pte_free(vma->vm_mm, pgtable);   in __do_huge_pmd_anonymous_page()
    605  entry = mk_huge_pmd(page, vma->vm_page_prot);   in __do_huge_pmd_anonymous_page()
         [all …]
|
D | rmap.c |
    137  static void anon_vma_chain_link(struct vm_area_struct *vma,   in anon_vma_chain_link() argument
    141  avc->vma = vma;   in anon_vma_chain_link()
    143  list_add(&avc->same_vma, &vma->anon_vma_chain);   in anon_vma_chain_link()
    175  int __anon_vma_prepare(struct vm_area_struct *vma)   in __anon_vma_prepare() argument
    177  struct mm_struct *mm = vma->vm_mm;   in __anon_vma_prepare()
    187  anon_vma = find_mergeable_anon_vma(vma);   in __anon_vma_prepare()
    199  if (likely(!vma->anon_vma)) {   in __anon_vma_prepare()
    200  vma->anon_vma = anon_vma;   in __anon_vma_prepare()
    201  anon_vma_chain_link(vma, avc, anon_vma);   in __anon_vma_prepare()
    315  int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)   in anon_vma_fork() argument
         [all …]
|
D | mlock.c |
    369  struct vm_area_struct *vma, struct zone *zone,   in __munlock_pagevec_fill() argument
    380  pte = get_locked_pte(vma->vm_mm, start, &ptl);   in __munlock_pagevec_fill()
    393  page = vm_normal_page(vma, start, *pte);   in __munlock_pagevec_fill()
    439  void munlock_vma_pages_range(struct vm_area_struct *vma,   in munlock_vma_pages_range() argument
    442  vma->vm_flags &= VM_LOCKED_CLEAR_MASK;   in munlock_vma_pages_range()
    459  page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);   in munlock_vma_pages_range()
    491  start = __munlock_pagevec_fill(&pvec, vma,   in munlock_vma_pages_range()
    513  static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,   in mlock_fixup() argument
    516  struct mm_struct *mm = vma->vm_mm;   in mlock_fixup()
    521  vm_flags_t old_flags = vma->vm_flags;   in mlock_fixup()
         [all …]
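mlock_fixup()/munlock_vma_pages_range() above back the mlock(2)/munlock(2) calls that set and clear VM_LOCKED on a VMA. Illustrative sketch, assuming RLIMIT_MEMLOCK permits locking the range:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 8 * 4096;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    /* mlock() marks the VMA VM_LOCKED and faults its pages in; it may
     * need a raised RLIMIT_MEMLOCK or CAP_IPC_LOCK. */
    if (mlock(p, len)) {
        perror("mlock");
        return EXIT_FAILURE;
    }
    memset(p, 0, len);

    /* munlock() clears VM_LOCKED again (the path indexed above). */
    if (munlock(p, len))
        perror("munlock");

    munmap(p, len);
    return 0;
}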
|
D | gup.c |
     23  static struct page *no_page_table(struct vm_area_struct *vma,   in no_page_table() argument
     34  if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))   in no_page_table()
     39  static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,   in follow_pfn_pte() argument
     54  set_pte_at(vma->vm_mm, address, pte, entry);   in follow_pfn_pte()
     55  update_mmu_cache(vma, address, pte);   in follow_pfn_pte()
     73  static struct page *follow_page_pte(struct vm_area_struct *vma,   in follow_page_pte() argument
     76  struct mm_struct *mm = vma->vm_mm;   in follow_page_pte()
     84  return no_page_table(vma, flags);   in follow_page_pte()
    113  page = vm_normal_page(vma, address, pte);   in follow_page_pte()
    136  ret = follow_pfn_pte(vma, address, ptep, flags);   in follow_page_pte()
         [all …]
|
D | pagewalk.c |
     39  if (pmd_none(*pmd) || !walk->vma) {   in walk_pmd_range()
     62  split_huge_pmd(walk->vma, pmd, addr);   in walk_pmd_range()
     84  if (pud_none(*pud) || !walk->vma) {   in walk_pud_range()
     93  spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);   in walk_pud_range()
    104  split_huge_pud(walk->vma, pud, addr);   in walk_pud_range()
    180  struct vm_area_struct *vma = walk->vma;   in walk_hugetlb_range() local
    181  struct hstate *h = hstate_vma(vma);   in walk_hugetlb_range()
    222  struct vm_area_struct *vma = walk->vma;   in walk_page_test() local
    235  if (vma->vm_flags & VM_PFNMAP) {   in walk_page_test()
    248  struct vm_area_struct *vma = walk->vma;   in __walk_page_range() local
         [all …]
|
D | hugetlb.c |
    216  static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)   in subpool_vma() argument
    218  return subpool_inode(file_inode(vma->vm_file));   in subpool_vma()
    622  struct vm_area_struct *vma, unsigned long address)   in vma_hugecache_offset() argument
    624  return ((address - vma->vm_start) >> huge_page_shift(h)) +   in vma_hugecache_offset()
    625  (vma->vm_pgoff >> huge_page_order(h));   in vma_hugecache_offset()
    628  pgoff_t linear_hugepage_index(struct vm_area_struct *vma,   in linear_hugepage_index() argument
    631  return vma_hugecache_offset(hstate_vma(vma), vma, address);   in linear_hugepage_index()
    639  unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)   in vma_kernel_pagesize() argument
    643  if (!is_vm_hugetlb_page(vma))   in vma_kernel_pagesize()
    646  hstate = hstate_vma(vma);   in vma_kernel_pagesize()
         [all …]
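hugetlb.c backs hugetlbfs mappings such as those created with mmap(MAP_HUGETLB). A sketch assuming a 2 MB default hugepage size and hugepages reserved beforehand (e.g. via /proc/sys/vm/nr_hugepages); illustrative only:

#define _GNU_SOURCE                /* for MAP_HUGETLB */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 2 * 1024 * 1024;  /* one default-sized hugepage (assumed 2 MB) */

    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap(MAP_HUGETLB)");   /* fails if no hugepages are free */
        return EXIT_FAILURE;
    }

    memset(p, 0, len);                 /* touches the hugetlb page */
    printf("hugetlb mapping at %p\n", p);
    munmap(p, len);
    return 0;
}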
|
D | pgtable-generic.c |
     54  int ptep_set_access_flags(struct vm_area_struct *vma,   in ptep_set_access_flags() argument
     60  set_pte_at(vma->vm_mm, address, ptep, entry);   in ptep_set_access_flags()
     61  flush_tlb_fix_spurious_fault(vma, address);   in ptep_set_access_flags()
     68  int ptep_clear_flush_young(struct vm_area_struct *vma,   in ptep_clear_flush_young() argument
     72  young = ptep_test_and_clear_young(vma, address, ptep);   in ptep_clear_flush_young()
     74  flush_tlb_page(vma, address);   in ptep_clear_flush_young()
     80  pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,   in ptep_clear_flush() argument
     83  struct mm_struct *mm = (vma)->vm_mm;   in ptep_clear_flush()
     87  flush_tlb_page(vma, address);   in ptep_clear_flush()
     95  int pmdp_set_access_flags(struct vm_area_struct *vma,   in pmdp_set_access_flags() argument
         [all …]
|
D | mempolicy.c |
    379  struct vm_area_struct *vma;   in mpol_rebind_mm() local
    382  for (vma = mm->mmap; vma; vma = vma->vm_next)   in mpol_rebind_mm()
    383  mpol_rebind_policy(vma->vm_policy, new);   in mpol_rebind_mm()
    452  __split_huge_pmd(walk->vma, pmd, addr, false, NULL);   in queue_pages_pmd()
    473  if (!vma_migratable(walk->vma)) {   in queue_pages_pmd()
    494  struct vm_area_struct *vma = walk->vma;   in queue_pages_pte_range() local
    502  ptl = pmd_trans_huge_lock(pmd, vma);   in queue_pages_pte_range()
    518  page = vm_normal_page(vma, addr, *pte);   in queue_pages_pte_range()
    546  if (!vma_migratable(vma))   in queue_pages_pte_range()
    568  ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);   in queue_pages_hugetlb()
         [all …]
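mpol_rebind_mm() and the queue_pages_*() walkers above operate on per-VMA NUMA policies (vma->vm_policy) set from userspace. A sketch using mbind(2), assuming a NUMA-capable kernel and libnuma's <numaif.h> (link with -lnuma); illustrative only:

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 16 * 4096;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    /* Prefer allocations for this range from node 0; the policy is
     * attached to the VMA (vma->vm_policy). May fail on non-NUMA setups. */
    unsigned long nodemask = 1UL << 0;
    if (mbind(p, len, MPOL_PREFERRED, &nodemask, sizeof(nodemask) * 8, 0)) {
        perror("mbind");
        return EXIT_FAILURE;
    }

    memset(p, 0, len);     /* pages are allocated under the new policy */
    munmap(p, len);
    return 0;
}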
|
D | khugepaged.c |
    306  int hugepage_madvise(struct vm_area_struct *vma,   in hugepage_madvise() argument
    317  if (mm_has_pgste(vma->vm_mm))   in hugepage_madvise()
    328  khugepaged_enter_vma_merge(vma, *vm_flags))   in hugepage_madvise()
    433  int khugepaged_enter_vma_merge(struct vm_area_struct *vma,   in khugepaged_enter_vma_merge() argument
    437  if (!vma->anon_vma)   in khugepaged_enter_vma_merge()
    443  if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))   in khugepaged_enter_vma_merge()
    446  hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;   in khugepaged_enter_vma_merge()
    447  hend = vma->vm_end & HPAGE_PMD_MASK;   in khugepaged_enter_vma_merge()
    449  return khugepaged_enter(vma, vm_flags);   in khugepaged_enter_vma_merge()
    501  static int __collapse_huge_page_isolate(struct vm_area_struct *vma,   in __collapse_huge_page_isolate() argument
         [all …]
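hugepage_madvise()/khugepaged_enter_vma_merge() register a VMA with khugepaged when userspace asks for transparent hugepages. A sketch assuming CONFIG_TRANSPARENT_HUGEPAGE with THP in "madvise" or "always" mode; illustrative only:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t len = 8 * 2 * 1024 * 1024;   /* a few PMD-sized (assumed 2 MB) units */

    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    /* Opt this VMA in to THP; khugepaged may later collapse its pages. */
    if (madvise(p, len, MADV_HUGEPAGE))
        perror("madvise(MADV_HUGEPAGE)");

    memset(p, 0, len);   /* touching the range lets the fault path use huge pages */

    printf("check AnonHugePages in /proc/%d/smaps, then press Enter\n", getpid());
    getchar();           /* pause so the mapping can be inspected */
    munmap(p, len);
    return 0;
}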
|
D | mincore.c |
     88  struct vm_area_struct *vma, unsigned char *vec)   in __mincore_unmapped_range() argument
     93  if (vma->vm_file) {   in __mincore_unmapped_range()
     96  pgoff = linear_page_index(vma, addr);   in __mincore_unmapped_range()
     98  vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);   in __mincore_unmapped_range()
    110  walk->vma, walk->private);   in mincore_unmapped_range()
    118  struct vm_area_struct *vma = walk->vma;   in mincore_pte_range() local
    123  ptl = pmd_trans_huge_lock(pmd, vma);   in mincore_pte_range()
    131  __mincore_unmapped_range(addr, end, vma, vec);   in mincore_pte_range()
    141  vma, vec);   in mincore_pte_range()
    172  static inline bool can_do_mincore(struct vm_area_struct *vma)   in can_do_mincore() argument
         [all …]
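mincore_pte_range()/__mincore_unmapped_range() implement mincore(2), which reports one residency byte per page of a VMA. Illustrative userspace sketch:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    size_t npages = 8, len = npages * page;

    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }
    p[0] = 1;                          /* fault in only the first page */

    unsigned char vec[8];              /* one status byte per page */
    if (mincore(p, len, vec)) {
        perror("mincore");
        return EXIT_FAILURE;
    }

    for (size_t i = 0; i < npages; i++)
        printf("page %zu: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");

    munmap(p, len);
    return 0;
}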
|
D | ksm.c |
    453  static int break_ksm(struct vm_area_struct *vma, unsigned long addr)   in break_ksm() argument
    460  page = follow_page(vma, addr,   in break_ksm()
    465  ret = handle_mm_fault(vma, addr,   in break_ksm()
    505  struct vm_area_struct *vma;   in find_mergeable_vma() local
    508  vma = find_vma(mm, addr);   in find_mergeable_vma()
    509  if (!vma || vma->vm_start > addr)   in find_mergeable_vma()
    511  if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)   in find_mergeable_vma()
    513  return vma;   in find_mergeable_vma()
    520  struct vm_area_struct *vma;   in break_cow() local
    529  vma = find_mergeable_vma(mm, addr);   in break_cow()
         [all …]
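find_mergeable_vma() above only considers VMAs with VM_MERGEABLE set, which userspace requests with madvise(MADV_MERGEABLE). A sketch assuming CONFIG_KSM and ksmd running (/sys/kernel/mm/ksm/run = 1); illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t len = 64 * 4096;

    char *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    char *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (a == MAP_FAILED || b == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    memset(a, 0x42, len);   /* identical content in both ranges */
    memset(b, 0x42, len);

    /* Mark both VMAs VM_MERGEABLE so ksmd may deduplicate their pages. */
    if (madvise(a, len, MADV_MERGEABLE) || madvise(b, len, MADV_MERGEABLE)) {
        perror("madvise(MADV_MERGEABLE)");
        return EXIT_FAILURE;
    }

    printf("watch /sys/kernel/mm/ksm/pages_sharing\n");
    sleep(60);              /* keep the mappings alive so ksmd can scan them */
    return 0;
}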
|
D | msync.c |
     36  struct vm_area_struct *vma;   in SYSCALL_DEFINE3() local
     59  vma = find_vma(mm, start);   in SYSCALL_DEFINE3()
     66  if (!vma)   in SYSCALL_DEFINE3()
     69  if (start < vma->vm_start) {   in SYSCALL_DEFINE3()
     70  start = vma->vm_start;   in SYSCALL_DEFINE3()
     77  (vma->vm_flags & VM_LOCKED)) {   in SYSCALL_DEFINE3()
     81  file = vma->vm_file;   in SYSCALL_DEFINE3()
     82  fstart = (start - vma->vm_start) +   in SYSCALL_DEFINE3()
     83  ((loff_t)vma->vm_pgoff << PAGE_SHIFT);   in SYSCALL_DEFINE3()
     84  fend = fstart + (min(end, vma->vm_end) - start) - 1;   in SYSCALL_DEFINE3()
         [all …]
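The msync(2) handler above converts the user range into file offsets (lines 82-84) before writing the mapping back. An illustrative userspace sketch of a shared file mapping flushed with MS_SYNC (the scratch-file path is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t len = 4096;
    int fd = open("/tmp/msync-demo", O_RDWR | O_CREAT, 0600);
    if (fd < 0 || ftruncate(fd, len)) {
        perror("open/ftruncate");
        return EXIT_FAILURE;
    }

    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    strcpy(p, "hello from a shared mapping");

    /* MS_SYNC writes the dirty pages of this VMA back to the file. */
    if (msync(p, len, MS_SYNC))
        perror("msync");

    munmap(p, len);
    close(fd);
    return 0;
}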
|
D | debug.c |
     85  void dump_vma(const struct vm_area_struct *vma)   in dump_vma() argument
     92  vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,   in dump_vma()
     93  vma->vm_prev, vma->vm_mm,   in dump_vma()
     94  (unsigned long)pgprot_val(vma->vm_page_prot),   in dump_vma()
     95  vma->anon_vma, vma->vm_ops, vma->vm_pgoff,   in dump_vma()
     96  vma->vm_file, vma->vm_private_data,   in dump_vma()
     97  vma->vm_flags, &vma->vm_flags);   in dump_vma()
|
D | migrate.c |
    202  static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,   in remove_migration_pte() argument
    207  .vma = vma,   in remove_migration_pte()
    221  linear_page_index(vma, pvmw.address);   in remove_migration_pte()
    233  pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));   in remove_migration_pte()
    242  pte = maybe_mkwrite(pte, vma);   in remove_migration_pte()
    256  pte = arch_make_huge_pte(pte, vma, new, 0);   in remove_migration_pte()
    257  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);   in remove_migration_pte()
    259  hugepage_add_anon_rmap(new, vma, pvmw.address);   in remove_migration_pte()
    265  set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);   in remove_migration_pte()
    268  page_add_anon_rmap(new, vma, pvmw.address, false);   in remove_migration_pte()
         [all …]
|
D | internal.h |
     46  static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)   in can_madv_dontneed_vma() argument
     48  return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));   in can_madv_dontneed_vma()
     52  struct vm_area_struct *vma,
    283  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
    287  extern long populate_vma_page_range(struct vm_area_struct *vma,
    289  extern void munlock_vma_pages_range(struct vm_area_struct *vma,
    291  static inline void munlock_vma_pages_all(struct vm_area_struct *vma)   in munlock_vma_pages_all() argument
    293  munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);   in munlock_vma_pages_all()
    330  extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
    336  __vma_address(struct page *page, struct vm_area_struct *vma)   in __vma_address() argument
         [all …]
|
D | swap_state.c |
     58  #define GET_SWAP_RA_VAL(vma) \   argument
     59  (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
    331  struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,   in lookup_swap_cache() argument
    346  if (vma) {   in lookup_swap_cache()
    347  ra_info = GET_SWAP_RA_VAL(vma);   in lookup_swap_cache()
    352  atomic_long_set(&vma->swap_readahead_info,   in lookup_swap_cache()
    357  if (!vma)   in lookup_swap_cache()
    365  struct vm_area_struct *vma, unsigned long addr,   in __read_swap_cache_async() argument
    398  new_page = alloc_page_vma(gfp_mask, vma, addr);   in __read_swap_cache_async()
    464  struct vm_area_struct *vma, unsigned long addr, bool do_poll)   in read_swap_cache_async() argument
         [all …]
|
D | vmacache.c |
     60  struct vm_area_struct *vma = current->vmacache.vmas[i];   in vmacache_find() local
     62  if (!vma)   in vmacache_find()
     64  if (WARN_ON_ONCE(vma->vm_mm != mm))   in vmacache_find()
     66  if (vma->vm_start <= addr && vma->vm_end > addr) {   in vmacache_find()
     68  return vma;   in vmacache_find()
     88  struct vm_area_struct *vma = current->vmacache.vmas[i];   in vmacache_find_exact() local
     90  if (vma && vma->vm_start == start && vma->vm_end == end) {   in vmacache_find_exact()
     92  return vma;   in vmacache_find_exact()
|
D | hmm.c |
    252  struct vm_area_struct *vma = walk->vma;   in hmm_vma_do_fault() local
    257  r = handle_mm_fault(vma, addr, flags);   in hmm_vma_do_fault()
    357  struct vm_area_struct *vma = walk->vma;   in hmm_vma_walk_pmd() local
    365  flag = vma->vm_flags & VM_READ ? HMM_PFN_READ : 0;   in hmm_vma_walk_pmd()
    372  if (pmd_huge(*pmdp) && vma->vm_flags & VM_HUGETLB)   in hmm_vma_walk_pmd()
    448  migration_entry_wait(vma->vm_mm,   in hmm_vma_walk_pmd()
    497  int hmm_vma_get_pfns(struct vm_area_struct *vma,   in hmm_vma_get_pfns() argument
    508  if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {   in hmm_vma_get_pfns()
    514  if (start < vma->vm_start || start >= vma->vm_end)   in hmm_vma_get_pfns()
    516  if (end < vma->vm_start || end > vma->vm_end)   in hmm_vma_get_pfns()
         [all …]
|
D | frame_vector.c |
     38  struct vm_area_struct *vma;   in get_vaddr_frames() local
     51  vma = find_vma_intersection(mm, start, start + 1);   in get_vaddr_frames()
     52  if (!vma) {   in get_vaddr_frames()
     65  if (vma_is_fsdax(vma)) {   in get_vaddr_frames()
     70  if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {   in get_vaddr_frames()
     83  while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {   in get_vaddr_frames()
     84  err = follow_pfn(vma, start, &nums[ret]);   in get_vaddr_frames()
     97  if (ret >= nr_frames || start < vma->vm_end)   in get_vaddr_frames()
     99  vma = find_vma_intersection(mm, start, start + 1);   in get_vaddr_frames()
    100  } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));   in get_vaddr_frames()
|