/mm/ |
D | mmap.c |
    83  struct vm_area_struct *vma, struct vm_area_struct *prev,
   134  void vma_set_page_prot(struct vm_area_struct *vma)  in vma_set_page_prot() argument
   136  unsigned long vm_flags = vma->vm_flags;  in vma_set_page_prot()
   139  vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);  in vma_set_page_prot()
   140  if (vma_wants_writenotify(vma, vm_page_prot)) {  in vma_set_page_prot()
   145  WRITE_ONCE(vma->vm_page_prot, vm_page_prot);  in vma_set_page_prot()
   151  static void __remove_shared_vm_struct(struct vm_area_struct *vma,  in __remove_shared_vm_struct() argument
   154  if (vma->vm_flags & VM_SHARED)  in __remove_shared_vm_struct()
   158  vma_interval_tree_remove(vma, &mapping->i_mmap);  in __remove_shared_vm_struct()
   166  void unlink_file_vma(struct vm_area_struct *vma)  in unlink_file_vma() argument
        [all …]

D | nommu.c |
   100  struct vm_area_struct *vma;  in kobjsize() local
   102  vma = find_vma(current->mm, (unsigned long)objp);  in kobjsize()
   103  if (vma)  in kobjsize()
   104  return vma->vm_end - vma->vm_start;  in kobjsize()
   124  int follow_pfn(struct vm_area_struct *vma, unsigned long address,  in follow_pfn() argument
   127  if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))  in follow_pfn()
   173  struct vm_area_struct *vma;  in __vmalloc_user_flags() local
   176  vma = find_vma(current->mm, (unsigned long)ret);  in __vmalloc_user_flags()
   177  if (vma)  in __vmalloc_user_flags()
   178  vma->vm_flags |= VM_USERMAP;  in __vmalloc_user_flags()
        [all …]

D | mremap.c |
    70  static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pud() argument
    84  static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pmd() argument
    90  pud = alloc_new_pud(mm, vma, addr);  in alloc_new_pmd()
   103  static void take_rmap_locks(struct vm_area_struct *vma)  in take_rmap_locks() argument
   105  if (vma->vm_file)  in take_rmap_locks()
   106  i_mmap_lock_write(vma->vm_file->f_mapping);  in take_rmap_locks()
   107  if (vma->anon_vma)  in take_rmap_locks()
   108  anon_vma_lock_write(vma->anon_vma);  in take_rmap_locks()
   111  static void drop_rmap_locks(struct vm_area_struct *vma)  in drop_rmap_locks() argument
   113  if (vma->anon_vma)  in drop_rmap_locks()
        [all …]

D | madvise.c |
    93  struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)  in anon_vma_name() argument
    95  mmap_assert_locked(vma->vm_mm);  in anon_vma_name()
    97  if (vma->vm_file)  in anon_vma_name()
   100  return vma->anon_name;  in anon_vma_name()
   104  static int replace_anon_vma_name(struct vm_area_struct *vma,  in replace_anon_vma_name() argument
   107  struct anon_vma_name *orig_name = anon_vma_name(vma);  in replace_anon_vma_name()
   110  vma->anon_name = NULL;  in replace_anon_vma_name()
   118  vma->anon_name = anon_vma_name_reuse(anon_name);  in replace_anon_vma_name()
   124  static int replace_anon_vma_name(struct vm_area_struct *vma,  in replace_anon_vma_name() argument
   139  static int madvise_update_vma(struct vm_area_struct *vma,  in madvise_update_vma() argument
        [all …]

D | memory.c |
   216  struct vm_area_struct *vma;  in get_vma() local
   219  vma = find_vma_from_tree(mm, addr);  in get_vma()
   237  if (vma) {  in get_vma()
   238  if (vma->vm_start > addr ||  in get_vma()
   239  !atomic_inc_unless_negative(&vma->file_ref_count))  in get_vma()
   240  vma = NULL;  in get_vma()
   244  return vma;  in get_vma()
   247  void put_vma(struct vm_area_struct *vma)  in put_vma() argument
   251  new_ref_count = atomic_dec_return(&vma->file_ref_count);  in put_vma()
   253  vm_area_free_no_check(vma);  in put_vma()
        [all …]

D | mprotect.c |
    40  struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,  in change_pte_range() argument
    68  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in change_pte_range()
    71  if (prot_numa && !(vma->vm_flags & VM_SHARED) &&  in change_pte_range()
    72  atomic_read(&vma->vm_mm->mm_users) == 1)  in change_pte_range()
    75  flush_tlb_batched_pending(vma->vm_mm);  in change_pte_range()
    94  page = vm_normal_page(vma, addr, oldpte);  in change_pte_range()
    99  if (is_cow_mapping(vma->vm_flags) &&  in change_pte_range()
   119  oldpte = ptep_modify_prot_start(vma, addr, pte);  in change_pte_range()
   140  !(vma->vm_flags & VM_SOFTDIRTY))) {  in change_pte_range()
   143  ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);  in change_pte_range()
        [all …]

D | rmap.c |
   149  static void anon_vma_chain_link(struct vm_area_struct *vma,  in anon_vma_chain_link() argument
   153  avc->vma = vma;  in anon_vma_chain_link()
   155  list_add(&avc->same_vma, &vma->anon_vma_chain);  in anon_vma_chain_link()
   187  int __anon_vma_prepare(struct vm_area_struct *vma)  in __anon_vma_prepare() argument
   189  struct mm_struct *mm = vma->vm_mm;  in __anon_vma_prepare()
   199  anon_vma = find_mergeable_anon_vma(vma);  in __anon_vma_prepare()
   212  if (likely(!vma->anon_vma)) {  in __anon_vma_prepare()
   213  vma->anon_vma = anon_vma;  in __anon_vma_prepare()
   214  anon_vma_chain_link(vma, avc, anon_vma);  in __anon_vma_prepare()
   333  int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)  in anon_vma_fork() argument
        [all …]

D | huge_memory.c |
    67  static inline bool file_thp_enabled(struct vm_area_struct *vma)  in file_thp_enabled() argument
    69  return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&  in file_thp_enabled()
    70  !inode_is_open_for_write(vma->vm_file->f_inode) &&  in file_thp_enabled()
    71  (vma->vm_flags & VM_EXEC);  in file_thp_enabled()
    74  bool transparent_hugepage_active(struct vm_area_struct *vma)  in transparent_hugepage_active() argument
    77  unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;  in transparent_hugepage_active()
    79  if (!transhuge_vma_suitable(vma, addr))  in transparent_hugepage_active()
    81  if (vma_is_anonymous(vma))  in transparent_hugepage_active()
    82  return __transparent_hugepage_enabled(vma);  in transparent_hugepage_active()
    83  if (vma_is_shmem(vma))  in transparent_hugepage_active()
        [all …]

D | mlock.c |
   351  struct vm_area_struct *vma, struct zone *zone,  in __munlock_pagevec_fill() argument
   362  pte = get_locked_pte(vma->vm_mm, start, &ptl);  in __munlock_pagevec_fill()
   375  page = vm_normal_page(vma, start, *pte);  in __munlock_pagevec_fill()
   421  void munlock_vma_pages_range(struct vm_area_struct *vma,  in munlock_vma_pages_range() argument
   424  vma->vm_flags &= VM_LOCKED_CLEAR_MASK;  in munlock_vma_pages_range()
   441  page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);  in munlock_vma_pages_range()
   473  start = __munlock_pagevec_fill(&pvec, vma,  in munlock_vma_pages_range()
   495  static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,  in mlock_fixup() argument
   498  struct mm_struct *mm = vma->vm_mm;  in mlock_fixup()
   503  vm_flags_t old_flags = vma->vm_flags;  in mlock_fixup()
        [all …]

D | hugetlb.c |
    85  static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
   243  static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)  in subpool_vma() argument
   245  return subpool_inode(file_inode(vma->vm_file));  in subpool_vma()
   804  struct vm_area_struct *vma, unsigned long address)  in vma_hugecache_offset() argument
   806  return ((address - vma->vm_start) >> huge_page_shift(h)) +  in vma_hugecache_offset()
   807  (vma->vm_pgoff >> huge_page_order(h));  in vma_hugecache_offset()
   810  pgoff_t linear_hugepage_index(struct vm_area_struct *vma,  in linear_hugepage_index() argument
   813  return vma_hugecache_offset(hstate_vma(vma), vma, address);  in linear_hugepage_index()
   821  unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)  in vma_kernel_pagesize() argument
   823  if (vma->vm_ops && vma->vm_ops->pagesize)  in vma_kernel_pagesize()
        [all …]

D | pgtable-generic.c |
    64  int ptep_set_access_flags(struct vm_area_struct *vma,  in ptep_set_access_flags() argument
    70  set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
    71  flush_tlb_fix_spurious_fault(vma, address);  in ptep_set_access_flags()
    78  int ptep_clear_flush_young(struct vm_area_struct *vma,  in ptep_clear_flush_young() argument
    82  young = ptep_test_and_clear_young(vma, address, ptep);  in ptep_clear_flush_young()
    84  flush_tlb_page(vma, address);  in ptep_clear_flush_young()
    90  pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,  in ptep_clear_flush() argument
    93  struct mm_struct *mm = (vma)->vm_mm;  in ptep_clear_flush()
    97  flush_tlb_page(vma, address);  in ptep_clear_flush()
   105  int pmdp_set_access_flags(struct vm_area_struct *vma,  in pmdp_set_access_flags() argument
        [all …]

D | mempolicy.c |
   380  struct vm_area_struct *vma;  in mpol_rebind_mm() local
   383  for (vma = mm->mmap; vma; vma = vma->vm_next)  in mpol_rebind_mm()
   384  mpol_rebind_policy(vma->vm_policy, new);  in mpol_rebind_mm()
   476  if (!vma_migratable(walk->vma) ||  in queue_pages_pmd()
   504  struct vm_area_struct *vma = walk->vma;  in queue_pages_pte_range() local
   513  ptl = pmd_trans_huge_lock(pmd, vma);  in queue_pages_pte_range()
   528  page = vm_normal_page(vma, addr, *pte);  in queue_pages_pte_range()
   541  if (!vma_migratable(vma)) {  in queue_pages_pte_range()
   577  ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);  in queue_pages_hugetlb()
   594  if (!vma_migratable(walk->vma)) {  in queue_pages_hugetlb()
        [all …]

D | pagewalk.c |
   139  if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||  in walk_pmd_range()
   144  if (walk->vma) {  in walk_pmd_range()
   145  split_huge_pmd(walk->vma, pmd, addr);  in walk_pmd_range()
   192  if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||  in walk_pud_range()
   197  if (walk->vma)  in walk_pud_range()
   198  split_huge_pud(walk->vma, pud, addr);  in walk_pud_range()
   296  struct vm_area_struct *vma = walk->vma;  in walk_hugetlb_range() local
   297  struct hstate *h = hstate_vma(vma);  in walk_hugetlb_range()
   339  struct vm_area_struct *vma = walk->vma;  in walk_page_test() local
   353  if (vma->vm_flags & VM_PFNMAP) {  in walk_page_test()
        [all …]

D | gup.c |
   447  static struct page *no_page_table(struct vm_area_struct *vma,  in no_page_table() argument
   459  (vma_is_anonymous(vma) || !vma->vm_ops->fault))  in no_page_table()
   464  static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,  in follow_pfn_pte() argument
   479  set_pte_at(vma->vm_mm, address, pte, entry);  in follow_pfn_pte()
   480  update_mmu_cache(vma, address, pte);  in follow_pfn_pte()
   498  static struct page *follow_page_pte(struct vm_area_struct *vma,  in follow_page_pte() argument
   502  struct mm_struct *mm = vma->vm_mm;  in follow_page_pte()
   517  if (is_vm_hugetlb_page(vma)) {  in follow_page_pte()
   518  page = follow_huge_pmd_pte(vma, address, flags);  in follow_page_pte()
   521  return no_page_table(vma, flags);  in follow_page_pte()
        [all …]

D | khugepaged.c |
   347  int hugepage_madvise(struct vm_area_struct *vma,  in hugepage_madvise() argument
   358  if (mm_has_pgste(vma->vm_mm))  in hugepage_madvise()
   369  khugepaged_enter_vma_merge(vma, *vm_flags))  in hugepage_madvise()
   442  static bool hugepage_vma_check(struct vm_area_struct *vma,  in hugepage_vma_check() argument
   445  if (!transhuge_vma_enabled(vma, vm_flags))  in hugepage_vma_check()
   448  if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -  in hugepage_vma_check()
   449  vma->vm_pgoff, HPAGE_PMD_NR))  in hugepage_vma_check()
   453  if (shmem_file(vma->vm_file))  in hugepage_vma_check()
   454  return shmem_huge_enabled(vma);  in hugepage_vma_check()
   461  if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&  in hugepage_vma_check()
        [all …]

D | mincore.c |
    72  struct vm_area_struct *vma, unsigned char *vec)  in __mincore_unmapped_range() argument
    77  if (vma->vm_file) {  in __mincore_unmapped_range()
    80  pgoff = linear_page_index(vma, addr);  in __mincore_unmapped_range()
    82  vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);  in __mincore_unmapped_range()
    95  walk->vma, walk->private);  in mincore_unmapped_range()
   103  struct vm_area_struct *vma = walk->vma;  in mincore_pte_range() local
   108  ptl = pmd_trans_huge_lock(pmd, vma);  in mincore_pte_range()
   116  __mincore_unmapped_range(addr, end, vma, vec);  in mincore_pte_range()
   126  vma, vec);  in mincore_pte_range()
   157  static inline bool can_do_mincore(struct vm_area_struct *vma)  in can_do_mincore() argument
        [all …]

D | internal.h |
    43  static inline bool can_madv_lru_vma(struct vm_area_struct *vma)  in can_madv_lru_vma() argument
    45  return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));  in can_madv_lru_vma()
    49  struct vm_area_struct *vma,
   350  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
   352  void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
   355  extern long populate_vma_page_range(struct vm_area_struct *vma,
   357  extern long faultin_vma_page_range(struct vm_area_struct *vma,
   360  extern void munlock_vma_pages_range(struct vm_area_struct *vma,
   362  static inline void munlock_vma_pages_all(struct vm_area_struct *vma)  in munlock_vma_pages_all() argument
   364  munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);  in munlock_vma_pages_all()
        [all …]

D | msync.c |
    36  struct vm_area_struct *vma;  in SYSCALL_DEFINE3() local
    63  vma = find_vma(mm, start);  in SYSCALL_DEFINE3()
    70  if (!vma)  in SYSCALL_DEFINE3()
    73  if (start < vma->vm_start) {  in SYSCALL_DEFINE3()
    76  start = vma->vm_start;  in SYSCALL_DEFINE3()
    83  (vma->vm_flags & VM_LOCKED)) {  in SYSCALL_DEFINE3()
    87  file = vma->vm_file;  in SYSCALL_DEFINE3()
    88  fstart = (start - vma->vm_start) +  in SYSCALL_DEFINE3()
    89  ((loff_t)vma->vm_pgoff << PAGE_SHIFT);  in SYSCALL_DEFINE3()
    90  fend = fstart + (min(end, vma->vm_end) - start) - 1;  in SYSCALL_DEFINE3()
        [all …]

D | ksm.c |
   468  static int break_ksm(struct vm_area_struct *vma, unsigned long addr)  in break_ksm() argument
   475  page = follow_page(vma, addr,  in break_ksm()
   480  ret = handle_mm_fault(vma, addr,  in break_ksm()
   521  struct vm_area_struct *vma;  in find_mergeable_vma() local
   524  vma = vma_lookup(mm, addr);  in find_mergeable_vma()
   525  if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)  in find_mergeable_vma()
   527  return vma;  in find_mergeable_vma()
   534  struct vm_area_struct *vma;  in break_cow() local
   543  vma = find_mergeable_vma(mm, addr);  in break_cow()
   544  if (vma)  in break_cow()
        [all …]

D | vmacache.c |
    72  struct vm_area_struct *vma = current->vmacache.vmas[idx];  in vmacache_find() local
    74  if (vma) {  in vmacache_find()
    76  if (WARN_ON_ONCE(vma->vm_mm != mm))  in vmacache_find()
    79  if (vma->vm_start <= addr && vma->vm_end > addr) {  in vmacache_find()
    81  return vma;  in vmacache_find()
   105  struct vm_area_struct *vma = current->vmacache.vmas[idx];  in vmacache_find_exact() local
   107  if (vma && vma->vm_start == start && vma->vm_end == end) {  in vmacache_find_exact()
   109  return vma;  in vmacache_find_exact()

D | debug.c |
   190  void dump_vma(const struct vm_area_struct *vma)  in dump_vma() argument
   197  vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,  in dump_vma()
   198  vma->vm_prev, vma->vm_mm,  in dump_vma()
   199  (unsigned long)pgprot_val(vma->vm_page_prot),  in dump_vma()
   200  vma->anon_vma, vma->vm_ops, vma->vm_pgoff,  in dump_vma()
   201  vma->vm_file, vma->vm_private_data,  in dump_vma()
   202  vma->vm_flags, &vma->vm_flags);  in dump_vma()

D | swap_state.c |
    58  #define GET_SWAP_RA_VAL(vma) \  argument
    59  (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
   334  struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,  in lookup_swap_cache() argument
   360  if (vma && vma_ra) {  in lookup_swap_cache()
   364  ra_val = GET_SWAP_RA_VAL(vma);  in lookup_swap_cache()
   369  atomic_long_set(&vma->swap_readahead_info,  in lookup_swap_cache()
   375  if (!vma || !vma_ra)  in lookup_swap_cache()
   418  struct vm_area_struct *vma, unsigned long addr,  in __read_swap_cache_async() argument
   459  page = alloc_page_vma(gfp_mask, vma, addr);  in __read_swap_cache_async()
   522  struct vm_area_struct *vma, unsigned long addr, bool do_poll)  in read_swap_cache_async() argument
        [all …]

D | migrate.c |
   181  static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,  in remove_migration_pte() argument
   186  .vma = vma,  in remove_migration_pte()
   200  linear_page_index(vma, pvmw.address);  in remove_migration_pte()
   212  pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));  in remove_migration_pte()
   221  pte = maybe_mkwrite(pte, vma);  in remove_migration_pte()
   241  unsigned int shift = huge_page_shift(hstate_vma(vma));  in remove_migration_pte()
   244  pte = arch_make_huge_pte(pte, shift, vma->vm_flags);  in remove_migration_pte()
   245  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);  in remove_migration_pte()
   247  hugepage_add_anon_rmap(new, vma, pvmw.address);  in remove_migration_pte()
   253  set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);  in remove_migration_pte()
        [all …]

D | secretmem.c |
    52  struct address_space *mapping = vmf->vma->vm_file->f_mapping;  in secretmem_fault()
    53  struct inode *inode = file_inode(vmf->vma->vm_file);  in secretmem_fault()
   121  static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)  in secretmem_mmap() argument
   123  unsigned long len = vma->vm_end - vma->vm_start;  in secretmem_mmap()
   125  if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)  in secretmem_mmap()
   128  if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))  in secretmem_mmap()
   131  vma->vm_flags |= VM_LOCKED | VM_DONTDUMP;  in secretmem_mmap()
   132  vma->vm_ops = &secretmem_vm_ops;  in secretmem_mmap()
   137  bool vma_is_secretmem(struct vm_area_struct *vma)  in vma_is_secretmem() argument
   139  return vma->vm_ops == &secretmem_vm_ops;  in vma_is_secretmem()

D | mapping_dirty_helpers.c |
    40  pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);  in wp_pte()
    43  ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in wp_pte()
    96  pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +  in clean_record_pte()
    97  walk->vma->vm_pgoff - cwalk->bitmap_pgoff;  in clean_record_pte()
    98  pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);  in clean_record_pte()
   101  ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in clean_record_pte()
   193  walk->vma, walk->mm, start, end);  in wp_clean_pre_vma()
   195  flush_cache_range(walk->vma, start, end);  in wp_clean_pre_vma()
   218  flush_tlb_range(walk->vma, wpwalk->range.start,  in wp_clean_post_vma()
   221  flush_tlb_range(walk->vma, wpwalk->tlbflush_start,  in wp_clean_post_vma()
        [all …]