/mm/

mmap.c
   73  struct vm_area_struct *vma, struct vm_area_struct *prev,
  110  void vma_set_page_prot(struct vm_area_struct *vma)    in vma_set_page_prot() argument
  112  unsigned long vm_flags = vma->vm_flags;    in vma_set_page_prot()
  114  vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);    in vma_set_page_prot()
  115  if (vma_wants_writenotify(vma)) {    in vma_set_page_prot()
  117  vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,    in vma_set_page_prot()
  249  static void __remove_shared_vm_struct(struct vm_area_struct *vma,    in __remove_shared_vm_struct() argument
  252  if (vma->vm_flags & VM_DENYWRITE)    in __remove_shared_vm_struct()
  254  if (vma->vm_flags & VM_SHARED)    in __remove_shared_vm_struct()
  258  if (unlikely(vma->vm_flags & VM_NONLINEAR))    in __remove_shared_vm_struct()
  [all …]

nommu.c
  135  struct vm_area_struct *vma;    in kobjsize() local
  137  vma = find_vma(current->mm, (unsigned long)objp);    in kobjsize()
  138  if (vma)    in kobjsize()
  139  return vma->vm_end - vma->vm_start;    in kobjsize()
  154  struct vm_area_struct *vma;    in __get_user_pages() local
  167  vma = find_vma(mm, start);    in __get_user_pages()
  168  if (!vma)    in __get_user_pages()
  172  if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||    in __get_user_pages()
  173  !(vm_flags & vma->vm_flags))    in __get_user_pages()
  182  vmas[i] = vma;    in __get_user_pages()
  [all …]
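
Several of these hits (kobjsize() and __get_user_pages() here, find_mergeable_vma() in the ksm.c entry below) lean on the same find_vma() contract: it returns the first VMA whose vm_end lies above the address, and that VMA may start above the address too, so callers that need containment also compare vm_start. A minimal userspace sketch of that lookup pattern, using simplified stand-in types rather than the kernel's structures:

/* Toy illustration of the find_vma() contract: return the first region
 * whose end lies above addr; the caller still checks start <= addr.
 * Simplified stand-in types, not the kernel's vm_area_struct. */
#include <stdio.h>

struct region {
	unsigned long start, end;	/* [start, end) */
	struct region *next;		/* list sorted by start */
};

static struct region *find_region(struct region *head, unsigned long addr)
{
	for (struct region *r = head; r; r = r->next)
		if (r->end > addr)	/* first region ending above addr */
			return r;
	return NULL;
}

int main(void)
{
	struct region b = { 0x3000, 0x4000, NULL };
	struct region a = { 0x1000, 0x2000, &b };
	unsigned long addr = 0x2800;		/* falls in the gap between a and b */
	struct region *r = find_region(&a, addr);

	if (!r || r->start > addr)		/* containment check, as in find_mergeable_vma() */
		printf("0x%lx is unmapped\n", addr);
	else
		printf("0x%lx lies in [0x%lx, 0x%lx)\n", addr, r->start, r->end);
	return 0;
}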

mremap.c
   52  static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,    in alloc_new_pmd() argument
   90  static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,    in move_ptes() argument
   97  struct mm_struct *mm = vma->vm_mm;    in move_ptes()
  120  if (vma->vm_file) {    in move_ptes()
  121  mapping = vma->vm_file->f_mapping;    in move_ptes()
  124  if (vma->anon_vma) {    in move_ptes()
  125  anon_vma = vma->anon_vma;    in move_ptes()
  164  unsigned long move_page_tables(struct vm_area_struct *vma,    in move_page_tables() argument
  176  flush_cache_range(vma, old_addr, old_end);    in move_page_tables()
  180  mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);    in move_page_tables()
  [all …]

memory.c
  526  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,    in free_pgtables() argument
  529  while (vma) {    in free_pgtables()
  530  struct vm_area_struct *next = vma->vm_next;    in free_pgtables()
  531  unsigned long addr = vma->vm_start;    in free_pgtables()
  537  unlink_anon_vmas(vma);    in free_pgtables()
  538  unlink_file_vma(vma);    in free_pgtables()
  540  if (is_vm_hugetlb_page(vma)) {    in free_pgtables()
  541  hugetlb_free_pgd_range(tlb, addr, vma->vm_end,    in free_pgtables()
  547  while (next && next->vm_start <= vma->vm_end + PMD_SIZE    in free_pgtables()
  549  vma = next;    in free_pgtables()
  [all …]

madvise.c
   45  static long madvise_behavior(struct vm_area_struct *vma,    in madvise_behavior() argument
   49  struct mm_struct *mm = vma->vm_mm;    in madvise_behavior()
   52  unsigned long new_flags = vma->vm_flags;    in madvise_behavior()
   68  if (vma->vm_flags & VM_IO) {    in madvise_behavior()
   86  error = ksm_madvise(vma, start, end, behavior, &new_flags);    in madvise_behavior()
   92  error = hugepage_madvise(vma, &new_flags, behavior);    in madvise_behavior()
   98  if (new_flags == vma->vm_flags) {    in madvise_behavior()
   99  *prev = vma;    in madvise_behavior()
  103  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);    in madvise_behavior()
  104  *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,    in madvise_behavior()
  [all …]
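
madvise_behavior() is the vm_flags-changing half of madvise(2); the ksm_madvise() and hugepage_madvise() calls listed above back the MADV_MERGEABLE/MADV_UNMERGEABLE and MADV_HUGEPAGE/MADV_NOHUGEPAGE hints. A rough userspace illustration of how this path gets exercised (sizes are arbitrary, and MADV_MERGEABLE only succeeds on kernels built with CONFIG_KSM):

/* Userspace sketch, not kernel code: both madvise() calls below should be
 * routed to madvise_behavior() in this kernel version. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xab, len);

	/* Sets VM_SEQ_READ on the VMA (a readahead hint). */
	if (madvise(p, len, MADV_SEQUENTIAL))
		perror("madvise(MADV_SEQUENTIAL)");

	/* Marks the VMA VM_MERGEABLE via ksm_madvise(). */
	if (madvise(p, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	munmap(p, len);
	return 0;
}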

mprotect.c
   39  static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,    in lock_pte_protection() argument
   47  return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);    in lock_pte_protection()
   49  pmdl = pmd_lock(vma->vm_mm, pmd);    in lock_pte_protection()
   55  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);    in lock_pte_protection()
   60  static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,    in change_pte_range() argument
   64  struct mm_struct *mm = vma->vm_mm;    in change_pte_range()
   69  pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);    in change_pte_range()
   91  !(vma->vm_flags & VM_SOFTDIRTY)))    in change_pte_range()
   98  page = vm_normal_page(vma, addr, oldpte);    in change_pte_range()
  133  static inline unsigned long change_pmd_range(struct vm_area_struct *vma,    in change_pmd_range() argument
  [all …]

rmap.c
  127  static void anon_vma_chain_link(struct vm_area_struct *vma,    in anon_vma_chain_link() argument
  131  avc->vma = vma;    in anon_vma_chain_link()
  133  list_add(&avc->same_vma, &vma->anon_vma_chain);    in anon_vma_chain_link()
  164  int anon_vma_prepare(struct vm_area_struct *vma)    in anon_vma_prepare() argument
  166  struct anon_vma *anon_vma = vma->anon_vma;    in anon_vma_prepare()
  171  struct mm_struct *mm = vma->vm_mm;    in anon_vma_prepare()
  178  anon_vma = find_mergeable_anon_vma(vma);    in anon_vma_prepare()
  190  if (likely(!vma->anon_vma)) {    in anon_vma_prepare()
  191  vma->anon_vma = anon_vma;    in anon_vma_prepare()
  192  anon_vma_chain_link(vma, avc, anon_vma);    in anon_vma_prepare()
  [all …]

mlock.c
  227  long __mlock_vma_pages_range(struct vm_area_struct *vma,    in __mlock_vma_pages_range() argument
  230  struct mm_struct *mm = vma->vm_mm;    in __mlock_vma_pages_range()
  236  VM_BUG_ON_VMA(start < vma->vm_start, vma);    in __mlock_vma_pages_range()
  237  VM_BUG_ON_VMA(end > vma->vm_end, vma);    in __mlock_vma_pages_range()
  246  if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)    in __mlock_vma_pages_range()
  253  if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))    in __mlock_vma_pages_range()
  417  struct vm_area_struct *vma, int zoneid, unsigned long start,    in __munlock_pagevec_fill() argument
  428  pte = get_locked_pte(vma->vm_mm, start, &ptl);    in __munlock_pagevec_fill()
  440  page = vm_normal_page(vma, start, *pte);    in __munlock_pagevec_fill()
  479  void munlock_vma_pages_range(struct vm_area_struct *vma,    in munlock_vma_pages_range() argument
  [all …]
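
For context, __mlock_vma_pages_range() and munlock_vma_pages_range() do the per-VMA work underneath the mlock(2)/munlock(2) system calls. A minimal userspace sketch of driving that path (pinning may fail with ENOMEM or EPERM when RLIMIT_MEMLOCK is small):

/* Userspace sketch, not kernel code. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	if (mlock(p, len))		/* fault in and pin the range */
		perror("mlock");
	else if (munlock(p, len))	/* clear VM_LOCKED again */
		perror("munlock");

	munmap(p, len);
	return 0;
}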

fremap.c
   31  static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,    in zap_pte() argument
   39  flush_cache_page(vma, addr, pte_pfn(pte));    in zap_pte()
   40  pte = ptep_clear_flush(vma, addr, ptep);    in zap_pte()
   41  page = vm_normal_page(vma, addr, pte);    in zap_pte()
   72  static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,    in install_file_pte() argument
   86  zap_pte(mm, vma, addr, pte);    in install_file_pte()
  102  int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,    in generic_file_remap_pages() argument
  105  struct mm_struct *mm = vma->vm_mm;    in generic_file_remap_pages()
  109  err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);    in generic_file_remap_pages()
  147  struct vm_area_struct *vma;    in SYSCALL_DEFINE5() local
  [all …]

huge_memory.c
  698  pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)    in maybe_pmd_mkwrite() argument
  700  if (likely(vma->vm_flags & VM_WRITE))    in maybe_pmd_mkwrite()
  714  struct vm_area_struct *vma,    in __do_huge_pmd_anonymous_page() argument
  749  entry = mk_huge_pmd(page, vma->vm_page_prot);    in __do_huge_pmd_anonymous_page()
  750  entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);    in __do_huge_pmd_anonymous_page()
  751  page_add_new_anon_rmap(page, vma, haddr);    in __do_huge_pmd_anonymous_page()
  753  lru_cache_add_active_or_unevictable(page, vma);    in __do_huge_pmd_anonymous_page()
  770  struct vm_area_struct *vma,    in alloc_hugepage_vma() argument
  775  HPAGE_PMD_ORDER, vma, haddr, nd);    in alloc_hugepage_vma()
  780  struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,    in set_huge_zero_page() argument
  [all …]

mempolicy.c
  451  struct vm_area_struct *vma;    in mpol_rebind_mm() local
  454  for (vma = mm->mmap; vma; vma = vma->vm_next)    in mpol_rebind_mm()
  455  mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);    in mpol_rebind_mm()
  484  static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,    in queue_pages_pte_range() argument
  493  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);    in queue_pages_pte_range()
  500  page = vm_normal_page(vma, addr, *pte);    in queue_pages_pte_range()
  522  static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,    in queue_pages_hugetlb_pmd_range() argument
  532  ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);    in queue_pages_hugetlb_pmd_range()
  551  static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,    in queue_pages_pmd_range() argument
  564  if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {    in queue_pages_pmd_range()
  [all …]

hugetlb.c
  137  static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)    in subpool_vma() argument
  139  return subpool_inode(file_inode(vma->vm_file));    in subpool_vma()
  327  struct vm_area_struct *vma, unsigned long address)    in vma_hugecache_offset() argument
  329  return ((address - vma->vm_start) >> huge_page_shift(h)) +    in vma_hugecache_offset()
  330  (vma->vm_pgoff >> huge_page_order(h));    in vma_hugecache_offset()
  333  pgoff_t linear_hugepage_index(struct vm_area_struct *vma,    in linear_hugepage_index() argument
  336  return vma_hugecache_offset(hstate_vma(vma), vma, address);    in linear_hugepage_index()
  343  unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)    in vma_kernel_pagesize() argument
  347  if (!is_vm_hugetlb_page(vma))    in vma_kernel_pagesize()
  350  hstate = hstate_vma(vma);    in vma_kernel_pagesize()
  [all …]

gup.c
   19  static struct page *no_page_table(struct vm_area_struct *vma,    in no_page_table() argument
   30  if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))    in no_page_table()
   45  static struct page *follow_page_pte(struct vm_area_struct *vma,    in follow_page_pte() argument
   48  struct mm_struct *mm = vma->vm_mm;    in follow_page_pte()
   55  return no_page_table(vma, flags);    in follow_page_pte()
   84  page = vm_normal_page(vma, address, pte);    in follow_page_pte()
  105  if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {    in follow_page_pte()
  137  return no_page_table(vma, flags);    in follow_page_pte()
  153  struct page *follow_page_mask(struct vm_area_struct *vma,    in follow_page_mask() argument
  162  struct mm_struct *mm = vma->vm_mm;    in follow_page_mask()
  [all …]

mincore.c
   22  static void mincore_hugetlb_page_range(struct vm_area_struct *vma,    in mincore_hugetlb_page_range() argument
   29  h = hstate_vma(vma);    in mincore_hugetlb_page_range()
   97  static void mincore_unmapped_range(struct vm_area_struct *vma,    in mincore_unmapped_range() argument
  104  if (vma->vm_file) {    in mincore_unmapped_range()
  107  pgoff = linear_page_index(vma, addr);    in mincore_unmapped_range()
  109  vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);    in mincore_unmapped_range()
  116  static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,    in mincore_pte_range() argument
  124  ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);    in mincore_pte_range()
  131  mincore_unmapped_range(vma, addr, next, vec);    in mincore_pte_range()
  136  *vec = mincore_page(vma->vm_file->f_mapping, pgoff);    in mincore_pte_range()
  [all …]

pgtable-generic.c
   47  int ptep_set_access_flags(struct vm_area_struct *vma,    in ptep_set_access_flags() argument
   53  set_pte_at(vma->vm_mm, address, ptep, entry);    in ptep_set_access_flags()
   54  flush_tlb_fix_spurious_fault(vma, address);    in ptep_set_access_flags()
   61  int pmdp_set_access_flags(struct vm_area_struct *vma,    in pmdp_set_access_flags() argument
   69  set_pmd_at(vma->vm_mm, address, pmdp, entry);    in pmdp_set_access_flags()
   70  flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);    in pmdp_set_access_flags()
   81  int ptep_clear_flush_young(struct vm_area_struct *vma,    in ptep_clear_flush_young() argument
   85  young = ptep_test_and_clear_young(vma, address, ptep);    in ptep_clear_flush_young()
   87  flush_tlb_page(vma, address);    in ptep_clear_flush_young()
   93  int pmdp_clear_flush_young(struct vm_area_struct *vma,    in pmdp_clear_flush_young()
  [all …]

ksm.c
  364  static int break_ksm(struct vm_area_struct *vma, unsigned long addr)    in break_ksm() argument
  371  page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);    in break_ksm()
  375  ret = handle_mm_fault(vma->vm_mm, vma, addr,    in break_ksm()
  415  struct vm_area_struct *vma;    in find_mergeable_vma() local
  418  vma = find_vma(mm, addr);    in find_mergeable_vma()
  419  if (!vma || vma->vm_start > addr)    in find_mergeable_vma()
  421  if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)    in find_mergeable_vma()
  423  return vma;    in find_mergeable_vma()
  430  struct vm_area_struct *vma;    in break_cow() local
  439  vma = find_mergeable_vma(mm, addr);    in break_cow()
  [all …]

msync.c
   35  struct vm_area_struct *vma;    in SYSCALL_DEFINE3() local
   58  vma = find_vma(mm, start);    in SYSCALL_DEFINE3()
   65  if (!vma)    in SYSCALL_DEFINE3()
   68  if (start < vma->vm_start) {    in SYSCALL_DEFINE3()
   69  start = vma->vm_start;    in SYSCALL_DEFINE3()
   76  (vma->vm_flags & VM_LOCKED)) {    in SYSCALL_DEFINE3()
   80  file = vma->vm_file;    in SYSCALL_DEFINE3()
   81  fstart = (start - vma->vm_start) +    in SYSCALL_DEFINE3()
   82  ((loff_t)vma->vm_pgoff << PAGE_SHIFT);    in SYSCALL_DEFINE3()
   83  fend = fstart + (min(end, vma->vm_end) - start) - 1;    in SYSCALL_DEFINE3()
  [all …]
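
The fstart computation at msync.c lines 81–82 converts a user address inside the VMA back into a file offset: vm_pgoff records, in pages, where in the file the mapping starts, so the offset is (start - vm_start) + (vm_pgoff << PAGE_SHIFT). A standalone worked example with made-up numbers, assuming PAGE_SHIFT is 12 (4 KiB pages):

/* Worked example of the fstart computation, with hypothetical values. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long vm_start = 0x7f0000400000UL;	/* hypothetical mapping base */
	unsigned long vm_pgoff = 0x30;			/* mapping starts at file offset 0x30000 */
	unsigned long start    = vm_start + 0x5000;	/* address handed to msync() */

	unsigned long long fstart = (start - vm_start) +
				    ((unsigned long long)vm_pgoff << PAGE_SHIFT);

	/* 0x5000 into the mapping + 0x30000 mapping offset = 0x35000 */
	printf("file offset = 0x%llx\n", fstart);
	return 0;
}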

migrate.c
  106  static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,    in remove_migration_pte() argument
  109  struct mm_struct *mm = vma->vm_mm;    in remove_migration_pte()
  119  ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);    in remove_migration_pte()
  147  pte = pte_mkold(mk_pte(new, vma->vm_page_prot));    in remove_migration_pte()
  153  pte = maybe_mkwrite(pte, vma);    in remove_migration_pte()
  158  pte = arch_make_huge_pte(pte, vma, new, 0);    in remove_migration_pte()
  166  hugepage_add_anon_rmap(new, vma, addr);    in remove_migration_pte()
  170  page_add_anon_rmap(new, vma, addr);    in remove_migration_pte()
  175  update_mmu_cache(vma, addr, ptep);    in remove_migration_pte()
  198  struct vm_area_struct *vma;    in remove_linear_migration_ptes_from_nonlinear() local
  [all …]

vmacache.c
   94  struct vm_area_struct *vma = current->vmacache[i];    in vmacache_find() local
   96  if (!vma)    in vmacache_find()
   98  if (WARN_ON_ONCE(vma->vm_mm != mm))    in vmacache_find()
  100  if (vma->vm_start <= addr && vma->vm_end > addr) {    in vmacache_find()
  102  return vma;    in vmacache_find()
  122  struct vm_area_struct *vma = current->vmacache[i];    in vmacache_find_exact() local
  124  if (vma && vma->vm_start == start && vma->vm_end == end) {    in vmacache_find_exact()
  126  return vma;    in vmacache_find_exact()
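
vmacache_find() consults a small per-thread array of recently used VMAs before the caller falls back to the slower tree walk in find_vma(). A generic sketch of that shape, a tiny cache in front of an expensive lookup; the slot count, hash and backing table below are arbitrary choices for illustration, not the kernel's data structures:

/* Generic "small cache in front of a slow lookup" sketch, in the spirit of
 * vmacache_find(). Not kernel code. */
#include <stdio.h>

struct region { unsigned long start, end; };	/* stand-in for a VMA */

/* "Slow" backing lookup: a linear scan over a small table. */
static struct region table[] = {
	{ 0x1000, 0x4000 }, { 0x8000, 0x9000 }, { 0x10000, 0x20000 },
};

static struct region *slow_lookup(unsigned long addr)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].start <= addr && addr < table[i].end)
			return &table[i];
	return NULL;
}

#define CACHE_SLOTS 4
static struct region *cache[CACHE_SLOTS];	/* per-thread in the kernel */

static struct region *cached_lookup(unsigned long addr)
{
	unsigned int slot = (addr >> 12) % CACHE_SLOTS;	/* arbitrary hash */
	struct region *r = cache[slot];

	if (r && r->start <= addr && addr < r->end)	/* hit */
		return r;

	r = slow_lookup(addr);				/* miss: expensive path */
	if (r)
		cache[slot] = r;
	return r;
}

int main(void)
{
	cached_lookup(0x11000);				/* miss, fills a slot */
	struct region *r = cached_lookup(0x11800);	/* served from the cache */
	printf("0x11800 -> [0x%lx, 0x%lx)\n", r->start, r->end);
	return 0;
}

A cache like this only pays off because the same few regions tend to be looked up repeatedly; the real code must also invalidate the cache whenever the mm's VMA set changes, which the sketch ignores.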

debug.c
  152  void dump_vma(const struct vm_area_struct *vma)    in dump_vma() argument
  158  vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,    in dump_vma()
  159  vma->vm_prev, vma->vm_mm,    in dump_vma()
  160  (unsigned long)pgprot_val(vma->vm_page_prot),    in dump_vma()
  161  vma->anon_vma, vma->vm_ops, vma->vm_pgoff,    in dump_vma()
  162  vma->vm_file, vma->vm_private_data);    in dump_vma()
  163  dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names));    in dump_vma()

pagewalk.c
  108  static int walk_hugetlb_range(struct vm_area_struct *vma,    in walk_hugetlb_range() argument
  112  struct hstate *h = hstate_vma(vma);    in walk_hugetlb_range()
  131  static int walk_hugetlb_range(struct vm_area_struct *vma,    in walk_hugetlb_range() argument
  184  struct vm_area_struct *vma = NULL;    in walk_page_range() local
  194  vma = find_vma(walk->mm, addr);    in walk_page_range()
  195  if (vma) {    in walk_page_range()
  200  if ((vma->vm_start <= addr) &&    in walk_page_range()
  201  (vma->vm_flags & VM_PFNMAP)) {    in walk_page_range()
  215  if (walk->hugetlb_entry && (vma->vm_start <= addr) &&    in walk_page_range()
  216  is_vm_hugetlb_page(vma)) {    in walk_page_range()
  [all …]

filemap_xip.c
  168  struct vm_area_struct *vma;    in __xip_unmap() local
  186  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {    in __xip_unmap()
  187  mm = vma->vm_mm;    in __xip_unmap()
  188  address = vma->vm_start +    in __xip_unmap()
  189  ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);    in __xip_unmap()
  190  BUG_ON(address < vma->vm_start || address >= vma->vm_end);    in __xip_unmap()
  194  flush_cache_page(vma, address, pte_pfn(*pte));    in __xip_unmap()
  195  pteval = ptep_clear_flush(vma, address, pte);    in __xip_unmap()
  222  static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)    in xip_file_fault() argument
  224  struct file *file = vma->vm_file;    in xip_file_fault()
  [all …]

util.c
  141  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,    in __vma_link_list() argument
  146  vma->vm_prev = prev;    in __vma_link_list()
  149  prev->vm_next = vma;    in __vma_link_list()
  151  mm->mmap = vma;    in __vma_link_list()
  158  vma->vm_next = next;    in __vma_link_list()
  160  next->vm_prev = vma;    in __vma_link_list()
  165  struct vm_area_struct *vma)    in vm_is_stack_for_task() argument
  167  return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));    in vm_is_stack_for_task()
  177  struct vm_area_struct *vma, bool in_group)    in task_of_stack() argument
  179  if (vm_is_stack_for_task(task, vma))    in task_of_stack()
  [all …]

internal.h
  221  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  225  extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
  227  extern void munlock_vma_pages_range(struct vm_area_struct *vma,
  229  static inline void munlock_vma_pages_all(struct vm_area_struct *vma)    in munlock_vma_pages_all() argument
  231  munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);    in munlock_vma_pages_all()
  269  extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
  273  struct vm_area_struct *vma);

filemap.c
  1779  static void do_sync_mmap_readahead(struct vm_area_struct *vma,    in do_sync_mmap_readahead() argument
  1788  if (vma->vm_flags & VM_RAND_READ)    in do_sync_mmap_readahead()
  1793  if (vma->vm_flags & VM_SEQ_READ) {    in do_sync_mmap_readahead()
  1824  static void do_async_mmap_readahead(struct vm_area_struct *vma,    in do_async_mmap_readahead() argument
  1833  if (vma->vm_flags & VM_RAND_READ)    in do_async_mmap_readahead()
  1866  int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)    in filemap_fault() argument
  1869  struct file *file = vma->vm_file;    in filemap_fault()
  1891  do_async_mmap_readahead(vma, ra, file, page, offset);    in filemap_fault()
  1894  do_sync_mmap_readahead(vma, ra, file, offset);    in filemap_fault()
  1896  mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);    in filemap_fault()
  [all …]