/mm/
D | mmap.c
     110  pgprot_t vm_get_page_prot(unsigned long vm_flags)    in vm_get_page_prot() argument
     112  pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &    in vm_get_page_prot()
     114  pgprot_val(arch_vm_get_page_prot(vm_flags)));    in vm_get_page_prot()
     120  static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)    in vm_pgprot_modify() argument
     122  return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));    in vm_pgprot_modify()
     128  unsigned long vm_flags = vma->vm_flags;    in vma_set_page_prot() local
     131  vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);    in vma_set_page_prot()
     133  vm_flags &= ~VM_SHARED;    in vma_set_page_prot()
     134  vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);    in vma_set_page_prot()
     146  if (vma->vm_flags & VM_DENYWRITE)    in __remove_shared_vm_struct()
     [all …]

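These hits are the core vm_flags-to-pgprot translation: vm_get_page_prot() looks the rwx/shared bits up in protection_map[], and vma_set_page_prot() caches the result in vma->vm_page_prot. A minimal caller sketch (hypothetical function, not in mmap.c):

	#include <linux/mm.h>

	/* What protection a private, read-write mapping resolves to:
	 * only the rwx/shared bits take part in the lookup. */
	static pgprot_t example_rw_private_prot(void)
	{
		return vm_get_page_prot(VM_READ | VM_WRITE);
	}
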
D | mremap.c
     547  unsigned long vm_flags = vma->vm_flags;    in move_vma() local
     571  MADV_UNMERGEABLE, &vm_flags);    in move_vma()
     621  if (vm_flags & VM_ACCOUNT) {    in move_vma()
     622  vma->vm_flags &= ~VM_ACCOUNT;    in move_vma()
     639  vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);    in move_vma()
     642  if (unlikely(vma->vm_flags & VM_PFNMAP))    in move_vma()
     646  if (vm_flags & VM_ACCOUNT) {    in move_vma()
     648  vma->vm_flags |= VM_ACCOUNT;    in move_vma()
     665  vma->vm_flags &= VM_LOCKED_CLEAR_MASK;    in move_vma()
     677  if (vm_flags & VM_LOCKED) {    in move_vma()
     [all …]

D | nommu.c
     127  if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))    in follow_pfn()
     155  pgprot_t prot, unsigned long vm_flags, int node,    in __vmalloc_node_range() argument
     178  vma->vm_flags |= VM_USERMAP;    in __vmalloc_user_flags()
     540  if (region->vm_flags & VM_MAPPED_COPY)    in __put_nommu_region()
     921  unsigned long vm_flags;    in determine_vm_flags() local
     923  vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);    in determine_vm_flags()
     928  vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;    in determine_vm_flags()
     930  vm_flags |= VM_MAYSHARE;    in determine_vm_flags()
     935  vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);    in determine_vm_flags()
     937  vm_flags |= VM_SHARED;    in determine_vm_flags()
     [all …]

D | userfaultfd.c
      64  bool writable = dst_vma->vm_flags & VM_WRITE;    in mfill_atomic_install_pte()
      65  bool vm_shared = dst_vma->vm_flags & VM_SHARED;    in mfill_atomic_install_pte()
     287  int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;    in __mcopy_atomic_hugetlb()
     288  int vm_shared = dst_vma->vm_flags & VM_SHARED;    in __mcopy_atomic_hugetlb()
     338  vm_shared = dst_vma->vm_flags & VM_SHARED;    in __mcopy_atomic_hugetlb()
     523  if (!(dst_vma->vm_flags & VM_SHARED)) {    in mfill_atomic_pte()
     604  dst_vma->vm_flags & VM_SHARED))    in __mcopy_atomic()
     612  if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))    in __mcopy_atomic()
     634  if (!(dst_vma->vm_flags & VM_SHARED) &&    in __mcopy_atomic()
     786  if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))    in mwriteprotect_range()
     [all …]

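Almost every path here branches on VM_SHARED in the destination VMA: private destinations need a fresh anonymous page and anon_vma setup, shared ones go through the backing file. The recurring test as a hypothetical predicate (illustrative only, not from userfaultfd.c):

	#include <linux/mm.h>

	/* The shared/private split that steers mfill_atomic_pte()
	 * and __mcopy_atomic() above. */
	static bool example_uffd_dst_is_shared(struct vm_area_struct *dst_vma)
	{
		return dst_vma->vm_flags & VM_SHARED;
	}
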
D | mlock.c
     455  WRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK);    in munlock_vma_pages_range()
     542  vm_flags_t old_flags = vma->vm_flags;    in mlock_fixup() local
     544  if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||    in mlock_fixup()
     589  WRITE_ONCE(vma->vm_flags, newflags);    in mlock_fixup()
     622  vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;    in apply_vma_lock_flags()
     673  if (vma->vm_flags & VM_LOCKED) {    in count_mm_mlocked_page_nr()
     741  vm_flags_t vm_flags = VM_LOCKED;    in SYSCALL_DEFINE3() local
     747  vm_flags |= VM_LOCKONFAULT;    in SYSCALL_DEFINE3()
     749  return do_mlock(start, len, vm_flags);    in SYSCALL_DEFINE3()
     804  newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;    in apply_mlockall_flags()

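The update pattern here is worth noting: a new flag word is computed first and then published with WRITE_ONCE(), so lockless readers of vma->vm_flags never see a torn store. A minimal sketch of that pattern (hypothetical helper; assumes the caller holds mmap_lock for write):

	#include <linux/mm.h>

	/* Clear the mlock bits on a VMA, as munlock_vma_pages_range()
	 * and apply_vma_lock_flags() do above. */
	static void example_clear_mlock(struct vm_area_struct *vma)
	{
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		WRITE_ONCE(vma->vm_flags, newflags);
	}
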
D | mprotect.c
      68  if (prot_numa && !(vma->vm_flags & VM_SHARED) &&    in change_pte_range()
      96  if (is_cow_mapping(vma->vm_flags) &&    in change_pte_range()
     137  !(vma->vm_flags & VM_SOFTDIRTY))) {    in change_pte_range()
     403  unsigned long oldflags = vma->vm_flags;    in mprotect_fixup() local
     421  (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&    in mprotect_fixup()
     460  VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);    in mprotect_fixup()
     484  WRITE_ONCE(vma->vm_flags, newflags);    in mprotect_fixup()
     564  if (!(vma->vm_flags & VM_GROWSDOWN))    in do_mprotect_pkey()
     572  if (!(vma->vm_flags & VM_GROWSUP))    in do_mprotect_pkey()
     587  if (rier && (vma->vm_flags & VM_MAYEXEC))    in do_mprotect_pkey()
     [all …]

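Lines 564 and 572 gate mprotect()'s PROT_GROWSDOWN/PROT_GROWSUP handling: the request is honoured only if the VMA actually grows in the requested direction. That check as a hypothetical standalone predicate:

	#include <linux/mm.h>

	/* Illustrative only: PROT_GROWSDOWN may extend the change to the
	 * stack's true start only on a grows-down VMA (and PROT_GROWSUP
	 * symmetrically on a grows-up one). */
	static bool example_grows_ok(struct vm_area_struct *vma, bool down)
	{
		return vma->vm_flags & (down ? VM_GROWSDOWN : VM_GROWSUP);
	}
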
D | khugepaged.c
     346  unsigned long *vm_flags, int advice)    in hugepage_madvise() argument
     359  *vm_flags &= ~VM_NOHUGEPAGE;    in hugepage_madvise()
     360  *vm_flags |= VM_HUGEPAGE;    in hugepage_madvise()
     366  if (!(*vm_flags & VM_NO_KHUGEPAGED) &&    in hugepage_madvise()
     367  khugepaged_enter_vma_merge(vma, *vm_flags))    in hugepage_madvise()
     371  *vm_flags &= ~VM_HUGEPAGE;    in hugepage_madvise()
     372  *vm_flags |= VM_NOHUGEPAGE;    in hugepage_madvise()
     441  unsigned long vm_flags)    in hugepage_vma_check() argument
     443  if (!transhuge_vma_enabled(vma, vm_flags))    in hugepage_vma_check()
     455  if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())    in hugepage_vma_check()
     [all …]

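hugepage_madvise() takes vm_flags by pointer and rewrites it, the same in/out convention ksm_madvise() uses further down; the caller then merges the result back into the VMA. A hypothetical handler in the same style, flipping a mutually exclusive hint pair exactly as lines 359-360 and 371-372 do:

	#include <linux/mm.h>

	/* Sketch of the madvise-handler convention: mutate the caller's
	 * flag word in place, never the VMA directly. */
	static void example_set_hint(unsigned long *vm_flags, bool want_huge)
	{
		if (want_huge) {
			*vm_flags &= ~VM_NOHUGEPAGE;
			*vm_flags |= VM_HUGEPAGE;
		} else {
			*vm_flags &= ~VM_HUGEPAGE;
			*vm_flags |= VM_NOHUGEPAGE;
		}
	}
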
D | gup.c
     551  if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {    in follow_page_pte()
     946  if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))    in faultin_page()
     953  vm_flags_t vm_flags = vma->vm_flags;    in check_vma_flags() local
     957  if (vm_flags & (VM_IO | VM_PFNMAP))    in check_vma_flags()
     967  if (!(vm_flags & VM_WRITE)) {    in check_vma_flags()
     979  if (!is_cow_mapping(vm_flags))    in check_vma_flags()
     982  } else if (!(vm_flags & VM_READ)) {    in check_vma_flags()
     989  if (!(vm_flags & VM_MAYREAD))    in check_vma_flags()
    1195  vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;    in vma_permits_fault() local
    1197  if (!(vm_flags & vma->vm_flags))    in vma_permits_fault()
     [all …]

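check_vma_flags() (lines 953-989) vets a VMA before get_user_pages() pins pages from it. A heavily condensed, hypothetical rendering of those tests; the real function also handles FOLL_FORCE and the COW special case visible at line 979:

	#include <linux/errno.h>
	#include <linux/mm.h>

	/* Refuse GUP on I/O and PFN mappings, and require the VMA to
	 * permit the requested access. */
	static int example_check_vma(struct vm_area_struct *vma, bool write)
	{
		vm_flags_t vm_flags = vma->vm_flags;

		if (vm_flags & (VM_IO | VM_PFNMAP))
			return -EFAULT;
		if (write ? !(vm_flags & VM_WRITE) : !(vm_flags & VM_READ))
			return -EFAULT;
		return 0;
	}
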
D | memory.c
     586  (void *)addr, READ_ONCE(vma->vm_flags), vma->anon_vma, mapping, index);    in print_bad_pte()
     709  if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {    in vm_normal_page_pmd()
     710  if (vma->vm_flags & VM_MIXEDMAP) {    in vm_normal_page_pmd()
     719  if (!is_cow_mapping(vma->vm_flags))    in vm_normal_page_pmd()
     751  unsigned long vm_flags = dst_vma->vm_flags;    in copy_nonpresent_pte() local
     775  is_cow_mapping(vm_flags)) {    in copy_nonpresent_pte()
     812  is_cow_mapping(vm_flags)) {    in copy_nonpresent_pte()
     854  if (!is_cow_mapping(src_vma->vm_flags))    in copy_present_page()
     903  pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma->vm_flags);    in copy_present_page()
     921  unsigned long vm_flags = src_vma->vm_flags;    in copy_present_pte() local
     [all …]

D | rmap.c
     778  unsigned long vm_flags;    member
     798  if (vma->vm_flags & VM_LOCKED) {    in page_referenced_one()
     800  pra->vm_flags |= VM_LOCKED;    in page_referenced_one()
     816  if (likely(!(vma->vm_flags & VM_SEQ_READ)))    in page_referenced_one()
     838  pra->vm_flags |= vma->vm_flags;    in page_referenced_one()
     874  unsigned long *vm_flags)    in page_referenced() argument
     888  *vm_flags = 0;    in page_referenced()
     911  *vm_flags = pra.vm_flags;    in page_referenced()
     996  if (vma->vm_flags & VM_SHARED)    in invalid_mkclean_vma()
    1473  if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))    in try_to_unmap_one()
     [all …]

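The member at line 778 and the assignment at line 838 show how page_referenced() aggregates flags: every VMA found referencing the page ORs its vm_flags into the result, so callers can test properties like VM_LOCKED or VM_EXEC without walking the rmap themselves. A hypothetical caller, assuming the page_referenced() signature of this kernel version and a page the caller has locked:

	#include <linux/mm.h>
	#include <linux/rmap.h>

	/* Is this page mlocked in any mapping that references it? */
	static bool example_referenced_and_mlocked(struct page *page)
	{
		unsigned long vm_flags;

		page_referenced(page, 1, NULL, &vm_flags);
		return vm_flags & VM_LOCKED;
	}
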
D | hugetlb.c
     963  if (vma->vm_flags & VM_MAYSHARE) {    in vma_resv_map()
     978  VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);    in set_vma_resv_map()
     987  VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);    in set_vma_resv_flags()
    1003  if (!(vma->vm_flags & VM_MAYSHARE))    in reset_vma_resv_huge_pages()
    1010  if (vma->vm_flags & VM_NORESERVE) {    in vma_has_reserves()
    1020  if (vma->vm_flags & VM_MAYSHARE && chg == 0)    in vma_has_reserves()
    1027  if (vma->vm_flags & VM_MAYSHARE) {    in vma_has_reserves()
    2214  if (vma->vm_flags & VM_MAYSHARE) {    in __vma_reservation_common()
    2227  if (vma->vm_flags & VM_MAYSHARE)    in __vma_reservation_common()
    3828  cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;    in copy_hugetlb_page_range()
     [all …]

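hugetlb consistently tests VM_MAYSHARE rather than VM_SHARED: reservations for shareable mappings live with the inode, while private mappings carry their own resv_map on the VMA. The recurring predicate as a trivial sketch (hypothetical helper, not from hugetlb.c):

	#include <linux/mm.h>

	/* Does this VMA's hugetlb reservation bookkeeping live in the
	 * inode (shared) rather than on the VMA (private)? */
	static bool example_resv_in_inode(struct vm_area_struct *vma)
	{
		return vma->vm_flags & VM_MAYSHARE;
	}
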
D | huge_memory.c
      68  return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&    in file_thp_enabled()
      70  (vma->vm_flags & VM_EXEC);    in file_thp_enabled()
     481  if (likely(vma->vm_flags & VM_WRITE))    in maybe_pmd_mkwrite()
     670  const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);    in alloc_hugepage_direct_gfpmask()
     722  if (unlikely(khugepaged_enter(vma, vma->vm_flags)))    in do_huge_pmd_anonymous_page()
     844  BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&    in vmf_insert_pfn_pmd_prot()
     846  BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==    in vmf_insert_pfn_pmd_prot()
     848  BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));    in vmf_insert_pfn_pmd_prot()
     869  if (likely(vma->vm_flags & VM_WRITE))    in maybe_pud_mkwrite()
     934  BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&    in vmf_insert_pfn_pud_prot()
     [all …]

D | mapping_dirty_helpers.c
     232  unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);    in wp_clean_test_walk() local
     235  if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=    in wp_clean_test_walk()

D | madvise.c
      76  unsigned long new_flags = vma->vm_flags;    in madvise_behavior()
      92  if (vma->vm_flags & VM_IO) {    in madvise_behavior()
     100  if (vma->vm_file || vma->vm_flags & VM_SHARED) {    in madvise_behavior()
     133  if (new_flags == vma->vm_flags) {    in madvise_behavior()
     174  WRITE_ONCE(vma->vm_flags, new_flags);    in madvise_behavior()
     872  if (vma->vm_flags & VM_LOCKED)    in madvise_remove()
     881  if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))    in madvise_remove()

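Line 881 is the MADV_REMOVE guard: hole punching is refused unless the mapping is both shared and writable. The same test as a hypothetical standalone predicate:

	#include <linux/mm.h>

	/* Both bits must be set; a shared read-only or private writable
	 * mapping may not punch holes in the backing object. */
	static bool example_can_remove(struct vm_area_struct *vma)
	{
		return (vma->vm_flags & (VM_SHARED | VM_WRITE)) ==
		       (VM_SHARED | VM_WRITE);
	}
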
D | msync.c
      79  (vma->vm_flags & VM_LOCKED)) {    in SYSCALL_DEFINE3()
      89  (vma->vm_flags & VM_SHARED)) {    in SYSCALL_DEFINE3()

D | ksm.c
     529  if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)    in find_mergeable_vma()
     985  if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)    in unmerge_and_remove_all_rmap_items()
    1256  if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {    in try_to_merge_one_page()
    2297  if (!(vma->vm_flags & VM_MERGEABLE))    in scan_get_next_rmap_item()
    2439  unsigned long end, int advice, unsigned long *vm_flags)    in ksm_madvise() argument
    2449  if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |    in ksm_madvise()
    2458  if (*vm_flags & VM_SAO)    in ksm_madvise()
    2462  if (*vm_flags & VM_SPARC_ADI)    in ksm_madvise()
    2472  *vm_flags |= VM_MERGEABLE;    in ksm_madvise()
    2476  if (!(*vm_flags & VM_MERGEABLE))    in ksm_madvise()
     [all …]

D | debug.c
     213  vma->vm_flags, &vma->vm_flags);    in dump_vma()

D | hmm.c
      72  if (!(vma->vm_flags & VM_WRITE))    in hmm_vma_fault()
     518  if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&    in hmm_vma_walk_test()
     519  vma->vm_flags & VM_READ)    in hmm_vma_walk_test()

D | frame_vector.c
      75  if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {    in get_vaddr_frames()

D | vmscan.c
    1023  unsigned long vm_flags;    in page_check_references() local
    1037  &vm_flags);    in page_check_references()
    1046  if (vm_flags & VM_LOCKED)    in page_check_references()
    1076  if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))    in page_check_references()
    2084  unsigned long vm_flags;    in shrink_active_list() local
    2142  &vm_flags) != 0) {    in shrink_active_list()
    2152  if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {    in shrink_active_list()

D | filemap.c
    2637  if (vmf->vma->vm_flags & VM_RAND_READ)    in do_sync_mmap_readahead()
    2642  if (vmf->vma->vm_flags & VM_SEQ_READ) {    in do_sync_mmap_readahead()
    2690  if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)    in do_async_mmap_readahead()
    2773  if (!(vmf->vma->vm_flags & VM_RAND_READ) && ra->ra_pages) {    in filemap_fault()
    3137  if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))    in generic_file_readonly_mmap()

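These hits encode the fault-time readahead policy: VM_RAND_READ (set via madvise(MADV_RANDOM)) suppresses readahead, VM_SEQ_READ forces sequential readahead, and everything else gets the default heuristics. A hypothetical condensation of that decision:

	#include <linux/mm.h>

	enum example_ra { EXAMPLE_RA_NONE, EXAMPLE_RA_SEQ, EXAMPLE_RA_DEFAULT };

	/* Illustrative only: mirrors the branches in
	 * do_sync_mmap_readahead() above. */
	static enum example_ra example_ra_policy(struct vm_area_struct *vma)
	{
		if (vma->vm_flags & VM_RAND_READ)
			return EXAMPLE_RA_NONE;
		if (vma->vm_flags & VM_SEQ_READ)
			return EXAMPLE_RA_SEQ;
		return EXAMPLE_RA_DEFAULT;
	}
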
D | shmem.c
    2135  if ((vma->vm_flags & VM_NOHUGEPAGE) ||    in shmem_fault()
    2138  else if (vma->vm_flags & VM_HUGEPAGE)    in shmem_fault()
    2293  vma->vm_flags |= VM_MTE_ALLOWED;    in shmem_mmap()
    2300  khugepaged_enter(vma, vma->vm_flags);    in shmem_mmap()
    4080  if (!transhuge_vma_enabled(vma, vma->vm_flags))    in shmem_huge_enabled()
    4100  return (vma->vm_flags & VM_HUGEPAGE);    in shmem_huge_enabled()
    4266  file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);    in shmem_zero_setup()
    4278  khugepaged_enter(vma, vma->vm_flags);    in shmem_zero_setup()

D | migrate.c
     223  pte = maybe_mkwrite(pte, vma->vm_flags);    in remove_migration_pte()
     255  if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))    in remove_migration_pte()
    2798  (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))    in migrate_vma_setup()
    2907  swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);    in migrate_vma_insert_page()
    2919  if (vma->vm_flags & VM_WRITE)    in migrate_vma_insert_page()

D | internal.h
      64  return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));    in can_madv_lru_vma()

D | pagewalk.c
     305  if (vma->vm_flags & VM_PFNMAP) {    in walk_page_test()