Home
last modified time | relevance | path

Searched refs: vm_flags (Results 1 – 24 of 24) sorted by relevance

/mm/
Dmmap.c98 pgprot_t vm_get_page_prot(unsigned long vm_flags) in vm_get_page_prot() argument
100 return __pgprot(pgprot_val(protection_map[vm_flags & in vm_get_page_prot()
102 pgprot_val(arch_vm_get_page_prot(vm_flags))); in vm_get_page_prot()
106 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) in vm_pgprot_modify() argument
108 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); in vm_pgprot_modify()
114 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot() local
116 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
118 vm_flags &= ~VM_SHARED; in vma_set_page_prot()
120 vm_flags); in vma_set_page_prot()
254 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
[all …]
Dnommu.c141 unsigned long vm_flags; in __get_user_pages() local
147 vm_flags = (foll_flags & FOLL_WRITE) ? in __get_user_pages()
149 vm_flags &= (foll_flags & FOLL_FORCE) ? in __get_user_pages()
158 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || in __get_user_pages()
159 !(vm_flags & vma->vm_flags)) in __get_user_pages()
240 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
278 vma->vm_flags |= VM_USERMAP; in vmalloc_user()
674 if (region->vm_flags & VM_MAPPED_COPY) in __put_nommu_region()
724 protect_vma(vma, vma->vm_flags); in add_vma_to_mm()
1081 unsigned long vm_flags; in determine_vm_flags() local
[all …]
Dmremap.c254 unsigned long vm_flags = vma->vm_flags; in move_vma() local
278 MADV_UNMERGEABLE, &vm_flags); in move_vma()
314 if (vm_flags & VM_ACCOUNT) { in move_vma()
315 vma->vm_flags &= ~VM_ACCOUNT; in move_vma()
332 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); in move_vma()
343 vma->vm_flags |= VM_ACCOUNT; in move_vma()
345 vma->vm_next->vm_flags |= VM_ACCOUNT; in move_vma()
348 if (vm_flags & VM_LOCKED) { in move_vma()
382 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) in vma_to_resize()
385 if (vma->vm_flags & VM_LOCKED) { in vma_to_resize()
[all …]
Dgup.c77 return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET); in should_force_cow_break()
155 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_page_pte()
226 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { in follow_page_mask()
238 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { in follow_page_mask()
375 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) in faultin_page()
382 vm_flags_t vm_flags = vma->vm_flags; in check_vma_flags() local
384 if (vm_flags & (VM_IO | VM_PFNMAP)) in check_vma_flags()
391 if (!(vm_flags & VM_WRITE)) { in check_vma_flags()
403 if (!is_cow_mapping(vm_flags)) { in check_vma_flags()
404 WARN_ON_ONCE(vm_flags & VM_MAYWRITE); in check_vma_flags()
[all …]
Dmprotect.c107 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
299 unsigned long oldflags = vma->vm_flags; in mprotect_fixup()
317 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in mprotect_fixup()
371 vma->vm_flags = newflags; in mprotect_fixup()
400 unsigned long vm_flags, nstart, end, tmp, reqprot; in SYSCALL_DEFINE3() local
426 vm_flags = calc_vm_prot_bits(prot); in SYSCALL_DEFINE3()
440 if (!(vma->vm_flags & VM_GROWSDOWN)) in SYSCALL_DEFINE3()
448 if (!(vma->vm_flags & VM_GROWSUP)) in SYSCALL_DEFINE3()
460 newflags = vm_flags; in SYSCALL_DEFINE3()
461 newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); in SYSCALL_DEFINE3()
Dmlock.c426 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in munlock_vma_pages_range()
507 vm_flags_t old_flags = vma->vm_flags; in mlock_fixup()
509 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || in mlock_fixup()
553 vma->vm_flags = newflags; in mlock_fixup()
585 vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in apply_vma_lock_flags()
654 vm_flags_t vm_flags = VM_LOCKED; in SYSCALL_DEFINE3() local
660 vm_flags |= VM_LOCKONFAULT; in SYSCALL_DEFINE3()
662 return do_mlock(start, len, vm_flags); in SYSCALL_DEFINE3()
714 newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in apply_mlockall_flags()
Drmap.c845 unsigned long vm_flags; member
871 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
873 pra->vm_flags |= VM_LOCKED; in page_referenced_one()
892 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
894 pra->vm_flags |= VM_LOCKED; in page_referenced_one()
906 if (likely(!(vma->vm_flags & VM_SEQ_READ))) in page_referenced_one()
919 pra->vm_flags |= vma->vm_flags; in page_referenced_one()
953 unsigned long *vm_flags) in page_referenced() argument
967 *vm_flags = 0; in page_referenced()
990 *vm_flags = pra.vm_flags; in page_referenced()
[all …]
Dmemory.c681 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
751 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
760 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
761 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
770 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
802 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
803 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
812 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
842 unsigned long vm_flags = vma->vm_flags; in copy_one_pte() local
872 is_cow_mapping(vm_flags)) { in copy_one_pte()
[all …]
Dmadvise.c53 unsigned long new_flags = vma->vm_flags; in madvise_behavior()
69 if (vma->vm_flags & VM_IO) { in madvise_behavior()
99 if (new_flags == vma->vm_flags) { in madvise_behavior()
131 vma->vm_flags = new_flags; in madvise_behavior()
281 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) in madvise_dontneed()
302 if (vma->vm_flags & VM_LOCKED) in madvise_remove()
311 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) in madvise_remove()
Dhuge_memory.c700 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
819 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) in do_huge_pmd_anonymous_page()
898 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_pmd()
899 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pmd()
901 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pmd()
902 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vmf_insert_pfn_pmd()
1310 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_trans_huge_pmd()
1372 if (!(vma->vm_flags & VM_WRITE)) in do_huge_pmd_numa_page()
1513 (new_vma->vm_flags & VM_NOHUGEPAGE)) in move_huge_pmd()
2037 unsigned long *vm_flags, int advice) in hugepage_madvise() argument
[all …]
Dhugetlb.c765 if (vma->vm_flags & VM_MAYSHARE) { in vma_resv_map()
780 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_map()
789 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_flags()
805 if (!(vma->vm_flags & VM_MAYSHARE)) in reset_vma_resv_huge_pages()
812 if (vma->vm_flags & VM_NORESERVE) { in vma_has_reserves()
822 if (vma->vm_flags & VM_MAYSHARE && chg == 0) in vma_has_reserves()
829 if (vma->vm_flags & VM_MAYSHARE) { in vma_has_reserves()
1883 if (vma->vm_flags & VM_MAYSHARE) in __vma_reservation_common()
3183 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; in copy_hugetlb_page_range()
3387 vma->vm_flags &= ~VM_MAYSHARE; in __unmap_hugepage_range_final()
[all …]
Dmsync.c76 (vma->vm_flags & VM_LOCKED)) { in SYSCALL_DEFINE3()
86 (vma->vm_flags & VM_SHARED)) { in SYSCALL_DEFINE3()
Dksm.c421 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in find_mergeable_vma()
789 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in unmerge_and_remove_all_rmap_items()
1062 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { in try_to_merge_one_page()
1656 if (!(vma->vm_flags & VM_MERGEABLE)) in scan_get_next_rmap_item()
1788 unsigned long end, int advice, unsigned long *vm_flags) in ksm_madvise() argument
1798 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | in ksm_madvise()
1804 if (*vm_flags & VM_SAO) in ksm_madvise()
1814 *vm_flags |= VM_MERGEABLE; in ksm_madvise()
1818 if (!(*vm_flags & VM_MERGEABLE)) in ksm_madvise()
1827 *vm_flags &= ~VM_MERGEABLE; in ksm_madvise()
Dframe_vector.c55 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { in get_vaddr_frames()
85 } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)); in get_vaddr_frames()
Duserfaultfd.c70 if (dst_vma->vm_flags & VM_WRITE) in mcopy_atomic_pte()
178 if (!dst_vma || (dst_vma->vm_flags & VM_SHARED)) in __mcopy_atomic()
Dvmscan.c808 unsigned long vm_flags; in page_check_references() local
811 &vm_flags); in page_check_references()
818 if (vm_flags & VM_LOCKED) in page_check_references()
846 if (vm_flags & VM_EXEC) in page_check_references()
1789 unsigned long vm_flags; in shrink_active_list() local
1840 &vm_flags)) { in shrink_active_list()
1851 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { in shrink_active_list()
Dfilemap.c1877 if (vma->vm_flags & VM_RAND_READ) in do_sync_mmap_readahead()
1882 if (vma->vm_flags & VM_SEQ_READ) { in do_sync_mmap_readahead()
1921 if (vma->vm_flags & VM_RAND_READ) in do_async_mmap_readahead()
2198 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) in generic_file_readonly_mmap()
Ddebug.c165 dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); in dump_vma()
Dpagewalk.c189 if (vma->vm_flags & VM_PFNMAP) { in walk_page_test()
Dmigrate.c175 if (vma->vm_flags & VM_LOCKED) in remove_migration_pte()
1705 (vma->vm_flags & VM_EXEC)) in migrate_misplaced_page()
Dvmalloc.c1660 pgprot_t prot, unsigned long vm_flags, int node, in __vmalloc_node_range() argument
1672 vm_flags, start, end, node, gfp_mask, caller); in __vmalloc_node_range()
2191 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in remap_vmalloc_range_partial()
Dmempolicy.c598 if (vma->vm_flags & VM_PFNMAP) in queue_pages_test_walk()
617 if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) in queue_pages_test_walk()
725 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, in mbind_range()
Dswap.c716 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) { in lru_cache_add_active_or_unevictable()
Dshmem.c3437 file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE); in shmem_zero_setup()