
Searched for refs:vm_flags (Results 1 – 25 of 29), sorted by relevance


/mm/
mmap.c
106 pgprot_t vm_get_page_prot(unsigned long vm_flags) in vm_get_page_prot() argument
108 pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags & in vm_get_page_prot()
110 pgprot_val(arch_vm_get_page_prot(vm_flags))); in vm_get_page_prot()
116 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) in vm_pgprot_modify() argument
118 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); in vm_pgprot_modify()
124 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot() local
127 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
129 vm_flags &= ~VM_SHARED; in vma_set_page_prot()
130 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); in vma_set_page_prot()
142 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
[all …]
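
The mmap.c hits above center on vm_get_page_prot(), which turns the low vm_flags bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED) into a hardware pgprot_t by indexing a 16-entry protection_map. Below is a minimal userspace sketch of that lookup; the VM_* values mirror the kernel's low bits, but the table entries are illustrative strings rather than real pgprot_t values.

    #include <stdio.h>

    /* The four low vm_flags bits, with the kernel's values. */
    #define VM_READ   0x00000001UL
    #define VM_WRITE  0x00000002UL
    #define VM_EXEC   0x00000004UL
    #define VM_SHARED 0x00000008UL

    /*
     * Stand-in for the kernel's protection_map[16]: one entry per
     * combination of the four bits above.  In the real table, the
     * private (non-shared) writable entries get copy-on-write
     * protections rather than direct write permission.
     */
    static const char *protection_map[16] = {
        /* private */ "---", "r--", "-w- (COW)", "rw- (COW)",
                      "--x", "r-x", "-wx (COW)", "rwx (COW)",
        /* shared  */ "---", "r--", "-w-", "rw-",
                      "--x", "r-x", "-wx", "rwx",
    };

    /* The same masked lookup vm_get_page_prot() performs. */
    static const char *vm_get_page_prot(unsigned long vm_flags)
    {
        return protection_map[vm_flags &
                              (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }

    int main(void)
    {
        printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE));             /* COW */
        printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE | VM_SHARED)); /* rw- */
        return 0;
    }
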
mremap.c
326 unsigned long vm_flags = vma->vm_flags; in move_vma() local
350 MADV_UNMERGEABLE, &vm_flags); in move_vma()
387 if (vm_flags & VM_ACCOUNT) { in move_vma()
388 vma->vm_flags &= ~VM_ACCOUNT; in move_vma()
405 vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT); in move_vma()
408 if (unlikely(vma->vm_flags & VM_PFNMAP)) in move_vma()
420 vma->vm_flags |= VM_ACCOUNT; in move_vma()
422 vma->vm_next->vm_flags |= VM_ACCOUNT; in move_vma()
425 if (vm_flags & VM_LOCKED) { in move_vma()
451 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { in vma_to_resize()
[all …]
nommu.c
127 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
169 vma->vm_flags |= VM_USERMAP; in vmalloc_user()
559 if (region->vm_flags & VM_MAPPED_COPY) in __put_nommu_region()
946 unsigned long vm_flags; in determine_vm_flags() local
948 vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags); in determine_vm_flags()
953 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in determine_vm_flags()
955 vm_flags |= VM_MAYSHARE; in determine_vm_flags()
960 vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS); in determine_vm_flags()
962 vm_flags |= VM_SHARED; in determine_vm_flags()
970 vm_flags &= ~VM_MAYSHARE; in determine_vm_flags()
[all …]
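
determine_vm_flags() in nommu.c seeds a fresh vm_flags word from the mmap() prot and flags arguments: the requested PROT_* bits become the matching VM_* bits, and the VM_MAY* "ceiling" bits record what a later mprotect() may still grant. A simplified userspace model with local stand-ins for the constants (the real function also folds in the backing store's capabilities, as the hits at lines 960 and 970 show):

    #include <stdio.h>

    /* Stand-ins for the mmap() interface bits. */
    #define PROT_READ  0x1
    #define PROT_WRITE 0x2
    #define PROT_EXEC  0x4
    #define MAP_SHARED 0x01

    /* Stand-ins for the vm_flags bits. */
    #define VM_READ     0x01UL
    #define VM_WRITE    0x02UL
    #define VM_EXEC     0x04UL
    #define VM_SHARED   0x08UL
    #define VM_MAYREAD  0x10UL
    #define VM_MAYWRITE 0x20UL
    #define VM_MAYEXEC  0x40UL
    #define VM_MAYSHARE 0x80UL

    /* Translate PROT_* into VM_*, in the spirit of calc_vm_prot_bits(). */
    static unsigned long calc_vm_prot_bits(int prot)
    {
        unsigned long v = 0;
        if (prot & PROT_READ)  v |= VM_READ;
        if (prot & PROT_WRITE) v |= VM_WRITE;
        if (prot & PROT_EXEC)  v |= VM_EXEC;
        return v;
    }

    static unsigned long determine_vm_flags(int prot, int flags)
    {
        unsigned long vm_flags = calc_vm_prot_bits(prot);

        /* The mapping may later be mprotect()ed up to these rights. */
        vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        if (flags & MAP_SHARED)
            vm_flags |= VM_MAYSHARE | VM_SHARED;
        return vm_flags;
    }

    int main(void)
    {
        printf("%#lx\n", determine_vm_flags(PROT_READ | PROT_WRITE, MAP_SHARED));
        return 0;
    }
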
mprotect.c
64 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
88 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
120 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
343 unsigned long oldflags = vma->vm_flags; in mprotect_fixup()
361 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in mprotect_fixup()
400 VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY); in mprotect_fixup()
423 vma->vm_flags = newflags; in mprotect_fixup()
502 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_mprotect_pkey()
510 if (!(vma->vm_flags & VM_GROWSUP)) in do_mprotect_pkey()
525 if (rier && (vma->vm_flags & VM_MAYEXEC)) in do_mprotect_pkey()
[all …]
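
mprotect_fixup() installs newflags only after do_mprotect_pkey() has checked them against the VM_MAY* ceilings. The kernel lays the VM_MAY* bits out exactly four bits above VM_READ/VM_WRITE/VM_EXEC, so shifting newflags right by four lines each ceiling up with the right it guards. A hedged sketch of that rights check (the check itself sits in the portion of do_mprotect_pkey() elided above):

    #include <stdio.h>

    #define VM_READ     0x00000001UL
    #define VM_WRITE    0x00000002UL
    #define VM_EXEC     0x00000004UL
    #define VM_MAYREAD  0x00000010UL  /* VM_READ  << 4 */
    #define VM_MAYWRITE 0x00000020UL  /* VM_WRITE << 4 */
    #define VM_MAYEXEC  0x00000040UL  /* VM_EXEC  << 4 */

    /*
     * Return 0 iff every requested right has its VM_MAY* ceiling set,
     * mirroring the "newflags & ~(newflags >> 4)" test in
     * do_mprotect_pkey(); the kernel returns -EACCES on failure.
     */
    static int check_rights(unsigned long newflags)
    {
        if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC))
            return -1;
        return 0;
    }

    int main(void)
    {
        /* Allowed: write requested, VM_MAYWRITE ceiling present. */
        printf("%d\n", check_rights(VM_WRITE | VM_MAYWRITE | VM_MAYREAD));
        /* Denied: write requested, but only the read ceiling is set. */
        printf("%d\n", check_rights(VM_WRITE | VM_MAYREAD));
        return 0;
    }
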
mlock.c
448 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in munlock_vma_pages_range()
527 vm_flags_t old_flags = vma->vm_flags; in mlock_fixup()
529 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || in mlock_fixup()
574 vma->vm_flags = newflags; in mlock_fixup()
606 vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in apply_vma_lock_flags()
657 if (vma->vm_flags & VM_LOCKED) { in count_mm_mlocked_page_nr()
725 vm_flags_t vm_flags = VM_LOCKED; in SYSCALL_DEFINE3() local
731 vm_flags |= VM_LOCKONFAULT; in SYSCALL_DEFINE3()
733 return do_mlock(start, len, vm_flags); in SYSCALL_DEFINE3()
788 newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in apply_mlockall_flags()
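
mlock_fixup() and its callers never toggle the lock bits piecemeal: they strip the whole locking state with VM_LOCKED_CLEAR_MASK and then OR back in whatever the caller requested, which is how the mlock2() entry point above layers VM_LOCKONFAULT on top of VM_LOCKED. A small model of that clear-then-set pattern, using the kernel's bit values:

    #include <stdio.h>

    #define VM_LOCKED      0x00002000UL
    #define VM_LOCKONFAULT 0x00080000UL
    #define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))

    /* Recompute a VMA's lock state: drop all lock bits, then add the
     * requested ones (the newflags computation seen at lines 606 and
     * 788 above). */
    static unsigned long apply_lock_flags(unsigned long vm_flags,
                                          unsigned long to_add)
    {
        return (vm_flags & VM_LOCKED_CLEAR_MASK) | to_add;
    }

    int main(void)
    {
        unsigned long flags = VM_LOCKED;                /* plain mlock() */

        /* mlock2(..., MLOCK_ONFAULT): lock, but fault pages in lazily. */
        flags = apply_lock_flags(flags, VM_LOCKED | VM_LOCKONFAULT);
        printf("%#lx\n", flags);                        /* 0x82000 */

        /* munlock(): add nothing back, clearing both bits. */
        flags = apply_lock_flags(flags, 0);
        printf("%#lx\n", flags);                        /* 0 */
        return 0;
    }
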
gup.c
272 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_page_pte()
326 if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) { in follow_pmd_mask()
436 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { in follow_pud_mask()
671 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) in faultin_page()
678 vm_flags_t vm_flags = vma->vm_flags; in check_vma_flags() local
682 if (vm_flags & (VM_IO | VM_PFNMAP)) in check_vma_flags()
689 if (!(vm_flags & VM_WRITE)) { in check_vma_flags()
701 if (!is_cow_mapping(vm_flags)) in check_vma_flags()
704 } else if (!(vm_flags & VM_READ)) { in check_vma_flags()
711 if (!(vm_flags & VM_MAYREAD)) in check_vma_flags()
[all …]
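
check_vma_flags() is the gatekeeper for get_user_pages(): it snapshots vma->vm_flags once (the "local" hit at line 678), rejects VM_IO and VM_PFNMAP areas outright, then demands VM_WRITE for write access (allowing FOLL_FORCE only on copy-on-write mappings) and VM_READ, or FOLL_FORCE plus VM_MAYREAD, otherwise. A condensed userspace sketch of that decision ladder, with stand-in constants:

    #include <stdio.h>

    #define VM_READ     0x0001UL
    #define VM_WRITE    0x0002UL
    #define VM_SHARED   0x0008UL
    #define VM_MAYREAD  0x0010UL
    #define VM_MAYWRITE 0x0020UL
    #define VM_PFNMAP   0x0400UL
    #define VM_IO       0x4000UL

    #define FOLL_WRITE 0x01
    #define FOLL_FORCE 0x10

    static int is_cow_mapping(unsigned long flags)
    {
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
    }

    /* Condensed check_vma_flags(): 0 = ok, -1 = -EFAULT. */
    static int check_vma_flags(unsigned long vm_flags, unsigned int gup_flags)
    {
        if (vm_flags & (VM_IO | VM_PFNMAP))
            return -1;

        if (gup_flags & FOLL_WRITE) {
            if (!(vm_flags & VM_WRITE)) {
                if (!(gup_flags & FOLL_FORCE))
                    return -1;
                if (!is_cow_mapping(vm_flags))  /* forced COW break only */
                    return -1;
            }
        } else if (!(vm_flags & VM_READ)) {
            if (!(gup_flags & FOLL_FORCE) || !(vm_flags & VM_MAYREAD))
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_vma_flags(VM_READ | VM_WRITE, FOLL_WRITE)); /* 0  */
        printf("%d\n", check_vma_flags(VM_IO | VM_READ, 0));             /* -1 */
        return 0;
    }
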
memory.c
541 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
603 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
616 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
617 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
626 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
659 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
660 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
669 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
701 unsigned long vm_flags = vma->vm_flags; in copy_one_pte() local
728 is_cow_mapping(vm_flags)) { in copy_one_pte()
[all …]
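
Several of the memory.c hits (and the mprotect.c and huge_memory.c ones) funnel through is_cow_mapping(), which decides "private but writable in principle" from just two bits: the mapping must not be VM_SHARED yet must carry the VM_MAYWRITE ceiling. The hugetlb.c hit at line 3431 below spells the same test out inline. As a standalone predicate:

    #include <stdio.h>

    #define VM_SHARED   0x00000008UL
    #define VM_MAYWRITE 0x00000020UL

    /*
     * A mapping is copy-on-write iff it is private (VM_SHARED clear)
     * and could become writable (VM_MAYWRITE set), which is exactly
     * the test mm/ applies via is_cow_mapping().
     */
    static int is_cow_mapping(unsigned long flags)
    {
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
    }

    int main(void)
    {
        printf("%d\n", is_cow_mapping(VM_MAYWRITE));             /* 1: private */
        printf("%d\n", is_cow_mapping(VM_SHARED | VM_MAYWRITE)); /* 0: shared  */
        return 0;
    }
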
rmap.c
748 unsigned long vm_flags; member
768 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
770 pra->vm_flags |= VM_LOCKED; in page_referenced_one()
785 if (likely(!(vma->vm_flags & VM_SEQ_READ))) in page_referenced_one()
807 pra->vm_flags |= vma->vm_flags; in page_referenced_one()
840 unsigned long *vm_flags) in page_referenced() argument
853 *vm_flags = 0; in page_referenced()
876 *vm_flags = pra.vm_flags; in page_referenced()
960 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
1357 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) in try_to_unmap_one()
[all …]
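
page_referenced() walks every VMA mapping a page and OR-accumulates their vm_flags into the pra->vm_flags member declared at line 748 above, so a caller can later test bits such as VM_LOCKED or VM_EXEC for the page as a whole. A toy model of that accumulation (the VMA list here is invented for illustration):

    #include <stdio.h>

    #define VM_EXEC   0x00000004UL
    #define VM_LOCKED 0x00002000UL

    /* Hypothetical flags of the VMAs an rmap walk finds for one page. */
    static const unsigned long mapping_vmas[] = {
        VM_EXEC,    /* mapped as executable text in one process */
        VM_LOCKED,  /* mlock()ed in another                     */
    };

    /* OR-accumulate, as page_referenced_one() fills pra->vm_flags. */
    static unsigned long page_referenced_flags(void)
    {
        unsigned long vm_flags = 0;
        for (unsigned int i = 0;
             i < sizeof(mapping_vmas) / sizeof(*mapping_vmas); i++)
            vm_flags |= mapping_vmas[i];
        return vm_flags;
    }

    int main(void)
    {
        unsigned long vm_flags = page_referenced_flags();

        if (vm_flags & VM_LOCKED)
            printf("mlocked somewhere: treat as unevictable\n");
        if (vm_flags & VM_EXEC)
            printf("backs executable text: reclaim less eagerly\n");
        return 0;
    }

The vmscan.c hits further down consume exactly this word in page_check_references() and shrink_active_list().
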
userfaultfd.c
73 if (dst_vma->vm_flags & VM_WRITE) in mcopy_atomic_pte()
180 int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
181 int vm_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
243 vm_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
413 if (!(dst_vma->vm_flags & VM_SHARED)) { in mfill_atomic_pte()
499 dst_vma->vm_flags & VM_SHARED)) in __mcopy_atomic()
518 if (!(dst_vma->vm_flags & VM_SHARED) && in __mcopy_atomic()
khugepaged.c
314 unsigned long *vm_flags, int advice) in hugepage_madvise() argument
327 *vm_flags &= ~VM_NOHUGEPAGE; in hugepage_madvise()
328 *vm_flags |= VM_HUGEPAGE; in hugepage_madvise()
334 if (!(*vm_flags & VM_NO_KHUGEPAGED) && in hugepage_madvise()
335 khugepaged_enter_vma_merge(vma, *vm_flags)) in hugepage_madvise()
339 *vm_flags &= ~VM_HUGEPAGE; in hugepage_madvise()
340 *vm_flags |= VM_NOHUGEPAGE; in hugepage_madvise()
408 unsigned long vm_flags) in hugepage_vma_check() argument
410 if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || in hugepage_vma_check()
411 (vm_flags & VM_NOHUGEPAGE) || in hugepage_vma_check()
[all …]
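
hugepage_madvise() treats VM_HUGEPAGE and VM_NOHUGEPAGE as a mutually exclusive pair: MADV_HUGEPAGE clears one bit and sets the other, MADV_NOHUGEPAGE does the reverse, and hugepage_vma_check() later reads the pair (together with the global "always" policy) to decide whether khugepaged may touch the VMA. A compact model, with stand-in constants:

    #include <stdio.h>

    #define VM_HUGEPAGE   0x01UL
    #define VM_NOHUGEPAGE 0x02UL

    enum advice { MADV_HUGEPAGE, MADV_NOHUGEPAGE };

    /* Flip the mutually exclusive pair as hugepage_madvise() does. */
    static void hugepage_madvise(unsigned long *vm_flags, enum advice advice)
    {
        switch (advice) {
        case MADV_HUGEPAGE:
            *vm_flags &= ~VM_NOHUGEPAGE;
            *vm_flags |= VM_HUGEPAGE;
            break;
        case MADV_NOHUGEPAGE:
            *vm_flags &= ~VM_HUGEPAGE;
            *vm_flags |= VM_NOHUGEPAGE;
            break;
        }
    }

    /* Simplified hugepage_vma_check(): honor the opt-out, then
     * require either an explicit opt-in or an "always" policy. */
    static int hugepage_vma_check(unsigned long vm_flags, int always)
    {
        if (vm_flags & VM_NOHUGEPAGE)
            return 0;
        return (vm_flags & VM_HUGEPAGE) || always;
    }

    int main(void)
    {
        unsigned long vm_flags = 0;

        hugepage_madvise(&vm_flags, MADV_HUGEPAGE);
        printf("%d\n", hugepage_vma_check(vm_flags, 0)); /* 1 */

        hugepage_madvise(&vm_flags, MADV_NOHUGEPAGE);
        printf("%d\n", hugepage_vma_check(vm_flags, 1)); /* 0 */
        return 0;
    }
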
hugetlb.c
759 if (vma->vm_flags & VM_MAYSHARE) { in vma_resv_map()
774 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_map()
783 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_flags()
799 if (!(vma->vm_flags & VM_MAYSHARE)) in reset_vma_resv_huge_pages()
806 if (vma->vm_flags & VM_NORESERVE) { in vma_has_reserves()
816 if (vma->vm_flags & VM_MAYSHARE && chg == 0) in vma_has_reserves()
823 if (vma->vm_flags & VM_MAYSHARE) { in vma_has_reserves()
2010 if (vma->vm_flags & VM_MAYSHARE) in __vma_reservation_common()
2021 if (vma->vm_flags & VM_MAYSHARE) in __vma_reservation_common()
3431 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; in copy_hugetlb_page_range()
[all …]
madvise.c
72 unsigned long new_flags = vma->vm_flags; in madvise_behavior()
88 if (vma->vm_flags & VM_IO) { in madvise_behavior()
96 if (vma->vm_file || vma->vm_flags & VM_SHARED) { in madvise_behavior()
129 if (new_flags == vma->vm_flags) { in madvise_behavior()
169 vma->vm_flags = new_flags; in madvise_behavior()
824 if (vma->vm_flags & VM_LOCKED) in madvise_remove()
833 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) in madvise_remove()
msync.c
79 (vma->vm_flags & VM_LOCKED)) { in SYSCALL_DEFINE3()
89 (vma->vm_flags & VM_SHARED)) { in SYSCALL_DEFINE3()
huge_memory.c
494 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
674 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); in alloc_hugepage_direct_gfpmask()
726 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) in do_huge_pmd_anonymous_page()
836 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pmd()
838 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pmd()
840 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pmd()
861 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pud_mkwrite()
913 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pud()
915 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pud()
917 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pud()
[all …]
frame_vector.c
72 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { in get_vaddr_frames()
102 } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)); in get_vaddr_frames()
ksm.c
528 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in find_mergeable_vma()
983 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in unmerge_and_remove_all_rmap_items()
1254 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { in try_to_merge_one_page()
2287 if (!(vma->vm_flags & VM_MERGEABLE)) in scan_get_next_rmap_item()
2429 unsigned long end, int advice, unsigned long *vm_flags) in ksm_madvise() argument
2439 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | in ksm_madvise()
2448 if (*vm_flags & VM_SAO) in ksm_madvise()
2452 if (*vm_flags & VM_SPARC_ADI) in ksm_madvise()
2462 *vm_flags |= VM_MERGEABLE; in ksm_madvise()
2466 if (!(*vm_flags & VM_MERGEABLE)) in ksm_madvise()
[all …]
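
ksm_madvise() follows the same madvise pattern: MADV_MERGEABLE first refuses VMAs whose flags are incompatible with merging (shared and special mappings, plus arch-specific cases such as VM_SAO and VM_SPARC_ADI in the hits above), then sets VM_MERGEABLE; MADV_UNMERGEABLE unmerges and clears it. A sketch of the compatibility gate, with a deliberately trimmed stand-in mask:

    #include <stdio.h>

    /* Trimmed stand-ins; the kernel's incompatibility mask is longer. */
    #define VM_SHARED    0x0008UL
    #define VM_MAYSHARE  0x0080UL
    #define VM_PFNMAP    0x0400UL
    #define VM_IO        0x4000UL
    #define VM_MERGEABLE 0x8000UL

    /* MADV_MERGEABLE half of ksm_madvise(): gate, then set the bit.
     * Like the kernel, silently skip incompatible (or already
     * mergeable) VMAs rather than returning an error. */
    static int madvise_mergeable(unsigned long *vm_flags)
    {
        if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
                         VM_PFNMAP | VM_IO))
            return 0;
        *vm_flags |= VM_MERGEABLE;
        return 0;
    }

    int main(void)
    {
        unsigned long anon = 0, shared = VM_SHARED;

        madvise_mergeable(&anon);
        madvise_mergeable(&shared);
        printf("anon:   %#lx\n", anon);   /* 0x8000: VM_MERGEABLE set */
        printf("shared: %#lx\n", shared); /* 0x8: left untouched      */
        return 0;
    }
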
debug.c
130 vma->vm_flags, &vma->vm_flags); in dump_vma()
shmem.c
2060 if ((vma->vm_flags & VM_NOHUGEPAGE) || in shmem_fault()
2063 else if (vma->vm_flags & VM_HUGEPAGE) in shmem_fault()
2214 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) in shmem_mmap()
2224 if (vma->vm_flags & VM_SHARED) in shmem_mmap()
2225 vma->vm_flags &= ~(VM_MAYWRITE); in shmem_mmap()
2233 khugepaged_enter(vma, vma->vm_flags); in shmem_mmap()
2380 if (dst_vma->vm_flags & VM_WRITE) in shmem_mfill_atomic_pte()
3995 if ((vma->vm_flags & VM_NOHUGEPAGE) || in shmem_huge_enabled()
4016 return (vma->vm_flags & VM_HUGEPAGE); in shmem_huge_enabled()
4182 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags); in shmem_zero_setup()
[all …]
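
The shmem_mmap() hit at lines 2224–2225 is the inverse of the mprotect ceiling check: to make write sealing stick, the kernel does not merely leave VM_WRITE clear on a shared mapping of a sealed file, it also drops the VM_MAYWRITE ceiling, so a later mprotect(PROT_WRITE) can never re-grant write access (private mappings keep VM_MAYWRITE and stay COW-writable). In miniature:

    #include <stdio.h>

    #define VM_WRITE    0x0002UL
    #define VM_SHARED   0x0008UL
    #define VM_MAYWRITE 0x0020UL

    int main(void)
    {
        /* A shared, currently read-only mapping of a write-sealed file. */
        unsigned long vm_flags = VM_SHARED | VM_MAYWRITE;

        /* shmem_mmap()-style sealing: revoke the ceiling, not just
         * the current permission bit. */
        if (vm_flags & VM_SHARED)
            vm_flags &= ~VM_MAYWRITE;

        /* An mprotect(PROT_WRITE) would now fail the VM_MAY* check. */
        printf("may ever write: %s\n",
               (vm_flags & VM_MAYWRITE) ? "yes" : "no");
        return 0;
    }
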
hmm.c
295 if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE)) in hmm_vma_walk_hole_()
907 if (vma == NULL || (vma->vm_flags & device_vma)) in hmm_range_fault()
910 if (!(vma->vm_flags & VM_READ)) { in hmm_range_fault()
vmscan.c
1035 unsigned long vm_flags; in page_check_references() local
1038 &vm_flags); in page_check_references()
1045 if (vm_flags & VM_LOCKED) in page_check_references()
1073 if (vm_flags & VM_EXEC) in page_check_references()
2050 unsigned long vm_flags; in shrink_active_list() local
2095 &vm_flags)) { in shrink_active_list()
2106 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { in shrink_active_list()
migrate.c
272 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) in remove_migration_pte()
1961 (vma->vm_flags & VM_EXEC)) in migrate_misplaced_page()
2649 (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma)) in migrate_vma_setup()
2751 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); in migrate_vma_insert_page()
2756 if (vma->vm_flags & VM_WRITE) in migrate_vma_insert_page()
filemap.c
2392 if (vmf->vma->vm_flags & VM_RAND_READ) in do_sync_mmap_readahead()
2397 if (vmf->vma->vm_flags & VM_SEQ_READ) { in do_sync_mmap_readahead()
2441 if (vmf->vma->vm_flags & VM_RAND_READ) in do_async_mmap_readahead()
2721 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) in generic_file_readonly_mmap()
pagewalk.c
242 if (vma->vm_flags & VM_PFNMAP) { in walk_page_test()
internal.h
44 return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)); in can_madv_lru_vma()
mmu_notifier.c
544 return range->vma->vm_flags & VM_READ; in mmu_notifier_range_update_to_read_only()
