• Home
  • Raw
  • Download

Lines matching references to vm_flags

96 pgprot_t vm_get_page_prot(unsigned long vm_flags)  in vm_get_page_prot()  argument
98 return __pgprot(pgprot_val(protection_map[vm_flags & in vm_get_page_prot()
100 pgprot_val(arch_vm_get_page_prot(vm_flags))); in vm_get_page_prot()
104 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) in vm_pgprot_modify() argument
106 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); in vm_pgprot_modify()
112 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot() local
114 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
116 vm_flags &= ~VM_SHARED; in vma_set_page_prot()
118 vm_flags); in vma_set_page_prot()
252 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
254 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
258 if (unlikely(vma->vm_flags & VM_NONLINEAR)) in __remove_shared_vm_struct()
676 if (vma->vm_flags & VM_DENYWRITE) in __vma_link_file()
678 if (vma->vm_flags & VM_SHARED) in __vma_link_file()
682 if (unlikely(vma->vm_flags & VM_NONLINEAR)) in __vma_link_file()
820 if (!(vma->vm_flags & VM_NONLINEAR)) { in vma_adjust()
961 struct file *file, unsigned long vm_flags, in is_mergeable_vma() argument
972 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
1009 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_before() argument
1013 if (is_mergeable_vma(vma, file, vm_flags, anon_name) && in can_vma_merge_before()
1029 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_after() argument
1033 if (is_mergeable_vma(vma, file, vm_flags, anon_name) && in can_vma_merge_after()
1074 unsigned long end, unsigned long vm_flags, in vma_merge() argument
1087 if (vm_flags & VM_SPECIAL) in vma_merge()
1103 can_vma_merge_after(prev, vm_flags, anon_vma, in vma_merge()
1110 can_vma_merge_before(next, vm_flags, anon_vma, in vma_merge()
1122 khugepaged_enter_vma_merge(prev, vm_flags); in vma_merge()
1131 can_vma_merge_before(next, vm_flags, anon_vma, in vma_merge()
1141 khugepaged_enter_vma_merge(area, vm_flags); in vma_merge()
1166 !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && in anon_vma_compatible()
1302 vm_flags_t vm_flags; in do_mmap_pgoff() local
1346 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | in do_mmap_pgoff()
1353 if (mlock_future_check(mm, vm_flags, len)) in do_mmap_pgoff()
1377 vm_flags |= VM_SHARED | VM_MAYSHARE; in do_mmap_pgoff()
1379 vm_flags &= ~(VM_MAYWRITE | VM_SHARED); in do_mmap_pgoff()
1386 if (vm_flags & VM_EXEC) in do_mmap_pgoff()
1388 vm_flags &= ~VM_MAYEXEC; in do_mmap_pgoff()
1393 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) in do_mmap_pgoff()
1403 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) in do_mmap_pgoff()
1409 vm_flags |= VM_SHARED | VM_MAYSHARE; in do_mmap_pgoff()
1429 vm_flags |= VM_NORESERVE; in do_mmap_pgoff()
1433 vm_flags |= VM_NORESERVE; in do_mmap_pgoff()
1436 addr = mmap_region(file, addr, len, vm_flags, pgoff); in do_mmap_pgoff()
1438 ((vm_flags & VM_LOCKED) || in do_mmap_pgoff()
1526 vm_flags_t vm_flags = vma->vm_flags; in vma_wants_writenotify() local
1529 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) in vma_wants_writenotify()
1539 pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags))) in vma_wants_writenotify()
1543 if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY)) in vma_wants_writenotify()
1547 if (vm_flags & VM_PFNMAP) in vma_wants_writenotify()
1559 static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) in accountable_mapping() argument
1568 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; in accountable_mapping()
1572 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) in mmap_region() argument
1588 if (!(vm_flags & MAP_FIXED)) in mmap_region()
1609 if (accountable_mapping(file, vm_flags)) { in mmap_region()
1613 vm_flags |= VM_ACCOUNT; in mmap_region()
1619 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, in mmap_region()
1638 vma->vm_flags = vm_flags; in mmap_region()
1639 vma->vm_page_prot = vm_get_page_prot(vm_flags); in mmap_region()
1644 if (vm_flags & VM_DENYWRITE) { in mmap_region()
1649 if (vm_flags & VM_SHARED) { in mmap_region()
1675 vm_flags = vma->vm_flags; in mmap_region()
1676 } else if (vm_flags & VM_SHARED) { in mmap_region()
1685 if (vm_flags & VM_SHARED) in mmap_region()
1687 if (vm_flags & VM_DENYWRITE) in mmap_region()
1694 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); in mmap_region()
1695 if (vm_flags & VM_LOCKED) { in mmap_region()
1696 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || in mmap_region()
1700 vma->vm_flags &= ~VM_LOCKED; in mmap_region()
1713 vma->vm_flags |= VM_SOFTDIRTY; in mmap_region()
1726 if (vm_flags & VM_SHARED) in mmap_region()
1729 if (vm_flags & VM_DENYWRITE) in mmap_region()
2155 if (vma->vm_flags & VM_LOCKED) { in acct_stack_growth()
2166 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
2179 if (vma->vm_flags & VM_LOCKED) in acct_stack_growth()
2181 vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); in acct_stack_growth()
2196 if (!(vma->vm_flags & VM_GROWSUP)) in expand_upwards()
2214 (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { in expand_upwards()
2215 if (!(next->vm_flags & VM_GROWSUP)) in expand_upwards()
2268 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_upwards()
2295 (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { in expand_downwards()
2296 if (!(prev->vm_flags & VM_GROWSDOWN)) in expand_downwards()
2347 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_downwards()
2385 if (prev->vm_flags & VM_LOCKED) in find_extend_vma()
2407 if (!(vma->vm_flags & VM_GROWSDOWN)) in find_extend_vma()
2412 if (vma->vm_flags & VM_LOCKED) in find_extend_vma()
2433 if (vma->vm_flags & VM_ACCOUNT) in remove_vma_list()
2435 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); in remove_vma_list()
2643 if (tmp->vm_flags & VM_LOCKED) { in do_munmap()
2766 vma->vm_flags = flags; in do_brk()
2774 vma->vm_flags |= VM_SOFTDIRTY; in do_brk()
2807 if (vma->vm_flags & VM_LOCKED) in exit_mmap()
2834 if (vma->vm_flags & VM_ACCOUNT) in exit_mmap()
2872 if ((vma->vm_flags & VM_ACCOUNT) && in insert_vm_struct()
2906 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3038 unsigned long vm_flags, const struct vm_operations_struct *ops, in __install_special_mapping() argument
3053 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3054 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in __install_special_mapping()
3086 unsigned long vm_flags, const struct vm_special_mapping *spec) in _install_special_mapping() argument
3088 return __install_special_mapping(mm, addr, len, vm_flags, in _install_special_mapping()
3094 unsigned long vm_flags, struct page **pages) in install_special_mapping() argument
3097 mm, addr, len, vm_flags, &legacy_special_mapping_vmops, in install_special_mapping()