/include/trace/events/

D | fs_dax.h |
     18  __field(unsigned long, vm_flags)
     31  __entry->vm_flags = vmf->vma->vm_flags;
     43  __entry->vm_flags & VM_SHARED ? "shared" : "private",
     70  __field(unsigned long, vm_flags)
     79  __entry->vm_flags = vmf->vma->vm_flags;
     89  __entry->vm_flags & VM_SHARED ? "shared" : "private",
    111  __field(unsigned long, vm_flags)
    122  __entry->vm_flags = vmf->vma->vm_flags;
    134  __entry->vm_flags & VM_SHARED ? "shared" : "private",
    158  __field(unsigned long, vm_flags)
    [all …]

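The fs_dax trace events above all follow one pattern: __field() reserves room for the raw vm_flags word, the assignment snapshots vmf->vma->vm_flags at fault time, and the VM_SHARED test decodes it to "shared"/"private" only when the buffer is printed. A minimal userspace sketch of that decode step (the flag value mirrors the kernel's bit, hardcoded here for illustration):

#include <stdio.h>

#define VM_SHARED 0x00000008UL          /* mirrors the kernel's bit */

/* Decode a vm_flags word the way the trace events do: the raw value
 * is captured at fault time, the string is produced at print time. */
static const char *vm_flags_sharing(unsigned long vm_flags)
{
        return (vm_flags & VM_SHARED) ? "shared" : "private";
}

int main(void)
{
        unsigned long captured = VM_SHARED;  /* as if from vmf->vma->vm_flags */

        printf("mapping is %s\n", vm_flags_sharing(captured));
        return 0;
}
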
/include/linux/

D | mman.h |
    191  static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)   in map_deny_write_exec() argument
    196  if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))   in map_deny_write_exec()
    199  if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))   in map_deny_write_exec()

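map_deny_write_exec() implements the MDWE ("memory-deny-write-execute") policy visible in the two checks above. A hedged userspace sketch of just that logic; the kernel version first tests whether MDWE is enabled for the process, which is omitted here, and the flag values are illustrative copies of the kernel's bits:

#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE 0x00000002UL
#define VM_EXEC  0x00000004UL

struct vm_area_struct { unsigned long vm_flags; };

/* Deny a mapping that would be writable and executable at once, or
 * that would add exec to a VMA created without it (the checks from
 * lines 196 and 199). */
static bool map_deny_write_exec(struct vm_area_struct *vma,
                                unsigned long new_flags)
{
        if ((new_flags & VM_EXEC) && (new_flags & VM_WRITE))
                return true;
        if (!(vma->vm_flags & VM_EXEC) && (new_flags & VM_EXEC))
                return true;
        return false;
}

int main(void)
{
        struct vm_area_struct vma = { .vm_flags = VM_WRITE };

        printf("W+X denied: %d\n",
               map_deny_write_exec(&vma, VM_WRITE | VM_EXEC));
        printf("add X denied: %d\n", map_deny_write_exec(&vma, VM_EXEC));
        return 0;
}
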
D | userfaultfd_k.h |
    168  return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);   in uffd_disable_huge_pmd_share()
    180  return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);   in uffd_disable_fault_around()
    185  return vma->vm_flags & VM_UFFD_MISSING;   in userfaultfd_missing()
    190  return vma->vm_flags & VM_UFFD_WP;   in userfaultfd_wp()
    195  return vma->vm_flags & VM_UFFD_MINOR;   in userfaultfd_minor()
    212  return vma->vm_flags & __VM_UFFD_FLAGS;   in userfaultfd_armed()
    216  unsigned long vm_flags)   in vma_can_userfault() argument
    218  if ((vm_flags & VM_UFFD_MINOR) &&   in vma_can_userfault()
    227  if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))   in vma_can_userfault()

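Every userfaultfd helper here is a plain mask test against vma->vm_flags; "armed" is simply the union of all three mode bits. A standalone sketch under assumed bit values (VM_UFFD_MINOR in particular is arch-dependent in the kernel, so all three constants below are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define VM_UFFD_MISSING 0x00000200UL
#define VM_UFFD_WP      0x00001000UL
#define VM_UFFD_MINOR   0x00002000UL
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

struct vm_area_struct { unsigned long vm_flags; };

static bool userfaultfd_wp(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_UFFD_WP;
}

/* "Armed" means any userfaultfd mode is active on the VMA. */
static bool userfaultfd_armed(struct vm_area_struct *vma)
{
        return vma->vm_flags & __VM_UFFD_FLAGS;
}

/* Fault-around must stay off in wp/minor modes so that each fault is
 * delivered to userspace individually. */
static bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
        return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

int main(void)
{
        struct vm_area_struct vma = { .vm_flags = VM_UFFD_WP };

        printf("wp: %d, armed: %d, fault-around disabled: %d\n",
               userfaultfd_wp(&vma), userfaultfd_armed(&vma),
               uffd_disable_fault_around(&vma));
        return 0;
}
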
D | khugepaged.h |
     16  unsigned long vm_flags);
     49  unsigned long vm_flags)   in khugepaged_enter_vma() argument

D | huge_mm.h |
     91  #define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \   argument
     92  (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
    213  (vma->vm_flags & VM_EXEC) &&   in file_thp_enabled()
    218  unsigned long vm_flags,
    239  unsigned long vm_flags,   in thp_vma_allowable_orders() argument
    247  if (vm_flags & VM_HUGEPAGE)   in thp_vma_allowable_orders()
    250  ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))   in thp_vma_allowable_orders()
    258  return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);   in thp_vma_allowable_orders()
    332  int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
    436  unsigned long vm_flags,   in thp_vma_allowable_orders() argument
    [all …]

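Lines 91-92 show how a single THP order is tested: BIT(order) turns it into a one-bit mask, thp_vma_allowable_orders() filters the whole mask, and `!!` turns the surviving bit into a boolean. A sketch of the wrapper with the multi-order predicate reduced to a stub over assumed supported orders (the real function also consults the VMA, sysfs policy, and tva_flags):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)      (1UL << (n))
#define VM_HUGEPAGE 0x20000000UL        /* mirrors the kernel's bit */

/* Stub: pretend only PMD order 9 and mTHP order 4 pass filtering. */
static unsigned long thp_vma_allowable_orders(unsigned long vm_flags,
                                              unsigned long orders)
{
        unsigned long supported = BIT(9) | BIT(4);   /* assumed */

        (void)vm_flags;
        return orders & supported;
}

/* The single-order wrapper from lines 91-92: one order becomes a
 * one-bit mask, tested through the multi-order predicate. */
static bool thp_vma_allowable_order(unsigned long vm_flags, int order)
{
        return !!thp_vma_allowable_orders(vm_flags, BIT(order));
}

int main(void)
{
        printf("order 9: %d, order 2: %d\n",
               thp_vma_allowable_order(VM_HUGEPAGE, 9),
               thp_vma_allowable_order(VM_HUGEPAGE, 2));
        return 0;
}
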
D | hugetlb_inline.h |
     11  return !!(vma->vm_flags & VM_HUGETLB);   in is_vm_hugetlb_page()

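is_vm_hugetlb_page() uses the common `!!` idiom: the masked word is some non-zero bit pattern when the flag is set, and double negation collapses it to exactly 0 or 1. A standalone illustration (VM_HUGETLB's value mirrors the kernel's):

#include <stdio.h>

#define VM_HUGETLB 0x00400000UL

int main(void)
{
        unsigned long vm_flags = VM_HUGETLB | 0x1UL;

        /* The raw mask result is 0x400000; !! normalizes it to 1. */
        printf("raw:  %#lx\n", vm_flags & VM_HUGETLB);
        printf("bool: %d\n", !!(vm_flags & VM_HUGETLB));
        return 0;
}
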
D | pgsize_migration.h |
     72  unsigned long vm_flags);
    119  unsigned long vm_flags)   in is_mergable_pad_vma() argument

D | ksm.h |
     20  unsigned long end, int advice, unsigned long *vm_flags);
    138  unsigned long end, int advice, unsigned long *vm_flags)   in ksm_madvise() argument

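ksm_madvise() takes vm_flags by pointer because the MADV_MERGEABLE / MADV_UNMERGEABLE handlers must toggle VM_MERGEABLE in the caller's flags word, which the caller then installs on the VMA. A heavily reduced sketch of that out-parameter pattern only; the real function also takes the VMA and range, enters/leaves KSM, and can fail:

#include <stdio.h>

#define MADV_MERGEABLE   12             /* uapi advice values */
#define MADV_UNMERGEABLE 13
#define VM_MERGEABLE     0x80000000UL   /* mirrors the kernel's bit */

/* The advice handler edits the flags word through the pointer; the
 * caller installs the updated value afterwards. */
static int ksm_madvise(int advice, unsigned long *vm_flags)
{
        switch (advice) {
        case MADV_MERGEABLE:
                *vm_flags |= VM_MERGEABLE;
                break;
        case MADV_UNMERGEABLE:
                *vm_flags &= ~VM_MERGEABLE;
                break;
        }
        return 0;
}

int main(void)
{
        unsigned long vm_flags = 0;

        ksm_madvise(MADV_MERGEABLE, &vm_flags);
        printf("after MADV_MERGEABLE:   %#lx\n", vm_flags);
        ksm_madvise(MADV_UNMERGEABLE, &vm_flags);
        printf("after MADV_UNMERGEABLE: %#lx\n", vm_flags);
        return 0;
}
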
D | shmem_fs.h |
    118  struct mm_struct *mm, unsigned long vm_flags);
    121  struct mm_struct *mm, unsigned long vm_flags)   in shmem_is_huge() argument

D | mm.h |
     878  vm_flags_init(vma, (vma->vm_flags | set) & ~clear);   in __vm_flags_mod()
     929  int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);   in vma_is_temporary_stack()
     934  if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==   in vma_is_temporary_stack()
     954  return vma->vm_flags & VM_ACCESS_FLAGS;   in vma_is_accessible()
    1066  #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
    1365  if (likely(vma->vm_flags & VM_WRITE))   in maybe_mkwrite()
    2578  if (vma->vm_flags & VM_SHARED)   in vma_wants_manual_pte_write_upgrade()
    2580  return !!(vma->vm_flags & VM_WRITE);   in vma_wants_manual_pte_write_upgrade()
    3289  unsigned long end, unsigned long vm_flags, struct anon_vma *,
    3346  unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
    [all …]

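__vm_flags_mod() at line 878 is the canonical set/clear combinator: OR in the bits to set, then AND out the bits to clear, in one expression. A userspace sketch (flag values mirror the kernel's low bits):

#include <stdio.h>

#define VM_READ  0x00000001UL
#define VM_WRITE 0x00000002UL
#define VM_EXEC  0x00000004UL

struct vm_area_struct { unsigned long vm_flags; };

/* One expression applies both masks, as line 878 does through
 * vm_flags_init(). */
static void __vm_flags_mod(struct vm_area_struct *vma,
                           unsigned long set, unsigned long clear)
{
        vma->vm_flags = (vma->vm_flags | set) & ~clear;
}

int main(void)
{
        struct vm_area_struct vma = { .vm_flags = VM_READ | VM_EXEC };

        __vm_flags_mod(&vma, VM_WRITE, VM_EXEC);   /* add write, drop exec */
        printf("vm_flags = %#lx\n", vma.vm_flags); /* 0x3: VM_READ|VM_WRITE */
        return 0;
}
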
D | rmap.h |
    635  struct mem_cgroup *memcg, unsigned long *vm_flags);
    751  unsigned long *vm_flags)   in folio_referenced() argument
    753  *vm_flags = 0;   in folio_referenced()

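Line 753 is the !CONFIG_MMU stub of folio_referenced(), and it still zeroes *vm_flags: callers accumulate the flags of every referencing VMA into that word, so the out-parameter must be defined even when no rmap walk happens. A minimal sketch of the stub's contract:

#include <stdio.h>

/* When the real rmap walk is compiled out, the stub must still leave
 * *vm_flags in a defined state, since callers read it back. */
static int folio_referenced(unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;   /* no references found */
}

int main(void)
{
        unsigned long flags;   /* deliberately uninitialized by the caller */
        int refs = folio_referenced(&flags);

        printf("refs=%d flags=%#lx\n", refs, flags);
        return 0;
}
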
D | dax.h |
     75  if (!(vma->vm_flags & VM_SYNC))   in daxdev_mapping_supported()
    118  return !(vma->vm_flags & VM_SYNC);   in daxdev_mapping_supported()

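Both daxdev_mapping_supported() variants pivot on VM_SYNC: a mapping created without MAP_SYNC is always supportable, and only a MAP_SYNC mapping needs the device to guarantee synchronous faults. A sketch of the CONFIG_FS_DAX shape, with struct dax_device reduced to one assumed field; the !CONFIG_FS_DAX stub at line 118 collapses to the first test alone:

#include <stdbool.h>
#include <stdio.h>

#define VM_SYNC 0x00800000UL            /* mirrors the kernel's bit */

struct vm_area_struct { unsigned long vm_flags; };
struct dax_device { bool synchronous; };   /* assumed stand-in struct */

/* Without MAP_SYNC any DAX device will do; with it, the device must
 * support synchronous faults. */
static bool daxdev_mapping_supported(struct vm_area_struct *vma,
                                     struct dax_device *dax_dev)
{
        if (!(vma->vm_flags & VM_SYNC))
                return true;
        return dax_dev->synchronous;
}

int main(void)
{
        struct vm_area_struct vma = { .vm_flags = VM_SYNC };
        struct dax_device dev = { .synchronous = false };

        printf("MAP_SYNC mapping supported: %d\n",
               daxdev_mapping_supported(&vma, &dev));
        return 0;
}
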
D | vmalloc.h |
    152  pgprot_t prot, unsigned long vm_flags, int node,

D | mm_types.h |
    553  vm_flags_t vm_flags; /* VMA vm_flags */   member
    619  const vm_flags_t vm_flags;   member

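The const-qualified member at line 619 reflects the kernel's write-protection trick: direct assignments to vma->vm_flags are a compile error, while the mm.h helpers modify the VMA through a writable alias of the same storage. A userspace sketch of that union technique (the kernel additionally marks the writable alias __private; the layout here is simplified):

#include <stdio.h>

typedef unsigned long vm_flags_t;

struct vm_area_struct {
        union {
                const vm_flags_t vm_flags;   /* readers use this name */
                vm_flags_t __vm_flags;       /* writers use the helpers */
        };
};

/* Only the helpers touch the writable alias, so a stray direct
 * assignment to vma->vm_flags anywhere else fails to compile. */
static void vm_flags_init(struct vm_area_struct *vma, vm_flags_t flags)
{
        vma->__vm_flags = flags;
}

int main(void)
{
        struct vm_area_struct vma;

        vm_flags_init(&vma, 0x3UL);
        /* vma.vm_flags = 0;  -- rejected: assignment to const member */
        printf("vm_flags = %#lx\n", vma.vm_flags);
        return 0;
}
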
D | hugetlb.h |
     162  vm_flags_t vm_flags);
    1266  return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;   in __vma_shareable_lock()

D | mm_inline.h |
    632  if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))   in vma_has_recency()

D | pgtable.h |
    1910  pgprot_t vm_get_page_prot(unsigned long vm_flags) \
    1912  return protection_map[vm_flags & \

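The generic vm_get_page_prot() at line 1910 is a pure table lookup: the low read/write/exec/shared bits of vm_flags index a 16-entry protection_map. A sketch with a stand-in pgprot type and placeholder table values; only the indexing scheme is the point:

#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

typedef unsigned long pgprot_t;         /* stand-in for the arch type */

/* 16 slots, one per combination of the four low bits; the values are
 * placeholders for the arch's real PTE permission encodings. */
static const pgprot_t protection_map[16] = {
        [VM_READ]            = 0x01,
        [VM_READ | VM_WRITE] = 0x03,
        [VM_READ | VM_EXEC]  = 0x05,
        /* remaining combinations left zero for brevity */
};

static pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        return protection_map[vm_flags &
                              (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
        /* Bits beyond the mask do not affect the lookup. */
        printf("prot = %#lx\n",
               vm_get_page_prot(VM_READ | VM_WRITE | 0x100UL));
        return 0;
}
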
/include/asm-generic/

D | tlb.h |
    425  .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |   in tlb_flush()
    451  tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);   in tlb_update_vma_flags()
    452  tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));   in tlb_update_vma_flags()

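tlb.h compresses the VMA down to the predicates the flush path actually needs: tlb_update_vma_flags() caches vma_exec and vma_pfn at unmap time, and tlb_flush() later rebuilds a minimal vm_flags word from them. A sketch of both halves; tlb_rebuild_vm_flags() is a hypothetical helper name for logic that appears inline in the kernel's tlb_flush():

#include <stdbool.h>
#include <stdio.h>

#define VM_EXEC     0x00000004UL        /* mirror the kernel's bits */
#define VM_PFNMAP   0x00000400UL
#define VM_MIXEDMAP 0x10000000UL

struct vm_area_struct { unsigned long vm_flags; };

struct mmu_gather {
        bool vma_exec;                  /* only two predicates survive */
        bool vma_pfn;
};

/* Capture phase (lines 451-452): squeeze the flags word down to the
 * bits the flush path consults. */
static void tlb_update_vma_flags(struct mmu_gather *tlb,
                                 struct vm_area_struct *vma)
{
        tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
        tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP));
}

/* Flush phase (line 425): rebuild a minimal flags word from the
 * cached predicate. */
static unsigned long tlb_rebuild_vm_flags(const struct mmu_gather *tlb)
{
        return tlb->vma_exec ? VM_EXEC : 0;
}

int main(void)
{
        struct vm_area_struct vma = { .vm_flags = VM_EXEC | VM_PFNMAP };
        struct mmu_gather tlb;

        tlb_update_vma_flags(&tlb, &vma);
        printf("rebuilt vm_flags: %#lx, pfn vma: %d\n",
               tlb_rebuild_vm_flags(&tlb), tlb.vma_pfn);
        return 0;
}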