
Searched refs:vm_flags (Results 1 – 18 of 18) sorted by relevance

/include/trace/events/
fs_dax.h
   18  __field(unsigned long, vm_flags)
   31  __entry->vm_flags = vmf->vma->vm_flags;
   43  __entry->vm_flags & VM_SHARED ? "shared" : "private",
   70  __field(unsigned long, vm_flags)
   79  __entry->vm_flags = vmf->vma->vm_flags;
   89  __entry->vm_flags & VM_SHARED ? "shared" : "private",
  111  __field(unsigned long, vm_flags)
  122  __entry->vm_flags = vmf->vma->vm_flags;
  134  __entry->vm_flags & VM_SHARED ? "shared" : "private",
  158  __field(unsigned long, vm_flags)
  [all …]
/include/linux/
mman.h
  191  static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags) in map_deny_write_exec() argument
  196  if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE)) in map_deny_write_exec()
  199  if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC)) in map_deny_write_exec()
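
The two tests above are the MDWE ("memory-deny-write-execute") policy. A minimal standalone sketch of the same logic (not kernel code; VM_WRITE/VM_EXEC carry their upstream bit values, and a hypothetical vma_stub stands in for struct vm_area_struct):

    #include <stdbool.h>

    #define VM_WRITE 0x00000002UL
    #define VM_EXEC  0x00000004UL

    struct vma_stub { unsigned long vm_flags; };

    /* Returns true when the requested flags must be denied: either
     * write and exec together, or adding exec to a non-exec mapping. */
    static bool deny_write_exec(const struct vma_stub *vma, unsigned long vm_flags)
    {
        if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
            return true;
        if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
            return true;
        return false;
    }
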
userfaultfd_k.h
  168  return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); in uffd_disable_huge_pmd_share()
  180  return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); in uffd_disable_fault_around()
  185  return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing()
  190  return vma->vm_flags & VM_UFFD_WP; in userfaultfd_wp()
  195  return vma->vm_flags & VM_UFFD_MINOR; in userfaultfd_minor()
  212  return vma->vm_flags & __VM_UFFD_FLAGS; in userfaultfd_armed()
  216  unsigned long vm_flags) in vma_can_userfault() argument
  218  if ((vm_flags & VM_UFFD_MINOR) && in vma_can_userfault()
  227  if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma)) in vma_can_userfault()
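
Every one of these helpers is a plain mask test against vma->vm_flags. A sketch of the pattern, with illustrative bit values (the real VM_UFFD_* assignments live in include/linux/mm.h and vary by architecture):

    #include <stdbool.h>

    /* Illustrative values only, not the kernel's actual bit layout. */
    #define VM_UFFD_MISSING (1UL << 9)
    #define VM_UFFD_WP      (1UL << 12)
    #define VM_UFFD_MINOR   (1UL << 13)
    #define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

    struct vma_stub { unsigned long vm_flags; };

    /* "Armed" means any userfaultfd mode is active on the VMA. */
    static bool uffd_armed(const struct vma_stub *vma)
    {
        return vma->vm_flags & __VM_UFFD_FLAGS;
    }
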
khugepaged.h
  16  unsigned long vm_flags);
  49  unsigned long vm_flags) in khugepaged_enter_vma() argument
huge_mm.h
   91  #define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \ argument
   92  (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
  213  (vma->vm_flags & VM_EXEC) && in file_thp_enabled()
  218  unsigned long vm_flags,
  239  unsigned long vm_flags, in thp_vma_allowable_orders() argument
  247  if (vm_flags & VM_HUGEPAGE) in thp_vma_allowable_orders()
  250  ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled())) in thp_vma_allowable_orders()
  258  return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders); in thp_vma_allowable_orders()
  332  int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
  436  unsigned long vm_flags, in thp_vma_allowable_orders() argument
  [all …]
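
thp_vma_allowable_orders() returns a bitmask in which bit n means "an order-n THP is permitted here", and the single-order macro at line 91 simply tests BIT(order) against that mask. A sketch of the calling convention, with a hypothetical allowable_orders() policy stub and an illustrative VM_HUGEPAGE value:

    #include <stdbool.h>

    #define BIT(n) (1UL << (n))
    #define VM_HUGEPAGE (1UL << 17)   /* illustrative value */

    /* Hypothetical stand-in: allow every requested order when the VMA
     * opted in via VM_HUGEPAGE (e.g. MADV_HUGEPAGE), otherwise none. */
    static unsigned long allowable_orders(unsigned long vm_flags, unsigned long orders)
    {
        return (vm_flags & VM_HUGEPAGE) ? orders : 0;
    }

    /* Mirrors the macro: pass BIT(order) as the mask, test for nonzero. */
    static bool allowable_order(unsigned long vm_flags, unsigned int order)
    {
        return allowable_orders(vm_flags, BIT(order)) != 0;
    }
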
hugetlb_inline.h
  11  return !!(vma->vm_flags & VM_HUGETLB); in is_vm_hugetlb_page()
pgsize_migration.h
   72  unsigned long vm_flags);
  119  unsigned long vm_flags) in is_mergable_pad_vma() argument
ksm.h
   20  unsigned long end, int advice, unsigned long *vm_flags);
  138  unsigned long end, int advice, unsigned long *vm_flags) in ksm_madvise() argument
shmem_fs.h
  118  struct mm_struct *mm, unsigned long vm_flags);
  121  struct mm_struct *mm, unsigned long vm_flags) in shmem_is_huge() argument
mm.h
   878  vm_flags_init(vma, (vma->vm_flags | set) & ~clear); in __vm_flags_mod()
   929  int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); in vma_is_temporary_stack()
   934  if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == in vma_is_temporary_stack()
   954  return vma->vm_flags & VM_ACCESS_FLAGS; in vma_is_accessible()
  1066  #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
  1365  if (likely(vma->vm_flags & VM_WRITE)) in maybe_mkwrite()
  2578  if (vma->vm_flags & VM_SHARED) in vma_wants_manual_pte_write_upgrade()
  2580  return !!(vma->vm_flags & VM_WRITE); in vma_wants_manual_pte_write_upgrade()
  3289  unsigned long end, unsigned long vm_flags, struct anon_vma *,
  3346  unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
  [all …]
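
The hit at line 878 is the flag-update helper: __vm_flags_mod() applies a set mask and a clear mask in one store, while the predicates around it (vma_is_accessible(), the VM_WRITE test in maybe_mkwrite(), and so on) are plain mask checks. A sketch of the modifier, under the same stubbed-struct assumptions as above:

    struct vma_stub { unsigned long vm_flags; };

    /* One expression applies both masks: OR in the set bits, then mask
     * off the clear bits. A bit present in both masks ends up cleared,
     * matching the order of operations in the kernel helper. */
    static void vm_flags_mod(struct vma_stub *vma, unsigned long set, unsigned long clear)
    {
        vma->vm_flags = (vma->vm_flags | set) & ~clear;
    }
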
rmap.h
  635  struct mem_cgroup *memcg, unsigned long *vm_flags);
  751  unsigned long *vm_flags) in folio_referenced() argument
  753  *vm_flags = 0; in folio_referenced()
dax.h
   75  if (!(vma->vm_flags & VM_SYNC)) in daxdev_mapping_supported()
  118  return !(vma->vm_flags & VM_SYNC); in daxdev_mapping_supported()
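
The two hits are the CONFIG_FS_DAX=y and =n definitions: with FS_DAX, a VM_SYNC (MAP_SYNC) mapping is supported only if the device can service synchronous faults; without it, any VM_SYNC mapping is refused. A condensed sketch of the =y branch, with an illustrative VM_SYNC value:

    #include <stdbool.h>

    #define VM_SYNC (1UL << 23)   /* illustrative value */

    struct vma_stub { unsigned long vm_flags; };

    static bool mapping_supported(const struct vma_stub *vma, bool dev_synchronous)
    {
        if (!(vma->vm_flags & VM_SYNC))
            return true;            /* no MAP_SYNC requested: nothing to check */
        return dev_synchronous;     /* MAP_SYNC needs synchronous-fault support */
    }
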
vmalloc.h
  152  pgprot_t prot, unsigned long vm_flags, int node,
mm_types.h
  553  vm_flags_t vm_flags; /* VMA vm_flags */ member
  619  const vm_flags_t vm_flags; member
hugetlb.h
   162  vm_flags_t vm_flags);
  1266  return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data; in __vma_shareable_lock()
mm_inline.h
  632  if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ)) in vma_has_recency()
pgtable.h
  1910  pgprot_t vm_get_page_prot(unsigned long vm_flags) \
  1912  return protection_map[vm_flags & \
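
vm_get_page_prot() is a pure table lookup: the low permission bits of vm_flags (read/write/exec/shared) index a 16-entry protection_map of arch-specific page protections. A sketch of the indexing scheme; the four flag values match upstream, but pgprot_stub_t and the zeroed table are placeholders:

    #define VM_READ   0x1UL
    #define VM_WRITE  0x2UL
    #define VM_EXEC   0x4UL
    #define VM_SHARED 0x8UL

    typedef unsigned long pgprot_stub_t;   /* stand-in for pgprot_t */

    /* 16 entries, one per combination of the four permission bits;
     * real entries would hold arch-specific PTE protection bits. */
    static const pgprot_stub_t protection_map[16] = { 0 };

    static pgprot_stub_t get_page_prot(unsigned long vm_flags)
    {
        return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }
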
/include/asm-generic/
tlb.h
  425  .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) | in tlb_flush()
  451  tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); in tlb_update_vma_flags()
  452  tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); in tlb_update_vma_flags()
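
Here the mmu_gather code caches two vm_flags-derived bits while tearing down a range (whether any VMA was executable, and whether any was a VM_PFNMAP/VM_MIXEDMAP mapping), then rebuilds a throwaway VMA with those bits for the final flush. A sketch of the caching step, with stubbed types and illustrative flag values:

    #define VM_EXEC     0x4UL
    #define VM_PFNMAP   (1UL << 10)   /* illustrative value */
    #define VM_MIXEDMAP (1UL << 28)   /* illustrative value */

    struct vma_stub { unsigned long vm_flags; };

    struct gather_stub {
        unsigned int vma_exec : 1;   /* saw an executable VMA */
        unsigned int vma_pfn  : 1;   /* saw a PFN/mixed mapping */
    };

    static void update_vma_flags(struct gather_stub *tlb, const struct vma_stub *vma)
    {
        tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
        tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP));
    }
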