| /include/trace/events/ |
| D | fs_dax.h |
      18  __field(unsigned long, vm_flags)
      31  __entry->vm_flags = vmf->vma->vm_flags;
      43  __entry->vm_flags & VM_SHARED ? "shared" : "private",
      70  __field(unsigned long, vm_flags)
      79  __entry->vm_flags = vmf->vma->vm_flags;
      89  __entry->vm_flags & VM_SHARED ? "shared" : "private",
     111  __field(unsigned long, vm_flags)
     122  __entry->vm_flags = vmf->vma->vm_flags;
     134  __entry->vm_flags & VM_SHARED ? "shared" : "private",
     158  __field(unsigned long, vm_flags)
     [all …]
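The fs_dax.h hits show the trace-event pattern for `vm_flags`: `__field()` reserves ring-buffer storage, the assign step copies the raw `vmf->vma->vm_flags`, and the print step decodes `VM_SHARED` lazily with a ternary. A minimal userspace sketch of that record-then-decode shape, assuming an illustrative value for `VM_SHARED` (the real constant is defined in the kernel's mm headers):

```c
#include <stdio.h>

/* Illustrative stand-in for the kernel's VM_SHARED bit. */
#define VM_SHARED 0x00000008UL

/* Mimics a trace ring-buffer entry: the fault path stores the raw flags. */
struct fault_trace_entry {
    unsigned long vm_flags;
};

int main(void)
{
    struct fault_trace_entry entry = { .vm_flags = VM_SHARED };

    /* Decode at print time, like the ternary in the trace events above. */
    printf("mapping is %s\n",
           entry.vm_flags & VM_SHARED ? "shared" : "private");
    return 0;
}
```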
|
| /include/linux/ |
| D | userfaultfd_k.h |
     168  return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); in uffd_disable_huge_pmd_share()
     180  return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); in uffd_disable_fault_around()
     185  return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing()
     190  return vma->vm_flags & VM_UFFD_WP; in userfaultfd_wp()
     195  return vma->vm_flags & VM_UFFD_MINOR; in userfaultfd_minor()
     212  return vma->vm_flags & __VM_UFFD_FLAGS; in userfaultfd_armed()
     216  unsigned long vm_flags, in vma_can_userfault() argument
     219  vm_flags &= __VM_UFFD_FLAGS; in vma_can_userfault()
     221  if (vm_flags & VM_DROPPABLE) in vma_can_userfault()
     224  if ((vm_flags & VM_UFFD_MINOR) && in vma_can_userfault()
     [all …]
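Each `userfaultfd_*()` helper above is a one-line mask test on the VMA's flags, and `vma_can_userfault()` additionally rejects incompatible combinations such as `VM_DROPPABLE`. A standalone sketch of the same predicate style, with made-up bit values and the checks slightly reordered for clarity; this is an illustration of the pattern, not the kernel's exact policy:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit values; the real definitions live in the kernel headers. */
#define VM_UFFD_MISSING 0x0200UL
#define VM_UFFD_WP      0x1000UL
#define VM_UFFD_MINOR   0x2000UL
#define VM_DROPPABLE    0x4000UL
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

struct vm_area_struct { unsigned long vm_flags; };

/* "Is any userfaultfd mode armed on this VMA?" -- a single mask test,
 * the same shape as userfaultfd_armed() above. */
static bool userfaultfd_armed(const struct vm_area_struct *vma)
{
    return vma->vm_flags & __VM_UFFD_FLAGS;
}

/* Simplified gate in the spirit of vma_can_userfault(): droppable
 * mappings are rejected outright, otherwise at least one uffd mode
 * must be requested. */
static bool vma_can_userfault(unsigned long vm_flags)
{
    if (vm_flags & VM_DROPPABLE)
        return false;
    return (vm_flags & __VM_UFFD_FLAGS) != 0;
}

int main(void)
{
    struct vm_area_struct vma = { .vm_flags = VM_UFFD_WP };
    printf("armed=%d can=%d\n", userfaultfd_armed(&vma),
           vma_can_userfault(vma.vm_flags));
    return 0;
}
```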
|
| D | huge_mm.h |
      96  #define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \ argument
      97  (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
     270  unsigned long vm_flags,
     291  unsigned long vm_flags, in thp_vma_allowable_orders() argument
     299  if (vm_flags & VM_HUGEPAGE) in thp_vma_allowable_orders()
     302  ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled())) in thp_vma_allowable_orders()
     310  return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders); in thp_vma_allowable_orders()
     326  unsigned long vm_flags) in vma_thp_disabled() argument
     333  return (vm_flags & VM_NOHUGEPAGE) || in vma_thp_disabled()
     347  vm_flags_t vm_flags);
     [all …]
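`thp_vma_allowable_order()` (singular) is just a wrapper over the plural `thp_vma_allowable_orders()`: it asks about one order by passing the one-bit mask `BIT(order)` and collapses the returned bitmap to a boolean with `!!`. A sketch of that bitmap-of-orders pattern, where the policy body is a stand-in (the kernel's actual checks involve sysfs settings and per-VMA state):

```c
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Stand-in policy: pretend only order-0 and order-9 (PMD-size) are allowed. */
static unsigned long allowable_orders(unsigned long requested)
{
    const unsigned long supported = BIT(0) | BIT(9);
    return requested & supported;
}

/* Single-order query: one-bit mask in, boolean out -- the same shape as
 * thp_vma_allowable_order() wrapping thp_vma_allowable_orders(). */
#define allowable_order(order) (!!allowable_orders(BIT(order)))

int main(void)
{
    printf("order 9 allowed: %d\n", allowable_order(9)); /* prints 1 */
    printf("order 4 allowed: %d\n", allowable_order(4)); /* prints 0 */
    return 0;
}
```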
|
| D | khugepaged.h |
      17  unsigned long vm_flags);
      50  unsigned long vm_flags) in khugepaged_enter_vma() argument
|
| D | pgsize_migration.h |
      36  unsigned long vm_flags);
      66  unsigned long vm_flags) in is_mergable_pad_vma() argument
|
| D | hugetlb_inline.h |
      11  return !!(vma->vm_flags & VM_HUGETLB); in is_vm_hugetlb_page()
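`is_vm_hugetlb_page()` shows the common `!!` idiom: a masked flag test yields either zero or the raw bit pattern, and the double negation normalizes the result to exactly 0 or 1 so it can be returned as a boolean. A two-line illustration (the `VM_HUGETLB` value here is illustrative):

```c
#include <stdio.h>

#define VM_HUGETLB 0x00400000UL /* illustrative value */

int main(void)
{
    unsigned long vm_flags = VM_HUGETLB | 0x1UL;
    printf("%lu\n",    vm_flags & VM_HUGETLB);  /* raw bit: prints 4194304 */
    printf("%d\n",  !!(vm_flags & VM_HUGETLB)); /* normalized: prints 1 */
    return 0;
}
```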
|
| D | pgsize_migration_inline.h |
      57  return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK); in vma_pad_fixup_flags()
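`vma_pad_fixup_flags()` is the classic read-modify-merge pattern: clear the padding bits in the incoming flags, then carry the VMA's existing padding bits over, so a flags update (e.g., from an `mprotect()`-style path) cannot disturb them. A hedged sketch with a made-up mask value:

```c
#include <stdio.h>

#define VM_PAD_MASK 0xF0000000UL /* illustrative; the real mask is kernel-defined */

/* Keep the VM_PAD_MASK bits from oldflags, take everything else from newflags. */
static unsigned long pad_fixup_flags(unsigned long oldflags, unsigned long newflags)
{
    return (newflags & ~VM_PAD_MASK) | (oldflags & VM_PAD_MASK);
}

int main(void)
{
    unsigned long old = 0x30000003UL; /* two pad bits plus some ordinary bits */
    unsigned long new = 0x00000007UL; /* update that would otherwise drop the pad bits */
    printf("%#lx\n", pad_fixup_flags(old, new)); /* prints 0x30000007 */
    return 0;
}
```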
|
| D | ksm.h |
      20  unsigned long end, int advice, unsigned long *vm_flags);
     133  unsigned long end, int advice, unsigned long *vm_flags) in ksm_madvise() argument
|
| D | mm.h |
    1010  vm_flags_init(vma, (vma->vm_flags | set) & ~clear); in __vm_flags_mod()
    1061  int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); in vma_is_temporary_stack()
    1066  if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == in vma_is_temporary_stack()
    1086  return vma->vm_flags & VM_ACCESS_FLAGS; in vma_is_accessible()
    1089  static inline bool is_shared_maywrite(vm_flags_t vm_flags) in is_shared_maywrite() argument
    1091  return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == in is_shared_maywrite()
    1097  return is_shared_maywrite(vma->vm_flags); in vma_is_shared_maywrite()
    1189  #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
    1450  if (likely(vma->vm_flags & VM_WRITE)) in maybe_mkwrite()
    3523  unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
    [all …]
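Two patterns recur in the mm.h hits: `__vm_flags_mod()` applies a set/clear pair in a single expression, `(flags | set) & ~clear`, and `is_shared_maywrite()` requires *both* `VM_SHARED` and `VM_MAYWRITE` by masking and comparing against the full mask rather than testing each bit separately. A combined userspace sketch with illustrative bit values (and none of the locking that the real `__vm_flags_mod()` path involves):

```c
#include <stdbool.h>
#include <stdio.h>

#define VM_READ     0x1UL
#define VM_SHARED   0x8UL
#define VM_MAYWRITE 0x20UL /* illustrative values throughout */

/* Locking aside, this is the core of __vm_flags_mod(): set, then clear. */
static unsigned long flags_mod(unsigned long flags,
                               unsigned long set, unsigned long clear)
{
    return (flags | set) & ~clear;
}

/* Both bits must be present: mask-and-compare, not two separate tests. */
static bool is_shared_maywrite(unsigned long vm_flags)
{
    return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
           (VM_SHARED | VM_MAYWRITE);
}

int main(void)
{
    unsigned long f = flags_mod(VM_READ, VM_SHARED | VM_MAYWRITE, VM_READ);
    printf("flags=%#lx shared_maywrite=%d\n", f, is_shared_maywrite(f));
    return 0;
}
```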
|
| D | dax.h |
      77  if (!(vma->vm_flags & VM_SYNC)) in daxdev_mapping_supported()
     122  return !(vma->vm_flags & VM_SYNC); in daxdev_mapping_supported()
|
| D | rmap.h |
     661  struct mem_cgroup *memcg, unsigned long *vm_flags);
     794  unsigned long *vm_flags) in folio_referenced() argument
     796  *vm_flags = 0; in folio_referenced()
|
| D | vmalloc.h |
     171  pgprot_t prot, unsigned long vm_flags, int node,
|
| D | mm_types.h |
     607  vm_flags_t vm_flags; /* VMA vm_flags */ member
     731  const vm_flags_t vm_flags; member
|
| D | hugetlb.h |
     153  vm_flags_t vm_flags);
    1296  return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data; in __vma_shareable_lock()
|
| D | mm_inline.h |
     623  if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ)) in vma_has_recency()
|
| D | pgtable.h |
    2027  pgprot_t vm_get_page_prot(unsigned long vm_flags) \
    2029  return protection_map[vm_flags & \
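The pgtable.h hit shows `vm_get_page_prot()` as a pure table lookup: the low `vm_flags` bits (read/write/exec/shared) index a precomputed `protection_map[]` of page protections. A sketch with a toy 16-entry map using strings in place of real `pgprot_t` values; the entries shown are illustrative, though the private-writable-maps-to-COW behavior matches how such maps are typically populated:

```c
#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL /* these four low bits index the map */

/* Toy protection map: 16 entries, one per VM_{READ,WRITE,EXEC,SHARED} combo. */
static const char *protection_map[16] = {
    [0]                              = "none",
    [VM_READ]                        = "r--",
    [VM_READ | VM_WRITE]             = "rw(cow)", /* private write => COW */
    [VM_READ | VM_WRITE | VM_SHARED] = "rw-",     /* shared write  => real write */
    /* ...remaining combinations elided... */
};

static const char *vm_get_page_prot(unsigned long vm_flags)
{
    return protection_map[vm_flags &
                          (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
    printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE));             /* rw(cow) */
    printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE | VM_SHARED)); /* rw- */
    return 0;
}
```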
|
| /include/linux/sched/ |
| D | mm.h |
     186  unsigned long flags, vm_flags_t vm_flags);
     202  vm_flags_t vm_flags);
     207  unsigned long flags, vm_flags_t vm_flags);
     211  unsigned long flags, vm_flags_t vm_flags);
|
| /include/asm-generic/ |
| D | tlb.h |
     425  .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) | in tlb_flush()
     451  tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); in tlb_update_vma_flags()
     452  tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); in tlb_update_vma_flags()
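The mmu_gather code does not keep the whole VMA around: `tlb_update_vma_flags()` boils `vm_flags` down to two cached booleans (`vma_exec`, `vma_pfn`), and `tlb_flush()` later reconstitutes a minimal flags value from them for the flush decision. A sketch of that compress-then-reconstitute pattern with simplified types and illustrative flag values:

```c
#include <stdbool.h>
#include <stdio.h>

#define VM_EXEC     0x4UL
#define VM_PFNMAP   0x400UL
#define VM_MIXEDMAP 0x800UL /* illustrative values */

struct mmu_gather {
    bool vma_exec; /* a gathered range was executable */
    bool vma_pfn;  /* a gathered range was a PFN/mixed mapping */
};

/* Compress: remember only the two properties the flush cares about. */
static void tlb_update_vma_flags(struct mmu_gather *tlb, unsigned long vm_flags)
{
    tlb->vma_exec = !!(vm_flags & VM_EXEC);
    tlb->vma_pfn  = !!(vm_flags & (VM_PFNMAP | VM_MIXEDMAP));
}

/* Reconstitute: rebuild just enough vm_flags for the flush decision. */
static void tlb_flush(const struct mmu_gather *tlb)
{
    unsigned long vm_flags = tlb->vma_exec ? VM_EXEC : 0;
    printf("flushing with vm_flags=%#lx pfn=%d\n", vm_flags, tlb->vma_pfn);
}

int main(void)
{
    struct mmu_gather tlb;
    tlb_update_vma_flags(&tlb, VM_EXEC | VM_PFNMAP);
    tlb_flush(&tlb);
    return 0;
}
```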
|
| /include/trace/hooks/ |
| D | mm.h |
     598  TP_PROTO(struct page *page, const vm_flags_t vm_flags),
     599  TP_ARGS(page, vm_flags));
|