/include/linux/
pgsize_migration.h
    21: extern void vma_set_pad_pages(struct vm_area_struct *vma,
    24: extern unsigned long vma_pad_pages(struct vm_area_struct *vma);
    26: extern void madvise_vma_pad_pages(struct vm_area_struct *vma,
    29: extern void show_map_pad_vma(struct vm_area_struct *vma,
    32: extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
    35: extern bool is_mergable_pad_vma(struct vm_area_struct *vma,
    38: extern unsigned long vma_data_pages(struct vm_area_struct *vma);
    40: static inline void vma_set_pad_pages(struct vm_area_struct *vma,  [argument, in vma_set_pad_pages()]
    45: static inline unsigned long vma_pad_pages(struct vm_area_struct *vma)  [argument, in vma_pad_pages()]
    50: static inline void madvise_vma_pad_pages(struct vm_area_struct *vma,  [argument, in madvise_vma_pad_pages()]
    [all …]
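These padding helpers come from the Android common kernel's 16 KB page-size migration work. A minimal sketch, assuming only what the prototypes above suggest (a padded VMA's tail holds vma_pad_pages() of padding and vma_data_pages() covers the rest):

```c
/*
 * Illustrative only, not from the tree: split a padded VMA's span into
 * its data part and its pad part. The data/pad relationship is an
 * assumption drawn from the helper names above.
 */
#include <linux/mm.h>
#include <linux/pgsize_migration.h>

static void report_pad_split(struct vm_area_struct *vma)
{
	unsigned long data_end = vma->vm_start +
				 (vma_data_pages(vma) << PAGE_SHIFT);

	pr_info("data: %lx-%lx pad: %lx-%lx\n",
		vma->vm_start, data_end, data_end, vma->vm_end);
}
```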
|
userfaultfd_k.h
    135: extern long uffd_wp_range(struct vm_area_struct *vma,
    149: static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,  [argument, in is_mergeable_vm_userfaultfd_ctx()]
    152:     return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;  [in is_mergeable_vm_userfaultfd_ctx()]
    166: static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)  [argument, in uffd_disable_huge_pmd_share()]
    168:     return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);  [in uffd_disable_huge_pmd_share()]
    178: static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)  [argument, in uffd_disable_fault_around()]
    180:     return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);  [in uffd_disable_fault_around()]
    183: static inline bool userfaultfd_missing(struct vm_area_struct *vma)  [argument, in userfaultfd_missing()]
    185:     return vma->vm_flags & VM_UFFD_MISSING;  [in userfaultfd_missing()]
    188: static inline bool userfaultfd_wp(struct vm_area_struct *vma)  [argument, in userfaultfd_wp()]
    [all …]
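The inline predicates above are the guards fault handlers use before punting a fault to userspace. A sketch of the canonical pattern (compare do_anonymous_page() in mm/memory.c); maybe_defer_to_uffd() is an illustrative name:

```c
/* Sketch: hand a missing-page fault to the userfaultfd monitor. */
#include <linux/mm.h>
#include <linux/userfaultfd_k.h>

static vm_fault_t maybe_defer_to_uffd(struct vm_fault *vmf)
{
	if (userfaultfd_missing(vmf->vma))
		return handle_userfault(vmf, VM_UFFD_MISSING);
	return VM_FAULT_FALLBACK;
}
```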
|
huge_mm.h
    18:     struct vm_area_struct *vma);
    29: bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
    31: int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
    33: int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
    35: bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
    37: int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
    96: #define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \  [argument]
    97:     (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
    208: static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,  [argument, in thp_vma_suitable_order()]
    215:     if (!vma_is_anonymous(vma)) {  [in thp_vma_suitable_order()]
    [all …]
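thp_vma_allowable_order() at line 96 is the per-order gate for THP. A hedged sketch of a fault-path query; TVA_IN_PF and TVA_ENFORCE_SYSFS are assumed to be among the tva_flags this kernel defines:

```c
/* Sketch: may this fault install a PMD-sized THP here? */
#include <linux/mm.h>
#include <linux/huge_mm.h>

static bool want_pmd_thp(struct vm_area_struct *vma)
{
	/* TVA_* flag names assumed from mainline, not verified here */
	return thp_vma_allowable_order(vma, vma->vm_flags,
				       TVA_IN_PF | TVA_ENFORCE_SYSFS,
				       PMD_ORDER);
}
```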
|
pgsize_migration_inline.h
    47: #define VMA_PAD_START(vma) (vma->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))  [argument]
    53: static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,  [argument, in vma_pad_fixup_flags()]
    57:     return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);  [in vma_pad_fixup_flags()]
    62: static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,  [argument, in vma_pad_fixup_flags()]
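Line 57 shows the whole contract: the pad-tracking bits in VM_PAD_MASK belong to the VMA and must survive any flag recomputation. A small sketch of an mprotect-style caller:

```c
/* Sketch: recompute a VMA's flags without losing its VM_PAD_MASK bits. */
#include <linux/mm.h>
#include <linux/pgsize_migration_inline.h>

static unsigned long recompute_flags(struct vm_area_struct *vma,
				     unsigned long newflags)
{
	/* take everything from newflags except the pad-tracking bits */
	return vma_pad_fixup_flags(vma, newflags);
}
```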
|
hugetlb.h
    107: struct vm_area_struct *vma;  [member]
    122: void hugetlb_dup_vma_private(struct vm_area_struct *vma);
    123: void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
    124: int move_hugetlb_page_tables(struct vm_area_struct *vma,
    134:     struct vm_area_struct *vma,
    141: vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
    152:     struct vm_area_struct *vma,
    166: pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
    169:     struct vm_area_struct *vma,
    196: pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
    [all …]
|
mm.h
    551: struct vm_area_struct *vma;	/* Target VMA */  [member]
    615: int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
    634: int (*access)(struct vm_area_struct *vma, unsigned long addr,
    640: const char *(*name)(struct vm_area_struct *vma);
    650: int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
    662: struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
    670: struct page *(*find_special_page)(struct vm_area_struct *vma,
    680: static inline void vma_numab_state_init(struct vm_area_struct *vma)  [argument, in vma_numab_state_init()]
    682:     vma->numab_state = NULL;  [in vma_numab_state_init()]
    684: static inline void vma_numab_state_free(struct vm_area_struct *vma)  [argument, in vma_numab_state_free()]
    [all …]
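Lines 615-670 are hooks of struct vm_operations_struct. An illustrative driver-style sketch wiring up the name hook (the string shows up in /proc/&lt;pid&gt;/maps); the demo_* names are made up:

```c
/* Sketch: give a driver mapping a recognizable name in /proc maps. */
#include <linux/mm.h>

static const char *demo_vma_name(struct vm_area_struct *vma)
{
	return "[demo_region]";
}

static const struct vm_operations_struct demo_vm_ops = {
	.name = demo_vma_name,
};

/* in the driver's .mmap handler: vma->vm_ops = &demo_vm_ops; */
```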
|
hugetlb_inline.h
    9:  static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)  [argument, in is_vm_hugetlb_page()]
    11:     return !!(vma->vm_flags & VM_HUGETLB);  [in is_vm_hugetlb_page()]
    16: static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)  [argument, in is_vm_hugetlb_page()]
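The typical use of is_vm_hugetlb_page() is as a guard so generic page-table code can skip or special-case hugetlb mappings; a trivial sketch:

```c
/* Sketch: generic PTE-granularity walkers must not touch hugetlb VMAs. */
#include <linux/mm.h>
#include <linux/hugetlb_inline.h>

static bool can_walk_generically(struct vm_area_struct *vma)
{
	return !is_vm_hugetlb_page(vma);
}
```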
|
pgtable.h
    285: extern int ptep_set_access_flags(struct vm_area_struct *vma,
    292: extern int pmdp_set_access_flags(struct vm_area_struct *vma,
    295: extern int pudp_set_access_flags(struct vm_area_struct *vma,
    299: static inline int pmdp_set_access_flags(struct vm_area_struct *vma,  [argument, in pmdp_set_access_flags()]
    306: static inline int pudp_set_access_flags(struct vm_area_struct *vma,  [argument, in pudp_set_access_flags()]
    352: static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,  [argument, in ptep_test_and_clear_young()]
    361:     set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));  [in ptep_test_and_clear_young()]
    368: static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,  [argument, in pmdp_test_and_clear_young()]
    377:     set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));  [in pmdp_test_and_clear_young()]
    381: static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,  [argument, in pmdp_test_and_clear_young()]
    [all …]
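ptep_test_and_clear_young() (line 352) is the building block for page aging. A sketch of a reclaim-style "was this address recently used?" probe, assuming the caller holds mmap_lock for read and passes a valid pmd:

```c
/* Sketch: test and clear the accessed bit for one mapped address. */
#include <linux/mm.h>
#include <linux/pgtable.h>

static bool addr_was_young(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr)
{
	spinlock_t *ptl;
	bool young = false;
	pte_t *pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	if (!pte)
		return false;	/* table vanished under us */
	if (pte_present(ptep_get(pte)))
		young = ptep_test_and_clear_young(vma, addr, pte);
	pte_unmap_unlock(pte, ptl);
	return young;
}
```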
|
cacheflush.h
    21: static inline void flush_icache_pages(struct vm_area_struct *vma,  [argument, in flush_icache_pages()]
    27: #define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)  [argument]
|
rmap.h
    83:  struct vm_area_struct *vma;  [member]
    159: static inline int anon_vma_prepare(struct vm_area_struct *vma)  [argument, in anon_vma_prepare()]
    161:     if (likely(vma->anon_vma))  [in anon_vma_prepare()]
    164:     return __anon_vma_prepare(vma);  [in anon_vma_prepare()]
    167: static inline void anon_vma_merge(struct vm_area_struct *vma,  [argument, in anon_vma_merge()]
    170:     VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);  [in anon_vma_merge()]
    242: #define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \  [argument]
    243:     folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
    250: #define folio_add_file_rmap_pte(folio, page, vma) \  [argument]
    251:     folio_add_file_rmap_ptes(folio, page, 1, vma)
    [all …]
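anon_vma_prepare() (line 159) must run before a VMA's first anonymous page is mapped; the fast path at line 161 makes repeat calls essentially free. The canonical caller shape (compare do_anonymous_page()):

```c
/* Sketch: ensure the VMA has an anon_vma before installing an anon page. */
#include <linux/mm.h>
#include <linux/rmap.h>

static vm_fault_t prepare_for_anon_page(struct vm_area_struct *vma)
{
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	return 0;
}
```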
|
ksm.h
    19:  int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
    22:  void ksm_add_vma(struct vm_area_struct *vma);
    90:      struct vm_area_struct *vma, unsigned long addr);
    100: static inline void ksm_add_vma(struct vm_area_struct *vma)  [argument, in ksm_add_vma()]
    132: static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,  [argument, in ksm_madvise()]
    139:     struct vm_area_struct *vma, unsigned long addr)  [argument, in ksm_might_need_to_copy()]
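ksm_madvise() is the backend of madvise(MADV_MERGEABLE)/MADV_UNMERGEABLE; it edits a flags word passed by pointer rather than the VMA directly. A sketch of the advice-handler shape; mark_mergeable() is an illustrative name:

```c
/* Sketch: let KSM set VM_MERGEABLE in a caller-owned flags word. */
#include <linux/mm.h>
#include <linux/ksm.h>
#include <linux/mman.h>

static int mark_mergeable(struct vm_area_struct *vma)
{
	unsigned long new_flags = vma->vm_flags;
	int err = ksm_madvise(vma, vma->vm_start, vma->vm_end,
			      MADV_MERGEABLE, &new_flags);

	/* on success the caller applies new_flags back to the VMA */
	return err;
}
```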
|
secretmem.h
    14: bool vma_is_secretmem(struct vm_area_struct *vma);
    19: static inline bool vma_is_secretmem(struct vm_area_struct *vma)  [argument, in vma_is_secretmem()]
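vma_is_secretmem() exists so pinning paths can refuse secretmem mappings up front; a minimal GUP-style guard:

```c
/* Sketch: secretmem pages must not be grabbed by other contexts. */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/secretmem.h>

static int check_vma_grabbable(struct vm_area_struct *vma)
{
	if (vma_is_secretmem(vma))
		return -EFAULT;
	return 0;
}
```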
|
mempolicy.h
    124:     struct vm_area_struct *vma, struct mempolicy *mpol);
    130: struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
    132: struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
    134: bool vma_policy_mof(struct vm_area_struct *vma);
    141: extern int huge_node(struct vm_area_struct *vma,
    168: extern bool vma_migratable(struct vm_area_struct *vma);
    224: static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma,  [argument, in get_vma_policy()]
    254: static inline int huge_node(struct vm_area_struct *vma,  [argument, in huge_node()]
|
buildid.h
    10: int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
    11: int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
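A sketch following these two prototypes: pull the GNU build ID out of the ELF file backing a VMA, as perf and BPF stack collectors do. BUILD_ID_SIZE_MAX is the bound defined in the same header; the 0-on-success convention is assumed:

```c
/* Sketch: log the build ID of the object mapped by this VMA. */
#include <linux/buildid.h>
#include <linux/printk.h>

static void log_vma_build_id(struct vm_area_struct *vma)
{
	unsigned char id[BUILD_ID_SIZE_MAX];
	__u32 sz;

	if (!build_id_parse(vma, id, &sz))
		pr_info("build id: %*phN\n", (int)sz, id);
}
```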
|
pagewalk.h
    123: struct vm_area_struct *vma;  [member]
    136: int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
    139: int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
    188: struct vm_area_struct *vma;  [member]
    193:     struct vm_area_struct *vma, unsigned long addr,
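walk_page_vma() (line 139) drives the callbacks in struct mm_walk_ops over one VMA. A hedged sketch that counts present PTEs; the walker core takes the page-table locks, and the caller is assumed to hold mmap_lock:

```c
/* Sketch: count present PTEs in a single VMA via the pagewalk API. */
#include <linux/mm.h>
#include <linux/pagewalk.h>

static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(ptep_get(pte)))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops count_ops = {
	.pte_entry = count_present_pte,
};

static unsigned long count_vma_present(struct vm_area_struct *vma)
{
	unsigned long count = 0;

	walk_page_vma(vma, &count_ops, &count);
	return count;
}
```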
|
pkeys.h
    14: #define arch_override_mprotect_pkey(vma, prot, pkey) (0)  [argument]
    18: static inline int vma_pkey(struct vm_area_struct *vma)  [argument, in vma_pkey()]
|
migrate.h
    146:     struct vm_area_struct *vma, int node);
    147: int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
    151:     struct vm_area_struct *vma, int node)  [argument, in migrate_misplaced_folio_prepare()]
    156:     struct vm_area_struct *vma, int node)  [argument, in migrate_misplaced_folio()]
    193: struct vm_area_struct *vma;  [member]
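A loose sketch of the NUMA-hinting repair path, assuming (as the split prototypes above suggest) that migrate_misplaced_folio_prepare() isolates the folio and returns 0 on success before migrate_misplaced_folio() moves it; not checked against mm/memory.c:

```c
/* Sketch: move a NUMA-misplaced folio toward the faulting node. */
#include <linux/migrate.h>

static void try_fix_placement(struct folio *folio,
			      struct vm_area_struct *vma, int target_nid)
{
	if (migrate_misplaced_folio_prepare(folio, vma, target_nid))
		return;		/* not isolated; someone else owns it */
	migrate_misplaced_folio(folio, vma, target_nid);
}
```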
|
khugepaged.h
    16: extern void khugepaged_enter_vma(struct vm_area_struct *vma,
    49: static inline void khugepaged_enter_vma(struct vm_area_struct *vma,  [argument, in khugepaged_enter_vma()]
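khugepaged_enter_vma() is called when a mapping is created or its flags change, so khugepaged can consider collapsing it into huge pages later; the usual call-site shape:

```c
/* Sketch: register a freshly set-up mapping with khugepaged. */
#include <linux/mm.h>
#include <linux/khugepaged.h>

static void register_with_khugepaged(struct vm_area_struct *vma)
{
	khugepaged_enter_vma(vma, vma->vm_flags);
}
```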
|
mm_inline.h
    434: static inline void free_anon_vma_name(struct vm_area_struct *vma)  [argument, in free_anon_vma_name()]
    440:     anon_vma_name_put(vma->anon_name);  [in free_anon_vma_name()]
    458: static inline void free_anon_vma_name(struct vm_area_struct *vma) {}  [argument, in free_anon_vma_name()]
    586: pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,  [argument, in pte_install_uffd_wp_if_needed()]
    601:     if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))  [in pte_install_uffd_wp_if_needed()]
    616:     set_pte_at(vma->vm_mm, addr, pte,  [in pte_install_uffd_wp_if_needed()]
    621: static inline bool vma_has_recency(struct vm_area_struct *vma)  [argument, in vma_has_recency()]
    623:     if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))  [in vma_has_recency()]
    626:     if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))  [in vma_has_recency()]
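vma_has_recency() (line 621) says whether accesses through this VMA should feed LRU aging; MADV_SEQUENTIAL/MADV_RANDOM mappings and FMODE_NOREUSE files opt out, as lines 623-626 show. A sketch of the consumer side:

```c
/* Sketch: only promote folios on the LRU for VMAs that want recency. */
#include <linux/mm_inline.h>
#include <linux/swap.h>

static void note_folio_access(struct vm_area_struct *vma,
			      struct folio *folio)
{
	if (vma_has_recency(vma))
		folio_mark_accessed(folio);
}
```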
|
mmdebug.h
    14: void dump_vma(const struct vm_area_struct *vma);
    34: #define VM_BUG_ON_VMA(cond, vma) \  [argument]
    37:         dump_vma(vma); \
    99: #define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)  [argument]
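VM_BUG_ON_VMA() dumps the offending VMA under CONFIG_DEBUG_VM (line 34) and degrades to a plain VM_BUG_ON() otherwise (line 99). Usage sketch:

```c
/* Sketch: assert a VMA invariant, dumping the VMA on debug kernels. */
#include <linux/mm.h>
#include <linux/mmdebug.h>

static void check_vma_sane(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
}
```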
|
/include/xen/
xen-ops.h
    47:  int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
    51:  static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,  [argument, in xen_remap_pfn()]
    64:  int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
    70:  int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
    77:  static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,  [argument, in xen_xlate_remap_gfn_array()]
    87:  static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,  [argument, in xen_xlate_unmap_gfn_range()]
    94:  int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
    114: static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,  [argument, in xen_remap_domain_gfn_array()]
    122:     return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,  [in xen_remap_domain_gfn_array()]
    130:     return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,  [in xen_remap_domain_gfn_array()]
    [all …]
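A very rough sketch of a privcmd-style helper mapping foreign guest frames into a user VMA. The argument order follows the forwarding calls at lines 122/130; the pages argument and error-array handling are assumptions, not checked against drivers/xen/privcmd.c:

```c
/* Sketch: map nr foreign GFNs from domid at the start of this VMA. */
#include <xen/xen-ops.h>

static int map_foreign_gfns(struct vm_area_struct *vma, xen_pfn_t *gfns,
			    int nr, int *errs, unsigned int domid)
{
	return xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
					  errs, vma->vm_page_prot, domid,
					  NULL /* pages: assumed PV-only */);
}
```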
|
/include/asm-generic/
cacheflush.h
    35:  static inline void flush_cache_range(struct vm_area_struct *vma,  [argument, in flush_cache_range()]
    43:  static inline void flush_cache_page(struct vm_area_struct *vma,  [argument, in flush_cache_page()]
    81:  static inline void flush_icache_user_page(struct vm_area_struct *vma,  [argument, in flush_icache_user_page()]
    107: #define copy_to_user_page(vma, page, vaddr, dst, src, len) \  [argument]
    111:     flush_icache_user_page(vma, page, vaddr, len); \
    117: #define copy_from_user_page(vma, page, vaddr, dst, src, len) \  [argument]
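copy_to_user_page() (line 107) exists for ptrace-style writes into another process's text: copy the bytes, then make the icache coherent for that user mapping. A sketch; poke_text() is an illustrative name and kaddr is assumed to be the kernel mapping of page at user address uaddr:

```c
/* Sketch: plant instructions in another process's page, ptrace-style. */
#include <linux/mm.h>
#include <asm/cacheflush.h>

static void poke_text(struct vm_area_struct *vma, struct page *page,
		      unsigned long uaddr, void *kaddr,
		      const void *insn, int len)
{
	copy_to_user_page(vma, page, uaddr, kaddr, insn, len);
	set_page_dirty_lock(page);
}
```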
|
tlb.h
    280: extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
    294: static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }  [argument, in tlb_flush_rmaps()]
    423:     struct vm_area_struct vma = {  [local, in tlb_flush()]
    429:     flush_tlb_range(&vma, tlb->start, tlb->end);  [in tlb_flush()]
    437: tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)  [argument, in tlb_update_vma_flags()]
    450:     tlb->vma_huge = is_vm_hugetlb_page(vma);  [in tlb_update_vma_flags()]
    451:     tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);  [in tlb_update_vma_flags()]
    452:     tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));  [in tlb_update_vma_flags()]
    539: static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)  [argument, in tlb_start_vma()]
    544:     tlb_update_vma_flags(tlb, vma);  [in tlb_start_vma()]
    [all …]
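The tlb_start_vma()/tlb_end_vma() pair shown above brackets per-VMA teardown inside an mmu_gather. The overall shape, sketched with the actual zap step elided:

```c
/* Sketch: the mmu_gather bracket around unmapping one VMA's range. */
#include <linux/mm.h>
#include <asm/tlb.h>

static void teardown_one_vma(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm);
	tlb_start_vma(&tlb, vma);
	/* ... zap the page range here, feeding freed pages into &tlb ... */
	tlb_end_vma(&tlb, vma);
	tlb_finish_mmu(&tlb);
}
```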
|
hugetlb.h
    94:  static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,  [argument, in huge_ptep_clear_flush()]
    97:      return ptep_clear_flush(vma, addr, ptep);  [in huge_ptep_clear_flush()]
    138: static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,  [argument, in huge_ptep_set_access_flags()]
    142:     return ptep_set_access_flags(vma, addr, ptep, pte, dirty);  [in huge_ptep_set_access_flags()]
|
/include/trace/events/
mmap.h
    72:     TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
    74:     TP_ARGS(mt, vma),
    78:         __field(struct vm_area_struct *, vma)
    85:         __entry->vma = vma;
    86:         __entry->vm_start = vma->vm_start;
    87:         __entry->vm_end = vma->vm_end - 1;
    91:         __entry->mt, __entry->vma,
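The truncated TRACE_EVENT above takes a (maple_tree, vma) pair; in mainline the event with this prototype is vma_store, but the name is cut off in this listing, so treat it as an assumption. Firing it from mm code looks like:

```c
/* Sketch: emit the tracepoint; trace_vma_store is the assumed name. */
#include <trace/events/mmap.h>

static void record_vma_store(struct maple_tree *mt,
			     struct vm_area_struct *vma)
{
	trace_vma_store(mt, vma);
}
```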
|