/include/linux/
pgsize_migration.h
    21  extern void vma_set_pad_pages(struct vm_area_struct *vma,
    24  extern unsigned long vma_pad_pages(struct vm_area_struct *vma);
    26  extern void madvise_vma_pad_pages(struct vm_area_struct *vma,
    29  extern void show_map_pad_vma(struct vm_area_struct *vma,
    32  extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
    35  extern bool is_mergable_pad_vma(struct vm_area_struct *vma,
    38  extern unsigned long vma_data_pages(struct vm_area_struct *vma);
    40  static inline void vma_set_pad_pages(struct vm_area_struct *vma,  in vma_set_pad_pages()
    45  static inline unsigned long vma_pad_pages(struct vm_area_struct *vma)  in vma_pad_pages()
    50  static inline void madvise_vma_pad_pages(struct vm_area_struct *vma,  in madvise_vma_pad_pages()
    [all …]
|
userfaultfd_k.h
    119  struct vm_area_struct *dst_vma,
    135  extern long uffd_wp_range(struct vm_area_struct *vma,
    144  struct vm_area_struct *dst_vma,
    145  struct vm_area_struct *src_vma,
    149  static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,  in is_mergeable_vm_userfaultfd_ctx()
    166  static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)  in uffd_disable_huge_pmd_share()
    178  static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)  in uffd_disable_fault_around()
    183  static inline bool userfaultfd_missing(struct vm_area_struct *vma)  in userfaultfd_missing()
    188  static inline bool userfaultfd_wp(struct vm_area_struct *vma)  in userfaultfd_wp()
    193  static inline bool userfaultfd_minor(struct vm_area_struct *vma)  in userfaultfd_minor()
    [all …]
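
Predicates such as userfaultfd_missing() above are what fault handlers use to decide whether to hand a fault to userspace. A minimal sketch of that pattern, assuming a hypothetical ->fault callback named my_fault(); handle_userfault() is declared in the same header and VM_UFFD_MISSING in linux/mm.h:

```c
#include <linux/mm.h>
#include <linux/userfaultfd_k.h>

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	/* VMA registered for missing-page events: let userspace resolve it. */
	if (userfaultfd_missing(vma))
		return handle_userfault(vmf, VM_UFFD_MISSING);

	/* ... otherwise allocate or look up the page as usual ... */
	return VM_FAULT_SIGBUS;
}
```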
|
huge_mm.h
    14   struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
    18   struct vm_area_struct *vma);
    29   bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
    31   int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
    33   int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
    35   bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
    37   int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
    208  static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,  in thp_vma_suitable_order()
    233  static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,  in thp_vma_suitable_orders()
    256  static inline bool file_thp_enabled(struct vm_area_struct *vma)  in file_thp_enabled()
    [all …]
|
hugetlb.h
    107  struct vm_area_struct *vma;
    122  void hugetlb_dup_vma_private(struct vm_area_struct *vma);
    123  void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
    124  int move_hugetlb_page_tables(struct vm_area_struct *vma,
    125  struct vm_area_struct *new_vma,
    129  struct vm_area_struct *, struct vm_area_struct *);
    130  void unmap_hugepage_range(struct vm_area_struct *,
    134  struct vm_area_struct *vma,
    141  vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
    145  struct vm_area_struct *dst_vma,
    [all …]
|
rmap.h
    83   struct vm_area_struct *vma;
    154  int __anon_vma_prepare(struct vm_area_struct *);
    155  void unlink_anon_vmas(struct vm_area_struct *);
    156  int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
    157  int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
    159  static inline int anon_vma_prepare(struct vm_area_struct *vma)  in anon_vma_prepare()
    167  static inline void anon_vma_merge(struct vm_area_struct *vma,  in anon_vma_merge()
    168  struct vm_area_struct *next)  in anon_vma_merge()
    239  void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
    241  struct vm_area_struct *, unsigned long address, rmap_t flags);
    [all …]
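
anon_vma_prepare() above is the usual gatekeeper before anonymous pages are added to a VMA's reverse map. A rough sketch of the pattern, with install_anon_page() as a hypothetical stand-in for the rest of a fault path:

```c
#include <linux/mm.h>
#include <linux/rmap.h>

static vm_fault_t install_anon_page(struct vm_area_struct *vma)
{
	/* Allocates vma->anon_vma on first use; fails only on -ENOMEM. */
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;

	/* ... allocate a folio, map it, and add it to the anon rmap ... */
	return 0;
}
```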
|
mm.h
    260  struct vm_area_struct *vm_area_alloc(struct mm_struct *);
    261  struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
    262  void vm_area_free(struct vm_area_struct *);
    551  struct vm_area_struct *vma; /* Target VMA */
    601  void (*open)(struct vm_area_struct * area);
    606  void (*close)(struct vm_area_struct * area);
    608  int (*may_split)(struct vm_area_struct *area, unsigned long addr);
    609  int (*mremap)(struct vm_area_struct *area);
    615  int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
    621  unsigned long (*pagesize)(struct vm_area_struct * area);
    [all …]
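
The open/close/may_split callbacks above belong to struct vm_operations_struct. A minimal sketch of how a driver might fill them in and attach them from its mmap handler; my_vm_open(), my_vm_close() and my_driver_mmap() are hypothetical names:

```c
#include <linux/fs.h>
#include <linux/mm.h>

static void my_vm_open(struct vm_area_struct *area)
{
	/* Called when the VMA is duplicated, e.g. across fork(). */
}

static void my_vm_close(struct vm_area_struct *area)
{
	/* Called when the VMA is torn down. */
}

static const struct vm_operations_struct my_vm_ops = {
	.open  = my_vm_open,
	.close = my_vm_close,
};

static int my_driver_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &my_vm_ops;
	return 0;
}
```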
|
mempolicy.h
    121  int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
    124  struct vm_area_struct *vma, struct mempolicy *mpol);
    130  struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
    132  struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
    134  bool vma_policy_mof(struct vm_area_struct *vma);
    141  extern int huge_node(struct vm_area_struct *vma,
    168  extern bool vma_migratable(struct vm_area_struct *vma);
    224  static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma,  in get_vma_policy()
    232  vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)  in vma_dup_policy()
    254  static inline int huge_node(struct vm_area_struct *vma,  in huge_node()
|
buildid.h
    9   struct vm_area_struct;
    10  int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
    11  int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
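
build_id_parse() above reads the ELF build ID out of the file backing a VMA (used, for example, when symbolizing stack traces). A small sketch, assuming a file-backed VMA and a hypothetical report_vma_build_id() caller; BUILD_ID_SIZE_MAX comes from the same header:

```c
#include <linux/buildid.h>
#include <linux/printk.h>

static void report_vma_build_id(struct vm_area_struct *vma)
{
	unsigned char id[BUILD_ID_SIZE_MAX];
	__u32 size = 0;

	if (build_id_parse(vma, id, &size) == 0)
		pr_info("build id is %u bytes long\n", size);
}
```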
|
ksm.h
    19   int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
    22   void ksm_add_vma(struct vm_area_struct *vma);
    90   struct vm_area_struct *vma, unsigned long addr);
    100  static inline void ksm_add_vma(struct vm_area_struct *vma)  in ksm_add_vma()
    132  static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,  in ksm_madvise()
    139  struct vm_area_struct *vma, unsigned long addr)  in ksm_might_need_to_copy()
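
ksm_madvise() above is the entry point that MADV_MERGEABLE/MADV_UNMERGEABLE funnels into. A hedged sketch of the calling convention, with mark_vma_mergeable() as a hypothetical helper that assumes mmap_lock is held for writing:

```c
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mman.h>

static int mark_vma_mergeable(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	int err;

	/* May set VM_MERGEABLE in vm_flags and register the range with KSM. */
	err = ksm_madvise(vma, vma->vm_start, vma->vm_end,
			  MADV_MERGEABLE, &vm_flags);

	/* A real caller must store any updated flags back into the VMA. */
	return err;
}
```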
|
pgtable.h
    285  extern int ptep_set_access_flags(struct vm_area_struct *vma,
    292  extern int pmdp_set_access_flags(struct vm_area_struct *vma,
    295  extern int pudp_set_access_flags(struct vm_area_struct *vma,
    299  static inline int pmdp_set_access_flags(struct vm_area_struct *vma,  in pmdp_set_access_flags()
    306  static inline int pudp_set_access_flags(struct vm_area_struct *vma,  in pudp_set_access_flags()
    352  static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,  in ptep_test_and_clear_young()
    368  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,  in pmdp_test_and_clear_young()
    381  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,  in pmdp_test_and_clear_young()
    392  int ptep_clear_flush_young(struct vm_area_struct *vma,
    398  extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
    [all …]
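
ptep_set_access_flags() above is how fault handling pushes new young/dirty bits into an existing PTE, flushing only if something actually changed. A sketch under the assumption that the PTE lock is already held; mark_pte_accessed() is a hypothetical helper:

```c
#include <linux/mm.h>
#include <linux/pgtable.h>

static void mark_pte_accessed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep, bool write)
{
	pte_t entry = pte_mkyoung(ptep_get(ptep));

	if (write)
		entry = pte_mkdirty(entry);

	/* Returns nonzero only if the PTE really changed and was flushed. */
	if (ptep_set_access_flags(vma, addr, ptep, entry, write))
		update_mmu_cache(vma, addr, ptep);
}
```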
|
hugetlb_inline.h
    9   static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)  in is_vm_hugetlb_page()
    16  static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)  in is_vm_hugetlb_page()
|
secretmem.h
    14  bool vma_is_secretmem(struct vm_area_struct *vma);
    19  static inline bool vma_is_secretmem(struct vm_area_struct *vma)  in vma_is_secretmem()
|
pagewalk.h
    123  struct vm_area_struct *vma;
    136  int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
    139  int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
    188  struct vm_area_struct *vma;
    193  struct vm_area_struct *vma, unsigned long addr,
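
walk_page_vma() above drives the callbacks in struct mm_walk_ops across a single VMA. A small sketch that counts present PTEs; count_present_pte(), count_ops and count_vma_pages() are hypothetical and assume the caller holds mmap_lock:

```c
#include <linux/mm.h>
#include <linux/pagewalk.h>

static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(ptep_get(pte)))
		(*count)++;
	return 0;	/* keep walking */
}

static const struct mm_walk_ops count_ops = {
	.pte_entry = count_present_pte,
};

static unsigned long count_vma_pages(struct vm_area_struct *vma)
{
	unsigned long count = 0;

	walk_page_vma(vma, &count_ops, &count);
	return count;
}
```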
|
pgsize_migration_inline.h
    53  static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,  in vma_pad_fixup_flags()
    62  static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,  in vma_pad_fixup_flags()
|
shmem_fs.h
    98   extern int shmem_zero_setup(struct vm_area_struct *);
    118  struct vm_area_struct *vma, pgoff_t index,
    122  struct vm_area_struct *vma, pgoff_t index,  in shmem_allowable_huge_orders()
    130  extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
    132  static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma)  in shmem_swap_usage()
    194  struct vm_area_struct *dst_vma,
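
shmem_zero_setup() above gives a VMA a shmem-backed file, which is how shared zero-filled mappings (e.g. MAP_SHARED of /dev/zero) are set up. A sketch of a hypothetical driver ->mmap handler using it, mirroring that pattern:

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);	/* back the VMA with a shmem file */

	vma_set_anonymous(vma);			/* private mappings stay anonymous */
	return 0;
}
```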
|
migrate.h
    146  struct vm_area_struct *vma, int node);
    147  int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
    151  struct vm_area_struct *vma, int node)  in migrate_misplaced_folio_prepare()
    156  struct vm_area_struct *vma, int node)  in migrate_misplaced_folio()
    193  struct vm_area_struct *vma;
|
uprobes.h
    20   struct vm_area_struct;
    122  extern int uprobe_mmap(struct vm_area_struct *vma);
    123  extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
    176  static inline int uprobe_mmap(struct vm_area_struct *vma)  in uprobe_mmap()
    181  uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)  in uprobe_munmap()
|
khugepaged.h
    16  extern void khugepaged_enter_vma(struct vm_area_struct *vma,
    49  static inline void khugepaged_enter_vma(struct vm_area_struct *vma,  in khugepaged_enter_vma()
|
swapfile.h
    8  extern int unuse_swap_pte(struct vm_area_struct *vma, pmd_t *pmd,
|
mm_inline.h
    425  static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,  in dup_anon_vma_name()
    426  struct vm_area_struct *new_vma)  in dup_anon_vma_name()
    434  static inline void free_anon_vma_name(struct vm_area_struct *vma)  in free_anon_vma_name()
    456  static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,  in dup_anon_vma_name()
    457  struct vm_area_struct *new_vma) {}  in dup_anon_vma_name()
    458  static inline void free_anon_vma_name(struct vm_area_struct *vma) {}  in free_anon_vma_name()
    559  swp_entry_t entry, struct vm_area_struct *dst_vma)  in copy_pte_marker()
    586  pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,  in pte_install_uffd_wp_if_needed()
    621  static inline bool vma_has_recency(struct vm_area_struct *vma)  in vma_has_recency()
|
cacheflush.h
    21  static inline void flush_icache_pages(struct vm_area_struct *vma,  in flush_icache_pages()
|
/include/xen/
xen-ops.h
    47   int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
    51   static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,  in xen_remap_pfn()
    61   struct vm_area_struct;
    64   int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
    70   int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
    77   static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,  in xen_xlate_remap_gfn_array()
    87   static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,  in xen_xlate_unmap_gfn_range()
    94   int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
    114  static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,  in xen_remap_domain_gfn_array()
    150  static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,  in xen_remap_domain_mfn_array()
    [all …]
|
/include/asm-generic/
cacheflush.h
    8   struct vm_area_struct;
    35  static inline void flush_cache_range(struct vm_area_struct *vma,  in flush_cache_range()
    43  static inline void flush_cache_page(struct vm_area_struct *vma,  in flush_cache_page()
    81  static inline void flush_icache_user_page(struct vm_area_struct *vma,  in flush_icache_user_page()
|
mm_hooks.h
    20  static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,  in arch_vma_access_permitted()
|
/include/drm/
drm_gem.h
    187  int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
    480  void drm_gem_vm_open(struct vm_area_struct *vma);
    481  void drm_gem_vm_close(struct vm_area_struct *vma);
    483  struct vm_area_struct *vma);
    484  int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
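
drm_gem_mmap() above is the stock ->mmap for GEM-based drivers: it resolves the fake mmap offset to a GEM object and dispatches to that object's mmap hook, while drm_gem_vm_open()/drm_gem_vm_close() keep the object refcounted as the VMA is duplicated and destroyed. A sketch of the usual fops wiring (roughly what the DEFINE_DRM_GEM_FOPS() helper in the same header expands to); my_driver_fops is a hypothetical name:

```c
#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

static const struct file_operations my_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,	/* GEM offset -> object -> its mmap hook */
};
```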
|