
Searched refs:vma (Results 1 – 25 of 76) sorted by relevance


/include/linux/
huge_mm.h
13 struct vm_area_struct *vma);
17 struct vm_area_struct *vma);
28 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
33 struct vm_area_struct *vma,
36 struct vm_area_struct *vma,
39 struct vm_area_struct *vma,
41 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
44 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
47 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
90 extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
[all …]
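
Most of the huge_mm.h helpers are consumed from page-table walkers. A minimal sketch, assuming a caller that already has the pmd_t * for a THP-eligible range in hand (my_pmd_range is a hypothetical walker, patterned on the smaps code in fs/proc/task_mmu.c):

	/* Take the THP lock iff the pmd maps a huge page; NULL means
	 * fall back to pte-level handling. */
	static void my_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
				 unsigned long addr)
	{
		spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

		if (ptl) {
			/* pmd is stable and huge while ptl is held */
			spin_unlock(ptl);
			return;
		}
		/* pte-level path goes here */
	}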
userfaultfd_k.h
44 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, in is_mergeable_vm_userfaultfd_ctx() argument
47 return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; in is_mergeable_vm_userfaultfd_ctx()
50 static inline bool userfaultfd_missing(struct vm_area_struct *vma) in userfaultfd_missing() argument
52 return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing()
55 static inline bool userfaultfd_armed(struct vm_area_struct *vma) in userfaultfd_armed() argument
57 return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); in userfaultfd_armed()
69 extern bool userfaultfd_remove(struct vm_area_struct *vma,
73 extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
88 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, in is_mergeable_vm_userfaultfd_ctx() argument
94 static inline bool userfaultfd_missing(struct vm_area_struct *vma) in userfaultfd_missing() argument
[all …]
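
A minimal sketch of how these predicates are consumed on the fault path, in the style of the shmem/hugetlb handlers (handle_missing is hypothetical; handle_userfault() is the real entry point declared in this header):

	static vm_fault_t handle_missing(struct vm_fault *vmf)
	{
		/* Registered for missing-page tracking? Let userspace
		 * resolve the fault instead of allocating here. */
		if (userfaultfd_missing(vmf->vma))
			return handle_userfault(vmf, VM_UFFD_MISSING);

		return VM_FAULT_SIGBUS;	/* placeholder for the normal path */
	}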
mempolicy.h
95 #define vma_policy(vma) ((vma)->vm_policy) argument
133 struct vm_area_struct *vma,
140 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
142 bool vma_policy_mof(struct vm_area_struct *vma);
149 extern int huge_node(struct vm_area_struct *vma,
176 static inline bool vma_migratable(struct vm_area_struct *vma) in vma_migratable() argument
178 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) in vma_migratable()
185 if (vma_is_dax(vma)) in vma_migratable()
189 if (vma->vm_flags & VM_HUGETLB) in vma_migratable()
198 if (vma->vm_file && in vma_migratable()
[all …]
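
vma_migratable() is the usual gate in migration walkers. A sketch, assuming the pre-maple-tree mm->mmap list of this kernel generation (scan_migratable is hypothetical):

	static void scan_migratable(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;

		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!vma_migratable(vma))
				continue;	/* VM_IO, VM_PFNMAP, DAX, ... */
			/* queue pages in [vma->vm_start, vma->vm_end) */
		}
	}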
khugepaged.h
16 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
55 static inline int khugepaged_enter(struct vm_area_struct *vma, in khugepaged_enter() argument
58 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) in khugepaged_enter()
62 !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) in khugepaged_enter()
63 if (__khugepaged_enter(vma->vm_mm)) in khugepaged_enter()
75 static inline int khugepaged_enter(struct vm_area_struct *vma, in khugepaged_enter() argument
80 static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, in khugepaged_enter_vma_merge() argument
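
khugepaged_enter() is typically called when a VMA first becomes THP-eligible; the anonymous huge-fault path uses essentially this two-liner:

	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;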
hugetlb_inline.h
9 static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) in is_vm_hugetlb_page() argument
11 return !!(vma->vm_flags & VM_HUGETLB); in is_vm_hugetlb_page()
16 static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) in is_vm_hugetlb_page() argument
rmap.h
91 struct vm_area_struct *vma; member
167 static inline int anon_vma_prepare(struct vm_area_struct *vma) in anon_vma_prepare() argument
169 if (likely(vma->anon_vma)) in anon_vma_prepare()
172 return __anon_vma_prepare(vma); in anon_vma_prepare()
175 static inline void anon_vma_merge(struct vm_area_struct *vma, in anon_vma_merge() argument
178 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); in anon_vma_merge()
226 struct vm_area_struct *vma; member
266 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
287 bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
292 bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
[all …]
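
The rmap_walk_control callbacks above drive rmap_walk(). A sketch with a hypothetical per-mapping callback (the caller is expected to hold the page lock):

	static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
				unsigned long address, void *arg)
	{
		/* inspect or adjust the mapping of page at address */
		return true;	/* true keeps walking, false stops */
	}

	static void walk_mappings(struct page *page)
	{
		struct rmap_walk_control rwc = {
			.rmap_one = my_rmap_one,
		};

		rmap_walk(page, &rwc);
	}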
hugetlb.h
63 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
81 struct vm_area_struct *vma,
84 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
91 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99 struct vm_area_struct *vma,
124 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
128 struct page *follow_huge_pd(struct vm_area_struct *vma,
131 struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
140 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
147 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) in reset_vma_resv_huge_pages() argument
[all …]
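
hugetlb_fault() is reached by an early dispatch in the generic fault code, gated on the is_vm_hugetlb_page() predicate from hugetlb_inline.h above:

	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, flags);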
mm.h
431 struct vm_area_struct *vma; /* Target VMA */ member
506 int (*access)(struct vm_area_struct *vma, unsigned long addr,
512 const char *(*name)(struct vm_area_struct *vma);
522 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
534 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
542 struct page *(*find_special_page)(struct vm_area_struct *vma,
551 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) in vma_init() argument
555 memset(vma, 0, sizeof(*vma)); in vma_init()
556 vma->vm_mm = mm; in vma_init()
557 vma->vm_ops = &dummy_vm_ops; in vma_init()
[all …]
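
vma_init() exists for the on-stack pseudo-VMA trick; the generic tlb_flush() under /include/asm-generic/ below uses the same pattern. A hedged sketch (flush_range_for is hypothetical; flush_tlb_range() is arch-provided):

	static void flush_range_for(struct mm_struct *mm,
				    unsigned long start, unsigned long end)
	{
		struct vm_area_struct vma;

		vma_init(&vma, mm);	/* zeroes, sets vm_mm and dummy ops */
		flush_tlb_range(&vma, start, end);
	}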
ksm.h
22 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
52 struct vm_area_struct *vma, unsigned long address);
69 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, in ksm_madvise() argument
76 struct vm_area_struct *vma, unsigned long address) in ksm_might_need_to_copy() argument
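
ksm_madvise() is the madvise(MADV_MERGEABLE/MADV_UNMERGEABLE) back end; the caller passes a scratch copy of the flags, which KSM may edit:

	unsigned long new_flags = vma->vm_flags;
	int err;

	err = ksm_madvise(vma, vma->vm_start, vma->vm_end,
			  MADV_MERGEABLE, &new_flags);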
pkeys.h
12 #define arch_override_mprotect_pkey(vma, prot, pkey) (0) argument
16 static inline int vma_pkey(struct vm_area_struct *vma) in vma_pkey() argument
mmdebug.h
14 void dump_vma(const struct vm_area_struct *vma);
26 #define VM_BUG_ON_VMA(cond, vma) \ argument
29 dump_vma(vma); \
59 #define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) argument
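
A typical use: assert that an address really falls inside the VMA, dumping it via dump_vma() on CONFIG_DEBUG_VM kernels:

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);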
highmem.h
15 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma… in flush_anon_page() argument
183 struct vm_area_struct *vma, in __alloc_zeroed_user_highpage() argument
187 vma, vaddr); in __alloc_zeroed_user_highpage()
205 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, in alloc_zeroed_user_highpage_movable() argument
209 return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); in alloc_zeroed_user_highpage_movable()
211 return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma, in alloc_zeroed_user_highpage_movable()
256 unsigned long vaddr, struct vm_area_struct *vma) in copy_user_highpage() argument
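
alloc_zeroed_user_highpage_movable() is the anonymous-fault allocator; a sketch in the shape of do_anonymous_page():

	struct page *page;

	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
	if (!page)
		return VM_FAULT_OOM;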
pagewalk.h
56 struct vm_area_struct *vma; member
63 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
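
walk_page_vma() pairs with a const mm_walk_ops table. A sketch with a hypothetical pte-level callback (the caller is expected to hold the mmap lock):

	static int my_pte(pte_t *pte, unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
	{
		/* examine *pte for the page mapped at addr */
		return 0;	/* nonzero aborts the walk */
	}

	static const struct mm_walk_ops my_walk_ops = {
		.pte_entry = my_pte,
	};

	/* ... */
	ret = walk_page_vma(vma, &my_walk_ops, NULL);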
migrate.h
127 struct vm_area_struct *vma, int node);
134 struct vm_area_struct *vma, int node) in migrate_misplaced_page() argument
142 struct vm_area_struct *vma,
148 struct vm_area_struct *vma, in migrate_misplaced_transhuge_page() argument
184 struct vm_area_struct *vma; member
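
migrate_misplaced_page() is the NUMA-balancing hook; the NUMA fault path uses it roughly like this (target_nid comes from the mempolicy lookup):

	if (migrate_misplaced_page(page, vma, target_nid))
		page_nid = target_nid;	/* nonzero return means migrated */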
gfp.h
542 struct vm_area_struct *vma, unsigned long addr,
544 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ argument
545 alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
549 #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ argument
551 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ argument
555 #define alloc_page_vma(gfp_mask, vma, addr) \ argument
556 alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
557 #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ argument
558 alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
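
alloc_page_vma() is the policy-aware single-page wrapper most fault handlers reach for:

	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);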
uprobes.h
117 extern int uprobe_mmap(struct vm_area_struct *vma);
118 extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
169 static inline int uprobe_mmap(struct vm_area_struct *vma) in uprobe_mmap() argument
174 uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) in uprobe_munmap() argument
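
Both hooks are driven from the generic mmap/munmap paths; mmap_region() ends with essentially:

	if (vma->vm_file)
		uprobe_mmap(vma);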
dax.h
64 static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, in daxdev_mapping_supported() argument
67 if (!(vma->vm_flags & VM_SYNC)) in daxdev_mapping_supported()
69 if (!IS_DAX(file_inode(vma->vm_file))) in daxdev_mapping_supported()
113 static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, in daxdev_mapping_supported() argument
116 return !(vma->vm_flags & VM_SYNC); in daxdev_mapping_supported()
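
daxdev_mapping_supported() lets a filesystem reject MAP_SYNC mappings the device cannot honor. A sketch of an mmap handler in the style of the XFS one (my_file_mmap and my_dax_dev are hypothetical):

	static int my_file_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		struct dax_device *dax_dev = my_dax_dev(filp); /* hypothetical lookup */

		if (!daxdev_mapping_supported(vma, dax_dev))
			return -EOPNOTSUPP;
		/* wire up vm_ops, set VM_HUGEPAGE, etc. */
		return 0;
	}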
/include/xen/
xen-ops.h
65 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
69 static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, in xen_remap_pfn() argument
82 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
88 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
95 static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, in xen_xlate_remap_gfn_array() argument
105 static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, in xen_xlate_unmap_gfn_range() argument
112 int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
132 static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma, in xen_remap_domain_gfn_array() argument
140 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array()
148 return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, in xen_remap_domain_gfn_array()
[all …]
/include/asm-generic/
cacheflush.h
33 static inline void flush_cache_range(struct vm_area_struct *vma, in flush_cache_range() argument
41 static inline void flush_cache_page(struct vm_area_struct *vma, in flush_cache_page() argument
73 static inline void flush_icache_page(struct vm_area_struct *vma, in flush_icache_page() argument
80 static inline void flush_icache_user_range(struct vm_area_struct *vma, in flush_icache_user_range() argument
100 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
103 flush_icache_user_range(vma, page, vaddr, len); \
108 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
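
copy_to_user_page() is the cache-coherent write used by ptrace-style remote access; __access_remote_vm() does, in outline (maddr being the kernel mapping of the target page):

	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
	set_page_dirty_lock(page);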
pgtable.h
30 extern int ptep_set_access_flags(struct vm_area_struct *vma,
37 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
40 extern int pudp_set_access_flags(struct vm_area_struct *vma,
44 static inline int pmdp_set_access_flags(struct vm_area_struct *vma, in pmdp_set_access_flags() argument
51 static inline int pudp_set_access_flags(struct vm_area_struct *vma, in pudp_set_access_flags() argument
62 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, in ptep_test_and_clear_young() argument
71 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); in ptep_test_and_clear_young()
78 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, in pmdp_test_and_clear_young() argument
87 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); in pmdp_test_and_clear_young()
91 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, in pmdp_test_and_clear_young() argument
[all …]
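
ptep_set_access_flags() is paired with an MMU-cache update when it reports a change, as the generic fault code does:

	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
		update_mmu_cache(vma, address, ptep);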
tlb.h
347 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } in tlb_update_vma_flags() argument
350 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { } in tlb_end_vma() argument
370 struct vm_area_struct vma = { in tlb_flush() local
376 flush_tlb_range(&vma, tlb->start, tlb->end); in tlb_flush()
381 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_update_vma_flags() argument
394 tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB); in tlb_update_vma_flags()
395 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); in tlb_update_vma_flags()
401 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } in tlb_update_vma_flags() argument
476 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_start_vma() argument
481 tlb_update_vma_flags(tlb, vma); in tlb_start_vma()
[all …]
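
tlb_start_vma()/tlb_end_vma() bracket the per-VMA unmap work; unmap_page_range()-style callers look like:

	tlb_start_vma(tlb, vma);
	/* ... zap page-table entries in [start, end) ... */
	tlb_end_vma(tlb, vma);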
mm_hooks.h
26 struct vm_area_struct *vma) in arch_bprm_mm_init() argument
30 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, in arch_vma_access_permitted() argument
hugetlb.h
69 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, in huge_ptep_clear_flush() argument
72 ptep_clear_flush(vma, addr, ptep); in huge_ptep_clear_flush()
114 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, in huge_ptep_set_access_flags() argument
118 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags()
/include/drm/
drm_gem.h
334 void drm_gem_vm_open(struct vm_area_struct *vma);
335 void drm_gem_vm_close(struct vm_area_struct *vma);
337 struct vm_area_struct *vma);
338 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
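
drm_gem_mmap() slots straight into a driver's file_operations; a sketch of the table (my_driver_fops is hypothetical; the DEFINE_DRM_GEM_FOPS() helper expands to much the same thing):

	static const struct file_operations my_driver_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.mmap		= drm_gem_mmap,
	};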
drm_vram_mm_helper.h
68 int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
85 struct file *filp, struct vm_area_struct *vma);
