Searched refs:vma (Results 1 – 25 of 33) sorted by relevance

/mm/
mmap.c
75 struct vm_area_struct *vma, struct vm_area_struct *prev,
112 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
114 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
116 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
117 if (vma_wants_writenotify(vma)) { in vma_set_page_prot()
119 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, in vma_set_page_prot()
251 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
254 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
256 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
260 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
[all …]
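
The mmap.c hits above are kernel-internal helpers (vma_set_page_prot(), __remove_shared_vm_struct()) on the mmap(2)/munmap(2) paths. For orientation only, a minimal userspace sketch that exercises those paths by creating and tearing down a single anonymous VMA; it uses standard POSIX calls and is not taken from this tree:

/* Each successful mmap() below makes mm/mmap.c set up a vm_area_struct
 * for the range; munmap() tears it down again. A sketch, not production code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096 * 4;              /* assumes 4 KiB pages */
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }
    memset(p, 0, len);                  /* fault the pages in */
    munmap(p, len);                     /* unlink and free the VMA */
    return 0;
}
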
nommu.c
121 struct vm_area_struct *vma; in kobjsize() local
123 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
124 if (vma) in kobjsize()
125 return vma->vm_end - vma->vm_start; in kobjsize()
140 struct vm_area_struct *vma; in __get_user_pages() local
153 vma = find_vma(mm, start); in __get_user_pages()
154 if (!vma) in __get_user_pages()
158 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || in __get_user_pages()
159 !(vm_flags & vma->vm_flags)) in __get_user_pages()
168 vmas[i] = vma; in __get_user_pages()
[all …]
mremap.c
53 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
89 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, in move_ptes() argument
96 struct mm_struct *mm = vma->vm_mm; in move_ptes()
121 if (vma->vm_file) { in move_ptes()
122 mapping = vma->vm_file->f_mapping; in move_ptes()
125 if (vma->anon_vma) { in move_ptes()
126 anon_vma = vma->anon_vma; in move_ptes()
140 flush_tlb_batched_pending(vma->vm_mm); in move_ptes()
167 flush_tlb_range(vma, old_end - len, old_end); in move_ptes()
180 unsigned long move_page_tables(struct vm_area_struct *vma, in move_page_tables() argument
[all …]
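
move_ptes() and move_page_tables() above do the page-table relocation behind mremap(2) when a mapping has to move. A minimal userspace sketch that can trigger that path, assuming _GNU_SOURCE for mremap() and MREMAP_MAYMOVE (illustrative only):

#define _GNU_SOURCE                     /* for mremap() and MREMAP_MAYMOVE */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
    size_t old_len = 4096, new_len = 4096 * 16;
    void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

    /* Grow the mapping; the kernel may relocate it, moving the page
     * tables via move_page_tables() when it does. */
    void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
    if (q == MAP_FAILED) { perror("mremap"); return EXIT_FAILURE; }

    printf("old %p new %p\n", p, q);
    munmap(q, new_len);
    return 0;
}
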
memory.c
520 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, in free_pgtables() argument
523 while (vma) { in free_pgtables()
524 struct vm_area_struct *next = vma->vm_next; in free_pgtables()
525 unsigned long addr = vma->vm_start; in free_pgtables()
531 unlink_anon_vmas(vma); in free_pgtables()
532 unlink_file_vma(vma); in free_pgtables()
534 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
535 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
541 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
543 vma = next; in free_pgtables()
[all …]
madvise.c
46 static long madvise_behavior(struct vm_area_struct *vma, in madvise_behavior() argument
50 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
53 unsigned long new_flags = vma->vm_flags; in madvise_behavior()
69 if (vma->vm_flags & VM_IO) { in madvise_behavior()
79 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) { in madvise_behavior()
87 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
93 error = hugepage_madvise(vma, &new_flags, behavior); in madvise_behavior()
99 if (new_flags == vma->vm_flags) { in madvise_behavior()
100 *prev = vma; in madvise_behavior()
104 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_behavior()
[all …]
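
madvise_behavior() above services madvise(2), adjusting vma->vm_flags and splitting or merging the VMA as needed. A small userspace sketch of the call it handles, here with MADV_DONTNEED on anonymous memory (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096 * 8;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

    memset(p, 0xab, len);               /* populate the pages */

    /* Declare the contents disposable; anonymous pages are dropped and
     * the range reads back as zeroes afterwards. */
    if (madvise(p, len, MADV_DONTNEED) != 0)
        perror("madvise");

    printf("first byte after MADV_DONTNEED: %d\n", ((unsigned char *)p)[0]);
    munmap(p, len);
    return 0;
}
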
mprotect.c
41 static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd, in lock_pte_protection() argument
49 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
51 pmdl = pmd_lock(vma->vm_mm, pmd); in lock_pte_protection()
57 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
62 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in change_pte_range() argument
66 struct mm_struct *mm = vma->vm_mm; in change_pte_range()
71 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl); in change_pte_range()
75 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
90 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
107 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
[all …]
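
change_pte_range() above is where mprotect(2) rewrites a VMA's PTEs with the new protection bits. A minimal userspace sketch that drives it (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

    strcpy(p, "hello");

    /* Drop write permission on the whole VMA; the kernel walks it and
     * rewrites each PTE with the new protection. */
    if (mprotect(p, len, PROT_READ) != 0) {
        perror("mprotect");
        return EXIT_FAILURE;
    }

    printf("%s\n", (char *)p);          /* reads still work; a write would fault */
    munmap(p, len);
    return 0;
}
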
rmap.c
132 static void anon_vma_chain_link(struct vm_area_struct *vma, in anon_vma_chain_link() argument
136 avc->vma = vma; in anon_vma_chain_link()
138 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
169 int anon_vma_prepare(struct vm_area_struct *vma) in anon_vma_prepare() argument
171 struct anon_vma *anon_vma = vma->anon_vma; in anon_vma_prepare()
176 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare()
183 anon_vma = find_mergeable_anon_vma(vma); in anon_vma_prepare()
195 if (likely(!vma->anon_vma)) { in anon_vma_prepare()
196 vma->anon_vma = anon_vma; in anon_vma_prepare()
197 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_prepare()
[all …]
huge_memory.c
698 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
700 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
714 struct vm_area_struct *vma, in __do_huge_pmd_anonymous_page() argument
757 if (userfaultfd_missing(vma)) { in __do_huge_pmd_anonymous_page()
764 ret = handle_userfault(vma, address, flags, in __do_huge_pmd_anonymous_page()
770 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
771 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
772 page_add_new_anon_rmap(page, vma, haddr); in __do_huge_pmd_anonymous_page()
774 lru_cache_add_active_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
793 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
[all …]
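
__do_huge_pmd_anonymous_page() above installs a PMD-sized anonymous page on fault. A hedged userspace sketch that merely asks for transparent huge pages on a range; it assumes a kernel with THP support and a libc that defines MADV_HUGEPAGE, and whether a huge page is actually used remains up to the kernel:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 2UL * 1024 * 1024;     /* one 2 MiB huge page worth */
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

#ifdef MADV_HUGEPAGE
    /* Request THP for the range; a later fault may then be served with a
     * single PMD mapping instead of 512 PTEs. */
    if (madvise(p, len, MADV_HUGEPAGE) != 0)
        perror("madvise(MADV_HUGEPAGE)");
#endif

    memset(p, 0, len);                  /* touch the range so it is populated */
    munmap(p, len);
    return 0;
}
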
gup.c
21 static struct page *no_page_table(struct vm_area_struct *vma, in no_page_table() argument
32 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) in no_page_table()
37 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, in follow_pfn_pte() argument
52 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
53 update_mmu_cache(vma, address, pte); in follow_pfn_pte()
75 static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags) in should_force_cow_break() argument
77 return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET); in should_force_cow_break()
80 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte() argument
83 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
90 return no_page_table(vma, flags); in follow_page_pte()
[all …]
mlock.c
361 struct vm_area_struct *vma, int zoneid, unsigned long start, in __munlock_pagevec_fill() argument
372 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
384 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
423 void munlock_vma_pages_range(struct vm_area_struct *vma, in munlock_vma_pages_range() argument
426 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in munlock_vma_pages_range()
444 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, in munlock_vma_pages_range()
475 start = __munlock_pagevec_fill(&pvec, vma, in munlock_vma_pages_range()
499 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, in mlock_fixup() argument
502 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
507 vm_flags_t old_flags = vma->vm_flags; in mlock_fixup()
[all …]
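
mlock_fixup() and munlock_vma_pages_range() above set and clear VM_LOCKED on a VMA. The corresponding userspace calls are mlock(2) and munlock(2); a minimal sketch, which may fail under a small RLIMIT_MEMLOCK (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096 * 4;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

    /* mlock() pins the range and sets VM_LOCKED on the VMA; munlock()
     * clears the flag and lets the pages be reclaimed again. */
    if (mlock(p, len) != 0)
        perror("mlock");                /* e.g. RLIMIT_MEMLOCK too low */

    memset(p, 0, len);

    if (munlock(p, len) != 0)
        perror("munlock");

    munmap(p, len);
    return 0;
}
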
pgtable-generic.c
47 int ptep_set_access_flags(struct vm_area_struct *vma, in ptep_set_access_flags() argument
53 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
54 flush_tlb_fix_spurious_fault(vma, address); in ptep_set_access_flags()
61 int ptep_clear_flush_young(struct vm_area_struct *vma, in ptep_clear_flush_young() argument
65 young = ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young()
67 flush_tlb_page(vma, address); in ptep_clear_flush_young()
73 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, in ptep_clear_flush() argument
76 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush()
80 flush_tlb_page(vma, address); in ptep_clear_flush()
98 #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) argument
[all …]
pagewalk.c
38 if (pmd_none(*pmd) || !walk->vma) { in walk_pmd_range()
135 struct vm_area_struct *vma = walk->vma; in walk_hugetlb_range() local
136 struct hstate *h = hstate_vma(vma); in walk_hugetlb_range()
176 struct vm_area_struct *vma = walk->vma; in walk_page_test() local
189 if (vma->vm_flags & VM_PFNMAP) { in walk_page_test()
202 struct vm_area_struct *vma = walk->vma; in __walk_page_range() local
204 if (vma && is_vm_hugetlb_page(vma)) { in __walk_page_range()
248 struct vm_area_struct *vma; in walk_page_range() local
258 vma = find_vma(walk->mm, start); in walk_page_range()
260 if (!vma) { /* after the last vma */ in walk_page_range()
[all …]
hugetlb.c
226 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) in subpool_vma() argument
228 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
632 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
634 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
635 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
638 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, in linear_hugepage_index() argument
641 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
648 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) in vma_kernel_pagesize() argument
652 if (!is_vm_hugetlb_page(vma)) in vma_kernel_pagesize()
655 hstate = hstate_vma(vma); in vma_kernel_pagesize()
[all …]
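
hugetlb.c handles explicit huge-page mappings, e.g. translating a faulting address to a huge-page index in vma_hugecache_offset(). A hedged userspace sketch using MAP_HUGETLB; it assumes a 2 MiB default huge page size and that huge pages have been reserved (vm.nr_hugepages > 0), otherwise the mmap() simply fails:

#define _GNU_SOURCE                     /* for MAP_HUGETLB on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
#ifdef MAP_HUGETLB
    size_t len = 2UL * 1024 * 1024;     /* assumes 2 MiB default huge pages */
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap(MAP_HUGETLB)");    /* fails unless huge pages are reserved */
        return EXIT_FAILURE;
    }
    memset(p, 0, len);                  /* faults are serviced by mm/hugetlb.c */
    munmap(p, len);
#endif
    return 0;
}
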
mempolicy.c
445 struct vm_area_struct *vma; in mpol_rebind_mm() local
448 for (vma = mm->mmap; vma; vma = vma->vm_next) in mpol_rebind_mm()
449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); in mpol_rebind_mm()
488 struct vm_area_struct *vma = walk->vma; in queue_pages_pte_range() local
496 split_huge_page_pmd(vma, addr, pmd); in queue_pages_pte_range()
504 page = vm_normal_page(vma, addr, *pte); in queue_pages_pte_range()
518 if (!vma_migratable(vma)) in queue_pages_pte_range()
541 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb()
571 unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument
576 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); in change_prot_numa()
[all …]
ksm.c
364 static int break_ksm(struct vm_area_struct *vma, unsigned long addr) in break_ksm() argument
371 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION); in break_ksm()
375 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm()
415 struct vm_area_struct *vma; in find_mergeable_vma() local
418 vma = find_vma(mm, addr); in find_mergeable_vma()
419 if (!vma || vma->vm_start > addr) in find_mergeable_vma()
421 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in find_mergeable_vma()
423 return vma; in find_mergeable_vma()
430 struct vm_area_struct *vma; in break_cow() local
439 vma = find_mergeable_vma(mm, addr); in break_cow()
[all …]
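
break_ksm() and find_mergeable_vma() above act on VMAs that userspace has marked VM_MERGEABLE. A small sketch of opting a range into KSM with MADV_MERGEABLE; it assumes CONFIG_KSM and a running ksmd, and any merging happens asynchronously, if at all:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096 * 64;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

    memset(p, 0x5a, len);               /* identical pages: a good KSM candidate */

#ifdef MADV_MERGEABLE
    /* Marks the VMA mergeable; ksmd may later deduplicate the pages if
     * KSM is enabled via /sys/kernel/mm/ksm/run. */
    if (madvise(p, len, MADV_MERGEABLE) != 0)
        perror("madvise(MADV_MERGEABLE)");
#endif

    munmap(p, len);
    return 0;
}
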
mincore.c
85 struct vm_area_struct *vma, unsigned char *vec) in __mincore_unmapped_range() argument
90 if (vma->vm_file) { in __mincore_unmapped_range()
93 pgoff = linear_page_index(vma, addr); in __mincore_unmapped_range()
95 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); in __mincore_unmapped_range()
107 walk->vma, walk->private); in mincore_unmapped_range()
115 struct vm_area_struct *vma = walk->vma; in mincore_pte_range() local
120 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in mincore_pte_range()
127 __mincore_unmapped_range(addr, end, vma, vec); in mincore_pte_range()
137 vma, vec); in mincore_pte_range()
168 static inline bool can_do_mincore(struct vm_area_struct *vma) in can_do_mincore() argument
[all …]
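
__mincore_unmapped_range() and mincore_pte_range() above fill the per-page residency vector returned by mincore(2). A minimal userspace sketch (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    size_t pages = 8, len = pages * page;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

    memset(p, 0, 2 * page);             /* fault in only the first two pages */

    unsigned char vec[8];               /* one byte per page */
    if (mincore(p, len, vec) != 0) {
        perror("mincore");
        return EXIT_FAILURE;
    }
    for (size_t i = 0; i < pages; i++)
        printf("page %zu %s\n", i, (vec[i] & 1) ? "resident" : "not resident");

    munmap(p, len);
    return 0;
}
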
msync.c
35 struct vm_area_struct *vma; in SYSCALL_DEFINE3() local
58 vma = find_vma(mm, start); in SYSCALL_DEFINE3()
65 if (!vma) in SYSCALL_DEFINE3()
68 if (start < vma->vm_start) { in SYSCALL_DEFINE3()
69 start = vma->vm_start; in SYSCALL_DEFINE3()
76 (vma->vm_flags & VM_LOCKED)) { in SYSCALL_DEFINE3()
80 file = vma->vm_file; in SYSCALL_DEFINE3()
81 fstart = (start - vma->vm_start) + in SYSCALL_DEFINE3()
82 ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in SYSCALL_DEFINE3()
83 fend = fstart + (min(end, vma->vm_end) - start) - 1; in SYSCALL_DEFINE3()
[all …]
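
The SYSCALL_DEFINE3 hits above are the msync(2) implementation, which converts the user range into a file offset through the VMA before flushing. A small userspace sketch using a hypothetical scratch file path (illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    const char *path = "/tmp/msync-demo";   /* hypothetical scratch file */
    size_t len = 4096;

    int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
    if (fd < 0 || ftruncate(fd, len) != 0) {
        perror("open/ftruncate");
        return EXIT_FAILURE;
    }

    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

    strcpy(p, "flushed through the page cache");

    /* MS_SYNC writes the dirty pages of this shared mapping back to the
     * file before returning. */
    if (msync(p, len, MS_SYNC) != 0)
        perror("msync");

    munmap(p, len);
    close(fd);
    unlink(path);                           /* remove the scratch file */
    return 0;
}
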
vmacache.c
58 struct vm_area_struct *vma = current->vmacache[i]; in vmacache_find() local
60 if (!vma) in vmacache_find()
62 if (WARN_ON_ONCE(vma->vm_mm != mm)) in vmacache_find()
64 if (vma->vm_start <= addr && vma->vm_end > addr) { in vmacache_find()
66 return vma; in vmacache_find()
86 struct vm_area_struct *vma = current->vmacache[i]; in vmacache_find_exact() local
88 if (vma && vma->vm_start == start && vma->vm_end == end) { in vmacache_find_exact()
90 return vma; in vmacache_find_exact()
debug.c
154 void dump_vma(const struct vm_area_struct *vma) in dump_vma() argument
160 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, in dump_vma()
161 vma->vm_prev, vma->vm_mm, in dump_vma()
162 (unsigned long)pgprot_val(vma->vm_page_prot), in dump_vma()
163 vma->anon_vma, vma->vm_ops, vma->vm_pgoff, in dump_vma()
164 vma->vm_file, vma->vm_private_data); in dump_vma()
165 dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); in dump_vma()
migrate.c
107 static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, in remove_migration_pte() argument
110 struct mm_struct *mm = vma->vm_mm; in remove_migration_pte()
120 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep); in remove_migration_pte()
148 pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); in remove_migration_pte()
154 pte = maybe_mkwrite(pte, vma); in remove_migration_pte()
159 pte = arch_make_huge_pte(pte, vma, new, 0); in remove_migration_pte()
167 hugepage_add_anon_rmap(new, vma, addr); in remove_migration_pte()
171 page_add_anon_rmap(new, vma, addr); in remove_migration_pte()
175 if (vma->vm_flags & VM_LOCKED) in remove_migration_pte()
179 update_mmu_cache(vma, addr, ptep); in remove_migration_pte()
[all …]
frame_vector.c
37 struct vm_area_struct *vma; in get_vaddr_frames() local
50 vma = find_vma_intersection(mm, start, start + 1); in get_vaddr_frames()
51 if (!vma) { in get_vaddr_frames()
55 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { in get_vaddr_frames()
68 while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { in get_vaddr_frames()
69 err = follow_pfn(vma, start, &nums[ret]); in get_vaddr_frames()
82 if (ret >= nr_frames || start < vma->vm_end) in get_vaddr_frames()
84 vma = find_vma_intersection(mm, start, start + 1); in get_vaddr_frames()
85 } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)); in get_vaddr_frames()
internal.h
288 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
292 extern long populate_vma_page_range(struct vm_area_struct *vma,
294 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
296 static inline void munlock_vma_pages_all(struct vm_area_struct *vma) in munlock_vma_pages_all() argument
298 munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end); in munlock_vma_pages_all()
335 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
339 struct vm_area_struct *vma);
util.c
203 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link_list() argument
208 vma->vm_prev = prev; in __vma_link_list()
211 prev->vm_next = vma; in __vma_link_list()
213 mm->mmap = vma; in __vma_link_list()
220 vma->vm_next = next; in __vma_link_list()
222 next->vm_prev = vma; in __vma_link_list()
226 int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t) in vma_is_stack_for_task() argument
228 return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); in vma_is_stack_for_task()
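
__vma_link_list() above is a plain doubly-linked-list insertion keyed off prev. A simplified, self-contained userspace sketch of the same insert-after-prev logic; struct node and link_after() are hypothetical stand-ins, not kernel types:

#include <stdio.h>

struct node {
    int value;
    struct node *prev, *next;
};

/* Insert n after prev, or at the head of the list when prev is NULL,
 * mirroring the prev/next fixups __vma_link_list() performs. */
static void link_after(struct node **head, struct node *n, struct node *prev)
{
    struct node *next;

    n->prev = prev;
    if (prev) {
        next = prev->next;
        prev->next = n;
    } else {
        next = *head;
        *head = n;
    }
    n->next = next;
    if (next)
        next->prev = n;
}

int main(void)
{
    struct node a = { .value = 1 }, b = { .value = 2 };
    struct node *head = NULL;

    link_after(&head, &a, NULL);        /* list: 1 */
    link_after(&head, &b, &a);          /* list: 1 2 */

    for (struct node *n = head; n; n = n->next)
        printf("%d\n", n->value);
    return 0;
}
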
filemap.c
1869 static void do_sync_mmap_readahead(struct vm_area_struct *vma, in do_sync_mmap_readahead() argument
1877 if (vma->vm_flags & VM_RAND_READ) in do_sync_mmap_readahead()
1882 if (vma->vm_flags & VM_SEQ_READ) { in do_sync_mmap_readahead()
1912 static void do_async_mmap_readahead(struct vm_area_struct *vma, in do_async_mmap_readahead() argument
1921 if (vma->vm_flags & VM_RAND_READ) in do_async_mmap_readahead()
1954 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in filemap_fault() argument
1957 struct file *file = vma->vm_file; in filemap_fault()
1979 do_async_mmap_readahead(vma, ra, file, page, offset); in filemap_fault()
1982 do_sync_mmap_readahead(vma, ra, file, offset); in filemap_fault()
1984 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in filemap_fault()
[all …]
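
do_sync_mmap_readahead() above keys its readahead decisions off VM_RAND_READ and VM_SEQ_READ, which userspace sets with madvise(MADV_RANDOM) or madvise(MADV_SEQUENTIAL) on a file mapping. A hedged sketch; the default path is only an example and any readable, non-empty file will do:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/etc/hostname";   /* example path */
    struct stat st;

    int fd = open(path, O_RDONLY);
    if (fd < 0 || fstat(fd, &st) != 0 || st.st_size == 0) {
        fprintf(stderr, "need a readable, non-empty file\n");
        return EXIT_FAILURE;
    }

    void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

    /* MADV_SEQUENTIAL sets VM_SEQ_READ on the VMA, the flag checked above
     * to read ahead aggressively; MADV_RANDOM would disable readahead. */
    if (madvise(p, st.st_size, MADV_SEQUENTIAL) != 0)
        perror("madvise");

    unsigned char sum = 0;
    for (off_t i = 0; i < st.st_size; i++)
        sum += ((unsigned char *)p)[i]; /* page faults go through filemap_fault() */
    printf("read %lld bytes, checksum %u\n", (long long)st.st_size, sum);

    munmap(p, st.st_size);
    close(fd);
    return 0;
}
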
swap_state.c
293 struct vm_area_struct *vma, unsigned long addr, in __read_swap_cache_async() argument
315 new_page = alloc_page_vma(gfp_mask, vma, addr); in __read_swap_cache_async()
391 struct vm_area_struct *vma, unsigned long addr) in read_swap_cache_async() argument
395 vma, addr, &page_was_allocated); in read_swap_cache_async()
467 struct vm_area_struct *vma, unsigned long addr) in swapin_readahead() argument
490 gfp_mask, vma, addr); in swapin_readahead()
501 return read_swap_cache_async(entry, gfp_mask, vma, addr); in swapin_readahead()
