Lines matching references to vma
138 static void anon_vma_chain_link(struct vm_area_struct *vma, in anon_vma_chain_link() argument
142 avc->vma = vma; in anon_vma_chain_link()
144 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
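The three fragments above are the body of the helper that ties one anon_vma_chain (avc) to both sides of the mapping. A reconstruction of the whole helper, assuming the ~v5.x layout of struct anon_vma_chain and struct anon_vma:

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	/* same_vma strings together every avc belonging to this vma */
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	/* the interval tree lets rmap walkers find this vma from the anon_vma */
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}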
176 int __anon_vma_prepare(struct vm_area_struct *vma) in __anon_vma_prepare() argument
178 struct mm_struct *mm = vma->vm_mm; in __anon_vma_prepare()
188 anon_vma = find_mergeable_anon_vma(vma); in __anon_vma_prepare()
200 if (likely(!vma->anon_vma)) { in __anon_vma_prepare()
201 vma->anon_vma = anon_vma; in __anon_vma_prepare()
202 anon_vma_chain_link(vma, avc, anon_vma); in __anon_vma_prepare()
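__anon_vma_prepare() is only the slow path for the first anonymous fault in a vma; callers normally go through the inline wrapper in include/linux/rmap.h, roughly:

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	/* fast path: an anon_vma is already attached */
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

so the find_mergeable_anon_vma()/allocation logic above runs at most once per vma in the common case.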
316 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) in anon_vma_fork() argument
327 vma->anon_vma = NULL; in anon_vma_fork()
333 error = anon_vma_clone(vma, pvma); in anon_vma_fork()
338 if (vma->anon_vma) in anon_vma_fork()
362 vma->anon_vma = anon_vma; in anon_vma_fork()
364 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_fork()
373 unlink_anon_vmas(vma); in anon_vma_fork()
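anon_vma_fork() is driven from dup_mmap() at fork time. A heavily simplified sketch of that call site (kernel/fork.c; mpnt is the parent vma, mm the child mm):

	struct vm_area_struct *tmp = vm_area_dup(mpnt);	/* child's copy of the parent vma */
	if (!tmp)
		goto fail_nomem;
	tmp->vm_mm = mm;
	/* give the child its own anon_vma, chained beneath the parent's */
	if (anon_vma_fork(tmp, mpnt))
		goto fail_nomem_anon_vma_fork;

On failure anon_vma_fork() cleans up after itself through unlink_anon_vmas(), as line 373 above shows.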
377 void unlink_anon_vmas(struct vm_area_struct *vma) in unlink_anon_vmas() argument
386 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
404 if (vma->anon_vma) in unlink_anon_vmas()
405 vma->anon_vma->degree--; in unlink_anon_vmas()
413 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
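The two list_for_each_entry_safe() passes above split the teardown: the first unlinks each avc from its anon_vma's interval tree under the root lock, the second frees the now-empty anon_vmas once that lock is dropped. unlink_anon_vmas() itself runs when a vma disappears; a simplified sketch of the free_pgtables() caller in mm/memory.c (hugetlb and vma-coalescing details omitted):

	while (vma) {
		struct vm_area_struct *next = vma->vm_next;

		/* hide the vma from rmap before its page tables are freed */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);
		free_pgd_range(tlb, vma->vm_start, vma->vm_end,
			       floor, next ? next->vm_start : ceiling);
		vma = next;
	}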
688 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
697 if (!vma->anon_vma || !page__anon_vma || in page_address_in_vma()
698 vma->anon_vma->root != page__anon_vma->root) in page_address_in_vma()
701 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
705 address = __vma_address(page, vma); in page_address_in_vma()
706 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in page_address_in_vma()
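page_address_in_vma() answers "where, if anywhere, is this page mapped in this vma?" and returns -EFAULT when the anon_vma roots or the file mapping do not match, or when the computed address falls outside the vma. A hypothetical caller:

	unsigned long addr = page_address_in_vma(page, vma);

	if (addr == -EFAULT)
		return;		/* page does not belong to this vma */
	/* addr is a user virtual address in [vma->vm_start, vma->vm_end) */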
754 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
760 .vma = vma, in page_referenced_one()
768 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
775 if (ptep_clear_flush_young_notify(vma, address, in page_referenced_one()
785 if (likely(!(vma->vm_flags & VM_SEQ_READ))) in page_referenced_one()
789 if (pmdp_clear_flush_young_notify(vma, address, in page_referenced_one()
807 pra->vm_flags |= vma->vm_flags; in page_referenced_one()
816 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) in invalid_page_referenced_vma() argument
821 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
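Both callbacks above are installed by page_referenced() through an rmap_walk_control. A condensed sketch of that setup, assuming the ~v5.x struct page_referenced_arg in the same file:

	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,	/* run once per vma mapping the page */
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	/* cgroup-targeted reclaim: ignore vmas owned by other cgroups */
	if (memcg)
		rwc.invalid_vma = invalid_page_referenced_vma;

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;	/* accumulated at line 807 above */
	return pra.referenced;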
884 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
889 .vma = vma, in page_mkclean_one()
901 0, vma, vma->vm_mm, address, in page_mkclean_one()
902 min(vma->vm_end, address + page_size(page))); in page_mkclean_one()
916 flush_cache_page(vma, address, pte_pfn(*pte)); in page_mkclean_one()
917 entry = ptep_clear_flush(vma, address, pte); in page_mkclean_one()
920 set_pte_at(vma->vm_mm, address, pte, entry); in page_mkclean_one()
930 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
931 entry = pmdp_invalidate(vma, address, pmd); in page_mkclean_one()
934 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_mkclean_one()
958 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) in invalid_mkclean_vma() argument
960 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
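page_mkclean() wires these two up the same way; invalid_mkclean_vma() restricts the walk to VM_SHARED vmas, since only shared mappings write the page back through the file. A condensed sketch:

	int cleaned = 0;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,		/* write-protect and clean each mapping */
		.invalid_vma = invalid_mkclean_vma,	/* skip everything that is not VM_SHARED */
	};

	if (!page_mapped(page) || !page_mapping(page))
		return 0;

	rmap_walk(page, &rwc);
	return cleaned;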
1001 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1003 struct anon_vma *anon_vma = vma->anon_vma; in page_move_anon_rmap()
1008 VM_BUG_ON_VMA(!anon_vma, vma); in page_move_anon_rmap()
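page_move_anon_rmap() re-points page->mapping at this vma's own anon_vma once a write fault proves the page is no longer shared with the parent or siblings. A heavily simplified sketch of the do_wp_page() reuse path that calls it (mm/memory.c; the page/vmf locals are condensed here):

	int total_map_swapcount;

	if (PageAnon(page) && reuse_swap_page(page, &total_map_swapcount)) {
		if (total_map_swapcount == 1)
			/* page is all ours: rebind it to vma->anon_vma */
			page_move_anon_rmap(page, vma);
		wp_page_reuse(vmf);	/* reuse in place instead of COW-copying */
	}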
1027 struct vm_area_struct *vma, unsigned long address, int exclusive) in __page_set_anon_rmap() argument
1029 struct anon_vma *anon_vma = vma->anon_vma; in __page_set_anon_rmap()
1046 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
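The interesting part of __page_set_anon_rmap() sits between the two fragments above: the anon_vma pointer itself is stored in page->mapping with the low PAGE_MAPPING_ANON bit set. A reconstruction of that tail, assuming the ~v5.x code:

	/* non-exclusive mappings must use the oldest (root) anon_vma */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/* tag the pointer so PageAnon()/page_anon_vma() can recognise it */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);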
1056 struct vm_area_struct *vma, unsigned long address) in __page_check_anon_rmap() argument
1071 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); in __page_check_anon_rmap()
1072 BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address)); in __page_check_anon_rmap()
1089 struct vm_area_struct *vma, unsigned long address, bool compound) in page_add_anon_rmap() argument
1091 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1100 struct vm_area_struct *vma, unsigned long address, int flags) in do_page_add_anon_rmap() argument
1134 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1137 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
1152 struct vm_area_struct *vma, unsigned long address, bool compound) in page_add_new_anon_rmap() argument
1156 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in page_add_new_anon_rmap()
1170 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
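page_add_anon_rmap() is for pages that may already be mapped elsewhere (the swap-in path goes through do_page_add_anon_rmap()), while page_add_new_anon_rmap() is for pages the caller has just allocated. A simplified sketch of the latter's most common call site, do_anonymous_page() in mm/memory.c (vmf and entry are that fault handler's locals; memcg charging omitted):

	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, vmf->address, false);	/* new page, not compound */
	lru_cache_add_active_or_unevictable(page, vma);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);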
1341 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1344 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1347 .vma = vma, in try_to_unmap_one()
1357 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) in try_to_unmap_one()
1365 split_huge_pmd_address(vma, address, in try_to_unmap_one()
1377 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in try_to_unmap_one()
1379 min(vma->vm_end, address + page_size(page))); in try_to_unmap_one()
1385 adjust_range_if_pmd_sharing_possible(vma, &range.start, in try_to_unmap_one()
1407 if (vma->vm_flags & VM_LOCKED) { in try_to_unmap_one()
1439 flush_cache_range(vma, range.start, range.end); in try_to_unmap_one()
1440 flush_tlb_range(vma, range.start, range.end); in try_to_unmap_one()
1492 if (ptep_clear_flush_young_notify(vma, address, in try_to_unmap_one()
1501 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); in try_to_unmap_one()
1515 pteval = ptep_clear_flush(vma, address, pvmw.pte); in try_to_unmap_one()
1531 vma_mmu_pagesize(vma)); in try_to_unmap_one()
1537 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { in try_to_unmap_one()
1557 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_unmap_one()
1623 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_unmap_one()
1674 bool is_vma_temporary_stack(struct vm_area_struct *vma) in is_vma_temporary_stack() argument
1676 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); in is_vma_temporary_stack()
1681 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == in is_vma_temporary_stack()
1688 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) in invalid_migration_vma() argument
1690 return is_vma_temporary_stack(vma); in invalid_migration_vma()
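try_to_unmap_one() and invalid_migration_vma() are both plugged into an rmap_walk_control by try_to_unmap(); a simplified sketch of that wiring (same file; the real TTU_MIGRATION test also considers TTU_SPLIT_FREEZE):

	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,			/* enum ttu_flags */
		.done = page_mapcount_is_zero,		/* stop once the page is fully unmapped */
		.anon_lock = page_lock_anon_vma_read,
	};

	/* migration must not touch exec()'s temporary stack vma */
	if (flags & TTU_MIGRATION)
		rwc.invalid_vma = invalid_migration_vma;

	rmap_walk(page, &rwc);
	return !page_mapcount(page);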
1833 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon() local
1834 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1838 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
1841 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
1869 struct vm_area_struct *vma; in rmap_walk_file() local
1886 vma_interval_tree_foreach(vma, &mapping->i_mmap, in rmap_walk_file()
1888 unsigned long address = vma_address(page, vma); in rmap_walk_file()
1892 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_file()
1895 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
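rmap_walk_anon() and rmap_walk_file() are the two legs every caller above reaches through the dispatcher, which in the ~v5.x tree is simply:

void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		rmap_walk_ksm(page, rwc);		/* KSM pages carry their own rmap metadata */
	else if (PageAnon(page))
		rmap_walk_anon(page, rwc, false);	/* walk the anon_vma interval tree */
	else
		rmap_walk_file(page, rwc, false);	/* walk mapping->i_mmap */
}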
1934 struct vm_area_struct *vma, unsigned long address) in hugepage_add_anon_rmap() argument
1936 struct anon_vma *anon_vma = vma->anon_vma; in hugepage_add_anon_rmap()
1944 __page_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
1948 struct vm_area_struct *vma, unsigned long address) in hugepage_add_new_anon_rmap() argument
1950 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugepage_add_new_anon_rmap()
1952 __page_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()
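The hugetlb variants are invoked from the hugetlb fault paths rather than the ordinary anonymous-fault code. A simplified sketch of the hugetlb_no_page() call site in mm/hugetlb.c (anon_rmap and haddr are that function's locals):

	if (anon_rmap)
		hugepage_add_new_anon_rmap(page, vma, haddr);	/* freshly allocated huge page */
	else
		page_dup_rmap(page, true);			/* page already in the page cache */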