
Lines matching refs:vma (references to the vma identifier in the Linux kernel's mm/rmap.c, the reverse-mapping code)

132 static void anon_vma_chain_link(struct vm_area_struct *vma,  in anon_vma_chain_link()  argument
136 avc->vma = vma; in anon_vma_chain_link()
138 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
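
For orientation: an anon_vma_chain (AVC) is the join object between a vma and an anon_vma. The three matches above are the heart of the linker: the AVC records its vma and is pushed onto that vma's same_vma list (it is also linked into the anon_vma's own structure, which this listing does not show). Below is a minimal user-space sketch of that linkage; the types are simplified stand-ins with a plain singly linked list in place of the kernel's list_head, not kernel definitions.

    #include <stdio.h>

    struct anon_vma { int id; };
    struct vm_area_struct;                        /* forward declaration */

    struct anon_vma_chain {
            struct vm_area_struct *vma;           /* avc->vma = vma */
            struct anon_vma *anon_vma;
            struct anon_vma_chain *next_same_vma; /* stands in for the same_vma list */
    };

    struct vm_area_struct {
            struct anon_vma *anon_vma;
            struct anon_vma_chain *anon_vma_chain; /* list head lives in the vma */
    };

    static void anon_vma_chain_link(struct vm_area_struct *vma,
                                    struct anon_vma_chain *avc,
                                    struct anon_vma *anon_vma)
    {
            avc->vma = vma;
            avc->anon_vma = anon_vma;
            avc->next_same_vma = vma->anon_vma_chain; /* push onto the vma's chain */
            vma->anon_vma_chain = avc;
    }

    int main(void)
    {
            struct anon_vma av = { 1 };
            struct vm_area_struct vma = { 0 };
            struct anon_vma_chain avc = { 0 };

            anon_vma_chain_link(&vma, &avc, &av);
            printf("first AVC on the vma points at anon_vma %d\n",
                   vma.anon_vma_chain->anon_vma->id);
            return 0;
    }
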
169 int anon_vma_prepare(struct vm_area_struct *vma) in anon_vma_prepare() argument
171 struct anon_vma *anon_vma = vma->anon_vma; in anon_vma_prepare()
176 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare()
183 anon_vma = find_mergeable_anon_vma(vma); in anon_vma_prepare()
195 if (likely(!vma->anon_vma)) { in anon_vma_prepare()
196 vma->anon_vma = anon_vma; in anon_vma_prepare()
197 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_prepare()
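
anon_vma_prepare() allocates a vma's anon_vma lazily, on the first anonymous fault: it first tries to share a neighbour's via find_mergeable_anon_vma() (line 183), and the likely(!vma->anon_vma) test at line 195 is a re-check under the lock, since another thread may have installed an anon_vma in the meantime. A user-space sketch of that allocate-outside, re-check-inside pattern, with a pthread mutex standing in for the kernel's locking and illustrative names throughout:

    #include <pthread.h>
    #include <stdlib.h>

    struct anon_vma { int placeholder; };

    struct vm_area_struct {
            struct anon_vma *anon_vma;
            pthread_mutex_t *lock;       /* stand-in for the kernel's locking */
    };

    static int anon_vma_prepare_sketch(struct vm_area_struct *vma)
    {
            struct anon_vma *anon_vma;

            if (vma->anon_vma)                        /* fast path, no lock */
                    return 0;

            anon_vma = calloc(1, sizeof(*anon_vma));  /* allocate before locking */
            if (!anon_vma)
                    return -1;

            pthread_mutex_lock(vma->lock);
            if (!vma->anon_vma) {                     /* re-check: we may have raced */
                    vma->anon_vma = anon_vma;
                    anon_vma = NULL;                  /* ownership moved to the vma */
            }
            pthread_mutex_unlock(vma->lock);

            free(anon_vma);                           /* frees only if we lost */
            return 0;
    }

    int main(void)
    {
            pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
            struct vm_area_struct vma = { NULL, &lock };

            return anon_vma_prepare_sketch(&vma);
    }
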
311 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) in anon_vma_fork() argument
322 vma->anon_vma = NULL; in anon_vma_fork()
328 error = anon_vma_clone(vma, pvma); in anon_vma_fork()
333 if (vma->anon_vma) in anon_vma_fork()
357 vma->anon_vma = anon_vma; in anon_vma_fork()
359 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_fork()
368 unlink_anon_vmas(vma); in anon_vma_fork()
372 void unlink_anon_vmas(struct vm_area_struct *vma) in unlink_anon_vmas() argument
381 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
399 if (vma->anon_vma) in unlink_anon_vmas()
400 vma->anon_vma->degree--; in unlink_anon_vmas()
408 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
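
The two list_for_each_entry_safe() loops at 381 and 408 are two passes over the vma's AVC chain: the first detaches each AVC from its anon_vma (adjusting the degree counts seen at 399-400), the second drops references and frees the chain entries. unlink_anon_vmas() is also the error path anon_vma_fork() falls back to at line 368. The _safe variant matters because entries are freed mid-walk, so the next pointer must be fetched before the current node is released; a runnable sketch of the idiom:

    #include <stdio.h>
    #include <stdlib.h>

    struct avc {
            int id;
            struct avc *next;
    };

    /* Grab the next pointer before freeing the current node, exactly the
     * property the kernel's list_for_each_entry_safe() provides. */
    static void unlink_all(struct avc **head)
    {
            struct avc *avc, *next;

            for (avc = *head; avc; avc = next) {
                    next = avc->next;        /* saved before the free below */
                    printf("unlinking avc %d\n", avc->id);
                    free(avc);
            }
            *head = NULL;
    }

    int main(void)
    {
            struct avc *head = NULL;

            for (int i = 0; i < 3; i++) {
                    struct avc *n = malloc(sizeof(*n));
                    n->id = i;
                    n->next = head;
                    head = n;
            }
            unlink_all(&head);
            return 0;
    }
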
572 __vma_address(struct page *page, struct vm_area_struct *vma) in __vma_address() argument
575 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in __vma_address()
579 vma_address(struct page *page, struct vm_area_struct *vma) in vma_address() argument
581 unsigned long address = __vma_address(page, vma); in vma_address()
584 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in vma_address()
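
__vma_address() is pure arithmetic: the page's offset within the mapping (pgoff - vma->vm_pgoff) is scaled to bytes and added to vma->vm_start; vma_address() then asserts at line 584 that the result lies inside [vm_start, vm_end). A runnable worked example, with all numbers made up:

    #include <stdio.h>

    #define PAGE_SHIFT 12                              /* 4 KiB pages */

    int main(void)
    {
            unsigned long vm_start = 0x7f0000000000UL; /* vma->vm_start */
            unsigned long vm_pgoff = 16;               /* vma->vm_pgoff */
            unsigned long pgoff    = 20;               /* page's index  */

            unsigned long address = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);

            /* four pages past vm_start: 0x7f0000004000 */
            printf("address = %#lx\n", address);
            return 0;
    }
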
716 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
725 if (!vma->anon_vma || !page__anon_vma || in page_address_in_vma()
726 vma->anon_vma->root != page__anon_vma->root) in page_address_in_vma()
729 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
733 address = __vma_address(page, vma); in page_address_in_vma()
734 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in page_address_in_vma()
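
page_address_in_vma() puts ownership checks in front of that arithmetic: an anonymous page must share the vma's anon_vma root (roots are compared rather than the anon_vmas themselves so pages still match across fork), a file page must share the vma's f_mapping, and an out-of-range address yields -EFAULT. A simplified user-space model of those gates, with toy types:

    #include <stdio.h>

    #define EFAULT 14

    struct anon_vma { struct anon_vma *root; };

    struct page_info {
            int is_anon;
            struct anon_vma *anon_vma;  /* valid when is_anon */
            void *mapping;              /* file mapping otherwise */
    };

    struct vma_info {
            struct anon_vma *anon_vma;
            void *f_mapping;            /* vma->vm_file->f_mapping stand-in */
    };

    static int page_belongs_to_vma(struct page_info *page, struct vma_info *vma)
    {
            if (page->is_anon) {
                    if (!vma->anon_vma || !page->anon_vma ||
                        vma->anon_vma->root != page->anon_vma->root)
                            return -EFAULT;
            } else if (page->mapping) {
                    if (!vma->f_mapping || vma->f_mapping != page->mapping)
                            return -EFAULT;
            } else {
                    return -EFAULT;
            }
            return 0;
    }

    int main(void)
    {
            struct anon_vma root  = { &root };   /* a root points at itself */
            struct anon_vma child = { &root };
            struct anon_vma other = { &other };
            struct page_info page = { 1, &child, NULL };
            struct vma_info vma     = { &root, NULL };
            struct vma_info foreign = { &other, NULL };

            printf("same root:  %d\n", page_belongs_to_vma(&page, &vma));     /* 0 */
            printf("wrong root: %d\n", page_belongs_to_vma(&page, &foreign)); /* -14 */
            return 0;
    }
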
825 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) in page_mapped_in_vma() argument
831 address = __vma_address(page, vma); in page_mapped_in_vma()
832 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in page_mapped_in_vma()
834 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
851 static int page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
854 struct mm_struct *mm = vma->vm_mm; in page_referenced_one()
871 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
878 if (pmdp_clear_flush_young_notify(vma, address, pmd)) in page_referenced_one()
892 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
898 if (ptep_clear_flush_young_notify(vma, address, pte)) { in page_referenced_one()
906 if (likely(!(vma->vm_flags & VM_SEQ_READ))) in page_referenced_one()
919 pra->vm_flags |= vma->vm_flags; in page_referenced_one()
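
page_referenced_one() is the aging primitive: it test-and-clears the hardware accessed bit for one mapping (the pmd variant at 878 covers transparent huge pages, the pte variant at 898 ordinary pages), bails out early for VM_LOCKED vmas, and per line 906 does not count references from VM_SEQ_READ vmas, since sequential readahead touches pages only once. A user-space model with a plain flag word in place of a real PTE (flag values copied from mm.h, the rest illustrative):

    #include <stdio.h>

    #define PTE_YOUNG   0x1
    #define VM_LOCKED   0x00002000
    #define VM_SEQ_READ 0x00008000

    struct mapping { unsigned long pte_flags, vm_flags; };

    static int mapping_was_referenced(struct mapping *m)
    {
            if (m->vm_flags & VM_LOCKED)
                    return 0;       /* mlocked: don't age, keep the page */
            if (m->pte_flags & PTE_YOUNG) {
                    m->pte_flags &= ~PTE_YOUNG;  /* clear for the next pass */
                    if (m->vm_flags & VM_SEQ_READ)
                            return 0;            /* readahead touch, don't count */
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            struct mapping m = { PTE_YOUNG, 0 };

            printf("referenced: %d\n", mapping_was_referenced(&m)); /* 1 */
            printf("referenced: %d\n", mapping_was_referenced(&m)); /* 0: bit cleared */
            return 0;
    }
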
929 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) in invalid_page_referenced_vma() argument
934 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
998 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
1001 struct mm_struct *mm = vma->vm_mm; in page_mkclean_one()
1014 flush_cache_page(vma, address, pte_pfn(*pte)); in page_mkclean_one()
1015 entry = ptep_clear_flush(vma, address, pte); in page_mkclean_one()
1032 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) in invalid_mkclean_vma() argument
1034 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
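
page_mkclean_one() write-protects and cleans the PTE of a shared file page so that the next store faults and re-dirties it, which is how writeback tracks redirtying; invalid_mkclean_vma() (line 1034) limits the walk to VM_SHARED vmas. In miniature, with flag bits standing in for a real PTE:

    #include <stdio.h>

    #define PTE_WRITE 0x1
    #define PTE_DIRTY 0x2

    /* Write-protect and clean in one step; the caller counts cleaned PTEs. */
    static int page_mkclean_sketch(unsigned long *pte)
    {
            if (*pte & (PTE_WRITE | PTE_DIRTY)) {
                    *pte &= ~(PTE_WRITE | PTE_DIRTY); /* wrprotect + mkclean */
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            unsigned long pte = PTE_WRITE | PTE_DIRTY;
            int cleaned = page_mkclean_sketch(&pte);

            printf("cleaned: %d, pte now %#lx\n", cleaned, pte);
            return 0;
    }
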
1077 struct vm_area_struct *vma, unsigned long address) in page_move_anon_rmap() argument
1079 struct anon_vma *anon_vma = vma->anon_vma; in page_move_anon_rmap()
1082 VM_BUG_ON_VMA(!anon_vma, vma); in page_move_anon_rmap()
1083 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); in page_move_anon_rmap()
1102 struct vm_area_struct *vma, unsigned long address, int exclusive) in __page_set_anon_rmap() argument
1104 struct anon_vma *anon_vma = vma->anon_vma; in __page_set_anon_rmap()
1121 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
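
Both page_move_anon_rmap() and __page_set_anon_rmap() end by storing the anon_vma in page->mapping. The same field otherwise holds an address_space pointer; the two cases are told apart by a low tag bit (PAGE_MAPPING_ANON), which is free because both structures are at least word-aligned. A runnable demonstration of the tagging trick (not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_MAPPING_ANON 0x1   /* low bit tags anon_vma pointers */

    struct anon_vma { int id; };

    int main(void)
    {
            struct anon_vma *av = malloc(sizeof(*av));
            void *mapping;

            av->id = 7;

            /* store: tag the pointer, as __page_set_anon_rmap() does */
            mapping = (void *)((unsigned long)av + PAGE_MAPPING_ANON);

            /* load: test the tag, then strip it to recover the pointer */
            if ((unsigned long)mapping & PAGE_MAPPING_ANON) {
                    struct anon_vma *back = (struct anon_vma *)
                            ((unsigned long)mapping - PAGE_MAPPING_ANON);
                    printf("anon page, anon_vma id %d\n", back->id);
            }
            free(av);
            return 0;
    }
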
1131 struct vm_area_struct *vma, unsigned long address) in __page_check_anon_rmap() argument
1146 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); in __page_check_anon_rmap()
1147 BUG_ON(page->index != linear_page_index(vma, address)); in __page_check_anon_rmap()
1163 struct vm_area_struct *vma, unsigned long address) in page_add_anon_rmap() argument
1165 do_page_add_anon_rmap(page, vma, address, 0); in page_add_anon_rmap()
1174 struct vm_area_struct *vma, unsigned long address, int exclusive) in do_page_add_anon_rmap() argument
1196 __page_set_anon_rmap(page, vma, address, exclusive); in do_page_add_anon_rmap()
1198 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
1212 struct vm_area_struct *vma, unsigned long address) in page_add_new_anon_rmap() argument
1214 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in page_add_new_anon_rmap()
1221 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
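
The three add-rmap entry points split on the mapcount transition: page->_mapcount starts at -1, so the increment that reaches zero marks the first mapping, which initializes the rmap fields via __page_set_anon_rmap(); later mappings only run the __page_check_anon_rmap() sanity checks, and page_add_new_anon_rmap() is the always-first, always-exclusive variant. A user-space model of the inc-and-test idiom, using C11 atomics in place of the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
            atomic_int mapcount = -1;   /* _mapcount's initial value */

            for (int i = 0; i < 3; i++) {
                    /* the increment that lands on 0 is the first mapping */
                    int first = (atomic_fetch_add(&mapcount, 1) + 1 == 0);

                    printf("mapping %d: %s\n", i,
                           first ? "first -> __page_set_anon_rmap()"
                                 : "again -> __page_check_anon_rmap()");
            }
            return 0;
    }
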
1319 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1322 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1333 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) in try_to_unmap_one()
1344 spmd_end = spmd_start + vma_mmu_pagesize(vma); in try_to_unmap_one()
1351 adjust_range_if_pmd_sharing_possible(vma, &spmd_start, in try_to_unmap_one()
1353 if (spmd_end - spmd_start != vma_mmu_pagesize(vma)) { in try_to_unmap_one()
1357 mmu_notifier_invalidate_range_start(vma->vm_mm, in try_to_unmap_one()
1372 if (vma->vm_flags & VM_LOCKED) { in try_to_unmap_one()
1382 if (ptep_clear_flush_young_notify(vma, address, pte)) { in try_to_unmap_one()
1399 flush_cache_range(vma, spmd_start, spmd_end); in try_to_unmap_one()
1400 flush_tlb_range(vma, spmd_start, spmd_end); in try_to_unmap_one()
1413 flush_cache_page(vma, address, page_to_pfn(page)); in try_to_unmap_one()
1426 pteval = ptep_clear_flush(vma, address, pte); in try_to_unmap_one()
1507 mmu_notifier_invalidate_range_end(vma->vm_mm, in try_to_unmap_one()
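
Within try_to_unmap_one(), the sequence for one PTE is: flush the cache for the page (1413), clear the PTE while capturing its old value (1426), and only then propagate state such as the dirty bit out of that old value, because hardware may set it right up to the moment of the clear. The spmd_start/spmd_end matches above handle the separate case of shared hugetlb PMDs, where a whole range must be flushed and invalidated (1399-1400). A simplified single-PTE version of the capture-then-propagate step:

    #include <stdio.h>

    #define PTE_PRESENT 0x1
    #define PTE_DIRTY   0x2

    /* Clear the PTE and return its old value in one step; the kernel does
     * this with an atomic xchg plus a TLB flush. Not kernel code. */
    static unsigned long ptep_clear_flush_sketch(unsigned long *pte)
    {
            unsigned long old = *pte;
            *pte = 0;
            return old;
    }

    int main(void)
    {
            unsigned long pte = PTE_PRESENT | PTE_DIRTY;
            unsigned long old = ptep_clear_flush_sketch(&pte);

            if (old & PTE_DIRTY)
                    printf("old PTE was dirty: mark the page dirty before freeing\n");
            return 0;
    }
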
1512 bool is_vma_temporary_stack(struct vm_area_struct *vma) in is_vma_temporary_stack() argument
1514 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); in is_vma_temporary_stack()
1519 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == in is_vma_temporary_stack()
1526 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) in invalid_migration_vma() argument
1528 return is_vma_temporary_stack(vma); in invalid_migration_vma()
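
is_vma_temporary_stack() recognizes the short-lived stack vma that execve() builds before relocating it to its final address: the vma must have a stack growth flag and must still carry both readahead-hint bits that the kernel reuses as VM_STACK_INCOMPLETE_SETUP; invalid_migration_vma() then makes page migration skip such vmas. A small demonstration (flag values copied from mm.h; treat the rest as illustrative):

    #include <stdio.h>

    #define VM_GROWSDOWN 0x00000100
    #define VM_GROWSUP   0x00000200
    #define VM_SEQ_READ  0x00008000
    #define VM_RAND_READ 0x00010000
    /* during exec the temporary stack carries both readahead hint bits */
    #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)

    static int is_vma_temporary_stack_sketch(unsigned long vm_flags)
    {
            if (!(vm_flags & (VM_GROWSDOWN | VM_GROWSUP)))
                    return 0;                        /* not a stack at all */
            return (vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
                    VM_STACK_INCOMPLETE_SETUP;       /* both hint bits set */
    }

    int main(void)
    {
            unsigned long temp  = VM_GROWSDOWN | VM_STACK_INCOMPLETE_SETUP;
            unsigned long final = VM_GROWSDOWN;

            printf("temporary stack: %d\n", is_vma_temporary_stack_sketch(temp));
            printf("settled stack:   %d\n", is_vma_temporary_stack_sketch(final));
            return 0;
    }
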
1670 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon() local
1671 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1675 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
1678 ret = rwc->rmap_one(page, vma, address, rwc->arg); in rmap_walk_anon()
1705 struct vm_area_struct *vma; in rmap_walk_file() local
1721 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in rmap_walk_file()
1722 unsigned long address = vma_address(page, vma); in rmap_walk_file()
1726 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_file()
1729 ret = rwc->rmap_one(page, vma, address, rwc->arg); in rmap_walk_file()
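
rmap_walk_anon() and rmap_walk_file() share one shape: enumerate every vma that can map the page (the anon side walks the AVC structures, the file side the i_mmap interval tree at 1721), compute the address with vma_address(), let the caller's optional invalid_vma() hook skip vmas (1675/1726), and stop as soon as rmap_one() stops returning SWAP_AGAIN. A toy version of that control flow, with stand-in types:

    #include <stdio.h>

    struct vma_stub { int id; int skip; };

    struct rmap_walk_control_sketch {
            int (*rmap_one)(struct vma_stub *vma, void *arg);
            int (*invalid_vma)(struct vma_stub *vma, void *arg);
            void *arg;
    };

    static void rmap_walk_sketch(struct vma_stub *vmas, int n,
                                 struct rmap_walk_control_sketch *rwc)
    {
            for (int i = 0; i < n; i++) {
                    struct vma_stub *vma = &vmas[i];

                    if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                            continue;           /* filtered, as at 1675/1726 */
                    if (!rwc->rmap_one(vma, rwc->arg))
                            break;              /* rmap_one asked to stop */
            }
    }

    static int visit(struct vma_stub *vma, void *arg)
    {
            (void)arg;
            printf("visiting vma %d\n", vma->id);
            return 1;   /* keep going; the kernel compares against SWAP_AGAIN */
    }

    static int filter(struct vma_stub *vma, void *arg)
    {
            (void)arg;
            return vma->skip;
    }

    int main(void)
    {
            struct vma_stub vmas[] = { {1, 0}, {2, 1}, {3, 0} };
            struct rmap_walk_control_sketch rwc = { visit, filter, NULL };

            rmap_walk_sketch(vmas, 3, &rwc);
            return 0;
    }
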
1758 struct vm_area_struct *vma, unsigned long address, int exclusive) in __hugepage_set_anon_rmap() argument
1760 struct anon_vma *anon_vma = vma->anon_vma; in __hugepage_set_anon_rmap()
1771 page->index = linear_page_index(vma, address); in __hugepage_set_anon_rmap()
1775 struct vm_area_struct *vma, unsigned long address) in hugepage_add_anon_rmap() argument
1777 struct anon_vma *anon_vma = vma->anon_vma; in hugepage_add_anon_rmap()
1785 __hugepage_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
1789 struct vm_area_struct *vma, unsigned long address) in hugepage_add_new_anon_rmap() argument
1791 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugepage_add_new_anon_rmap()
1793 __hugepage_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()