Lines matching refs:vma in mm/migrate.c (the leading number on each line is the line number in that file)

106 static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,  in remove_migration_pte()  argument
109 struct mm_struct *mm = vma->vm_mm; in remove_migration_pte()
119 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep); in remove_migration_pte()
147 pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); in remove_migration_pte()
153 pte = maybe_mkwrite(pte, vma); in remove_migration_pte()
158 pte = arch_make_huge_pte(pte, vma, new, 0); in remove_migration_pte()
166 hugepage_add_anon_rmap(new, vma, addr); in remove_migration_pte()
170 page_add_anon_rmap(new, vma, addr); in remove_migration_pte()
175 update_mmu_cache(vma, addr, ptep); in remove_migration_pte()
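Lines 147-175 are the heart of remove_migration_pte(): once a page has been migrated, the placeholder migration entry is replaced with a real, present PTE pointing at the new page. The condensed fragment below mirrors that sequence as it appears in kernels of this vintage; it is kernel-context code only (not standalone-buildable), and the locals entry, ptep and addr, plus the set_pte_at() call, are assumed from the full function since they do not mention vma:

    /* Kernel-context sketch of the restore sequence in remove_migration_pte().
     * entry is the migration swp_entry_t, ptep the PTE slot, addr the mapped
     * address; set_pte_at() is assumed from the surrounding function. */
    pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); /* PTE for the new page, initially old */
    if (is_write_migration_entry(entry))             /* the entry remembers writability */
            pte = maybe_mkwrite(pte, vma);           /* re-grant write if the VMA allows */
    if (PageHuge(new))
            pte = arch_make_huge_pte(pte, vma, new, 0);
    set_pte_at(mm, addr, ptep, pte);                 /* make the mapping visible */
    if (PageHuge(new))
            hugepage_add_anon_rmap(new, vma, addr);  /* rejoin the reverse map */
    else if (PageAnon(new))
            page_add_anon_rmap(new, vma, addr);
    update_mmu_cache(vma, addr, ptep);               /* tell the arch the PTE changed */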
198 struct vm_area_struct *vma; in remove_linear_migration_ptes_from_nonlinear() local
203 list_for_each_entry(vma, in remove_linear_migration_ptes_from_nonlinear()
206 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in remove_linear_migration_ptes_from_nonlinear()
207 if (addr >= vma->vm_start && addr < vma->vm_end) in remove_linear_migration_ptes_from_nonlinear()
208 remove_migration_pte(page, vma, addr, arg); in remove_linear_migration_ptes_from_nonlinear()
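Line 206 is the standard file-offset-to-virtual-address conversion: take the page's offset within the file (pgoff), subtract the file offset at which the VMA starts mapping (vma->vm_pgoff), shift pages into bytes, and add the VMA's start address; line 207 then checks the result actually falls inside the VMA. A minimal, runnable userspace demonstration of the same arithmetic (the struct and the values are made-up stand-ins):

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* 4 KiB pages, as on x86 */

    struct fake_vma {      /* hypothetical stand-in for vm_area_struct */
            unsigned long vm_start, vm_end, vm_pgoff;
    };

    int main(void)
    {
            struct fake_vma vma = { 0x700000000000UL, 0x700000010000UL, 16 };
            unsigned long pgoff = 20;  /* page 20 of the backing file */
            unsigned long addr = vma.vm_start +
                                 ((pgoff - vma.vm_pgoff) << PAGE_SHIFT);

            /* Same bounds check as line 207: the address must lie in the VMA. */
            if (addr >= vma.vm_start && addr < vma.vm_end)
                    printf("file page %lu maps to 0x%lx\n", pgoff, addr);
            return 0;
    }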
276 void migration_entry_wait_huge(struct vm_area_struct *vma, in migration_entry_wait_huge() argument
279 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte); in migration_entry_wait_huge()
1229 struct vm_area_struct *vma; in do_move_page_to_node_array() local
1233 vma = find_vma(mm, pp->addr); in do_move_page_to_node_array()
1234 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) in do_move_page_to_node_array()
1237 page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT); in do_move_page_to_node_array()
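do_move_page_to_node_array() is the kernel side of the move_pages(2) system call: for each user address it looks up the VMA, verifies it is migratable, and pins the page via follow_page() with FOLL_GET|FOLL_SPLIT. From userspace, the same path is exercised like this (runnable sketch; needs libnuma's <numaif.h> and linking with -lnuma, and the target node 0 is an assumption):

    #include <numaif.h>   /* move_pages(2) wrapper; link with -lnuma */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesize = sysconf(_SC_PAGESIZE);
            void *page;
            if (posix_memalign(&page, pagesize, pagesize))
                    return 1;
            *(char *)page = 1;            /* fault the page in so it exists */

            void *pages[1] = { page };
            int nodes[1] = { 0 };         /* assumed target: NUMA node 0 */
            int status[1];

            /* pid 0 means the calling process; on kernels of this vintage
             * this request lands in do_move_page_to_node_array(). */
            if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE))
                    perror("move_pages");
            else
                    printf("page status after move: %d\n", status[0]);
            return 0;
    }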
1399 struct vm_area_struct *vma; in do_pages_stat_array() local
1403 vma = find_vma(mm, addr); in do_pages_stat_array()
1404 if (!vma || addr < vma->vm_start) in do_pages_stat_array()
1407 page = follow_page(vma, addr, 0); in do_pages_stat_array()
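do_pages_stat_array() serves the query side of the same syscall: when the nodes argument of move_pages(2) is NULL, nothing is moved and the kernel only reports which node each page currently resides on, using the same find_vma() plus bounds check and a follow_page() without FOLL_GET. A runnable userspace counterpart:

    #include <numaif.h>   /* link with -lnuma */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesize = sysconf(_SC_PAGESIZE);
            void *page;
            if (posix_memalign(&page, pagesize, pagesize))
                    return 1;
            *(char *)page = 1;            /* make sure the page is present */

            void *pages[1] = { page };
            int status[1];

            /* nodes == NULL turns move_pages() into a pure status query,
             * handled by do_pages_stat_array() on the kernel side. */
            if (move_pages(0, 1, pages, NULL, status, 0))
                    perror("move_pages");
            else
                    printf("page resides on node %d\n", status[0]);
            return 0;
    }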
1537 struct vm_area_struct *vma; in migrate_vmas() local
1540 for (vma = mm->mmap; vma && !err; vma = vma->vm_next) { in migrate_vmas()
1541 if (vma->vm_ops && vma->vm_ops->migrate) { in migrate_vmas()
1542 err = vma->vm_ops->migrate(vma, to, from, flags); in migrate_vmas()
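migrate_vmas() is a plain walk of the process's VMA list (singly linked through vm_next in kernels of this era), dispatching to whatever vm_ops->migrate callback a mapping's driver has installed and stopping at the first error. A runnable userspace mock of the same walk-with-callback pattern (all types and names here are made-up stand-ins, not kernel APIs):

    #include <stdio.h>

    struct mock_vma;
    struct mock_vm_ops {
            int (*migrate)(struct mock_vma *vma, int to, int from);
    };
    struct mock_vma {
            const struct mock_vm_ops *vm_ops;
            struct mock_vma *vm_next;
    };

    static int demo_migrate(struct mock_vma *vma, int to, int from)
    {
            printf("migrating vma %p: node %d -> %d\n", (void *)vma, from, to);
            return 0;
    }

    int main(void)
    {
            const struct mock_vm_ops ops = { demo_migrate };
            struct mock_vma b = { NULL, NULL };          /* no hook: skipped */
            struct mock_vma a = { &ops, &b };
            int err = 0;

            /* Mirrors lines 1540-1542: stop on the first error, and skip
             * VMAs whose vm_ops lack a migrate hook. */
            for (struct mock_vma *vma = &a; vma && !err; vma = vma->vm_next)
                    if (vma->vm_ops && vma->vm_ops->migrate)
                            err = vma->vm_ops->migrate(vma, 1, 0);
            return err;
    }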
1710 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
1723 (vma->vm_flags & VM_EXEC)) in migrate_misplaced_page()
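The VM_EXEC test at line 1723 is a NUMA-balancing heuristic: a file-backed page mapped executable into more than one process is almost certainly a shared library, and bouncing it between nodes costs more than it saves. Reconstructed from kernels of this vintage (kernel-context, not standalone):

    /* Guard in migrate_misplaced_page(): don't chase shared executable
     * file pages around the machine. */
    if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
        (vma->vm_flags & VM_EXEC))
            goto out;   /* probably a shared library; leave it alone */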
1767 struct vm_area_struct *vma, in migrate_misplaced_transhuge_page() argument
1802 flush_tlb_range(vma, mmun_start, mmun_end); in migrate_misplaced_transhuge_page()
1842 entry = mk_pmd(new_page, vma->vm_page_prot); in migrate_misplaced_transhuge_page()
1844 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in migrate_misplaced_transhuge_page()
1853 flush_cache_range(vma, mmun_start, mmun_end); in migrate_misplaced_transhuge_page()
1854 page_add_anon_rmap(new_page, vma, mmun_start); in migrate_misplaced_transhuge_page()
1855 pmdp_clear_flush(vma, mmun_start, pmd); in migrate_misplaced_transhuge_page()
1857 flush_tlb_range(vma, mmun_start, mmun_end); in migrate_misplaced_transhuge_page()
1858 update_mmu_cache_pmd(vma, address, &entry); in migrate_misplaced_transhuge_page()
1862 flush_tlb_range(vma, mmun_start, mmun_end); in migrate_misplaced_transhuge_page()
1863 update_mmu_cache_pmd(vma, address, &entry); in migrate_misplaced_transhuge_page()
1899 update_mmu_cache_pmd(vma, address, &entry); in migrate_misplaced_transhuge_page()
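Lines 1842-1858 show the order in which migrate_misplaced_transhuge_page() swaps a THP's pmd, and the order matters: flush the caches for the range, join the new page to the reverse map, atomically clear-and-flush the old pmd, publish the new entry, then flush the TLB and update the MMU cache. A condensed kernel-context sketch (set_pmd_at() is assumed from the full function since it does not mention vma; not standalone-buildable):

    /* Kernel-context sketch of the pmd swap in
     * migrate_misplaced_transhuge_page(), lines 1842-1858. */
    entry = mk_pmd(new_page, vma->vm_page_prot);          /* entry for the new THP */
    entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);   /* writable if VMA allows */

    flush_cache_range(vma, mmun_start, mmun_end);         /* no stale cache lines */
    page_add_anon_rmap(new_page, vma, mmun_start);        /* rmap before it's visible */
    pmdp_clear_flush(vma, mmun_start, pmd);               /* atomically take old pmd down */
    set_pmd_at(mm, mmun_start, pmd, entry);               /* publish the new mapping */
    flush_tlb_range(vma, mmun_start, mmun_end);           /* drop stale translations */
    update_mmu_cache_pmd(vma, address, &entry);

The second flush_tlb_range()/update_mmu_cache_pmd() pair at lines 1862-1863, and the one at line 1899, appear to sit on rollback paths that reinstate a usable mapping when the migration cannot be committed.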