Lines matching refs:vma — cross-reference results for the identifier vma. Each entry gives the source line number, the matching line, and the enclosing function, with declarations tagged as argument, local, or member. The matched file appears to be the kernel's mm/memory.c, from a tree carrying the speculative page-fault (SPF) patches (vm_write_begin/vm_write_end, the spf_* tracepoints, __handle_speculative_fault).

430 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,  in free_pgtables()  argument
433 while (vma) { in free_pgtables()
434 struct vm_area_struct *next = vma->vm_next; in free_pgtables()
435 unsigned long addr = vma->vm_start; in free_pgtables()
441 vm_write_begin(vma); in free_pgtables()
442 unlink_anon_vmas(vma); in free_pgtables()
443 vm_write_end(vma); in free_pgtables()
444 unlink_file_vma(vma); in free_pgtables()
446 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
447 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
453 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
455 vma = next; in free_pgtables()
456 next = vma->vm_next; in free_pgtables()
457 vm_write_begin(vma); in free_pgtables()
458 unlink_anon_vmas(vma); in free_pgtables()
459 vm_write_end(vma); in free_pgtables()
460 unlink_file_vma(vma); in free_pgtables()
462 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
465 vma = next; in free_pgtables()
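
Read together, the matched lines above let the walk that free_pgtables() performs over the VMA list be reconstructed roughly as below. This is a sketch based only on the lines shown here: the floor/ceiling parameters are taken from the mainline signature (they are not visible in this listing), and the extra condition on the wrapped line 453 is omitted.

/*
 * Sketch reconstructed from the matched lines above; not the exact body.
 * "floor"/"ceiling" are free_pgtables()'s remaining parameters.
 */
static void free_pgtables_sketch(struct mmu_gather *tlb,
				 struct vm_area_struct *vma,
				 unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/* Detach anon and file rmap state before the page tables go. */
		vm_write_begin(vma);
		unlink_anon_vmas(vma);
		vm_write_end(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
					       floor, ceiling); /* args simplified */
		} else {
			/*
			 * Coalesce neighbouring VMAs whose page tables can be
			 * freed in one call (further checks on the wrapped
			 * source line are not shown in this listing).
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
				vma = next;
				next = vma->vm_next;
				vm_write_begin(vma);
				unlink_anon_vmas(vma);
				vm_write_end(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				       floor, ceiling); /* args simplified */
		}
		vma = next;
	}
}
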
545 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
548 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
577 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
578 index = linear_page_index(vma, addr); in print_bad_pte()
586 (void *)addr, READ_ONCE(vma->vm_flags), vma->anon_vma, mapping, index); in print_bad_pte()
588 vma->vm_file, in print_bad_pte()
589 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
590 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
639 struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in _vm_normal_page() argument
647 if (vma->vm_ops && vma->vm_ops->find_special_page) in _vm_normal_page()
648 return vma->vm_ops->find_special_page(vma, addr); in _vm_normal_page()
656 print_bad_pte(vma, addr, pte, NULL); in _vm_normal_page()
673 off = (addr - vma->vm_start) >> PAGE_SHIFT; in _vm_normal_page()
674 if (pfn == vma->vm_pgoff + off) in _vm_normal_page()
686 print_bad_pte(vma, addr, pte, NULL); in _vm_normal_page()
699 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
709 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
710 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
716 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
717 if (pfn == vma->vm_pgoff + off) in vm_normal_page_pmd()
719 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
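
The off/vm_pgoff comparison repeated in _vm_normal_page() and vm_normal_page_pmd() is the check for a COW-mapped remap_pfn_range() area: such a VMA records its first PFN in vm_pgoff, so a PTE whose PFN still sits at the linear offset maps raw memory with no struct page behind it. A hedged restatement of just that test (the helper name is hypothetical, not a kernel function):

/*
 * Hypothetical helper: true if @pfn is exactly where a linear
 * remap_pfn_range() mapping would have put it for @addr, i.e. the PTE
 * covers a raw PFN and vm_normal_page() must return NULL.
 */
static bool pfn_is_linear_pfnmap(struct vm_area_struct *vma,
				 unsigned long addr, unsigned long pfn)
{
	unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

	return pfn == vma->vm_pgoff + off;
}
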
964 page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma, in page_copy_prealloc() argument
969 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr); in page_copy_prealloc()
1279 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1309 page = vm_normal_page(vma, addr, ptent); in zap_pte_range()
1332 likely(!(vma->vm_flags & VM_SEQ_READ))) in zap_pte_range()
1338 print_bad_pte(vma, addr, ptent, page); in zap_pte_range()
1385 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1417 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
1429 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1430 else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1454 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1463 struct vm_area_struct *vma, p4d_t *p4d, in zap_pud_range() argument
1476 split_huge_pud(vma, pud, addr); in zap_pud_range()
1477 } else if (zap_huge_pud(tlb, vma, pud, addr)) in zap_pud_range()
1483 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1492 struct vm_area_struct *vma, pgd_t *pgd, in zap_p4d_range() argument
1504 next = zap_pud_range(tlb, vma, p4d, addr, next, details); in zap_p4d_range()
1511 struct vm_area_struct *vma, in unmap_page_range() argument
1519 tlb_start_vma(tlb, vma); in unmap_page_range()
1520 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1525 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1527 tlb_end_vma(tlb, vma); in unmap_page_range()
1532 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
1536 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1539 if (start >= vma->vm_end) in unmap_single_vma()
1541 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1542 if (end <= vma->vm_start) in unmap_single_vma()
1545 if (vma->vm_file) in unmap_single_vma()
1546 uprobe_munmap(vma, start, end); in unmap_single_vma()
1548 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1549 untrack_pfn(vma, 0, 0); in unmap_single_vma()
1552 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
1564 if (vma->vm_file) { in unmap_single_vma()
1565 i_mmap_lock_write(vma->vm_file->f_mapping); in unmap_single_vma()
1566 __unmap_hugepage_range_final(tlb, vma, start, end, NULL); in unmap_single_vma()
1567 i_mmap_unlock_write(vma->vm_file->f_mapping); in unmap_single_vma()
1570 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1593 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
1598 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in unmap_vmas()
1601 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) in unmap_vmas()
1602 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); in unmap_vmas()
1614 void zap_page_range(struct vm_area_struct *vma, unsigned long start, in zap_page_range() argument
1621 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range()
1623 tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end); in zap_page_range()
1624 update_hiwater_rss(vma->vm_mm); in zap_page_range()
1626 for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next) in zap_page_range()
1627 unmap_single_vma(&tlb, vma, start, range.end, NULL); in zap_page_range()
1641 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1648 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range_single()
1650 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end); in zap_page_range_single()
1651 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
1653 unmap_single_vma(&tlb, vma, address, range.end, details); in zap_page_range_single()
1669 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1672 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1673 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
1676 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
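
As the matched checks show, zap_vma_ptes() only accepts a range that lies inside the VMA and only on VM_PFNMAP mappings, which matches its usual caller: a driver tearing down PTEs it installed earlier so that later user accesses re-fault. An illustrative sketch; the surrounding driver code is hypothetical:

/* Hypothetical driver helper: revoke a PFN mapping set up earlier. */
static void my_dev_revoke_user_mapping(struct vm_area_struct *vma)
{
	/* Legal only for VM_PFNMAP VMAs and ranges within [vm_start, vm_end). */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
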
1740 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
1743 struct mm_struct *mm = vma->vm_mm; in insert_page()
1778 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, in insert_pages() argument
1784 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
1848 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pages() argument
1854 if (addr < vma->vm_start || end_addr >= vma->vm_end) in vm_insert_pages()
1856 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_pages()
1857 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
1858 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_pages()
1859 vma->vm_flags |= VM_MIXEDMAP; in vm_insert_pages()
1862 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); in vm_insert_pages()
1868 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]); in vm_insert_pages()
1907 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
1910 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
1914 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
1915 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
1916 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
1917 vma->vm_flags |= VM_MIXEDMAP; in vm_insert_page()
1919 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
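
The checks in vm_insert_page() (the address must fall inside the VMA, VM_PFNMAP is rejected, VM_MIXEDMAP is set on first use) correspond to the common driver pattern of exposing kernel-allocated order-0 pages from an mmap handler. A sketch of that pattern; the my_dev structure and its page array are hypothetical:

/* Hypothetical driver mmap: map a preallocated array of order-0 pages. */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;	/* hypothetical */
	unsigned long i, npages = vma_pages(vma);
	int err;

	if (vma->vm_pgoff || npages > dev->npages)
		return -EINVAL;

	for (i = 0; i < npages; i++) {
		err = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				     dev->pages[i]);
		if (err)
			return err;
	}
	return 0;
}

vm_map_pages(), listed just below, wraps essentially this loop and additionally treats vma->vm_pgoff as an offset into the page array.
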
1934 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, in __vm_map_pages() argument
1937 unsigned long count = vma_pages(vma); in __vm_map_pages()
1938 unsigned long uaddr = vma->vm_start; in __vm_map_pages()
1950 ret = vm_insert_page(vma, uaddr, pages[offset + i]); in __vm_map_pages()
1977 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, in vm_map_pages() argument
1980 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); in vm_map_pages()
1997 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, in vm_map_pages_zero() argument
2000 return __vm_map_pages(vma, pages, num, 0); in vm_map_pages_zero()
2004 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
2007 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2032 vma->vm_flags); in insert_pfn()
2033 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) in insert_pfn()
2034 update_mmu_cache(vma, addr, pte); in insert_pfn()
2047 entry = maybe_mkwrite(pte_mkdirty(entry), vma->vm_flags); in insert_pfn()
2051 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
2079 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_prot() argument
2088 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_prot()
2089 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_prot()
2091 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_prot()
2092 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vmf_insert_pfn_prot()
2094 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_prot()
2100 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); in vmf_insert_pfn_prot()
2102 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, in vmf_insert_pfn_prot()
2127 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn() argument
2130 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vmf_insert_pfn()
2134 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) in vm_mixed_ok() argument
2137 if (vma->vm_flags & VM_MIXEDMAP) in vm_mixed_ok()
2148 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, in __vm_insert_mixed() argument
2154 BUG_ON(!vm_mixed_ok(vma, pfn)); in __vm_insert_mixed()
2156 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
2159 track_pfn_insert(vma, &pgprot, pfn); in __vm_insert_mixed()
2181 err = insert_page(vma, addr, page, pgprot); in __vm_insert_mixed()
2183 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); in __vm_insert_mixed()
2220 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed_prot() argument
2223 return __vm_insert_mixed(vma, addr, pfn, pgprot, false); in vmf_insert_mixed_prot()
2227 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed() argument
2230 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false); in vmf_insert_mixed()
2239 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, in vmf_insert_mixed_mkwrite() argument
2242 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true); in vmf_insert_mixed_mkwrite()
2356 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
2362 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
2387 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range()
2388 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range()
2390 vma->vm_pgoff = pfn; in remap_pfn_range()
2393 err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
2397 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
2402 flush_cache_range(vma, addr, end); in remap_pfn_range()
2412 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size)); in remap_pfn_range()
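
remap_pfn_range() is the classic way for a driver to map a physically contiguous region; as the matched line shows, it marks the VMA VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP itself. A hedged usage sketch; my_dev and its mmio_phys/mmio_len fields are illustrative:

/* Hypothetical driver mmap: map a contiguous MMIO region to user space. */
static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;	/* hypothetical */
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > dev->mmio_len)
		return -EINVAL;

	/* MMIO is normally mapped uncached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       dev->mmio_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
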
2433 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
2452 if (vma->vm_pgoff > pages) in vm_iomap_memory()
2454 pfn += vma->vm_pgoff; in vm_iomap_memory()
2455 pages -= vma->vm_pgoff; in vm_iomap_memory()
2458 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
2463 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
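
vm_iomap_memory() packages the same operation for the simple case: the matched lines show it deriving the PFN range from a physical start/length, validating vm_pgoff against it, and forwarding to io_remap_pfn_range() with the VMA's own bounds. A driver can therefore usually implement mmap in one call; my_fb below is hypothetical:

/* Hypothetical framebuffer-style mmap built on vm_iomap_memory(). */
static int my_fb_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_fb *fb = file->private_data;		/* hypothetical */

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return vm_iomap_memory(vma, fb->mem_phys, fb->mem_len);
}
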
2644 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
2651 trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
2663 trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
2667 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval); in pte_spinlock()
2669 trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
2679 trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
2705 trace_spf_vma_changed(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2715 trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2726 ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval); in __pte_map_lock_speculative()
2730 trace_spf_pte_lock(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2740 trace_spf_vma_changed(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2755 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock()
2766 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock_addr()
2784 if (vma_is_anonymous(vmf->vma)) { in vmf_allows_speculation()
2790 if (!vmf->vma->anon_vma) { in vmf_allows_speculation()
2791 trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address); in vmf_allows_speculation()
2803 trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address); in vmf_allows_speculation()
2807 if (!(vmf->vma->vm_flags & VM_SHARED) && in vmf_allows_speculation()
2809 !vmf->vma->anon_vma) { in vmf_allows_speculation()
2814 trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address); in vmf_allows_speculation()
2818 if (vmf->vma->vm_ops->allow_speculation && in vmf_allows_speculation()
2819 vmf->vma->vm_ops->allow_speculation()) { in vmf_allows_speculation()
2823 trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address); in vmf_allows_speculation()
2830 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
2837 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock()
2844 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock_addr()
2908 struct vm_area_struct *vma = vmf->vma; in cow_user_page() local
2909 struct mm_struct *mm = vma->vm_mm; in cow_user_page()
2913 copy_user_highpage(dst, src, addr, vma); in cow_user_page()
2940 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
2946 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in cow_user_page()
2947 update_mmu_cache(vma, addr, vmf->pte); in cow_user_page()
2965 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
2996 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) in __get_fault_gfp_mask() argument
2998 struct file *vm_file = vma->vm_file; in __get_fault_gfp_mask()
3024 if (vmf->vma->vm_file && in do_page_mkwrite()
3025 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
3028 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
3052 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page() local
3056 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; in fault_dirty_shared_page()
3070 file_update_time(vma->vm_file); in fault_dirty_shared_page()
3106 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse() local
3117 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3120 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3121 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_reuse()
3144 struct vm_area_struct *vma = vmf->vma; in wp_page_copy() local
3145 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3153 if (unlikely(anon_vma_prepare(vma))) in wp_page_copy()
3157 new_page = alloc_zeroed_user_highpage_movable(vma, in wp_page_copy()
3162 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, in wp_page_copy()
3188 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, in wp_page_copy()
3210 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3220 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); in wp_page_copy()
3221 __page_add_new_anon_rmap(new_page, vma, vmf->address, false); in wp_page_copy()
3229 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_copy()
3260 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3322 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3336 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared() local
3338 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
3343 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3355 struct vm_area_struct *vma = vmf->vma; in wp_page_shared() local
3360 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
3407 struct vm_area_struct *vma = vmf->vma; in do_wp_page() local
3409 if (userfaultfd_pte_wp(vma, *vmf->pte)) { in do_wp_page()
3420 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3421 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3422 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3424 vmf->page = _vm_normal_page(vma, vmf->address, vmf->orig_pte, in do_wp_page()
3480 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
3484 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
3490 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
3493 vma_interval_tree_foreach(vma, root, in unmap_mapping_range_tree()
3496 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
3497 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
3505 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
3506 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3507 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3616 struct vm_area_struct *vma = vmf->vma; in do_swap_page() local
3657 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3665 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
3673 page = lookup_swap_cache(entry, vma, vmf->address); in do_swap_page()
3687 page = alloc_page_vma(flags, vma, vmf->address); in do_swap_page()
3697 err = mem_cgroup_charge(page, vma->vm_mm, in do_swap_page()
3750 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
3761 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); in do_swap_page()
3779 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
3814 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_swap_page()
3815 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); in do_swap_page()
3823 flush_icache_page(vma, page); in do_swap_page()
3830 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
3831 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
3836 __page_add_new_anon_rmap(page, vma, vmf->address, false); in do_swap_page()
3839 do_page_add_anon_rmap(page, vma, vmf->address, exclusive); in do_swap_page()
3869 update_mmu_cache(vma, vmf->address, vmf->pte); in do_swap_page()
3894 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page() local
3917 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
3927 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
3933 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
3936 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
3948 if (userfaultfd_missing(vma)) { in do_anonymous_page()
3956 if (unlikely(anon_vma_prepare(vma))) in do_anonymous_page()
3958 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); in do_anonymous_page()
3962 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) in do_anonymous_page()
3984 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
3988 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
3994 userfaultfd_missing(vma)) { in do_anonymous_page()
4000 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_anonymous_page()
4001 __page_add_new_anon_rmap(page, vma, vmf->address, false); in do_anonymous_page()
4004 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
4007 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
4029 struct vm_area_struct *vma = vmf->vma; in __do_fault() local
4052 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4059 ret = vma->vm_ops->fault(vmf); in __do_fault()
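
__do_fault() is where the fault path calls into the ->fault hook supplied by the filesystem or driver (preallocating a PTE page first when needed). A hedged sketch of the shape such a hook takes on the provider side; my_dev_fault and the backing-page lookup are hypothetical:

/* Hypothetical ->fault hook: return one backing page for vmf->pgoff. */
static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;	/* hypothetical */

	if (vmf->pgoff >= dev->npages)
		return VM_FAULT_SIGBUS;

	get_page(dev->pages[vmf->pgoff]);
	vmf->page = dev->pages[vmf->pgoff];
	return 0;
}

static const struct vm_operations_struct my_dev_vm_ops = {
	.fault = my_dev_fault,
};
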
4092 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte() local
4094 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4099 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
4105 struct vm_area_struct *vma = vmf->vma; in do_set_pmd() local
4112 if (!transhuge_vma_suitable(vma, haddr)) in do_set_pmd()
4124 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4130 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4135 flush_icache_page(vma, page + i); in do_set_pmd()
4139 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_set_pmd()
4141 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
4149 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4151 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
4169 struct vm_area_struct *vma = vmf->vma; in do_set_pte() local
4174 flush_icache_page(vma, page); in do_set_pte()
4186 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
4187 __page_add_new_anon_rmap(page, vma, addr, false); in do_set_pte()
4190 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); in do_set_pte()
4193 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); in do_set_pte()
4213 struct vm_area_struct *vma = vmf->vma; in finish_fault() local
4228 if (!(vma->vm_flags & VM_SHARED)) { in finish_fault()
4229 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
4246 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in finish_fault()
4248 mm_inc_nr_ptes(vma->vm_mm); in finish_fault()
4249 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in finish_fault()
4253 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { in finish_fault()
4276 update_mmu_tlb(vma, vmf->address, vmf->pte); in finish_fault()
4351 address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
4362 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, in do_fault_around()
4367 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4373 return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); in do_fault_around()
4378 struct vm_area_struct *vma = vmf->vma; in do_read_fault() local
4386 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { in do_read_fault()
4387 if (likely(!userfaultfd_minor(vmf->vma))) { in do_read_fault()
4407 struct vm_area_struct *vma = vmf->vma; in do_cow_fault() local
4410 if (unlikely(anon_vma_prepare(vma))) in do_cow_fault()
4413 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
4417 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) { in do_cow_fault()
4429 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
4445 struct vm_area_struct *vma = vmf->vma; in do_shared_fault() local
4456 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
4488 struct vm_area_struct *vma = vmf->vma; in do_fault() local
4489 struct mm_struct *vm_mm = vma->vm_mm; in do_fault()
4495 if (!vma->vm_ops->fault) { in do_fault()
4503 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, in do_fault()
4536 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, in numa_migrate_prep() argument
4548 return mpol_misplaced(page, vma, addr); in numa_migrate_prep()
4553 struct vm_area_struct *vma = vmf->vma; in do_numa_page() local
4579 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
4584 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
4585 update_mmu_cache(vma, vmf->address, vmf->pte); in do_numa_page()
4587 page = _vm_normal_page(vma, vmf->address, pte, vmf->vma_flags); in do_numa_page()
4619 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
4643 if (vma_is_anonymous(vmf->vma)) in create_huge_pmd()
4645 if (vmf->vma->vm_ops->huge_fault) in create_huge_pmd()
4646 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in create_huge_pmd()
4653 if (vma_is_anonymous(vmf->vma)) { in wp_huge_pmd()
4654 if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd)) in wp_huge_pmd()
4658 if (vmf->vma->vm_ops->huge_fault) { in wp_huge_pmd()
4659 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in wp_huge_pmd()
4666 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
4676 if (vma_is_anonymous(vmf->vma)) in create_huge_pud()
4678 if (vmf->vma->vm_ops->huge_fault) in create_huge_pud()
4679 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in create_huge_pud()
4689 if (vma_is_anonymous(vmf->vma)) in wp_huge_pud()
4691 if (vmf->vma->vm_ops->huge_fault) { in wp_huge_pud()
4692 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in wp_huge_pud()
4699 __split_huge_pud(vmf->vma, vmf->pud, vmf->address); in wp_huge_pud()
4780 if (vma_is_anonymous(vmf->vma)) in handle_pte_fault()
4792 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
4799 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4807 if (!mmu_notifier_trylock(vmf->vma->vm_mm)) { in handle_pte_fault()
4813 mmu_notifier_unlock(vmf->vma->vm_mm); in handle_pte_fault()
4819 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
4821 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4835 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); in handle_pte_fault()
4850 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, in __handle_mm_fault() argument
4854 .vma = vma, in __handle_mm_fault()
4857 .pgoff = linear_page_index(vma, address), in __handle_mm_fault()
4858 .gfp_mask = __get_fault_gfp_mask(vma), in __handle_mm_fault()
4859 .vma_flags = vma->vm_flags, in __handle_mm_fault()
4860 .vma_page_prot = vma->vm_page_prot, in __handle_mm_fault()
4863 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
4877 if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4909 vmf.sequence = raw_read_seqcount(&vma->vm_sequence); in __handle_mm_fault()
4911 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4927 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
5026 struct vm_area_struct *vma) in ___handle_speculative_fault() argument
5030 .pgoff = linear_page_index(vma, address), in ___handle_speculative_fault()
5031 .vma = vma, in ___handle_speculative_fault()
5032 .gfp_mask = __get_fault_gfp_mask(vma), in ___handle_speculative_fault()
5050 seq = raw_read_seqcount(&vmf.vma->vm_sequence); in ___handle_speculative_fault()
5052 trace_spf_vma_changed(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5056 vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags); in ___handle_speculative_fault()
5057 vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot); in ___handle_speculative_fault()
5065 uffd_missing_sigbus = vma_is_anonymous(vmf.vma) && in ___handle_speculative_fault()
5067 userfaultfd_using_sigbus(vmf.vma); in ___handle_speculative_fault()
5069 trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5073 if (!vmf.vma->anon_vma) in ___handle_speculative_fault()
5087 trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5091 if (address < READ_ONCE(vmf.vma->vm_start) in ___handle_speculative_fault()
5092 || READ_ONCE(vmf.vma->vm_end) <= address) { in ___handle_speculative_fault()
5093 trace_spf_vma_changed(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5097 if (!arch_vma_access_permitted(vmf.vma, flags & FAULT_FLAG_WRITE, in ___handle_speculative_fault()
5115 pol = __get_vma_policy(vmf.vma, address); in ___handle_speculative_fault()
5119 trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5205 if (read_seqcount_retry(&vmf.vma->vm_sequence, seq)) { in ___handle_speculative_fault()
5206 trace_spf_vma_changed(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5215 if (vma_is_anonymous(vmf.vma)) in ___handle_speculative_fault()
5232 trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5242 trace_spf_vma_access(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5248 struct vm_area_struct **vma, in __handle_speculative_fault() argument
5255 *vma = get_vma(mm, address); in __handle_speculative_fault()
5256 if (!*vma) in __handle_speculative_fault()
5259 ret = ___handle_speculative_fault(mm, address, flags, *vma); in __handle_speculative_fault()
5265 put_vma(*vma); in __handle_speculative_fault()
5266 *vma = NULL; in __handle_speculative_fault()
5281 bool can_reuse_spf_vma(struct vm_area_struct *vma, unsigned long address) in can_reuse_spf_vma() argument
5285 ret = !RB_EMPTY_NODE(&vma->vm_rb) && in can_reuse_spf_vma()
5286 vma->vm_start <= address && address < vma->vm_end; in can_reuse_spf_vma()
5287 put_vma(vma); in can_reuse_spf_vma()
5298 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
5306 count_memcg_event_mm(vma->vm_mm, PGFAULT); in handle_mm_fault()
5311 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, in handle_mm_fault()
5323 if (unlikely(is_vm_hugetlb_page(vma))) in handle_mm_fault()
5324 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
5326 ret = __handle_mm_fault(vma, address, flags); in handle_mm_fault()
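
handle_mm_fault() is the entry point that architecture fault handlers reach once they hold mmap_lock and have located a candidate VMA; as the matched lines show, it then dispatches to hugetlb_fault() or __handle_mm_fault(). A heavily simplified, hedged sketch of that calling pattern; retry handling, stack expansion, signal delivery, and any trailing handle_mm_fault() arguments beyond flags (which differ between kernel versions) are omitted:

/* Hypothetical, heavily simplified arch-side fault path. */
static vm_fault_t my_arch_handle_fault(struct mm_struct *mm,
				       unsigned long addr, unsigned int flags)
{
	struct vm_area_struct *vma;
	vm_fault_t fault = VM_FAULT_SIGSEGV;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		fault = handle_mm_fault(vma, addr, flags);
	mmap_read_unlock(mm);

	return fault;
}
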
5526 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
5533 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
5536 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
5546 int follow_phys(struct vm_area_struct *vma, in follow_phys() argument
5554 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_phys()
5557 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
5574 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
5582 if (follow_phys(vma, addr, write, &prot, &phys_addr)) in generic_access_phys()
5607 struct vm_area_struct *vma; in __access_remote_vm() local
5621 gup_flags, &page, &vma, NULL); in __access_remote_vm()
5630 vma = find_vma(mm, addr); in __access_remote_vm()
5631 if (!vma || vma->vm_start > addr) in __access_remote_vm()
5633 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
5634 ret = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
5648 copy_to_user_page(vma, page, addr, in __access_remote_vm()
5652 copy_from_user_page(vma, page, addr, in __access_remote_vm()
5714 struct vm_area_struct *vma; in print_vma_addr() local
5722 vma = find_vma(mm, ip); in print_vma_addr()
5723 if (vma && vma->vm_file) { in print_vma_addr()
5724 struct file *f = vma->vm_file; in print_vma_addr()
5733 vma->vm_start, in print_vma_addr()
5734 vma->vm_end - vma->vm_start); in print_vma_addr()
5853 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
5862 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); in copy_user_gigantic_page()
5873 struct vm_area_struct *vma; member
5881 addr, copy_arg->vma); in copy_subpage()
5885 unsigned long addr_hint, struct vm_area_struct *vma, in copy_user_huge_page() argument
5893 .vma = vma, in copy_user_huge_page()
5897 copy_user_gigantic_page(dst, src, addr, vma, in copy_user_huge_page()