Lines Matching refs:vmf (cross-reference hits for the struct vm_fault pointer vmf, apparently from mm/memory.c of a kernel tree carrying the speculative page fault patches; each hit shows the source line number, the matching line, and the enclosing function)
2637 static bool pte_spinlock(struct vm_fault *vmf) in pte_spinlock() argument
2643 if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { in pte_spinlock()
2644 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
2645 spin_lock(vmf->ptl); in pte_spinlock()
2650 if (vma_has_changed(vmf)) { in pte_spinlock()
2651 trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
2661 pmdval = READ_ONCE(*vmf->pmd); in pte_spinlock()
2662 if (!pmd_same(pmdval, vmf->orig_pmd)) { in pte_spinlock()
2663 trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
2667 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval); in pte_spinlock()
2668 if (unlikely(!spin_trylock(vmf->ptl))) { in pte_spinlock()
2669 trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
2677 if (vma_has_changed(vmf)) { in pte_spinlock()
2678 spin_unlock(vmf->ptl); in pte_spinlock()
2679 trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
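Read in order, the pte_spinlock() hits above describe the speculative locking pattern: validate the VMA, re-check the PMD against the value sampled at fault entry, and only trylock the PTE lock. The sketch below stitches the listed lines together; the local declarations, return plumbing and the interrupt disabling the SPF series places around the trylock are not visible in the listing and are filled in here as assumptions.

static bool pte_spinlock(struct vm_fault *vmf)
{
        bool ret = false;
        pmd_t pmdval;

        /* Non-speculative faults simply take the PTE lock. */
        if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
                vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
                spin_lock(vmf->ptl);
                return true;
        }

        /* Speculative path: give up if the VMA changed under us. */
        if (vma_has_changed(vmf)) {
                trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
                goto out;
        }

        /* The PMD must still match what was sampled at fault entry,
         * otherwise the page table this lock protects may be gone. */
        pmdval = READ_ONCE(*vmf->pmd);
        if (!pmd_same(pmdval, vmf->orig_pmd)) {
                trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address);
                goto out;
        }

        /* Only trylock: on contention the speculative path falls back
         * to the classic mmap-locked fault instead of waiting. */
        vmf->ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
        if (unlikely(!spin_trylock(vmf->ptl))) {
                trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address);
                goto out;
        }

        /* Re-validate with the lock held; unlock and bail on change. */
        if (vma_has_changed(vmf)) {
                spin_unlock(vmf->ptl);
                trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
                goto out;
        }

        ret = true;
out:
        return ret;
}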
2689 static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr) in __pte_map_lock_speculative() argument
2704 if (vma_has_changed(vmf)) { in __pte_map_lock_speculative()
2705 trace_spf_vma_changed(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2713 pmdval = READ_ONCE(*vmf->pmd); in __pte_map_lock_speculative()
2714 if (!pmd_same(pmdval, vmf->orig_pmd)) { in __pte_map_lock_speculative()
2715 trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2726 ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval); in __pte_map_lock_speculative()
2730 trace_spf_pte_lock(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2738 if (vma_has_changed(vmf)) { in __pte_map_lock_speculative()
2740 trace_spf_vma_changed(_RET_IP_, vmf->vma, addr); in __pte_map_lock_speculative()
2744 vmf->pte = pte; in __pte_map_lock_speculative()
2745 vmf->ptl = ptl; in __pte_map_lock_speculative()
2752 static bool pte_map_lock(struct vm_fault *vmf) in pte_map_lock() argument
2754 if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { in pte_map_lock()
2755 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock()
2756 vmf->address, &vmf->ptl); in pte_map_lock()
2760 return __pte_map_lock_speculative(vmf, vmf->address); in pte_map_lock()
2763 bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr) in pte_map_lock_addr() argument
2765 if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { in pte_map_lock_addr()
2766 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock_addr()
2767 addr, &vmf->ptl); in pte_map_lock_addr()
2771 return __pte_map_lock_speculative(vmf, addr); in pte_map_lock_addr()
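__pte_map_lock_speculative() repeats the same validation but must also map the PTE before the lock can be published, and pte_map_lock()/pte_map_lock_addr() merely choose between pte_offset_map_lock() and this helper. A sketch assembled from the listed lines; the exact placement of the pte_offset_map()/pte_unmap() calls and the ordering of the failure paths are not visible in the listing and are assumptions.

static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
{
        bool ret = false;
        pte_t *pte;
        spinlock_t *ptl;
        pmd_t pmdval;

        if (vma_has_changed(vmf)) {
                trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
                goto out;
        }

        pmdval = READ_ONCE(*vmf->pmd);
        if (!pmd_same(pmdval, vmf->orig_pmd)) {
                trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr);
                goto out;
        }

        /* Map the PTE against the sampled PMD value, then trylock. */
        ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
        pte = pte_offset_map(&pmdval, addr);
        if (unlikely(!spin_trylock(ptl))) {
                pte_unmap(pte);
                trace_spf_pte_lock(_RET_IP_, vmf->vma, addr);
                goto out;
        }

        if (vma_has_changed(vmf)) {
                pte_unmap_unlock(pte, ptl);
                trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
                goto out;
        }

        /* Publish the mapped PTE and its lock for the caller. */
        vmf->pte = pte;
        vmf->ptl = ptl;
        ret = true;
out:
        return ret;
}

pte_map_lock() and pte_map_lock_addr() (lines 2752-2771) then reduce to: non-speculative faults call pte_offset_map_lock() directly, speculative ones go through the helper above.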
2782 static bool vmf_allows_speculation(struct vm_fault *vmf) in vmf_allows_speculation() argument
2784 if (vma_is_anonymous(vmf->vma)) { in vmf_allows_speculation()
2790 if (!vmf->vma->anon_vma) { in vmf_allows_speculation()
2791 trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address); in vmf_allows_speculation()
2803 trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address); in vmf_allows_speculation()
2807 if (!(vmf->vma->vm_flags & VM_SHARED) && in vmf_allows_speculation()
2808 (vmf->flags & FAULT_FLAG_WRITE) && in vmf_allows_speculation()
2809 !vmf->vma->anon_vma) { in vmf_allows_speculation()
2814 trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address); in vmf_allows_speculation()
2818 if (vmf->vma->vm_ops->allow_speculation && in vmf_allows_speculation()
2819 vmf->vma->vm_ops->allow_speculation()) { in vmf_allows_speculation()
2823 trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address); in vmf_allows_speculation()
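The vmf_allows_speculation() hits outline the policy for which VMAs may be handled without the mmap lock. The reconstruction below keeps only what the listed conditions show; at least one further rejection around line 2803 is not visible here, and the early-return structure is an assumption.

static bool vmf_allows_speculation(struct vm_fault *vmf)
{
        if (vma_is_anonymous(vmf->vma)) {
                /* Anonymous mappings qualify only if an anon_vma already
                 * exists: preparing one would need the mmap lock, which
                 * the speculative path does not hold. */
                if (!vmf->vma->anon_vma) {
                        trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
                        return false;
                }
                return true;
        }

        /* (The listing shows another rejection path around line 2803
         * whose condition is not visible here.) */

        /* A private, writable file fault will COW into an anonymous page
         * and therefore also needs an anon_vma to be in place already. */
        if (!(vmf->vma->vm_flags & VM_SHARED) &&
            (vmf->flags & FAULT_FLAG_WRITE) &&
            !vmf->vma->anon_vma) {
                trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
                return false;
        }

        /* File-backed VMAs must opt in via vm_ops->allow_speculation(). */
        if (vmf->vma->vm_ops->allow_speculation &&
            vmf->vma->vm_ops->allow_speculation())
                return true;

        trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
        return false;
}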
2828 static inline bool pte_spinlock(struct vm_fault *vmf) in pte_spinlock() argument
2830 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
2831 spin_lock(vmf->ptl); in pte_spinlock()
2835 static inline bool pte_map_lock(struct vm_fault *vmf) in pte_map_lock() argument
2837 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock()
2838 vmf->address, &vmf->ptl); in pte_map_lock()
2842 inline bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr) in pte_map_lock_addr() argument
2844 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_map_lock_addr()
2845 addr, &vmf->ptl); in pte_map_lock_addr()
2849 static inline bool vmf_allows_speculation(struct vm_fault *vmf) in vmf_allows_speculation() argument
2883 static inline int pte_unmap_same(struct vm_fault *vmf) in pte_unmap_same() argument
2889 if (pte_spinlock(vmf)) { in pte_unmap_same()
2890 if (!pte_same(*vmf->pte, vmf->orig_pte)) in pte_unmap_same()
2892 spin_unlock(vmf->ptl); in pte_unmap_same()
2897 pte_unmap(vmf->pte); in pte_unmap_same()
2902 struct vm_fault *vmf) in cow_user_page() argument
2908 struct vm_area_struct *vma = vmf->vma; in cow_user_page()
2910 unsigned long addr = vmf->address; in cow_user_page()
2930 if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) { in cow_user_page()
2933 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2935 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { in cow_user_page()
2940 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
2945 entry = pte_mkyoung(vmf->orig_pte); in cow_user_page()
2946 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in cow_user_page()
2947 update_mmu_cache(vma, addr, vmf->pte); in cow_user_page()
2961 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2963 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { in cow_user_page()
2965 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
2989 pte_unmap_unlock(vmf->pte, vmf->ptl); in cow_user_page()
3016 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) in do_page_mkwrite() argument
3019 struct page *page = vmf->page; in do_page_mkwrite()
3020 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
3022 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
3024 if (vmf->vma->vm_file && in do_page_mkwrite()
3025 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
3028 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
3030 vmf->flags = old_flags; in do_page_mkwrite()
3050 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) in fault_dirty_shared_page() argument
3052 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page()
3054 struct page *page = vmf->page; in fault_dirty_shared_page()
3084 fpin = maybe_unlock_mmap_for_io(vmf, NULL); in fault_dirty_shared_page()
3103 static inline void wp_page_reuse(struct vm_fault *vmf) in wp_page_reuse() argument
3104 __releases(vmf->ptl) in wp_page_reuse()
3106 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse()
3107 struct page *page = vmf->page; in wp_page_reuse()
3117 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3118 entry = pte_mkyoung(vmf->orig_pte); in wp_page_reuse()
3119 entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); in wp_page_reuse()
3120 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3121 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_reuse()
3122 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_reuse()
3142 static vm_fault_t wp_page_copy(struct vm_fault *vmf) in wp_page_copy() argument
3144 struct vm_area_struct *vma = vmf->vma; in wp_page_copy()
3146 struct page *old_page = vmf->page; in wp_page_copy()
3156 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { in wp_page_copy()
3158 vmf->address); in wp_page_copy()
3163 vmf->address); in wp_page_copy()
3167 if (!cow_user_page(new_page, old_page, vmf)) { in wp_page_copy()
3179 trace_android_vh_cow_user_page(vmf, new_page); in wp_page_copy()
3189 vmf->address & PAGE_MASK, in wp_page_copy()
3190 (vmf->address & PAGE_MASK) + PAGE_SIZE); in wp_page_copy()
3196 if (!pte_map_lock(vmf)) { in wp_page_copy()
3200 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { in wp_page_copy()
3210 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3211 entry = mk_pte(new_page, vmf->vma_page_prot); in wp_page_copy()
3213 entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); in wp_page_copy()
3220 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); in wp_page_copy()
3221 __page_add_new_anon_rmap(new_page, vma, vmf->address, false); in wp_page_copy()
3222 __lru_cache_add_inactive_or_unevictable(new_page, vmf->vma_flags); in wp_page_copy()
3228 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
3229 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_copy()
3260 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3266 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
3277 if (page_copied && (vmf->vma_flags & VM_LOCKED)) { in wp_page_copy()
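The wp_page_copy() hits above, like the later do_swap_page(), do_anonymous_page() and finish_fault() ones, use pte_map_lock()/pte_spinlock() through the same caller idiom: a false return means the speculative validation failed and the fault must be retried under the mmap lock, while a changed PTE after a successful lock means another thread already serviced the fault. A minimal sketch of that idiom; the function name, the newval parameter and the specific PTE update are hypothetical, only the surrounding pattern comes from the listed lines.

static vm_fault_t spf_pte_update_idiom(struct vm_fault *vmf, pte_t newval)
{
        vm_fault_t ret = 0;

        if (!pte_map_lock(vmf))
                /* Speculative validation failed: have the caller retry
                 * through the classic, mmap-locked path. */
                return VM_FAULT_RETRY;

        if (!pte_same(*vmf->pte, vmf->orig_pte)) {
                /* Another thread already serviced this fault. */
                update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
                goto unlock;
        }

        /* The PTE is stable and vmf->ptl is held: update it. */
        set_pte_at(vmf->vma->vm_mm, vmf->address, vmf->pte, newval);
        update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
unlock:
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return ret;
}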
3312 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) in finish_mkwrite_fault() argument
3314 WARN_ON_ONCE(!(vmf->vma_flags & VM_SHARED)); in finish_mkwrite_fault()
3315 if (!pte_map_lock(vmf)) in finish_mkwrite_fault()
3321 if (!pte_same(*vmf->pte, vmf->orig_pte)) { in finish_mkwrite_fault()
3322 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3323 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_mkwrite_fault()
3326 wp_page_reuse(vmf); in finish_mkwrite_fault()
3334 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) in wp_pfn_shared() argument
3336 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared()
3341 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_pfn_shared()
3342 vmf->flags |= FAULT_FLAG_MKWRITE; in wp_pfn_shared()
3343 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3346 return finish_mkwrite_fault(vmf); in wp_pfn_shared()
3348 wp_page_reuse(vmf); in wp_pfn_shared()
3352 static vm_fault_t wp_page_shared(struct vm_fault *vmf) in wp_page_shared() argument
3353 __releases(vmf->ptl) in wp_page_shared()
3355 struct vm_area_struct *vma = vmf->vma; in wp_page_shared()
3358 get_page(vmf->page); in wp_page_shared()
3363 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_shared()
3364 tmp = do_page_mkwrite(vmf); in wp_page_shared()
3367 put_page(vmf->page); in wp_page_shared()
3370 tmp = finish_mkwrite_fault(vmf); in wp_page_shared()
3372 unlock_page(vmf->page); in wp_page_shared()
3373 put_page(vmf->page); in wp_page_shared()
3377 wp_page_reuse(vmf); in wp_page_shared()
3378 lock_page(vmf->page); in wp_page_shared()
3380 ret |= fault_dirty_shared_page(vmf); in wp_page_shared()
3381 put_page(vmf->page); in wp_page_shared()
3404 static vm_fault_t do_wp_page(struct vm_fault *vmf) in do_wp_page() argument
3405 __releases(vmf->ptl) in do_wp_page()
3407 struct vm_area_struct *vma = vmf->vma; in do_wp_page()
3409 if (userfaultfd_pte_wp(vma, *vmf->pte)) { in do_wp_page()
3410 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3411 if (vmf->flags & FAULT_FLAG_SPECULATIVE) in do_wp_page()
3413 return handle_userfault(vmf, VM_UFFD_WP); in do_wp_page()
3420 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3421 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3422 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3424 vmf->page = _vm_normal_page(vma, vmf->address, vmf->orig_pte, in do_wp_page()
3425 vmf->vma_flags); in do_wp_page()
3426 if (!vmf->page) { in do_wp_page()
3434 if ((vmf->vma_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
3436 return wp_pfn_shared(vmf); in do_wp_page()
3438 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3439 return wp_page_copy(vmf); in do_wp_page()
3446 if (PageAnon(vmf->page)) { in do_wp_page()
3447 struct page *page = vmf->page; in do_wp_page()
3464 wp_page_reuse(vmf); in do_wp_page()
3466 } else if (unlikely((vmf->vma_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
3468 return wp_page_shared(vmf); in do_wp_page()
3474 get_page(vmf->page); in do_wp_page()
3476 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3477 return wp_page_copy(vmf); in do_wp_page()
3614 vm_fault_t do_swap_page(struct vm_fault *vmf) in do_swap_page() argument
3616 struct vm_area_struct *vma = vmf->vma; in do_swap_page()
3625 if (vmf->flags & FAULT_FLAG_SPECULATIVE) { in do_swap_page()
3633 pte_unmap(vmf->pte); in do_swap_page()
3638 ret = pte_unmap_same(vmf); in do_swap_page()
3650 entry = pte_to_swp_entry(vmf->orig_pte); in do_swap_page()
3652 if (vmf->flags & FAULT_FLAG_SPECULATIVE) { in do_swap_page()
3657 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3658 vmf->address); in do_swap_page()
3660 vmf->page = device_private_entry_to_page(entry); in do_swap_page()
3661 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); in do_swap_page()
3665 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
3673 page = lookup_swap_cache(entry, vma, vmf->address); in do_swap_page()
3687 page = alloc_page_vma(flags, vma, vmf->address); in do_swap_page()
3712 } else if (vmf->flags & FAULT_FLAG_SPECULATIVE) { in do_swap_page()
3725 vmf); in do_swap_page()
3735 if (!pte_map_lock(vmf)) { in do_swap_page()
3741 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
3761 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); in do_swap_page()
3779 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
3792 if (!pte_map_lock(vmf)) { in do_swap_page()
3796 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
3816 pte = mk_pte(page, vmf->vma_page_prot); in do_swap_page()
3817 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { in do_swap_page()
3818 pte = maybe_mkwrite(pte_mkdirty(pte), vmf->vma_flags); in do_swap_page()
3819 vmf->flags &= ~FAULT_FLAG_WRITE; in do_swap_page()
3824 if (pte_swp_soft_dirty(vmf->orig_pte)) in do_swap_page()
3826 if (pte_swp_uffd_wp(vmf->orig_pte)) { in do_swap_page()
3830 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
3831 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
3832 vmf->orig_pte = pte; in do_swap_page()
3836 __page_add_new_anon_rmap(page, vma, vmf->address, false); in do_swap_page()
3837 __lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags); in do_swap_page()
3839 do_page_add_anon_rmap(page, vma, vmf->address, exclusive); in do_swap_page()
3842 trace_android_vh_swapin_add_anon_rmap(vmf, page); in do_swap_page()
3845 (vmf->vma_flags & VM_LOCKED) || PageMlocked(page)) in do_swap_page()
3861 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
3862 ret |= do_wp_page(vmf); in do_swap_page()
3869 update_mmu_cache(vma, vmf->address, vmf->pte); in do_swap_page()
3871 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
3875 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
3892 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) in do_anonymous_page() argument
3894 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page()
3900 if (vmf->vma_flags & VM_SHARED) in do_anonymous_page()
3904 if (vmf->flags & FAULT_FLAG_SPECULATIVE) in do_anonymous_page()
3917 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
3921 if (unlikely(pmd_trans_unstable(vmf->pmd))) in do_anonymous_page()
3926 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_anonymous_page()
3928 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
3929 vmf->vma_page_prot)); in do_anonymous_page()
3930 if (!pte_map_lock(vmf)) in do_anonymous_page()
3932 if (!pte_none(*vmf->pte)) { in do_anonymous_page()
3933 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
3945 if (vmf->flags & FAULT_FLAG_SPECULATIVE) in do_anonymous_page()
3949 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3950 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
3958 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); in do_anonymous_page()
3973 entry = mk_pte(page, vmf->vma_page_prot); in do_anonymous_page()
3975 if (vmf->vma_flags & VM_WRITE) in do_anonymous_page()
3978 if (!pte_map_lock(vmf)) { in do_anonymous_page()
3983 if (!pte_none(*vmf->pte)) { in do_anonymous_page()
3984 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
3993 if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) && in do_anonymous_page()
3995 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3997 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
4001 __page_add_new_anon_rmap(page, vma, vmf->address, false); in do_anonymous_page()
4002 __lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags); in do_anonymous_page()
4004 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
4007 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
4009 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
4012 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
4027 static vm_fault_t __do_fault(struct vm_fault *vmf) in __do_fault() argument
4029 struct vm_area_struct *vma = vmf->vma; in __do_fault()
4033 if (vmf->flags & FAULT_FLAG_SPECULATIVE) in __do_fault()
4051 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
4052 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4053 if (!vmf->prealloc_pte) in __do_fault()
4059 ret = vma->vm_ops->fault(vmf); in __do_fault()
4064 if (unlikely(PageHWPoison(vmf->page))) { in __do_fault()
4065 struct page *page = vmf->page; in __do_fault()
4077 vmf->page = NULL; in __do_fault()
4082 lock_page(vmf->page); in __do_fault()
4084 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); in __do_fault()
4090 static void deposit_prealloc_pte(struct vm_fault *vmf) in deposit_prealloc_pte() argument
4092 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte()
4094 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4100 vmf->prealloc_pte = NULL; in deposit_prealloc_pte()
4103 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
4105 struct vm_area_struct *vma = vmf->vma; in do_set_pmd()
4106 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pmd()
4107 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
4123 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { in do_set_pmd()
4124 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4125 if (!vmf->prealloc_pte) in do_set_pmd()
4130 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4131 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
4137 entry = mk_huge_pmd(page, vmf->vma_page_prot); in do_set_pmd()
4147 deposit_prealloc_pte(vmf); in do_set_pmd()
4149 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4151 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
4157 spin_unlock(vmf->ptl); in do_set_pmd()
4161 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
4167 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) in do_set_pte() argument
4169 struct vm_area_struct *vma = vmf->vma; in do_set_pte()
4170 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pte()
4171 bool prefault = vmf->address != addr; in do_set_pte()
4175 entry = mk_pte(page, vmf->vma_page_prot); in do_set_pte()
4183 entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); in do_set_pte()
4185 if (write && !(vmf->vma_flags & VM_SHARED)) { in do_set_pte()
4188 __lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags); in do_set_pte()
4193 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); in do_set_pte()
4211 vm_fault_t finish_fault(struct vm_fault *vmf) in finish_fault() argument
4213 struct vm_area_struct *vma = vmf->vma; in finish_fault()
4218 if ((vmf->flags & FAULT_FLAG_WRITE) && in finish_fault()
4219 !(vmf->vma_flags & VM_SHARED)) in finish_fault()
4220 page = vmf->cow_page; in finish_fault()
4222 page = vmf->page; in finish_fault()
4235 if (vmf->flags & FAULT_FLAG_SPECULATIVE) in finish_fault()
4238 if (pmd_none(*vmf->pmd)) { in finish_fault()
4240 ret = do_set_pmd(vmf, page); in finish_fault()
4245 if (vmf->prealloc_pte) { in finish_fault()
4246 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in finish_fault()
4247 if (likely(pmd_none(*vmf->pmd))) { in finish_fault()
4249 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in finish_fault()
4250 vmf->prealloc_pte = NULL; in finish_fault()
4252 spin_unlock(vmf->ptl); in finish_fault()
4253 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { in finish_fault()
4262 if (pmd_devmap_trans_unstable(vmf->pmd)) in finish_fault()
4266 if (!pte_map_lock(vmf)) in finish_fault()
4271 if (likely(pte_none(*vmf->pte))) in finish_fault()
4272 do_set_pte(vmf, page, vmf->address); in finish_fault()
4276 update_mmu_tlb(vma, vmf->address, vmf->pte); in finish_fault()
4277 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
4341 static vm_fault_t do_fault_around(struct vm_fault *vmf) in do_fault_around() argument
4343 unsigned long address = vmf->address, nr_pages, mask; in do_fault_around()
4344 pgoff_t start_pgoff = vmf->pgoff; in do_fault_around()
4351 address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
4352 off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in do_fault_around()
4362 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, in do_fault_around()
4365 if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) && in do_fault_around()
4366 pmd_none(*vmf->pmd)) { in do_fault_around()
4367 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4368 if (!vmf->prealloc_pte) in do_fault_around()
4373 return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); in do_fault_around()
4376 static vm_fault_t do_read_fault(struct vm_fault *vmf) in do_read_fault() argument
4378 struct vm_area_struct *vma = vmf->vma; in do_read_fault()
4387 if (likely(!userfaultfd_minor(vmf->vma))) { in do_read_fault()
4388 ret = do_fault_around(vmf); in do_read_fault()
4394 ret = __do_fault(vmf); in do_read_fault()
4398 ret |= finish_fault(vmf); in do_read_fault()
4399 unlock_page(vmf->page); in do_read_fault()
4401 put_page(vmf->page); in do_read_fault()
4405 static vm_fault_t do_cow_fault(struct vm_fault *vmf) in do_cow_fault() argument
4407 struct vm_area_struct *vma = vmf->vma; in do_cow_fault()
4413 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
4414 if (!vmf->cow_page) in do_cow_fault()
4417 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) { in do_cow_fault()
4418 put_page(vmf->cow_page); in do_cow_fault()
4421 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL); in do_cow_fault()
4423 ret = __do_fault(vmf); in do_cow_fault()
4429 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
4430 __SetPageUptodate(vmf->cow_page); in do_cow_fault()
4432 ret |= finish_fault(vmf); in do_cow_fault()
4433 unlock_page(vmf->page); in do_cow_fault()
4434 put_page(vmf->page); in do_cow_fault()
4439 put_page(vmf->cow_page); in do_cow_fault()
4443 static vm_fault_t do_shared_fault(struct vm_fault *vmf) in do_shared_fault() argument
4445 struct vm_area_struct *vma = vmf->vma; in do_shared_fault()
4448 ret = __do_fault(vmf); in do_shared_fault()
4457 unlock_page(vmf->page); in do_shared_fault()
4458 tmp = do_page_mkwrite(vmf); in do_shared_fault()
4461 put_page(vmf->page); in do_shared_fault()
4466 ret |= finish_fault(vmf); in do_shared_fault()
4469 unlock_page(vmf->page); in do_shared_fault()
4470 put_page(vmf->page); in do_shared_fault()
4474 ret |= fault_dirty_shared_page(vmf); in do_shared_fault()
4486 static vm_fault_t do_fault(struct vm_fault *vmf) in do_fault() argument
4488 struct vm_area_struct *vma = vmf->vma; in do_fault()
4500 if (unlikely(!pmd_present(*vmf->pmd))) in do_fault()
4503 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, in do_fault()
4504 vmf->pmd, in do_fault()
4505 vmf->address, in do_fault()
4506 &vmf->ptl); in do_fault()
4514 if (unlikely(pte_none(*vmf->pte))) in do_fault()
4519 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault()
4521 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) in do_fault()
4522 ret = do_read_fault(vmf); in do_fault()
4523 else if (!(vmf->vma_flags & VM_SHARED)) in do_fault()
4524 ret = do_cow_fault(vmf); in do_fault()
4526 ret = do_shared_fault(vmf); in do_fault()
4529 if (vmf->prealloc_pte) { in do_fault()
4530 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
4531 vmf->prealloc_pte = NULL; in do_fault()
4551 static vm_fault_t do_numa_page(struct vm_fault *vmf) in do_numa_page() argument
4553 struct vm_area_struct *vma = vmf->vma; in do_numa_page()
4560 bool was_writable = pte_savedwrite(vmf->orig_pte); in do_numa_page()
4568 if (!pte_spinlock(vmf)) in do_numa_page()
4570 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { in do_numa_page()
4571 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4579 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
4580 pte = pte_modify(old_pte, vmf->vma_page_prot); in do_numa_page()
4584 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
4585 update_mmu_cache(vma, vmf->address, vmf->pte); in do_numa_page()
4587 page = _vm_normal_page(vma, vmf->address, pte, vmf->vma_flags); in do_numa_page()
4589 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4595 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4614 if (page_mapcount(page) > 1 && (vmf->vma_flags & VM_SHARED)) in do_numa_page()
4619 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
4621 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4628 migrated = migrate_misplaced_page(page, vmf, target_nid); in do_numa_page()
4641 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) in create_huge_pmd() argument
4643 if (vma_is_anonymous(vmf->vma)) in create_huge_pmd()
4644 return do_huge_pmd_anonymous_page(vmf); in create_huge_pmd()
4645 if (vmf->vma->vm_ops->huge_fault) in create_huge_pmd()
4646 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in create_huge_pmd()
4651 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) in wp_huge_pmd() argument
4653 if (vma_is_anonymous(vmf->vma)) { in wp_huge_pmd()
4654 if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd)) in wp_huge_pmd()
4655 return handle_userfault(vmf, VM_UFFD_WP); in wp_huge_pmd()
4656 return do_huge_pmd_wp_page(vmf, orig_pmd); in wp_huge_pmd()
4658 if (vmf->vma->vm_ops->huge_fault) { in wp_huge_pmd()
4659 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in wp_huge_pmd()
4666 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
4671 static vm_fault_t create_huge_pud(struct vm_fault *vmf) in create_huge_pud() argument
4676 if (vma_is_anonymous(vmf->vma)) in create_huge_pud()
4678 if (vmf->vma->vm_ops->huge_fault) in create_huge_pud()
4679 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in create_huge_pud()
4684 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) in wp_huge_pud() argument
4689 if (vma_is_anonymous(vmf->vma)) in wp_huge_pud()
4691 if (vmf->vma->vm_ops->huge_fault) { in wp_huge_pud()
4692 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in wp_huge_pud()
4699 __split_huge_pud(vmf->vma, vmf->pud, vmf->address); in wp_huge_pud()
4719 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) in handle_pte_fault() argument
4725 if (vmf->flags & FAULT_FLAG_SPECULATIVE) in handle_pte_fault()
4728 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
4735 vmf->pte = NULL; in handle_pte_fault()
4749 if (pmd_devmap_trans_unstable(vmf->pmd)) in handle_pte_fault()
4760 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); in handle_pte_fault()
4761 vmf->orig_pte = *vmf->pte; in handle_pte_fault()
4772 if (pte_none(vmf->orig_pte)) { in handle_pte_fault()
4773 pte_unmap(vmf->pte); in handle_pte_fault()
4774 vmf->pte = NULL; in handle_pte_fault()
4779 if (!vmf->pte) { in handle_pte_fault()
4780 if (vma_is_anonymous(vmf->vma)) in handle_pte_fault()
4781 return do_anonymous_page(vmf); in handle_pte_fault()
4782 else if ((vmf->flags & FAULT_FLAG_SPECULATIVE) && in handle_pte_fault()
4783 !vmf_allows_speculation(vmf)) in handle_pte_fault()
4786 return do_fault(vmf); in handle_pte_fault()
4789 if (!pte_present(vmf->orig_pte)) in handle_pte_fault()
4790 return do_swap_page(vmf); in handle_pte_fault()
4792 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
4793 return do_numa_page(vmf); in handle_pte_fault()
4795 if (!pte_spinlock(vmf)) in handle_pte_fault()
4797 entry = vmf->orig_pte; in handle_pte_fault()
4798 if (unlikely(!pte_same(*vmf->pte, entry))) { in handle_pte_fault()
4799 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4802 if (vmf->flags & FAULT_FLAG_WRITE) { in handle_pte_fault()
4804 if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) in handle_pte_fault()
4805 return do_wp_page(vmf); in handle_pte_fault()
4807 if (!mmu_notifier_trylock(vmf->vma->vm_mm)) { in handle_pte_fault()
4812 ret = do_wp_page(vmf); in handle_pte_fault()
4813 mmu_notifier_unlock(vmf->vma->vm_mm); in handle_pte_fault()
4819 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
4820 vmf->flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
4821 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4824 if (vmf->flags & FAULT_FLAG_TRIED) in handle_pte_fault()
4826 if (vmf->flags & FAULT_FLAG_SPECULATIVE) in handle_pte_fault()
4834 if (vmf->flags & FAULT_FLAG_WRITE) in handle_pte_fault()
4835 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); in handle_pte_fault()
4837 trace_android_rvh_handle_pte_fault_end(vmf, highest_memmap_pfn); in handle_pte_fault()
4838 trace_android_vh_handle_pte_fault_end(vmf, highest_memmap_pfn); in handle_pte_fault()
4840 pte_unmap_unlock(vmf->pte, vmf->ptl); in handle_pte_fault()
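handle_pte_fault() is where the helpers above come together. The skeleton below is reduced to the dispatch decisions visible in the listing and follows the usual mainline structure; the PTE sampling prologue (lines 4728-4774), the speculative-only handling around do_wp_page() and do_fault(), the FAULT_FLAG_TRIED check and the tracing hooks are omitted or simplified, and pte_write()/pte_mkdirty()/pte_mkyoung() are taken from mainline rather than from the lines shown, so treat it as an orientation aid rather than the actual function.

static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
{
        pte_t entry;

        if (!vmf->pte) {
                /* No PTE yet: anonymous vs. file-backed fault. */
                if (vma_is_anonymous(vmf->vma))
                        return do_anonymous_page(vmf);
                return do_fault(vmf);
        }

        if (!pte_present(vmf->orig_pte))
                return do_swap_page(vmf);

        if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
                return do_numa_page(vmf);

        if (!pte_spinlock(vmf))
                return VM_FAULT_RETRY;

        entry = vmf->orig_pte;
        if (unlikely(!pte_same(*vmf->pte, entry))) {
                update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
                goto unlock;
        }

        if (vmf->flags & FAULT_FLAG_WRITE) {
                if (!pte_write(entry))
                        return do_wp_page(vmf);   /* drops the PTE lock */
                entry = pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
        if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
                                  vmf->flags & FAULT_FLAG_WRITE))
                update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
        else if (vmf->flags & FAULT_FLAG_WRITE)
                flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
unlock:
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return 0;
}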
4853 struct vm_fault vmf = { in __handle_mm_fault() local
4873 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
4874 if (!vmf.pud) in __handle_mm_fault()
4877 if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4878 ret = create_huge_pud(&vmf); in __handle_mm_fault()
4882 pud_t orig_pud = *vmf.pud; in __handle_mm_fault()
4890 ret = wp_huge_pud(&vmf, orig_pud); in __handle_mm_fault()
4894 huge_pud_set_accessed(&vmf, orig_pud); in __handle_mm_fault()
4900 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
4901 if (!vmf.pmd) in __handle_mm_fault()
4905 if (pud_trans_unstable(vmf.pud)) in __handle_mm_fault()
4909 vmf.sequence = raw_read_seqcount(&vma->vm_sequence); in __handle_mm_fault()
4911 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4912 ret = create_huge_pmd(&vmf); in __handle_mm_fault()
4916 pmd_t orig_pmd = *vmf.pmd; in __handle_mm_fault()
4923 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
4928 return do_huge_pmd_numa_page(&vmf, orig_pmd); in __handle_mm_fault()
4931 ret = wp_huge_pmd(&vmf, orig_pmd); in __handle_mm_fault()
4935 huge_pmd_set_accessed(&vmf, orig_pmd); in __handle_mm_fault()
4941 return handle_pte_fault(&vmf); in __handle_mm_fault()
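__handle_mm_fault() fills in the vm_fault and walks pud/pmd, peeling off transparent-huge-page and migration cases before handing the rest to handle_pte_fault(). Below is a sketch of the PMD-level half of that dispatch, built from the listed lines plus the usual mainline structure; the PUD-level half, the SPF-only vmf.sequence sampling at line 4909, and symbols such as barrier(), is_pmd_migration_entry(), pmd_trans_huge(), pmd_devmap() and pmd_write() come from mainline knowledge rather than from the listing, and mm, address, flags, vma and ret are the locals of the enclosing function.

        vmf.pmd = pmd_alloc(mm, vmf.pud, address);
        if (!vmf.pmd)
                return VM_FAULT_OOM;

        if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
                ret = create_huge_pmd(&vmf);
                if (!(ret & VM_FAULT_FALLBACK))
                        return ret;
        } else {
                pmd_t orig_pmd = *vmf.pmd;

                barrier();
                if (unlikely(is_swap_pmd(orig_pmd))) {
                        /* A THP under migration: wait for it to finish. */
                        if (is_pmd_migration_entry(orig_pmd))
                                pmd_migration_entry_wait(mm, vmf.pmd);
                        return 0;
                }
                if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
                        if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
                                return do_huge_pmd_numa_page(&vmf, orig_pmd);

                        if ((flags & FAULT_FLAG_WRITE) && !pmd_write(orig_pmd)) {
                                ret = wp_huge_pmd(&vmf, orig_pmd);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
                        } else {
                                huge_pmd_set_accessed(&vmf, orig_pmd);
                                return 0;
                        }
                }
        }

        return handle_pte_fault(&vmf);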
5028 struct vm_fault vmf = { in ___handle_speculative_fault() local
5050 seq = raw_read_seqcount(&vmf.vma->vm_sequence); in ___handle_speculative_fault()
5052 trace_spf_vma_changed(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5056 vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags); in ___handle_speculative_fault()
5057 vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot); in ___handle_speculative_fault()
5064 if (unlikely(vmf.vma_flags & __VM_UFFD_FLAGS)) { in ___handle_speculative_fault()
5065 uffd_missing_sigbus = vma_is_anonymous(vmf.vma) && in ___handle_speculative_fault()
5066 (vmf.vma_flags & VM_UFFD_MISSING) && in ___handle_speculative_fault()
5067 userfaultfd_using_sigbus(vmf.vma); in ___handle_speculative_fault()
5069 trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5073 if (!vmf.vma->anon_vma) in ___handle_speculative_fault()
5078 if (!vmf_allows_speculation(&vmf)) in ___handle_speculative_fault()
5081 if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) { in ___handle_speculative_fault()
5087 trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5091 if (address < READ_ONCE(vmf.vma->vm_start) in ___handle_speculative_fault()
5092 || READ_ONCE(vmf.vma->vm_end) <= address) { in ___handle_speculative_fault()
5093 trace_spf_vma_changed(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5097 if (!arch_vma_access_permitted(vmf.vma, flags & FAULT_FLAG_WRITE, in ___handle_speculative_fault()
5104 if (unlikely(!(vmf.vma_flags & VM_WRITE))) in ___handle_speculative_fault()
5106 } else if (unlikely(!(vmf.vma_flags & (VM_READ|VM_EXEC|VM_WRITE)))) in ___handle_speculative_fault()
5115 pol = __get_vma_policy(vmf.vma, address); in ___handle_speculative_fault()
5119 trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5140 vmf.pud = pud_offset(p4d, address); in ___handle_speculative_fault()
5143 pudval = READ_ONCE(*vmf.pud); in ___handle_speculative_fault()
5151 vmf.pmd = pmd_offset(vmf.pud, address); in ___handle_speculative_fault()
5152 if (pud_val(READ_ONCE(*vmf.pud)) != pud_val(pudval)) in ___handle_speculative_fault()
5154 vmf.orig_pmd = READ_ONCE(*vmf.pmd); in ___handle_speculative_fault()
5166 if (unlikely(pmd_devmap(vmf.orig_pmd) || in ___handle_speculative_fault()
5167 pmd_none(vmf.orig_pmd) || pmd_trans_huge(vmf.orig_pmd) || in ___handle_speculative_fault()
5168 is_swap_pmd(vmf.orig_pmd))) in ___handle_speculative_fault()
5180 vmf.pte = pte_offset_map(vmf.pmd, address); in ___handle_speculative_fault()
5181 if (pmd_val(READ_ONCE(*vmf.pmd)) != pmd_val(vmf.orig_pmd)) { in ___handle_speculative_fault()
5182 pte_unmap(vmf.pte); in ___handle_speculative_fault()
5183 vmf.pte = NULL; in ___handle_speculative_fault()
5186 vmf.orig_pte = READ_ONCE(*vmf.pte); in ___handle_speculative_fault()
5188 if (pte_none(vmf.orig_pte)) { in ___handle_speculative_fault()
5189 pte_unmap(vmf.pte); in ___handle_speculative_fault()
5190 vmf.pte = NULL; in ___handle_speculative_fault()
5193 vmf.sequence = seq; in ___handle_speculative_fault()
5194 vmf.flags = flags; in ___handle_speculative_fault()
5198 if (!vmf.pte && uffd_missing_sigbus) in ___handle_speculative_fault()
5205 if (read_seqcount_retry(&vmf.vma->vm_sequence, seq)) { in ___handle_speculative_fault()
5206 trace_spf_vma_changed(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5211 ret = handle_pte_fault(&vmf); in ___handle_speculative_fault()
5215 if (vma_is_anonymous(vmf.vma)) in ___handle_speculative_fault()
5232 trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()
5242 trace_spf_vma_access(_RET_IP_, vmf.vma, address); in ___handle_speculative_fault()