Lines matching refs: vmf (struct vm_fault), Linux mm/memory.c
Each entry gives the source line number, the matching line, and its enclosing function; "argument"/"local" marks the line where vmf is declared.
2215 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) in do_page_mkwrite() argument
2218 struct page *page = vmf->page; in do_page_mkwrite()
2219 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
2221 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
2223 if (vmf->vma->vm_file && in do_page_mkwrite()
2224 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
2227 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
2229 vmf->flags = old_flags; in do_page_mkwrite()
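
do_page_mkwrite() temporarily sets FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE, refuses writes into mappings of an active swapfile, and calls the vma's ->page_mkwrite handler so the filesystem can prepare a shared page for writing. A minimal sketch of such a handler, assuming a filemap-backed driver (the my_ names are hypothetical):

#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	/* The page may have been truncated while locks were dropped. */
	if (page->mapping != vmf->vma->vm_file->f_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	/* VM_FAULT_LOCKED: page is locked and may be made writable. */
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct my_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= my_page_mkwrite,
};
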
2249 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) in fault_dirty_shared_page() argument
2251 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page()
2253 struct page *page = vmf->page; in fault_dirty_shared_page()
2283 fpin = maybe_unlock_mmap_for_io(vmf, NULL); in fault_dirty_shared_page()
2302 static inline void wp_page_reuse(struct vm_fault *vmf) in wp_page_reuse() argument
2303 __releases(vmf->ptl) in wp_page_reuse()
2305 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse()
2306 struct page *page = vmf->page; in wp_page_reuse()
2316 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
2317 entry = pte_mkyoung(vmf->orig_pte); in wp_page_reuse()
2319 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
2320 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_reuse()
2321 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_reuse()
2340 static vm_fault_t wp_page_copy(struct vm_fault *vmf) in wp_page_copy() argument
2342 struct vm_area_struct *vma = vmf->vma; in wp_page_copy()
2344 struct page *old_page = vmf->page; in wp_page_copy()
2354 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { in wp_page_copy()
2356 vmf->address); in wp_page_copy()
2361 vmf->address); in wp_page_copy()
2364 cow_user_page(new_page, old_page, vmf->address, vma); in wp_page_copy()
2373 vmf->address & PAGE_MASK, in wp_page_copy()
2374 (vmf->address & PAGE_MASK) + PAGE_SIZE); in wp_page_copy()
2380 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
2381 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { in wp_page_copy()
2391 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
2400 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); in wp_page_copy()
2401 page_add_new_anon_rmap(new_page, vma, vmf->address, false); in wp_page_copy()
2409 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
2410 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_copy()
2447 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
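
wp_page_copy() is the copy-on-write slow path: it allocates a new page (a zeroed one if the old PTE pointed at the zero page), copies, and swaps the PTE in under mmu_notifier protection. A userspace sketch that typically exercises the zero-page branch on an anonymous private mapping:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	char before = p[0];	/* read fault: typically maps the zero page */
	p[0] = 'x';		/* write fault: wp_page_copy() zero-page branch */
	printf("before=%d after=%c\n", before, p[0]);
	munmap(p, 4096);
	return 0;
}
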
2491 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) in finish_mkwrite_fault() argument
2493 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
2494 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
2495 &vmf->ptl); in finish_mkwrite_fault()
2500 if (!pte_same(*vmf->pte, vmf->orig_pte)) { in finish_mkwrite_fault()
2501 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_mkwrite_fault()
2504 wp_page_reuse(vmf); in finish_mkwrite_fault()
2512 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) in wp_pfn_shared() argument
2514 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared()
2519 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_pfn_shared()
2520 vmf->flags |= FAULT_FLAG_MKWRITE; in wp_pfn_shared()
2521 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
2524 return finish_mkwrite_fault(vmf); in wp_pfn_shared()
2526 wp_page_reuse(vmf); in wp_pfn_shared()
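
wp_pfn_shared() handles write-protect faults on shared VM_PFNMAP/VM_MIXEDMAP mappings that have no struct page: it drops the PTE lock, calls ->pfn_mkwrite, and on success revalidates and reuses the PTE via finish_mkwrite_fault(). A hypothetical handler sketch (my_ names assumed):

#include <linux/mm.h>

static vm_fault_t my_pfn_mkwrite(struct vm_fault *vmf)
{
	/* Driver-specific bookkeeping would go here, e.g. marking the
	 * backing region dirty for later writeback. Returning 0 lets
	 * wp_pfn_shared() proceed to finish_mkwrite_fault(). */
	return 0;
}

static const struct vm_operations_struct my_pfnmap_ops = {
	.pfn_mkwrite = my_pfn_mkwrite,
};
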
2530 static vm_fault_t wp_page_shared(struct vm_fault *vmf) in wp_page_shared() argument
2531 __releases(vmf->ptl) in wp_page_shared()
2533 struct vm_area_struct *vma = vmf->vma; in wp_page_shared()
2536 get_page(vmf->page); in wp_page_shared()
2541 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_shared()
2542 tmp = do_page_mkwrite(vmf); in wp_page_shared()
2545 put_page(vmf->page); in wp_page_shared()
2548 tmp = finish_mkwrite_fault(vmf); in wp_page_shared()
2550 unlock_page(vmf->page); in wp_page_shared()
2551 put_page(vmf->page); in wp_page_shared()
2555 wp_page_reuse(vmf); in wp_page_shared()
2556 lock_page(vmf->page); in wp_page_shared()
2558 ret |= fault_dirty_shared_page(vmf); in wp_page_shared()
2559 put_page(vmf->page); in wp_page_shared()
2582 static vm_fault_t do_wp_page(struct vm_fault *vmf) in do_wp_page() argument
2583 __releases(vmf->ptl) in do_wp_page()
2585 struct vm_area_struct *vma = vmf->vma; in do_wp_page()
2587 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
2588 if (!vmf->page) { in do_wp_page()
2598 return wp_pfn_shared(vmf); in do_wp_page()
2600 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
2601 return wp_page_copy(vmf); in do_wp_page()
2608 if (PageAnon(vmf->page)) { in do_wp_page()
2610 if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) || in do_wp_page()
2611 page_count(vmf->page) != 1)) in do_wp_page()
2613 if (!trylock_page(vmf->page)) { in do_wp_page()
2614 get_page(vmf->page); in do_wp_page()
2615 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
2616 lock_page(vmf->page); in do_wp_page()
2617 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_wp_page()
2618 vmf->address, &vmf->ptl); in do_wp_page()
2619 if (!pte_same(*vmf->pte, vmf->orig_pte)) { in do_wp_page()
2620 unlock_page(vmf->page); in do_wp_page()
2621 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
2622 put_page(vmf->page); in do_wp_page()
2625 put_page(vmf->page); in do_wp_page()
2627 if (PageKsm(vmf->page)) { in do_wp_page()
2628 bool reused = reuse_ksm_page(vmf->page, vmf->vma, in do_wp_page()
2629 vmf->address); in do_wp_page()
2630 unlock_page(vmf->page); in do_wp_page()
2633 wp_page_reuse(vmf); in do_wp_page()
2636 if (reuse_swap_page(vmf->page, &total_map_swapcount)) { in do_wp_page()
2645 page_move_anon_rmap(vmf->page, vma); in do_wp_page()
2647 unlock_page(vmf->page); in do_wp_page()
2648 wp_page_reuse(vmf); in do_wp_page()
2651 unlock_page(vmf->page); in do_wp_page()
2654 return wp_page_shared(vmf); in do_wp_page()
2660 get_page(vmf->page); in do_wp_page()
2662 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
2663 return wp_page_copy(vmf); in do_wp_page()
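
do_wp_page() decides between reuse and copy: an anonymous page still mapped only once (after the KSM and swap-cache checks) is reused in place; anything still shared is copied. A userspace sketch where fork() forces the copy branch, because both processes map the page when the write happens:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "parent");
	if (fork() == 0) {
		strcpy(p, "child");	/* write fault -> wp_page_copy() */
		printf("child sees:  %s\n", p);
		_exit(0);
	}
	wait(NULL);
	printf("parent sees: %s\n", p);	/* unchanged: "parent" */
	return 0;
}
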
2770 vm_fault_t do_swap_page(struct vm_fault *vmf) in do_swap_page() argument
2772 struct vm_area_struct *vma = vmf->vma; in do_swap_page()
2781 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) in do_swap_page()
2784 entry = pte_to_swp_entry(vmf->orig_pte); in do_swap_page()
2787 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
2788 vmf->address); in do_swap_page()
2790 vmf->page = device_private_entry_to_page(entry); in do_swap_page()
2791 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); in do_swap_page()
2795 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
2803 page = lookup_swap_cache(entry, vma, vmf->address); in do_swap_page()
2813 vmf->address); in do_swap_page()
2823 vmf); in do_swap_page()
2832 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
2833 vmf->address, &vmf->ptl); in do_swap_page()
2834 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
2854 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); in do_swap_page()
2872 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
2888 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
2889 &vmf->ptl); in do_swap_page()
2890 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
2911 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { in do_swap_page()
2913 vmf->flags &= ~FAULT_FLAG_WRITE; in do_swap_page()
2918 if (pte_swp_soft_dirty(vmf->orig_pte)) in do_swap_page()
2920 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
2921 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
2922 vmf->orig_pte = pte; in do_swap_page()
2926 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_swap_page()
2930 do_page_add_anon_rmap(page, vma, vmf->address, exclusive); in do_swap_page()
2953 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
2954 ret |= do_wp_page(vmf); in do_swap_page()
2961 update_mmu_cache(vma, vmf->address, vmf->pte); in do_swap_page()
2963 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
2968 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
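
do_swap_page() distinguishes migration entries, device-private entries, and real swap entries, consults the swap cache, and reinstalls the PTE. A userspace sketch that tries to push a page to swap and fault it back (assumes Linux 5.4+ for MADV_PAGEOUT and active swap; madvise is only a hint, so the touch may stay a minor fault):

#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21
#endif

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 'x';			/* fault in and dirty the page */
	madvise(p, 4096, MADV_PAGEOUT);	/* hint: reclaim to swap */
	printf("%c\n", p[0]);		/* fault may go via do_swap_page() */
	return 0;
}
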
2985 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) in do_anonymous_page() argument
2987 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page()
3007 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
3011 if (unlikely(pmd_trans_unstable(vmf->pmd))) in do_anonymous_page()
3015 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_anonymous_page()
3017 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
3019 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
3020 vmf->address, &vmf->ptl); in do_anonymous_page()
3021 if (!pte_none(*vmf->pte)) in do_anonymous_page()
3028 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3029 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
3037 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); in do_anonymous_page()
3056 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
3057 &vmf->ptl); in do_anonymous_page()
3058 if (!pte_none(*vmf->pte)) in do_anonymous_page()
3067 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3070 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
3074 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_anonymous_page()
3078 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
3081 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
3083 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
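
do_anonymous_page() installs the zero page for reads or a freshly zeroed page for writes; if the vma is registered with userfaultfd for missing pages, it bails out through handle_userfault() instead. A userspace sketch (assuming userfaultfd is available and 4 KiB pages; compile with -pthread) where a monitor thread resolves the fault with UFFDIO_COPY:

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static long page_size;

static void *monitor(void *arg)
{
	int uffd = (int)(long)arg;
	struct uffd_msg msg;
	static char buf[4096];		/* assumes 4 KiB pages */

	memset(buf, 'A', sizeof(buf));
	read(uffd, &msg, sizeof(msg));	/* wait for the fault event */
	if (msg.event == UFFD_EVENT_PAGEFAULT) {
		struct uffdio_copy copy = {
			.dst = msg.arg.pagefault.address & ~(page_size - 1),
			.src = (unsigned long)buf,
			.len = page_size,
		};
		ioctl(uffd, UFFDIO_COPY, &copy);  /* resolve the fault */
	}
	return NULL;
}

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	pthread_t tid;
	char *p;
	int uffd;

	page_size = sysconf(_SC_PAGESIZE);
	uffd = syscall(SYS_userfaultfd, O_CLOEXEC);
	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return 1;
	p = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	reg.range.start = (unsigned long)p;
	reg.range.len = page_size;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return 1;
	pthread_create(&tid, NULL, monitor, (void *)(long)uffd);
	printf("first byte: %c\n", p[0]);	/* blocks until UFFDIO_COPY */
	pthread_join(tid, NULL);
	return 0;
}
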
3100 static vm_fault_t __do_fault(struct vm_fault *vmf) in __do_fault() argument
3102 struct vm_area_struct *vma = vmf->vma; in __do_fault()
3120 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
3121 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in __do_fault()
3122 if (!vmf->prealloc_pte) in __do_fault()
3127 ret = vma->vm_ops->fault(vmf); in __do_fault()
3132 if (unlikely(PageHWPoison(vmf->page))) { in __do_fault()
3134 unlock_page(vmf->page); in __do_fault()
3135 put_page(vmf->page); in __do_fault()
3136 vmf->page = NULL; in __do_fault()
3141 lock_page(vmf->page); in __do_fault()
3143 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); in __do_fault()
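
__do_fault() preallocates a PTE table when needed, invokes the vma's ->fault handler, and rejects HWPoison pages; the handler is expected to return vmf->page with a reference held (and locked only if it reports VM_FAULT_LOCKED). A hypothetical sketch of a driver handler serving preallocated pages (struct my_dev is assumed):

#include <linux/mm.h>

struct my_dev {			/* hypothetical per-device state */
	struct page **pages;
	unsigned long nr_pages;
};

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;

	if (vmf->pgoff >= dev->nr_pages)
		return VM_FAULT_SIGBUS;
	vmf->page = dev->pages[vmf->pgoff];
	get_page(vmf->page);	/* caller expects a referenced page */
	return 0;		/* no VM_FAULT_LOCKED: __do_fault() locks it */
}
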
3159 static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf) in pte_alloc_one_map() argument
3161 struct vm_area_struct *vma = vmf->vma; in pte_alloc_one_map()
3163 if (!pmd_none(*vmf->pmd)) in pte_alloc_one_map()
3165 if (vmf->prealloc_pte) { in pte_alloc_one_map()
3166 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in pte_alloc_one_map()
3167 if (unlikely(!pmd_none(*vmf->pmd))) { in pte_alloc_one_map()
3168 spin_unlock(vmf->ptl); in pte_alloc_one_map()
3173 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in pte_alloc_one_map()
3174 spin_unlock(vmf->ptl); in pte_alloc_one_map()
3175 vmf->prealloc_pte = NULL; in pte_alloc_one_map()
3176 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { in pte_alloc_one_map()
3191 if (pmd_devmap_trans_unstable(vmf->pmd)) in pte_alloc_one_map()
3203 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in pte_alloc_one_map()
3204 &vmf->ptl); in pte_alloc_one_map()
3209 static void deposit_prealloc_pte(struct vm_fault *vmf) in deposit_prealloc_pte() argument
3211 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte()
3213 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
3219 vmf->prealloc_pte = NULL; in deposit_prealloc_pte()
3222 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
3224 struct vm_area_struct *vma = vmf->vma; in do_set_pmd()
3225 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pmd()
3226 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
3241 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { in do_set_pmd()
3242 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
3243 if (!vmf->prealloc_pte) in do_set_pmd()
3248 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
3249 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
3265 deposit_prealloc_pte(vmf); in do_set_pmd()
3267 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
3269 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
3275 spin_unlock(vmf->ptl); in do_set_pmd()
3279 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
3302 vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, in alloc_set_pte() argument
3305 struct vm_area_struct *vma = vmf->vma; in alloc_set_pte()
3306 bool write = vmf->flags & FAULT_FLAG_WRITE; in alloc_set_pte()
3310 if (pmd_none(*vmf->pmd) && PageTransCompound(page) && in alloc_set_pte()
3315 ret = do_set_pmd(vmf, page); in alloc_set_pte()
3320 if (!vmf->pte) { in alloc_set_pte()
3321 ret = pte_alloc_one_map(vmf); in alloc_set_pte()
3327 if (unlikely(!pte_none(*vmf->pte))) in alloc_set_pte()
3337 page_add_new_anon_rmap(page, vma, vmf->address, false); in alloc_set_pte()
3344 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in alloc_set_pte()
3347 update_mmu_cache(vma, vmf->address, vmf->pte); in alloc_set_pte()
3368 vm_fault_t finish_fault(struct vm_fault *vmf) in finish_fault() argument
3374 if ((vmf->flags & FAULT_FLAG_WRITE) && in finish_fault()
3375 !(vmf->vma->vm_flags & VM_SHARED)) in finish_fault()
3376 page = vmf->cow_page; in finish_fault()
3378 page = vmf->page; in finish_fault()
3384 if (!(vmf->vma->vm_flags & VM_SHARED)) in finish_fault()
3385 ret = check_stable_address_space(vmf->vma->vm_mm); in finish_fault()
3387 ret = alloc_set_pte(vmf, vmf->memcg, page); in finish_fault()
3388 if (vmf->pte) in finish_fault()
3389 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
3453 static vm_fault_t do_fault_around(struct vm_fault *vmf) in do_fault_around() argument
3455 unsigned long address = vmf->address, nr_pages, mask; in do_fault_around()
3456 pgoff_t start_pgoff = vmf->pgoff; in do_fault_around()
3464 vmf->address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
3465 off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in do_fault_around()
3473 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + in do_fault_around()
3475 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, in do_fault_around()
3478 if (pmd_none(*vmf->pmd)) { in do_fault_around()
3479 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
3480 if (!vmf->prealloc_pte) in do_fault_around()
3485 vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); in do_fault_around()
3488 if (pmd_trans_huge(*vmf->pmd)) { in do_fault_around()
3494 if (!vmf->pte) in do_fault_around()
3498 vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT); in do_fault_around()
3499 if (!pte_none(*vmf->pte)) in do_fault_around()
3501 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault_around()
3503 vmf->address = address; in do_fault_around()
3504 vmf->pte = NULL; in do_fault_around()
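
do_fault_around() maps already-cached pages surrounding the faulting address in one ->map_pages call; the window size comes from the fault_around_bytes knob defined alongside this code. A quick probe, assuming debugfs is mounted at the usual path:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/kernel/debug/fault_around_bytes", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("fault_around_bytes = %s", buf);
	if (f)
		fclose(f);
	return 0;
}
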
3508 static vm_fault_t do_read_fault(struct vm_fault *vmf) in do_read_fault() argument
3510 struct vm_area_struct *vma = vmf->vma; in do_read_fault()
3519 ret = do_fault_around(vmf); in do_read_fault()
3524 ret = __do_fault(vmf); in do_read_fault()
3528 ret |= finish_fault(vmf); in do_read_fault()
3529 unlock_page(vmf->page); in do_read_fault()
3531 put_page(vmf->page); in do_read_fault()
3535 static vm_fault_t do_cow_fault(struct vm_fault *vmf) in do_cow_fault() argument
3537 struct vm_area_struct *vma = vmf->vma; in do_cow_fault()
3543 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
3544 if (!vmf->cow_page) in do_cow_fault()
3547 if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL, in do_cow_fault()
3548 &vmf->memcg, false)) { in do_cow_fault()
3549 put_page(vmf->cow_page); in do_cow_fault()
3553 ret = __do_fault(vmf); in do_cow_fault()
3559 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
3560 __SetPageUptodate(vmf->cow_page); in do_cow_fault()
3562 ret |= finish_fault(vmf); in do_cow_fault()
3563 unlock_page(vmf->page); in do_cow_fault()
3564 put_page(vmf->page); in do_cow_fault()
3569 mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false); in do_cow_fault()
3570 put_page(vmf->cow_page); in do_cow_fault()
3574 static vm_fault_t do_shared_fault(struct vm_fault *vmf) in do_shared_fault() argument
3576 struct vm_area_struct *vma = vmf->vma; in do_shared_fault()
3579 ret = __do_fault(vmf); in do_shared_fault()
3588 unlock_page(vmf->page); in do_shared_fault()
3589 tmp = do_page_mkwrite(vmf); in do_shared_fault()
3592 put_page(vmf->page); in do_shared_fault()
3597 ret |= finish_fault(vmf); in do_shared_fault()
3600 unlock_page(vmf->page); in do_shared_fault()
3601 put_page(vmf->page); in do_shared_fault()
3605 ret |= fault_dirty_shared_page(vmf); in do_shared_fault()
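
do_shared_fault() pairs __do_fault() with do_page_mkwrite() and fault_dirty_shared_page(), so a write through a shared file mapping both maps the page and marks it dirty for writeback. A userspace sketch (creates demo.bin in the current directory):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("demo.bin", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 4096))
		return 1;
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 'x';		 /* write fault -> do_shared_fault() */
	msync(p, 4096, MS_SYNC); /* force writeback of the dirtied page */
	munmap(p, 4096);
	close(fd);
	return 0;
}
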
3617 static vm_fault_t do_fault(struct vm_fault *vmf) in do_fault() argument
3619 struct vm_area_struct *vma = vmf->vma; in do_fault()
3631 if (unlikely(!pmd_present(*vmf->pmd))) in do_fault()
3634 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, in do_fault()
3635 vmf->pmd, in do_fault()
3636 vmf->address, in do_fault()
3637 &vmf->ptl); in do_fault()
3645 if (unlikely(pte_none(*vmf->pte))) in do_fault()
3650 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault()
3652 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) in do_fault()
3653 ret = do_read_fault(vmf); in do_fault()
3655 ret = do_cow_fault(vmf); in do_fault()
3657 ret = do_shared_fault(vmf); in do_fault()
3660 if (vmf->prealloc_pte) { in do_fault()
3661 		pte_free(vma->vm_mm, vmf->prealloc_pte); in do_fault()
3662 vmf->prealloc_pte = NULL; in do_fault()

3682 static vm_fault_t do_numa_page(struct vm_fault *vmf) in do_numa_page() argument
3684 struct vm_area_struct *vma = vmf->vma; in do_numa_page()
3691 bool was_writable = pte_savedwrite(vmf->orig_pte); in do_numa_page()
3699 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); in do_numa_page()
3700 spin_lock(vmf->ptl); in do_numa_page()
3701 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { in do_numa_page()
3702 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
3710 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
3715 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
3716 update_mmu_cache(vma, vmf->address, vmf->pte); in do_numa_page()
3718 page = vm_normal_page(vma, vmf->address, pte); in do_numa_page()
3720 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
3726 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
3750 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
3752 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
3772 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) in create_huge_pmd() argument
3774 if (vma_is_anonymous(vmf->vma)) in create_huge_pmd()
3775 return do_huge_pmd_anonymous_page(vmf); in create_huge_pmd()
3776 if (vmf->vma->vm_ops->huge_fault) in create_huge_pmd()
3777 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in create_huge_pmd()
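
create_huge_pmd() is tried first when the PMD is empty and transparent hugepages are enabled for the vma; for anonymous memory it lands in do_huge_pmd_anonymous_page(). A userspace sketch (assumes THP enabled at least in "madvise" mode; success is not guaranteed):

#include <stdint.h>
#include <sys/mman.h>

#define SZ_2M (2UL << 20)

int main(void)
{
	char *p = mmap(NULL, 2 * SZ_2M, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	madvise(p, 2 * SZ_2M, MADV_HUGEPAGE);
	/* Touch a 2M-aligned address inside the range. */
	char *aligned = (char *)(((uintptr_t)p + SZ_2M - 1) & ~(SZ_2M - 1));
	aligned[0] = 'x';	/* may be served as one PMD-sized page */
	/* AnonHugePages in /proc/self/smaps shows whether it worked. */
	return 0;
}
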
3782 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) in wp_huge_pmd() argument
3784 if (vma_is_anonymous(vmf->vma)) in wp_huge_pmd()
3785 return do_huge_pmd_wp_page(vmf, orig_pmd); in wp_huge_pmd()
3786 if (vmf->vma->vm_ops->huge_fault) in wp_huge_pmd()
3787 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in wp_huge_pmd()
3790 VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); in wp_huge_pmd()
3791 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
3801 static vm_fault_t create_huge_pud(struct vm_fault *vmf) in create_huge_pud() argument
3805 if (vma_is_anonymous(vmf->vma)) in create_huge_pud()
3807 if (vmf->vma->vm_ops->huge_fault) in create_huge_pud()
3808 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in create_huge_pud()
3813 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) in wp_huge_pud() argument
3817 if (vma_is_anonymous(vmf->vma)) in wp_huge_pud()
3819 if (vmf->vma->vm_ops->huge_fault) in wp_huge_pud()
3820 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in wp_huge_pud()
3840 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) in handle_pte_fault() argument
3844 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
3851 vmf->pte = NULL; in handle_pte_fault()
3854 if (pmd_devmap_trans_unstable(vmf->pmd)) in handle_pte_fault()
3862 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); in handle_pte_fault()
3863 vmf->orig_pte = *vmf->pte; in handle_pte_fault()
3874 if (pte_none(vmf->orig_pte)) { in handle_pte_fault()
3875 pte_unmap(vmf->pte); in handle_pte_fault()
3876 vmf->pte = NULL; in handle_pte_fault()
3880 if (!vmf->pte) { in handle_pte_fault()
3881 if (vma_is_anonymous(vmf->vma)) in handle_pte_fault()
3882 return do_anonymous_page(vmf); in handle_pte_fault()
3884 return do_fault(vmf); in handle_pte_fault()
3887 if (!pte_present(vmf->orig_pte)) in handle_pte_fault()
3888 return do_swap_page(vmf); in handle_pte_fault()
3890 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
3891 return do_numa_page(vmf); in handle_pte_fault()
3893 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in handle_pte_fault()
3894 spin_lock(vmf->ptl); in handle_pte_fault()
3895 entry = vmf->orig_pte; in handle_pte_fault()
3896 if (unlikely(!pte_same(*vmf->pte, entry))) in handle_pte_fault()
3898 if (vmf->flags & FAULT_FLAG_WRITE) { in handle_pte_fault()
3900 return do_wp_page(vmf); in handle_pte_fault()
3904 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
3905 vmf->flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
3906 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
3914 if (vmf->flags & FAULT_FLAG_WRITE) in handle_pte_fault()
3915 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); in handle_pte_fault()
3918 pte_unmap_unlock(vmf->pte, vmf->ptl); in handle_pte_fault()
3931 struct vm_fault vmf = { in __handle_mm_fault() local
3949 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
3950 if (!vmf.pud) in __handle_mm_fault()
3952 if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
3953 ret = create_huge_pud(&vmf); in __handle_mm_fault()
3957 pud_t orig_pud = *vmf.pud; in __handle_mm_fault()
3965 ret = wp_huge_pud(&vmf, orig_pud); in __handle_mm_fault()
3969 huge_pud_set_accessed(&vmf, orig_pud); in __handle_mm_fault()
3975 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
3976 if (!vmf.pmd) in __handle_mm_fault()
3978 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
3979 ret = create_huge_pmd(&vmf); in __handle_mm_fault()
3983 pmd_t orig_pmd = *vmf.pmd; in __handle_mm_fault()
3990 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
3995 return do_huge_pmd_numa_page(&vmf, orig_pmd); in __handle_mm_fault()
3998 ret = wp_huge_pmd(&vmf, orig_pmd); in __handle_mm_fault()
4002 huge_pmd_set_accessed(&vmf, orig_pmd); in __handle_mm_fault()
4008 return handle_pte_fault(&vmf); in __handle_mm_fault()
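
__handle_mm_fault() walks pgd/p4d/pud/pmd, handling huge entries along the way, and hands everything else to handle_pte_fault(). Each first touch of an anonymous page below costs roughly one minor fault through this path (THP, if enabled, may batch them), which getrusage() can count:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage a, b;
	long len = 1 << 20, i;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	getrusage(RUSAGE_SELF, &a);
	for (i = 0; i < len; i += 4096)
		p[i] = 1;		/* one write fault per 4K page */
	getrusage(RUSAGE_SELF, &b);
	printf("minor faults: %ld\n", b.ru_minflt - a.ru_minflt);
	return 0;
}
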