Lines matching refs: address (mm/memory.c)
1372 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1380 address, address + size); in zap_page_range_single()
1381 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end); in zap_page_range_single()
1384 unmap_single_vma(&tlb, vma, address, range.end, details); in zap_page_range_single()
1386 tlb_finish_mmu(&tlb, address, range.end); in zap_page_range_single()
1400 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1403 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1407 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
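
The guard at line 1403 rejects any range that is not fully contained in the VMA before zapping. A minimal userspace sketch of that containment check follows; the struct is a simplified stand-in, not the kernel's vm_area_struct, and the explicit wraparound test is an extra safeguard the quoted check does not spell out.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct vm_area_struct: bounds only. */
struct vma_bounds {
    unsigned long vm_start;  /* first byte of the mapping */
    unsigned long vm_end;    /* one past the last byte */
};

/*
 * Mirrors the guard in zap_vma_ptes(): [address, address + size)
 * must lie entirely inside [vm_start, vm_end).
 */
static bool range_within_vma(const struct vma_bounds *vma,
                             unsigned long address, unsigned long size)
{
    if (address + size < address)   /* extra wraparound guard */
        return false;
    return address >= vma->vm_start && address + size <= vma->vm_end;
}

int main(void)
{
    struct vma_bounds vma = { .vm_start = 0x1000, .vm_end = 0x5000 };

    printf("%d\n", range_within_vma(&vma, 0x1000, 0x4000)); /* 1: exact fit */
    printf("%d\n", range_within_vma(&vma, 0x4000, 0x2000)); /* 0: past vm_end */
    printf("%d\n", range_within_vma(&vma, 0x0800, 0x1000)); /* 0: before vm_start */
    return 0;
}
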
2316 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
2319 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
2320 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_reuse()
2356 vmf->address); in wp_page_copy()
2361 vmf->address); in wp_page_copy()
2364 cow_user_page(new_page, old_page, vmf->address, vma); in wp_page_copy()
2373 vmf->address & PAGE_MASK, in wp_page_copy()
2374 (vmf->address & PAGE_MASK) + PAGE_SIZE); in wp_page_copy()
2380 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
2391 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
2400 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); in wp_page_copy()
2401 page_add_new_anon_rmap(new_page, vma, vmf->address, false); in wp_page_copy()
2409 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
2410 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_copy()
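
Lines 2373-2374 derive the one-page mmu-notifier range by masking the faulting address down to its page boundary. The same arithmetic, assuming the common 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assuming 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long address = 0x7f3a12345678UL; /* arbitrary faulting address */

    /* Same computation as the notifier range in wp_page_copy(). */
    unsigned long start = address & PAGE_MASK;
    unsigned long end   = (address & PAGE_MASK) + PAGE_SIZE;

    printf("fault at %#lx -> range [%#lx, %#lx)\n", address, start, end);
    return 0;
}
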
2494 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
2587 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
2618 vmf->address, &vmf->ptl); in do_wp_page()
2629 vmf->address); in do_wp_page()
2788 vmf->address); in do_swap_page()
2795 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
2803 page = lookup_swap_cache(entry, vma, vmf->address); in do_swap_page()
2813 vmf->address); in do_swap_page()
2833 vmf->address, &vmf->ptl); in do_swap_page()
2872 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
2888 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
2920 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
2921 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
2926 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_swap_page()
2930 do_page_add_anon_rmap(page, vma, vmf->address, exclusive); in do_swap_page()
2961 update_mmu_cache(vma, vmf->address, vmf->pte); in do_swap_page()
3017 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
3020 vmf->address, &vmf->ptl); in do_anonymous_page()
3037 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); in do_anonymous_page()
3056 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
3074 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_anonymous_page()
3078 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
3081 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
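
For a read fault, do_anonymous_page() installs a special PTE pointing at the shared zero page (my_zero_pfn, line 3017); a real zeroed page (alloc_zeroed_user_highpage_movable, line 3037) is allocated only when a write needs one. The user-visible contract is simply zero-fill on first touch, which a short Linux program can demonstrate; the zero-page reuse itself is an internal optimization this cannot observe directly.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    /* Anonymous mapping: no page is allocated until first touch. */
    size_t len = 4096;
    unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* Read fault: the kernel may map the shared zero page read-only. */
    printf("first read: %d (always 0)\n", p[0]);

    /* Write fault: now a private zeroed page must be installed. */
    p[0] = 42;
    printf("after write: %d\n", p[0]);

    munmap(p, len);
    return 0;
}
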
3203 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in pte_alloc_one_map()
3226 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
3337 page_add_new_anon_rmap(page, vma, vmf->address, false); in alloc_set_pte()
3344 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in alloc_set_pte()
3347 update_mmu_cache(vma, vmf->address, vmf->pte); in alloc_set_pte()
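
Line 3226 rounds the faulting address down to a PMD-sized boundary with HPAGE_PMD_MASK before do_set_pmd() installs a huge mapping. The rounding, with assumed x86-64-style constants (4 KiB base pages, 9 bits per level, hence 2 MiB huge pages):

#include <stdio.h>

#define PAGE_SHIFT      12                       /* assuming 4 KiB base pages */
#define HPAGE_PMD_SHIFT (PAGE_SHIFT + 9)         /* 9 bits per level: 2 MiB */
#define HPAGE_PMD_SIZE  (1UL << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

int main(void)
{
    unsigned long address = 0x7f3a12345678UL;

    /* Same rounding as haddr in do_set_pmd(). */
    unsigned long haddr = address & HPAGE_PMD_MASK;

    printf("fault at %#lx -> huge page at %#lx (%lu MiB aligned)\n",
           address, haddr, HPAGE_PMD_SIZE >> 20);
    return 0;
}
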
3455 unsigned long address = vmf->address, nr_pages, mask; in do_fault_around() local
3464 vmf->address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
3465 off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in do_fault_around()
3473 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + in do_fault_around()
3498 vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT); in do_fault_around()
3503 vmf->address = address; in do_fault_around()
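
do_fault_around() (lines 3455-3503) temporarily rewrites vmf->address to the start of a window of nearby pages, maps what it can, then restores the original address at line 3503. A sketch of the window arithmetic at lines 3464-3465, with assumed values for the window size and VMA start:

#include <stdio.h>

#define PAGE_SHIFT   12                  /* assuming 4 KiB pages */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_MASK    (~(PAGE_SIZE - 1))
#define PTRS_PER_PTE 512                 /* assuming 512 PTEs per table */

static unsigned long max_ul(unsigned long a, unsigned long b)
{
    return a > b ? a : b;
}

int main(void)
{
    unsigned long vm_start = 0x7f3a12303000UL; /* hypothetical VMA start */
    unsigned long address  = 0x7f3a12345678UL; /* faulting address */
    unsigned long nr_pages = 16;               /* 64 KiB fault-around window */

    /* Window-aligned mask, then clamp the start to the VMA,
     * as in do_fault_around(). */
    unsigned long mask  = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
    unsigned long start = max_ul(address & mask, vm_start);

    /* Offset of the real fault inside the window, in PTE slots. */
    unsigned long off = ((address - start) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

    printf("fault %#lx -> window starts %#lx, fault is PTE slot %lu\n",
           address, start, off);
    return 0;
}
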
3543 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
3559 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
3636 vmf->address, in do_fault()
3710 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
3715 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
3716 update_mmu_cache(vma, vmf->address, vmf->pte); in do_numa_page()
3718 page = vm_normal_page(vma, vmf->address, pte); in do_numa_page()
3750 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
3791 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
3862 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); in handle_pte_fault()
3904 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
3906 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
3915 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); in handle_pte_fault()
3929 unsigned long address, unsigned int flags) in __handle_mm_fault() argument
3933 .address = address & PAGE_MASK, in __handle_mm_fault()
3935 .pgoff = linear_page_index(vma, address), in __handle_mm_fault()
3944 pgd = pgd_offset(mm, address); in __handle_mm_fault()
3945 p4d = p4d_alloc(mm, pgd, address); in __handle_mm_fault()
3949 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
3975 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
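
Lines 3944-3975 walk top-down through pgd, p4d, pud, and pmd, allocating any missing level and failing the fault if an allocation fails. A toy two-level analogue of that allocate-as-you-descend pattern; nothing here is kernel API, and the level widths are assumed.

#include <stdio.h>
#include <stdlib.h>

#define LEVEL_BITS 9
#define LEVEL_SIZE (1UL << LEVEL_BITS)   /* 512 slots per level */
#define PAGE_SHIFT 12

/* Toy two-level "page table": a top level pointing at leaf tables. */
struct toy_mm {
    long *tables[LEVEL_SIZE];
};

/* Descend from the root, allocating each missing level, and return a
 * pointer to the leaf slot for addr -- the shape of __handle_mm_fault()'s
 * p4d_alloc()/pud_alloc()/pmd_alloc() sequence, collapsed to two levels. */
static long *walk_alloc(struct toy_mm *mm, unsigned long addr)
{
    unsigned long top = (addr >> (PAGE_SHIFT + LEVEL_BITS)) & (LEVEL_SIZE - 1);
    unsigned long bot = (addr >> PAGE_SHIFT) & (LEVEL_SIZE - 1);

    if (!mm->tables[top]) {
        mm->tables[top] = calloc(LEVEL_SIZE, sizeof(long));
        if (!mm->tables[top])
            return NULL;        /* like returning VM_FAULT_OOM */
    }
    return &mm->tables[top][bot];
}

int main(void)
{
    static struct toy_mm mm;    /* zero-initialized: all levels empty */
    unsigned long addr = 0x12345678UL;

    long *slot = walk_alloc(&mm, addr);
    if (!slot)
        return 1;
    *slot = 42;
    printf("slot for %#lx installed, value %ld\n", addr, *slot);
    return 0;
}
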
4017 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
4043 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
4045 ret = __handle_mm_fault(vma, address, flags); in handle_mm_fault()
4068 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __p4d_alloc() argument
4070 p4d_t *new = p4d_alloc_one(mm, address); in __p4d_alloc()
4091 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) in __pud_alloc() argument
4093 pud_t *new = pud_alloc_one(mm, address); in __pud_alloc()
4123 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) in __pmd_alloc() argument
4126 pmd_t *new = pmd_alloc_one(mm, address); in __pmd_alloc()
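
__p4d_alloc(), __pud_alloc(), and __pmd_alloc() all allocate the new table first, without holding the page-table lock; in the kernel source (not visible in this listing) each then rechecks the slot under the lock and frees its copy if another thread raced in first. A pthread sketch of that allocate-then-recheck idiom:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static long *shared_table;   /* stands in for a p4d/pud/pmd slot */

/* Allocate outside the lock, recheck under the lock: two racing
 * faulters may both allocate, but only one installs; the loser
 * frees its copy and uses the winner's table. */
static long *table_alloc(void)
{
    long *new_table = calloc(512, sizeof(long)); /* may sleep; no lock held */
    long *ret;

    if (!new_table)
        return NULL;

    pthread_mutex_lock(&table_lock);
    if (!shared_table)
        shared_table = new_table;   /* we won: install */
    else
        free(new_table);            /* raced: someone beat us */
    ret = shared_table;
    pthread_mutex_unlock(&table_lock);
    return ret;
}

int main(void)
{
    long *t = table_alloc();

    printf("table at %p\n", (void *)t);
    free(shared_table);
    return 0;
}
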
4151 static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, in __follow_pte_pmd() argument
4161 pgd = pgd_offset(mm, address); in __follow_pte_pmd()
4165 p4d = p4d_offset(pgd, address); in __follow_pte_pmd()
4169 pud = pud_offset(p4d, address); in __follow_pte_pmd()
4173 pmd = pmd_offset(pud, address); in __follow_pte_pmd()
4182 NULL, mm, address & PMD_MASK, in __follow_pte_pmd()
4183 (address & PMD_MASK) + PMD_SIZE); in __follow_pte_pmd()
4201 address & PAGE_MASK, in __follow_pte_pmd()
4202 (address & PAGE_MASK) + PAGE_SIZE); in __follow_pte_pmd()
4205 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in __follow_pte_pmd()
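
__follow_pte_pmd() (lines 4161-4205) performs the same descent as the fault path but never allocates: each *_offset() step is preceded by a presence check, and any missing level aborts the walk before the pte is finally mapped and locked. A toy non-allocating counterpart to the earlier walk sketch:

#include <stdio.h>

#define LEVEL_BITS 9
#define LEVEL_SIZE (1UL << LEVEL_BITS)
#define PAGE_SHIFT 12

struct toy_mm {
    long *tables[LEVEL_SIZE];   /* top level; NULL means "not present" */
};

/* Non-allocating descent, the shape of __follow_pte_pmd(): a missing
 * level is an immediate failure, never a trigger to allocate. */
static long *walk_lookup(struct toy_mm *mm, unsigned long addr)
{
    unsigned long top = (addr >> (PAGE_SHIFT + LEVEL_BITS)) & (LEVEL_SIZE - 1);
    unsigned long bot = (addr >> PAGE_SHIFT) & (LEVEL_SIZE - 1);

    if (!mm->tables[top])
        return NULL;    /* like the pgd/p4d/pud/pmd "none" checks */
    return &mm->tables[top][bot];
}

int main(void)
{
    static struct toy_mm mm;    /* empty: every lookup fails */

    long *slot = walk_lookup(&mm, 0x12345678UL);
    puts(slot ? "found slot" : "no mapping (kernel counterpart errors out)");
    return 0;
}
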
4218 static inline int follow_pte(struct mm_struct *mm, unsigned long address, in follow_pte() argument
4225 !(res = __follow_pte_pmd(mm, address, NULL, in follow_pte()
4230 int follow_pte_pmd(struct mm_struct *mm, unsigned long address, in follow_pte_pmd() argument
4238 !(res = __follow_pte_pmd(mm, address, range, in follow_pte_pmd()
4254 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
4264 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
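
follow_pfn() resolves the PTE with follow_pte() and extracts the page-frame number from it, presumably via pte_pfn() (the extraction line itself is not in this listing). A pfn is just the physical address with the in-page offset shifted off, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assuming 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    unsigned long phys = 0x1a2b3c678UL; /* hypothetical physical address */

    /* The pfn/offset split that pte_pfn() recovers from a PTE. */
    unsigned long pfn    = phys >> PAGE_SHIFT;
    unsigned long offset = phys & (PAGE_SIZE - 1);

    printf("phys %#lx -> pfn %#lx, offset %#lx\n", phys, pfn, offset);
    printf("reassembled: %#lx\n", (pfn << PAGE_SHIFT) | offset);
    return 0;
}
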
4275 unsigned long address, unsigned int flags, in follow_phys() argument
4285 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()