Lines Matching refs:vma
526 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, in free_pgtables() argument
529 while (vma) { in free_pgtables()
530 struct vm_area_struct *next = vma->vm_next; in free_pgtables()
531 unsigned long addr = vma->vm_start; in free_pgtables()
537 unlink_anon_vmas(vma); in free_pgtables()
538 unlink_file_vma(vma); in free_pgtables()
540 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
541 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
547 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
549 vma = next; in free_pgtables()
550 next = vma->vm_next; in free_pgtables()
551 unlink_anon_vmas(vma); in free_pgtables()
552 unlink_file_vma(vma); in free_pgtables()
554 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
557 vma = next; in free_pgtables()
561 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, in __pte_alloc() argument
597 wait_split_huge_page(vma->anon_vma, pmd); in __pte_alloc()
644 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
647 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
676 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
677 index = linear_page_index(vma, addr); in print_bad_pte()
687 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
691 if (vma->vm_ops) in print_bad_pte()
693 vma->vm_ops->fault); in print_bad_pte()
694 if (vma->vm_file) in print_bad_pte()
696 vma->vm_file->f_op->mmap); in print_bad_pte()
748 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page() argument
756 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
759 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
765 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
766 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
772 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
773 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
775 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
784 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
804 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, in copy_one_pte() argument
807 unsigned long vm_flags = vma->vm_flags; in copy_one_pte()
871 page = vm_normal_page(vma, addr, pte); in copy_one_pte()
887 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, in copy_pte_range() argument
926 vma, addr, rss); in copy_pte_range()
950 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, in copy_pmd_range() argument
966 dst_pmd, src_pmd, addr, vma); in copy_pmd_range()
976 vma, addr, next)) in copy_pmd_range()
983 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, in copy_pud_range() argument
998 vma, addr, next)) in copy_pud_range()
1005 struct vm_area_struct *vma) in copy_page_range() argument
1009 unsigned long addr = vma->vm_start; in copy_page_range()
1010 unsigned long end = vma->vm_end; in copy_page_range()
1022 if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR | in copy_page_range()
1024 if (!vma->anon_vma) in copy_page_range()
1028 if (is_vm_hugetlb_page(vma)) in copy_page_range()
1029 return copy_hugetlb_page_range(dst_mm, src_mm, vma); in copy_page_range()
1031 if (unlikely(vma->vm_flags & VM_PFNMAP)) { in copy_page_range()
1036 ret = track_pfn_copy(vma); in copy_page_range()
1047 is_cow = is_cow_mapping(vma->vm_flags); in copy_page_range()
1062 vma, addr, next))) { in copy_page_range()
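copy_page_range() has essentially one caller, the fork path. A heavily condensed sketch of the dup_mmap() loop in kernel/fork.c, from memory and with the vma-cloning and error-handling steps elided, just to show where the call sits:

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		if (mpnt->vm_flags & VM_DONTCOPY)
			continue;
		/* the real code first clones the vm_area_struct, forks its
		 * anon_vma and links the copy into the child mm; only then
		 * are the page tables covering it duplicated: */
		retval = copy_page_range(mm, oldmm, mpnt);
		if (retval)
			break;
	}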
1074 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1099 page = vm_normal_page(vma, addr, ptent); in zap_pte_range()
1139 likely(!(vma->vm_flags & VM_SEQ_READ))) in zap_pte_range()
1145 print_bad_pte(vma, addr, ptent, page); in zap_pte_range()
1160 if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) in zap_pte_range()
1161 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1178 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1209 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
1225 vma->vm_start, in zap_pmd_range()
1226 vma->vm_end); in zap_pmd_range()
1230 split_huge_page_pmd(vma, addr, pmd); in zap_pmd_range()
1231 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1244 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1253 struct vm_area_struct *vma, pgd_t *pgd, in zap_pud_range() argument
1265 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1272 struct vm_area_struct *vma, in unmap_page_range() argument
1283 tlb_start_vma(tlb, vma); in unmap_page_range()
1284 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1289 next = zap_pud_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1291 tlb_end_vma(tlb, vma); in unmap_page_range()
1296 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
1300 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1303 if (start >= vma->vm_end) in unmap_single_vma()
1305 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1306 if (end <= vma->vm_start) in unmap_single_vma()
1309 if (vma->vm_file) in unmap_single_vma()
1310 uprobe_munmap(vma, start, end); in unmap_single_vma()
1312 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1313 untrack_pfn(vma, 0, 0); in unmap_single_vma()
1316 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
1328 if (vma->vm_file) { in unmap_single_vma()
1329 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); in unmap_single_vma()
1330 __unmap_hugepage_range_final(tlb, vma, start, end, NULL); in unmap_single_vma()
1331 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); in unmap_single_vma()
1334 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1357 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
1360 struct mm_struct *mm = vma->vm_mm; in unmap_vmas()
1363 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) in unmap_vmas()
1364 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); in unmap_vmas()
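For context, the usual teardown sequence around unmap_vmas() and free_pgtables() (roughly what mm/mmap.c's unmap_region() does; paraphrased from memory, with mm, vma, prev, next, start and end supplied by the caller):

	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);
	/* floor/ceiling bound which now-empty page-table pages may be freed */
	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);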
1377 void zap_page_range(struct vm_area_struct *vma, unsigned long start, in zap_page_range() argument
1380 struct mm_struct *mm = vma->vm_mm; in zap_page_range()
1388 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) in zap_page_range()
1389 unmap_single_vma(&tlb, vma, start, end, details); in zap_page_range()
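zap_page_range() is the exported "drop every pte in this range" primitive; madvise(MADV_DONTNEED) is the textbook caller, along the lines of:

	/* sketch of the mm/madvise.c use: start/end delimit the advised range */
	zap_page_range(vma, start, end - start, NULL);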
1403 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1406 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single()
1414 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1431 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1434 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1435 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
1437 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
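zap_vma_ptes() is exported for drivers that established a VM_PFNMAP mapping (note the check at 1434-1435 above) and later need to revoke it, e.g. before the backing buffer or BAR disappears. A sketch, where my_buf is a hypothetical per-mapping bookkeeping structure:

	if (my_buf->vma)
		zap_vma_ptes(my_buf->vma, my_buf->vma->vm_start,
			     my_buf->vma->vm_end - my_buf->vma->vm_start);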
1464 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
1467 struct mm_struct *mm = vma->vm_mm; in insert_page()
1526 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
1529 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
1533 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
1534 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page()
1535 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
1536 vma->vm_flags |= VM_MIXEDMAP; in vm_insert_page()
1538 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
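vm_insert_page() lets a driver back an ordinary mmap() with pages it already owns; because it sets VM_MIXEDMAP itself (lines 1533-1536 above), the ->mmap handler can stay minimal. A sketch, with my_dev and dev->pages[] purely hypothetical and vm_pgoff handling omitted:

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;	/* hypothetical driver state */
	unsigned long npages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long i, addr = vma->vm_start;
	int err;

	if (npages > dev->npages)	/* never map past the driver's buffer */
		return -EINVAL;
	for (i = 0; i < npages; i++, addr += PAGE_SIZE) {
		err = vm_insert_page(vma, addr, dev->pages[i]);
		if (err)
			return err;
	}
	return 0;
}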
1542 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
1545 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
1561 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
1587 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pfn() argument
1591 pgprot_t pgprot = vma->vm_page_prot; in vm_insert_pfn()
1598 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vm_insert_pfn()
1599 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vm_insert_pfn()
1601 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vm_insert_pfn()
1602 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vm_insert_pfn()
1604 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_pfn()
1606 if (track_pfn_insert(vma, &pgprot, pfn)) in vm_insert_pfn()
1609 ret = insert_pfn(vma, addr, pfn, pgprot); in vm_insert_pfn()
1615 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vm_insert_mixed() argument
1618 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); in vm_insert_mixed()
1620 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_mixed()
1634 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_mixed()
1636 return insert_pfn(vma, addr, pfn, vma->vm_page_prot); in vm_insert_mixed()
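vm_insert_pfn() and vm_insert_mixed() are typically called from a ->fault handler rather than at mmap() time, installing one pfn per fault. A sketch for a VM_PFNMAP vma; my_dev_pfn() is a hypothetical lookup, and vmf->virtual_address is the 3.x-era field carrying the faulting user address:

static int my_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = my_dev_pfn(vma, vmf->pgoff);	/* hypothetical */
	int err;

	err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err && err != -EBUSY)	/* -EBUSY: raced, pte already present */
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;		/* pte installed directly, no page to return */
}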
1717 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
1723 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
1744 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range()
1745 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range()
1747 vma->vm_pgoff = pfn; in remap_pfn_range()
1750 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
1754 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
1759 flush_cache_range(vma, addr, end); in remap_pfn_range()
1769 untrack_pfn(vma, pfn, PAGE_ALIGN(size)); in remap_pfn_range()
1788 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
1807 if (vma->vm_pgoff > pages) in vm_iomap_memory()
1809 pfn += vma->vm_pgoff; in vm_iomap_memory()
1810 pages -= vma->vm_pgoff; in vm_iomap_memory()
1813 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1818 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
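vm_iomap_memory() wraps the remap_pfn_range() bookkeeping above into a single call for the common "map one MMIO window" case, so a minimal driver mmap() reduces to (MY_DEV_PHYS_BASE and MY_DEV_REGION_LEN are hypothetical):

static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vm_iomap_memory(vma, MY_DEV_PHYS_BASE, MY_DEV_REGION_LEN);
}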
1948 …oid cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) in cow_user_page() argument
1973 copy_user_highpage(dst, src, va, vma); in cow_user_page()
1982 static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, in do_page_mkwrite() argument
1993 ret = vma->vm_ops->page_mkwrite(vma, &vmf); in do_page_mkwrite()
2026 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_wp_page() argument
2040 old_page = vm_normal_page(vma, address, orig_pte); in do_wp_page()
2050 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
2079 page_move_anon_rmap(old_page, vma, address); in do_wp_page()
2084 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
2091 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in do_wp_page()
2095 tmp = do_page_mkwrite(vma, old_page, address); in do_wp_page()
2128 flush_cache_page(vma, address, pte_pfn(orig_pte)); in do_wp_page()
2130 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_wp_page()
2131 if (ptep_set_access_flags(vma, address, page_table, entry,1)) in do_wp_page()
2132 update_mmu_cache(vma, address, page_table); in do_wp_page()
2158 if (vma->vm_file) in do_wp_page()
2159 file_update_time(vma->vm_file); in do_wp_page()
2187 if (unlikely(anon_vma_prepare(vma))) in do_wp_page()
2191 new_page = alloc_zeroed_user_highpage_movable(vma, address); in do_wp_page()
2195 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in do_wp_page()
2198 cow_user_page(new_page, old_page, address, vma); in do_wp_page()
2221 flush_cache_page(vma, address, pte_pfn(orig_pte)); in do_wp_page()
2222 entry = mk_pte(new_page, vma->vm_page_prot); in do_wp_page()
2223 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_wp_page()
2230 ptep_clear_flush(vma, address, page_table); in do_wp_page()
2231 page_add_new_anon_rmap(new_page, vma, address); in do_wp_page()
2233 lru_cache_add_active_or_unevictable(new_page, vma); in do_wp_page()
2240 update_mmu_cache(vma, address, page_table); in do_wp_page()
2284 if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) { in do_wp_page()
2300 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
2304 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
2310 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
2313 vma_interval_tree_foreach(vma, root, in unmap_mapping_range_tree()
2316 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
2317 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
2326 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
2327 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
2328 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
2336 struct vm_area_struct *vma; in unmap_mapping_range_list() local
2344 list_for_each_entry(vma, head, shared.nonlinear) { in unmap_mapping_range_list()
2345 details->nonlinear_vma = vma; in unmap_mapping_range_list()
2346 unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details); in unmap_mapping_range_list()
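These tree/list walkers implement the exported unmap_mapping_range(), which filesystems use to shoot down every user mapping of a file range. Truncation is the classic case; roughly what mm/truncate.c's truncate_pagecache() does (paraphrased from memory, with inode and newsize coming from the caller):

	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/* unmap, shrink the page cache, then unmap again to close the race
	 * with faults re-instantiating ptes from not-yet-truncated pages */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);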
2404 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_swap_page() argument
2427 print_bad_pte(vma, address, orig_pte, NULL); in do_swap_page()
2436 GFP_HIGHUSER_MOVABLE, vma, address); in do_swap_page()
2482 page = ksm_might_need_to_copy(page, vma, address); in do_swap_page()
2518 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
2520 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in do_swap_page()
2525 flush_icache_page(vma, page); in do_swap_page()
2530 do_page_add_anon_rmap(page, vma, address, exclusive); in do_swap_page()
2533 page_add_new_anon_rmap(page, vma, address); in do_swap_page()
2535 lru_cache_add_active_or_unevictable(page, vma); in do_swap_page()
2539 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) in do_swap_page()
2556 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
2563 update_mmu_cache(vma, address, page_table); in do_swap_page()
2587 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_anonymous_page() argument
2599 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
2605 vma->vm_page_prot)); in do_anonymous_page()
2613 if (unlikely(anon_vma_prepare(vma))) in do_anonymous_page()
2615 page = alloc_zeroed_user_highpage_movable(vma, address); in do_anonymous_page()
2628 entry = mk_pte(page, vma->vm_page_prot); in do_anonymous_page()
2629 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
2637 page_add_new_anon_rmap(page, vma, address); in do_anonymous_page()
2639 lru_cache_add_active_or_unevictable(page, vma); in do_anonymous_page()
2644 update_mmu_cache(vma, address, page_table); in do_anonymous_page()
2663 static int __do_fault(struct vm_area_struct *vma, unsigned long address, in __do_fault() argument
2674 ret = vma->vm_ops->fault(vma, &vmf); in __do_fault()
2709 void do_set_pte(struct vm_area_struct *vma, unsigned long address, in do_set_pte() argument
2714 flush_icache_page(vma, page); in do_set_pte()
2715 entry = mk_pte(page, vma->vm_page_prot); in do_set_pte()
2717 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_set_pte()
2721 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
2722 page_add_new_anon_rmap(page, vma, address); in do_set_pte()
2724 inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES); in do_set_pte()
2727 set_pte_at(vma->vm_mm, address, pte, entry); in do_set_pte()
2730 update_mmu_cache(vma, address, pte); in do_set_pte()
2797 static void do_fault_around(struct vm_area_struct *vma, unsigned long address, in do_fault_around() argument
2808 start_addr = max(address & mask, vma->vm_start); in do_fault_around()
2819 max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1, in do_fault_around()
2827 if (start_addr >= vma->vm_end) in do_fault_around()
2837 vma->vm_ops->map_pages(vma, &vmf); in do_fault_around()
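do_fault_around() only runs when the vma supplies a ->map_pages callback; for ordinary page-cache files that hook comes from mm/filemap.c, roughly (field list from memory for this kernel era):

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};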
2840 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_read_fault() argument
2854 if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) && in do_read_fault()
2857 do_fault_around(vma, address, pte, pgoff, flags); in do_read_fault()
2863 ret = __do_fault(vma, address, pgoff, flags, &fault_page); in do_read_fault()
2874 do_set_pte(vma, address, fault_page, pte, false, false); in do_read_fault()
2881 static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_cow_fault() argument
2891 if (unlikely(anon_vma_prepare(vma))) in do_cow_fault()
2894 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in do_cow_fault()
2903 ret = __do_fault(vma, address, pgoff, flags, &fault_page); in do_cow_fault()
2907 copy_user_highpage(new_page, fault_page, address, vma); in do_cow_fault()
2917 do_set_pte(vma, address, new_page, pte, true, true); in do_cow_fault()
2919 lru_cache_add_active_or_unevictable(new_page, vma); in do_cow_fault()
2930 static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_shared_fault() argument
2941 ret = __do_fault(vma, address, pgoff, flags, &fault_page); in do_shared_fault()
2949 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
2951 tmp = do_page_mkwrite(vma, fault_page, address); in do_shared_fault()
2966 do_set_pte(vma, address, fault_page, pte, true, false); in do_shared_fault()
2973 if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) { in do_shared_fault()
2982 if (vma->vm_file && !vma->vm_ops->page_mkwrite) in do_shared_fault()
2983 file_update_time(vma->vm_file); in do_shared_fault()
2994 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_linear_fault() argument
2999 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in do_linear_fault()
3003 if (!vma->vm_ops->fault) in do_linear_fault()
3006 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_linear_fault()
3008 if (!(vma->vm_flags & VM_SHARED)) in do_linear_fault()
3009 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_linear_fault()
3011 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_linear_fault()
3025 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_nonlinear_fault() argument
3036 if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) { in do_nonlinear_fault()
3040 print_bad_pte(vma, address, orig_pte, NULL); in do_nonlinear_fault()
3046 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_nonlinear_fault()
3048 if (!(vma->vm_flags & VM_SHARED)) in do_nonlinear_fault()
3049 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_nonlinear_fault()
3051 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_nonlinear_fault()
3054 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, in numa_migrate_prep() argument
3066 return mpol_misplaced(page, vma, addr); in numa_migrate_prep()
3069 static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_numa_page() argument
3098 update_mmu_cache(vma, addr, ptep); in do_numa_page()
3100 page = vm_normal_page(vma, addr, pte); in do_numa_page()
3119 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) in do_numa_page()
3124 target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags); in do_numa_page()
3132 migrated = migrate_misplaced_page(page, vma, target_nid); in do_numa_page()
3161 struct vm_area_struct *vma, unsigned long address, in handle_pte_fault() argument
3170 if (vma->vm_ops) in handle_pte_fault()
3171 return do_linear_fault(mm, vma, address, in handle_pte_fault()
3173 return do_anonymous_page(mm, vma, address, in handle_pte_fault()
3177 return do_nonlinear_fault(mm, vma, address, in handle_pte_fault()
3179 return do_swap_page(mm, vma, address, in handle_pte_fault()
3184 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3192 return do_wp_page(mm, vma, address, in handle_pte_fault()
3197 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
3198 update_mmu_cache(vma, address, pte); in handle_pte_fault()
3207 flush_tlb_fix_spurious_fault(vma, address); in handle_pte_fault()
3220 static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in __handle_mm_fault() argument
3228 if (unlikely(is_vm_hugetlb_page(vma))) in __handle_mm_fault()
3229 return hugetlb_fault(mm, vma, address, flags); in __handle_mm_fault()
3238 if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
3240 if (!vma->vm_ops) in __handle_mm_fault()
3241 ret = do_huge_pmd_anonymous_page(mm, vma, address, in __handle_mm_fault()
3262 return do_huge_pmd_numa_page(mm, vma, address, in __handle_mm_fault()
3266 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, in __handle_mm_fault()
3271 huge_pmd_set_accessed(mm, vma, address, pmd, in __handle_mm_fault()
3284 unlikely(__pte_alloc(mm, vma, pmd, address))) in __handle_mm_fault()
3307 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
3316 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in handle_mm_fault() argument
3336 ret = __handle_mm_fault(mm, vma, address, flags); in handle_mm_fault()
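handle_mm_fault() is what the per-architecture page-fault handlers call once the faulting vma is known. A condensed, generic sketch (no particular arch verbatim; bad_area and out_of_memory are hypothetical labels, flags would be FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE plus FAULT_FLAG_WRITE for writes, and VM_FAULT_RETRY handling is elided):

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start > address &&
	    (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address)))
		goto bad_area;

	fault = handle_mm_fault(mm, vma, address, flags);
	if (fault & VM_FAULT_ERROR) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		goto bad_area;
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);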
3465 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
3472 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
3475 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
3485 int follow_phys(struct vm_area_struct *vma, in follow_phys() argument
3493 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_phys()
3496 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
3513 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
3521 if (follow_phys(vma, addr, write, &prot, &phys_addr)) in generic_access_phys()
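generic_access_phys() exists so that VM_IO/VM_PFNMAP mappings, which get_user_pages() cannot handle, remain reachable through ptrace()-style access: __access_remote_vm() below falls back to vma->vm_ops->access. /dev/mem wires it up along these lines (cf. drivers/char/mem.c):

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};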
3543 struct vm_area_struct *vma; in __access_remote_vm() local
3554 write, 1, &page, &vma); in __access_remote_vm()
3563 vma = find_vma(mm, addr); in __access_remote_vm()
3564 if (!vma || vma->vm_start > addr) in __access_remote_vm()
3566 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
3567 ret = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
3581 copy_to_user_page(vma, page, addr, in __access_remote_vm()
3585 copy_from_user_page(vma, page, addr, in __access_remote_vm()
3643 struct vm_area_struct *vma; in print_vma_addr() local
3653 vma = find_vma(mm, ip); in print_vma_addr()
3654 if (vma && vma->vm_file) { in print_vma_addr()
3655 struct file *f = vma->vm_file; in print_vma_addr()
3664 vma->vm_start, in print_vma_addr()
3665 vma->vm_end - vma->vm_start); in print_vma_addr()
3734 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
3743 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); in copy_user_gigantic_page()
3752 unsigned long addr, struct vm_area_struct *vma, in copy_user_huge_page() argument
3758 copy_user_gigantic_page(dst, src, addr, vma, in copy_user_huge_page()
3766 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); in copy_user_huge_page()