Lines matching refs: dst_vma
30 struct vm_area_struct *dst_vma; in find_dst_vma() local
32 dst_vma = find_vma(dst_mm, dst_start); in find_dst_vma()
33 if (!dst_vma) in find_dst_vma()
36 if (dst_start < dst_vma->vm_start || in find_dst_vma()
37 dst_start + len > dst_vma->vm_end) in find_dst_vma()
45 if (!dst_vma->vm_userfaultfd_ctx.ctx) in find_dst_vma()
48 return dst_vma; in find_dst_vma()
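Read together, these references cover the whole of find_dst_vma(): look up the VMA containing dst_start, require that the entire [dst_start, dst_start + len) range fits inside that single VMA, and require that the VMA is actually registered with userfaultfd. The listing appears to come from the Linux kernel's mm/userfaultfd.c (circa v5.9); a condensed sketch reconstructed around the lines above, with comments added:

static struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
					    unsigned long dst_start,
					    unsigned long len)
{
	struct vm_area_struct *dst_vma;

	/* The destination range must lie fully within one existing vma. */
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;
	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * The vma must be registered with userfaultfd; this also keeps
	 * the VM_MAYWRITE check done at registration time meaningful.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}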
53 struct vm_area_struct *dst_vma, in mcopy_atomic_pte() argument
69 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr); in mcopy_atomic_pte()
102 _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot)); in mcopy_atomic_pte()
103 if (dst_vma->vm_flags & VM_WRITE) { in mcopy_atomic_pte()
111 if (dst_vma->vm_file) { in mcopy_atomic_pte()
113 inode = dst_vma->vm_file->f_inode; in mcopy_atomic_pte()
114 offset = linear_page_index(dst_vma, dst_addr); in mcopy_atomic_pte()
125 page_add_new_anon_rmap(page, dst_vma, dst_addr, false); in mcopy_atomic_pte()
126 lru_cache_add_inactive_or_unevictable(page, dst_vma); in mcopy_atomic_pte()
131 update_mmu_cache(dst_vma, dst_addr, dst_pte); in mcopy_atomic_pte()
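The mcopy_atomic_pte() references trace the private-destination copy path: allocate a fresh page against the destination VMA, build a dirty PTE from the VMA's page protection (writable only when the VMA has VM_WRITE), bounds-check file-backed (MAP_PRIVATE shmem) destinations against the inode size, then hook the page into the anonymous rmap and LRU before installing the PTE. A condensed sketch of that tail under the same reading; the user-copy step and locking are abbreviated, and the goto label name is illustrative rather than taken from the listing:

	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
	/* ... copy the source bytes into page ... */

	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
	if (dst_vma->vm_flags & VM_WRITE) {
		if (wp_copy)			/* UFFDIO_COPY_MODE_WP */
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* MAP_PRIVATE shmem: never install beyond i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		if (unlikely(offset >= max_off))
			goto out_release_unlock;	/* -EFAULT */
	}

	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	lru_cache_add_inactive_or_unevictable(page, dst_vma);
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No TLB invalidation needed: the pte was not present before. */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);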
146 struct vm_area_struct *dst_vma, in mfill_zeropage_pte() argument
156 dst_vma->vm_page_prot)); in mfill_zeropage_pte()
158 if (dst_vma->vm_file) { in mfill_zeropage_pte()
160 inode = dst_vma->vm_file->f_inode; in mfill_zeropage_pte()
161 offset = linear_page_index(dst_vma, dst_addr); in mfill_zeropage_pte()
172 update_mmu_cache(dst_vma, dst_addr, dst_pte); in mfill_zeropage_pte()
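mfill_zeropage_pte() is the UFFDIO_ZEROPAGE counterpart: no page is allocated at all; a special PTE pointing at the shared zero page is built with the VMA's protection bits, the same shmem i_size check is applied, and the PTE is installed. A condensed sketch on the same assumptions (label names illustrative):

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* MAP_PRIVATE shmem: bounds-check against i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		if (unlikely(offset >= max_off))
			goto out_unlock;		/* -EFAULT */
	}
	if (!pte_none(*dst_pte))
		goto out_unlock;			/* -EEXIST */
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No TLB invalidation needed: the pte was not present before. */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);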
206 struct vm_area_struct *dst_vma, in __mcopy_atomic_hugetlb() argument
212 int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
213 int vm_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
239 vma_hpagesize = vma_kernel_pagesize(dst_vma); in __mcopy_atomic_hugetlb()
253 if (!dst_vma) { in __mcopy_atomic_hugetlb()
255 dst_vma = find_dst_vma(dst_mm, dst_start, len); in __mcopy_atomic_hugetlb()
256 if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) in __mcopy_atomic_hugetlb()
260 if (vma_hpagesize != vma_kernel_pagesize(dst_vma)) in __mcopy_atomic_hugetlb()
263 vm_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
271 if (unlikely(anon_vma_prepare(dst_vma))) in __mcopy_atomic_hugetlb()
286 mapping = dst_vma->vm_file->f_mapping; in __mcopy_atomic_hugetlb()
288 idx = linear_page_index(dst_vma, dst_addr); in __mcopy_atomic_hugetlb()
308 err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, in __mcopy_atomic_hugetlb()
331 dst_vma = NULL; in __mcopy_atomic_hugetlb()
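In __mcopy_atomic_hugetlb() the interesting pattern is revalidation. The function caches VM_SHARED and the huge page size up front, but whenever it must drop mmap_lock to copy source data from userspace it sets dst_vma = NULL (line 331 above); on the next loop pass the NULL forces a fresh find_dst_vma() plus a re-check that the VMA is still a hugetlb VMA with the same huge page size, since the mapping may have changed while the lock was dropped. A condensed sketch of that revalidation:

retry:
	/*
	 * If mmap_lock was dropped and retaken, the cached vma pointer
	 * is stale: look the vma up again and re-check its properties.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}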
407 struct vm_area_struct *dst_vma,
416 struct vm_area_struct *dst_vma, in mfill_atomic_pte() argument
435 if (!(dst_vma->vm_flags & VM_SHARED)) { in mfill_atomic_pte()
437 err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, in mfill_atomic_pte()
442 dst_vma, dst_addr); in mfill_atomic_pte()
447 dst_vma, dst_addr, in mfill_atomic_pte()
451 dst_vma, dst_addr); in mfill_atomic_pte()
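The mfill_atomic_pte() call sites form a two-way dispatch on the destination type: private destinations (no VM_SHARED, which covers both anonymous VMAs and MAP_PRIVATE shmem, the latter filled with anonymous memory directly) go to mcopy_atomic_pte() or mfill_zeropage_pte(), while shared shmem destinations go to the shmem_* helpers that insert into the page cache. A condensed sketch matching the call sites above; the zeropage flag is assumed from the two-helper split and may be spelled differently in a given kernel version:

	if (!(dst_vma->vm_flags & VM_SHARED)) {
		/* Anonymous or MAP_PRIVATE shmem: plain anonymous fill. */
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		/* Shared shmem: go through the page cache. */
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}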
465 struct vm_area_struct *dst_vma; in __mcopy_atomic() local
504 dst_vma = find_dst_vma(dst_mm, dst_start, len); in __mcopy_atomic()
505 if (!dst_vma) in __mcopy_atomic()
513 if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && in __mcopy_atomic()
514 dst_vma->vm_flags & VM_SHARED)) in __mcopy_atomic()
522 if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP)) in __mcopy_atomic()
528 if (is_vm_hugetlb_page(dst_vma)) in __mcopy_atomic()
529 return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, in __mcopy_atomic()
532 if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) in __mcopy_atomic()
541 if (!(dst_vma->vm_flags & VM_SHARED) && in __mcopy_atomic()
542 unlikely(anon_vma_prepare(dst_vma))) in __mcopy_atomic()
579 err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, in __mcopy_atomic()
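__mcopy_atomic() performs all the per-request validation once before its per-page loop: the destination VMA must exist and cover the range; an anonymous VMA with VM_SHARED would indicate a kernel bug (MAP_ANONYMOUS|MAP_SHARED becomes shmem at mmap time), hence the WARN_ON_ONCE; write-protect copies are only allowed on VM_UFFD_WP ranges; hugetlb destinations branch off to __mcopy_atomic_hugetlb(); everything else must be anonymous or shmem; and private destinations get anon_vma_prepare() so the rmap calls in the fill helpers can succeed. A condensed sketch of that validation sequence, reconstructed around the lines above:

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/* MAP_ANONYMOUS|MAP_SHARED is turned into shmem at mmap time. */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
			 dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/* Write-protect copy only makes sense on a VM_UFFD_WP range. */
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, zeropage);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;

	/* Private destinations need an anon_vma for the rmap calls. */
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;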
642 struct vm_area_struct *dst_vma; in mwriteprotect_range() local
667 dst_vma = find_dst_vma(dst_mm, start, len); in mwriteprotect_range()
672 if (!dst_vma || (dst_vma->vm_flags & VM_SHARED)) in mwriteprotect_range()
674 if (!userfaultfd_wp(dst_vma)) in mwriteprotect_range()
676 if (!vma_is_anonymous(dst_vma)) in mwriteprotect_range()
680 newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE)); in mwriteprotect_range()
682 newprot = vm_get_page_prot(dst_vma->vm_flags); in mwriteprotect_range()
684 change_protection(dst_vma, start, start + len, newprot, in mwriteprotect_range()
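mwriteprotect_range() backs UFFDIO_WRITEPROTECT and is the most restrictive path: the destination must be a private (not VM_SHARED), anonymous VMA registered in write-protect mode. The new protection is derived from the VMA flags with VM_WRITE masked out when enabling write protection, or taken unmodified when resolving it, and change_protection() does the actual PTE rewrite under the matching MM_CP_UFFD_WP flag. A condensed sketch of the core, with the enable_wp parameter name inferred from the two branches:

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/* Only private, anonymous, WP-registered vmas are supported. */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
	err = 0;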