1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  mm/userfaultfd.c
4  *
5  *  Copyright (C) 2015  Red Hat, Inc.
6  */
7 
8 #include <linux/mm.h>
9 #include <linux/sched/signal.h>
10 #include <linux/pagemap.h>
11 #include <linux/rmap.h>
12 #include <linux/swap.h>
13 #include <linux/swapops.h>
14 #include <linux/userfaultfd_k.h>
15 #include <linux/mmu_notifier.h>
16 #include <linux/hugetlb.h>
17 #include <linux/shmem_fs.h>
18 #include <asm/tlbflush.h>
19 #include "internal.h"
20 
21 static __always_inline
22 struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
23 				    unsigned long dst_start,
24 				    unsigned long len)
25 {
26 	/*
27 	 * Make sure that the dst range is both valid and fully within a
28 	 * single existing vma.
29 	 */
30 	struct vm_area_struct *dst_vma;
31 
32 	dst_vma = find_vma(dst_mm, dst_start);
33 	if (!dst_vma)
34 		return NULL;
35 
36 	if (dst_start < dst_vma->vm_start ||
37 	    dst_start + len > dst_vma->vm_end)
38 		return NULL;
39 
40 	/*
41 	 * Check that the vma is registered in uffd; this is required to
42 	 * enforce the VM_MAYWRITE check done at uffd registration
43 	 * time.
44 	 */
45 	if (!rcu_access_pointer(dst_vma->vm_userfaultfd_ctx.ctx))
46 		return NULL;
47 
48 	return dst_vma;
49 }
50 
51 /*
52  * Install PTEs to map dst_addr (within dst_vma) to page.
53  *
54  * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
55  * and anon, and for both shared and private VMAs.
56  */
57 int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
58 			     struct vm_area_struct *dst_vma,
59 			     unsigned long dst_addr, struct page *page,
60 			     bool newly_allocated, bool wp_copy)
61 {
62 	int ret;
63 	pte_t _dst_pte, *dst_pte;
64 	bool writable = dst_vma->vm_flags & VM_WRITE;
65 	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
66 	bool page_in_cache = page_mapping(page);
67 	spinlock_t *ptl;
68 	struct inode *inode;
69 	pgoff_t offset, max_off;
70 
71 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
72 	if (page_in_cache && !vm_shared)
73 		writable = false;
74 	if (writable || !page_in_cache)
75 		_dst_pte = pte_mkdirty(_dst_pte);
76 	if (writable) {
77 		if (wp_copy)
78 			_dst_pte = pte_mkuffd_wp(_dst_pte);
79 		else
80 			_dst_pte = pte_mkwrite(_dst_pte);
81 	}
82 
83 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
84 
85 	if (vma_is_shmem(dst_vma)) {
86 		/* serialize against truncate with the page table lock */
87 		inode = dst_vma->vm_file->f_inode;
88 		offset = linear_page_index(dst_vma, dst_addr);
89 		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
90 		ret = -EFAULT;
91 		if (unlikely(offset >= max_off))
92 			goto out_unlock;
93 	}
94 
95 	ret = -EEXIST;
96 	if (!pte_none(*dst_pte))
97 		goto out_unlock;
98 
99 	if (page_in_cache)
100 		page_add_file_rmap(page, false);
101 	else
102 		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
103 
104 	/*
105 	 * Must happen after rmap, as mm_counter() checks mapping (via
106 	 * PageAnon()), which is set by __page_set_anon_rmap().
107 	 */
108 	inc_mm_counter(dst_mm, mm_counter(page));
109 
110 	if (newly_allocated)
111 		lru_cache_add_inactive_or_unevictable(page, dst_vma);
112 
113 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
114 
115 	/* No need to invalidate - it was non-present before */
116 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
117 	ret = 0;
118 out_unlock:
119 	pte_unmap_unlock(dst_pte, ptl);
120 	return ret;
121 }
122 
123 static int mcopy_atomic_pte(struct mm_struct *dst_mm,
124 			    pmd_t *dst_pmd,
125 			    struct vm_area_struct *dst_vma,
126 			    unsigned long dst_addr,
127 			    unsigned long src_addr,
128 			    struct page **pagep,
129 			    bool wp_copy)
130 {
131 	void *page_kaddr;
132 	int ret;
133 	struct page *page;
134 
135 	if (!*pagep) {
136 		ret = -ENOMEM;
137 		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
138 		if (!page)
139 			goto out;
140 
141 		page_kaddr = kmap_atomic(page);
142 		ret = copy_from_user(page_kaddr,
143 				     (const void __user *) src_addr,
144 				     PAGE_SIZE);
145 		kunmap_atomic(page_kaddr);
146 
147 		/* fall back to copy_from_user outside mmap_lock */
148 		if (unlikely(ret)) {
149 			ret = -ENOENT;
150 			*pagep = page;
151 			/* don't free the page */
152 			goto out;
153 		}
154 
155 		flush_dcache_page(page);
156 	} else {
157 		page = *pagep;
158 		*pagep = NULL;
159 	}
160 
161 	/*
162 	 * The memory barrier inside __SetPageUptodate makes sure that
163 	 * preceding stores to the page contents become visible before
164 	 * the set_pte_at() write.
165 	 */
166 	__SetPageUptodate(page);
167 
168 	ret = -ENOMEM;
169 	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
170 		goto out_release;
171 
172 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
173 				       page, true, wp_copy);
174 	if (ret)
175 		goto out_release;
176 out:
177 	return ret;
178 out_release:
179 	put_page(page);
180 	goto out;
181 }
182 
183 static int mfill_zeropage_pte(struct mm_struct *dst_mm,
184 			      pmd_t *dst_pmd,
185 			      struct vm_area_struct *dst_vma,
186 			      unsigned long dst_addr)
187 {
188 	pte_t _dst_pte, *dst_pte;
189 	spinlock_t *ptl;
190 	int ret;
191 	pgoff_t offset, max_off;
192 	struct inode *inode;
193 
194 	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
195 					 dst_vma->vm_page_prot));
196 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
197 	if (dst_vma->vm_file) {
198 		/* the shmem MAP_PRIVATE case requires checking the i_size */
199 		inode = dst_vma->vm_file->f_inode;
200 		offset = linear_page_index(dst_vma, dst_addr);
201 		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
202 		ret = -EFAULT;
203 		if (unlikely(offset >= max_off))
204 			goto out_unlock;
205 	}
206 	ret = -EEXIST;
207 	if (!pte_none(*dst_pte))
208 		goto out_unlock;
209 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
210 	/* No need to invalidate - it was non-present before */
211 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
212 	ret = 0;
213 out_unlock:
214 	pte_unmap_unlock(dst_pte, ptl);
215 	return ret;
216 }
217 
218 /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
219 static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
220 				pmd_t *dst_pmd,
221 				struct vm_area_struct *dst_vma,
222 				unsigned long dst_addr,
223 				bool wp_copy)
224 {
225 	struct inode *inode = file_inode(dst_vma->vm_file);
226 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
227 	struct page *page;
228 	int ret;
229 
230 	ret = shmem_getpage(inode, pgoff, &page, SGP_NOALLOC);
231 	/* Our caller expects us to return -EFAULT if we failed to find the page. */
232 	if (ret == -ENOENT)
233 		ret = -EFAULT;
234 	if (ret)
235 		goto out;
236 	if (!page) {
237 		ret = -EFAULT;
238 		goto out;
239 	}
240 
241 	if (PageHWPoison(page)) {
242 		ret = -EIO;
243 		goto out_release;
244 	}
245 
246 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
247 				       page, false, wp_copy);
248 	if (ret)
249 		goto out_release;
250 
251 	unlock_page(page);
252 	ret = 0;
253 out:
254 	return ret;
255 out_release:
256 	unlock_page(page);
257 	put_page(page);
258 	goto out;
259 }
260 
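Editor's note: mcontinue_atomic_pte() above is the kernel half of minor-fault handling, where the page already sits in the shmem page cache and only the PTE needs installing. As a rough illustration of the userspace half, the sketch below issues the UFFDIO_CONTINUE ioctl that ends up here. The helper name and its arguments (uffd, start, len) are hypothetical, and the range is assumed to have been registered with UFFDIO_REGISTER_MODE_MINOR beforehand; a fuller setup sketch appears after the mcopy_* wrappers further down.

/*
 * Hypothetical userspace helper: resolve a minor fault by asking the
 * kernel to install the PTE for a page that is already present in the
 * shmem page cache (e.g. populated through a second mapping of the same
 * memfd).  This reaches mcopy_continue() -> mcontinue_atomic_pte().
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int resolve_minor_fault(int uffd, unsigned long start, unsigned long len)
{
	struct uffdio_continue cont = {
		.range = { .start = start, .len = len },
		.mode  = 0,	/* or UFFDIO_CONTINUE_MODE_DONTWAKE */
	};

	return ioctl(uffd, UFFDIO_CONTINUE, &cont);
}
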
261 static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
262 {
263 	pgd_t *pgd;
264 	p4d_t *p4d;
265 	pud_t *pud;
266 
267 	pgd = pgd_offset(mm, address);
268 	p4d = p4d_alloc(mm, pgd, address);
269 	if (!p4d)
270 		return NULL;
271 	pud = pud_alloc(mm, p4d, address);
272 	if (!pud)
273 		return NULL;
274 	/*
275 	 * Note that this is not run only because the pmd was
276 	 * missing; the *pmd may already be established and may
277 	 * even be a trans_huge_pmd.
278 	 */
279 	return pmd_alloc(mm, pud, address);
280 }
281 
282 #ifdef CONFIG_HUGETLB_PAGE
283 /*
284  * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
285  * called with mmap_lock held; it will release mmap_lock before returning.
286  */
287 static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
288 					      struct vm_area_struct *dst_vma,
289 					      unsigned long dst_start,
290 					      unsigned long src_start,
291 					      unsigned long len,
292 					      atomic_t *mmap_changing,
293 					      enum mcopy_atomic_mode mode)
294 {
295 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
296 	ssize_t err;
297 	pte_t *dst_pte;
298 	unsigned long src_addr, dst_addr;
299 	long copied;
300 	struct page *page;
301 	unsigned long vma_hpagesize;
302 	pgoff_t idx;
303 	u32 hash;
304 	struct address_space *mapping;
305 
306 	/*
307 	 * There is no default zero huge page for all huge page sizes as
308 	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
309 	 * by THP.  Since we cannot reliably insert a zero page, this
310 	 * feature is not supported.
311 	 */
312 	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
313 		mmap_read_unlock(dst_mm);
314 		return -EINVAL;
315 	}
316 
317 	src_addr = src_start;
318 	dst_addr = dst_start;
319 	copied = 0;
320 	page = NULL;
321 	vma_hpagesize = vma_kernel_pagesize(dst_vma);
322 
323 	/*
324 	 * Validate alignment based on huge page size
325 	 */
326 	err = -EINVAL;
327 	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
328 		goto out_unlock;
329 
330 retry:
331 	/*
332 	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
333 	 * retry, dst_vma will be set to NULL and we must look it up again.
334 	 */
335 	if (!dst_vma) {
336 		err = -ENOENT;
337 		dst_vma = find_dst_vma(dst_mm, dst_start, len);
338 		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
339 			goto out_unlock;
340 
341 		err = -EINVAL;
342 		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
343 			goto out_unlock;
344 
345 		vm_shared = dst_vma->vm_flags & VM_SHARED;
346 	}
347 
348 	/*
349 	 * If not shared, ensure the dst_vma has an anon_vma.
350 	 */
351 	err = -ENOMEM;
352 	if (!vm_shared) {
353 		if (unlikely(anon_vma_prepare(dst_vma)))
354 			goto out_unlock;
355 	}
356 
357 	while (src_addr < src_start + len) {
358 		BUG_ON(dst_addr >= dst_start + len);
359 
360 		/*
361 		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
362 		 * i_mmap_rwsem ensures the dst_pte remains valid even
363 		 * in the case of shared pmds.  The fault mutex prevents
364 		 * races with other faulting threads.
365 		 */
366 		mapping = dst_vma->vm_file->f_mapping;
367 		i_mmap_lock_read(mapping);
368 		idx = linear_page_index(dst_vma, dst_addr);
369 		hash = hugetlb_fault_mutex_hash(mapping, idx);
370 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
371 
372 		err = -ENOMEM;
373 		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
374 		if (!dst_pte) {
375 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
376 			i_mmap_unlock_read(mapping);
377 			goto out_unlock;
378 		}
379 
380 		if (mode != MCOPY_ATOMIC_CONTINUE &&
381 		    !huge_pte_none(huge_ptep_get(dst_pte))) {
382 			err = -EEXIST;
383 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
384 			i_mmap_unlock_read(mapping);
385 			goto out_unlock;
386 		}
387 
388 		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
389 					       dst_addr, src_addr, mode, &page);
390 
391 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
392 		i_mmap_unlock_read(mapping);
393 
394 		cond_resched();
395 
396 		if (unlikely(err == -ENOENT)) {
397 			mmap_read_unlock(dst_mm);
398 			BUG_ON(!page);
399 
400 			err = copy_huge_page_from_user(page,
401 						(const void __user *)src_addr,
402 						vma_hpagesize / PAGE_SIZE,
403 						true);
404 			if (unlikely(err)) {
405 				err = -EFAULT;
406 				goto out;
407 			}
408 			mmap_read_lock(dst_mm);
409 			/*
410 			 * If memory mappings are changing because a non-cooperative
411 			 * operation (e.g. mremap) is running in parallel, bail out and
412 			 * request the user to retry later.
413 			 */
414 			if (mmap_changing && atomic_read(mmap_changing)) {
415 				err = -EAGAIN;
416 				break;
417 			}
418 
419 			dst_vma = NULL;
420 			goto retry;
421 		} else
422 			BUG_ON(page);
423 
424 		if (!err) {
425 			dst_addr += vma_hpagesize;
426 			src_addr += vma_hpagesize;
427 			copied += vma_hpagesize;
428 
429 			if (fatal_signal_pending(current))
430 				err = -EINTR;
431 		}
432 		if (err)
433 			break;
434 	}
435 
436 out_unlock:
437 	mmap_read_unlock(dst_mm);
438 out:
439 	if (page)
440 		put_page(page);
441 	BUG_ON(copied < 0);
442 	BUG_ON(err > 0);
443 	BUG_ON(!copied && !err);
444 	return copied ? copied : err;
445 }
446 #else /* !CONFIG_HUGETLB_PAGE */
447 /* fail at build time if gcc attempts to use this */
448 extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
449 				      struct vm_area_struct *dst_vma,
450 				      unsigned long dst_start,
451 				      unsigned long src_start,
452 				      unsigned long len,
453 				      atomic_t *mmap_changing,
454 				      enum mcopy_atomic_mode mode);
455 #endif /* CONFIG_HUGETLB_PAGE */
456 
457 static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
458 						pmd_t *dst_pmd,
459 						struct vm_area_struct *dst_vma,
460 						unsigned long dst_addr,
461 						unsigned long src_addr,
462 						struct page **page,
463 						enum mcopy_atomic_mode mode,
464 						bool wp_copy)
465 {
466 	ssize_t err;
467 
468 	if (mode == MCOPY_ATOMIC_CONTINUE) {
469 		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
470 					    wp_copy);
471 	}
472 
473 	/*
474 	 * The normal page fault path for a shmem mapping will invoke
475 	 * the fault, fill the hole in the file and COW it right away,
476 	 * producing plain anonymous memory. So when we are
477 	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
478 	 * generate anonymous memory directly without actually filling
479 	 * the hole. For the MAP_PRIVATE case the robustness check
480 	 * only happens in the pagetable (to verify it's still none)
481 	 * and not in the radix tree.
482 	 */
483 	if (!(dst_vma->vm_flags & VM_SHARED)) {
484 		if (mode == MCOPY_ATOMIC_NORMAL)
485 			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
486 					       dst_addr, src_addr, page,
487 					       wp_copy);
488 		else
489 			err = mfill_zeropage_pte(dst_mm, dst_pmd,
490 						 dst_vma, dst_addr);
491 	} else {
492 		VM_WARN_ON_ONCE(wp_copy);
493 		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
494 					     dst_addr, src_addr,
495 					     mode != MCOPY_ATOMIC_NORMAL,
496 					     page);
497 	}
498 
499 	return err;
500 }
501 
502 static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
503 					      unsigned long dst_start,
504 					      unsigned long src_start,
505 					      unsigned long len,
506 					      enum mcopy_atomic_mode mcopy_mode,
507 					      atomic_t *mmap_changing,
508 					      __u64 mode)
509 {
510 	struct vm_area_struct *dst_vma;
511 	ssize_t err;
512 	pmd_t *dst_pmd;
513 	unsigned long src_addr, dst_addr;
514 	long copied;
515 	struct page *page;
516 	bool wp_copy;
517 
518 	/*
519 	 * Sanitize the command parameters:
520 	 */
521 	BUG_ON(dst_start & ~PAGE_MASK);
522 	BUG_ON(len & ~PAGE_MASK);
523 
524 	/* Does the address range wrap, or is the span zero-sized? */
525 	BUG_ON(src_start + len <= src_start);
526 	BUG_ON(dst_start + len <= dst_start);
527 
528 	src_addr = src_start;
529 	dst_addr = dst_start;
530 	copied = 0;
531 	page = NULL;
532 retry:
533 	err = -EAGAIN;
534 	if (mode & UFFDIO_MODE_MMAP_TRYLOCK) {
535 		if (!mmap_read_trylock(dst_mm))
536 			goto out;
537 	} else {
538 		mmap_read_lock(dst_mm);
539 	}
540 
541 	/*
542 	 * If memory mappings are changing because a non-cooperative
543 	 * operation (e.g. mremap) is running in parallel, bail out and
544 	 * request the user to retry later.
545 	 */
546 	if (mmap_changing && atomic_read(mmap_changing))
547 		goto out_unlock;
548 
549 	/*
550 	 * Make sure the vma is not shared, that the dst range is
551 	 * both valid and fully within a single existing vma.
552 	 */
553 	err = -ENOENT;
554 	dst_vma = find_dst_vma(dst_mm, dst_start, len);
555 	if (!dst_vma)
556 		goto out_unlock;
557 
558 	err = -EINVAL;
559 	/*
560 	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
561 	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
562 	 */
563 	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
564 	    dst_vma->vm_flags & VM_SHARED))
565 		goto out_unlock;
566 
567 	/*
568 	 * validate 'mode' now that we know the dst_vma: don't allow
569 	 * a wrprotect copy if the userfaultfd didn't register as WP.
570 	 */
571 	wp_copy = mode & UFFDIO_COPY_MODE_WP;
572 	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
573 		goto out_unlock;
574 
575 	/*
576 	 * If this is a HUGETLB vma, pass off to the appropriate routine.
577 	 */
578 	if (is_vm_hugetlb_page(dst_vma))
579 		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
580 					       src_start, len, mmap_changing,
581 					       mcopy_mode);
582 
583 	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
584 		goto out_unlock;
585 	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
586 		goto out_unlock;
587 
588 	/*
589 	 * Ensure the dst_vma has an anon_vma or this page
590 	 * would get a NULL anon_vma when moved in the
591 	 * dst_vma.
592 	 */
593 	err = -ENOMEM;
594 	if (!(dst_vma->vm_flags & VM_SHARED) &&
595 	    unlikely(anon_vma_prepare(dst_vma)))
596 		goto out_unlock;
597 
598 	while (src_addr < src_start + len) {
599 		pmd_t dst_pmdval;
600 
601 		BUG_ON(dst_addr >= dst_start + len);
602 
603 		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
604 		if (unlikely(!dst_pmd)) {
605 			err = -ENOMEM;
606 			break;
607 		}
608 
609 		dst_pmdval = pmd_read_atomic(dst_pmd);
610 		/*
611 		 * If the dst_pmd is mapped as THP, don't
612 		 * override it and just be strict.
613 		 */
614 		if (unlikely(pmd_trans_huge(dst_pmdval))) {
615 			err = -EEXIST;
616 			break;
617 		}
618 		if (unlikely(pmd_none(dst_pmdval)) &&
619 		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
620 			err = -ENOMEM;
621 			break;
622 		}
623 		/* If a huge pmd materialized from under us, fail */
624 		if (unlikely(pmd_trans_huge(*dst_pmd))) {
625 			err = -EFAULT;
626 			break;
627 		}
628 
629 		BUG_ON(pmd_none(*dst_pmd));
630 		BUG_ON(pmd_trans_huge(*dst_pmd));
631 
632 		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
633 				       src_addr, &page, mcopy_mode, wp_copy);
634 		cond_resched();
635 
636 		if (unlikely(err == -ENOENT)) {
637 			void *page_kaddr;
638 
639 			/*
640 			 * Return early due to mmap_lock contention only after
641 			 * some pages are copied to ensure that jank-sensitive
642 			 * threads don't keep retrying for progress-critical
643 			 * pages.
644 			 */
645 			if (copied && mmap_lock_is_contended(dst_mm))
646 				break;
647 
648 			mmap_read_unlock(dst_mm);
649 			BUG_ON(!page);
650 
651 			page_kaddr = kmap(page);
652 			err = copy_from_user(page_kaddr,
653 					     (const void __user *) src_addr,
654 					     PAGE_SIZE);
655 			kunmap(page);
656 			if (unlikely(err)) {
657 				err = -EFAULT;
658 				goto out;
659 			}
660 			flush_dcache_page(page);
661 			goto retry;
662 		} else
663 			BUG_ON(page);
664 
665 		if (!err) {
666 			dst_addr += PAGE_SIZE;
667 			src_addr += PAGE_SIZE;
668 			copied += PAGE_SIZE;
669 
670 			if (fatal_signal_pending(current))
671 				err = -EINTR;
672 
673 			if (mmap_lock_is_contended(dst_mm))
674 				err = -EAGAIN;
675 		}
676 		if (err)
677 			break;
678 	}
679 
680 out_unlock:
681 	mmap_read_unlock(dst_mm);
682 out:
683 	if (page)
684 		put_page(page);
685 	BUG_ON(copied < 0);
686 	BUG_ON(err > 0);
687 	BUG_ON(!copied && !err);
688 	return copied ? copied : err;
689 }
690 
691 ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
692 		     unsigned long src_start, unsigned long len,
693 		     atomic_t *mmap_changing, __u64 mode)
694 {
695 	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
696 			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
697 }
698 
699 ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
700 		       unsigned long len, atomic_t *mmap_changing, __u64 mode)
701 {
702 	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
703 			      mmap_changing, mode);
704 }
705 
706 ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
707 		       unsigned long len, atomic_t *mmap_changing)
708 {
709 	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
710 			      mmap_changing, 0);
711 }
712 
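Editor's note: the three wrappers above are what the UFFDIO_COPY, UFFDIO_ZEROPAGE and UFFDIO_CONTINUE ioctls in fs/userfaultfd.c ultimately call into. A minimal userspace sketch of the UFFDIO_COPY path follows; error handling is omitted, the names (area, src, uffd) are made up for illustration, and a real handler would resolve faults from a separate thread reading uffd_msg events rather than pre-filling the page as done here.

/*
 * Minimal sketch of driving mcopy_atomic() from userspace via UFFDIO_COPY.
 * All names are illustrative; error checking is omitted for brevity.
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	struct uffdio_api api = { .api = UFFD_API };
	ioctl(uffd, UFFDIO_API, &api);

	/* Region whose missing faults userfaultfd will handle. */
	char *area = mmap(NULL, page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = page },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* Source buffer whose contents will back the registered page. */
	char *src = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(src, 0x5a, page);

	/*
	 * Atomically fill the (still unmapped) destination page.  This is
	 * the call that lands in mcopy_atomic() above; with
	 * UFFDIO_COPY_MODE_WP it would take the wp_copy path instead.
	 */
	struct uffdio_copy copy = {
		.dst  = (unsigned long)area,
		.src  = (unsigned long)src,
		.len  = page,
		.mode = 0,
	};
	ioctl(uffd, UFFDIO_COPY, &copy);

	return 0;
}
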
713 int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
714 			unsigned long len, bool enable_wp,
715 			atomic_t *mmap_changing)
716 {
717 	struct vm_area_struct *dst_vma;
718 	pgprot_t newprot;
719 	int err;
720 
721 	/*
722 	 * Sanitize the command parameters:
723 	 */
724 	BUG_ON(start & ~PAGE_MASK);
725 	BUG_ON(len & ~PAGE_MASK);
726 
727 	/* Does the address range wrap, or is the span zero-sized? */
728 	BUG_ON(start + len <= start);
729 
730 	mmap_read_lock(dst_mm);
731 
732 	/*
733 	 * If memory mappings are changing because a non-cooperative
734 	 * operation (e.g. mremap) is running in parallel, bail out and
735 	 * request the user to retry later.
736 	 */
737 	err = -EAGAIN;
738 	if (mmap_changing && atomic_read(mmap_changing))
739 		goto out_unlock;
740 
741 	err = -ENOENT;
742 	dst_vma = find_dst_vma(dst_mm, start, len);
743 	/*
744 	 * Make sure the vma is not shared, that the dst range is
745 	 * both valid and fully within a single existing vma.
746 	 */
747 	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
748 		goto out_unlock;
749 	if (!userfaultfd_wp(dst_vma))
750 		goto out_unlock;
751 	if (!vma_is_anonymous(dst_vma))
752 		goto out_unlock;
753 
754 	if (enable_wp)
755 		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
756 	else
757 		newprot = vm_get_page_prot(dst_vma->vm_flags);
758 
759 	change_protection(dst_vma, start, start + len, newprot,
760 			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
761 
762 	err = 0;
763 out_unlock:
764 	mmap_read_unlock(dst_mm);
765 	return err;
766 }
767
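Editor's note: mwriteprotect_range() above implements the UFFDIO_WRITEPROTECT ioctl. The short, hypothetical userspace helper below shows both directions of the toggle; it assumes the range was registered with UFFDIO_REGISTER_MODE_WP on a kernel advertising write-protect support, with the uffd descriptor coming from a setup like the UFFDIO_COPY sketch above.

/*
 * Hypothetical userspace counterpart of mwriteprotect_range(): set or
 * clear the uffd-wp protection on an already registered range.  The
 * 'enable' argument mirrors enable_wp in the kernel function.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int set_uffd_wp(int uffd, unsigned long start, unsigned long len,
		       int enable)
{
	struct uffdio_writeprotect wp = {
		.range = { .start = start, .len = len },
		.mode  = enable ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}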