// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

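/*
 * Allocate a new anonymous page, copy PAGE_SIZE bytes into it from the
 * userspace address src_addr and map it at dst_addr.  If the copy faults
 * while mmap_sem is held, return -ENOENT with the allocated page in
 * *pagep so the caller can redo the copy outside the lock and call again.
 */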
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;
	pgoff_t offset, max_off;
	struct inode *inode;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}

		flush_dcache_page(page);
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
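	/* Make the pte writable and dirty only if the vma permits writes. */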
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_release_uncharge_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}

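/*
 * Resolve a zeropage request on anonymous memory by installing a special
 * pte at dst_addr that points at the shared zero page.
 */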
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

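/*
 * Walk the destination page tables down to the pmd level for @address,
 * allocating any missing intermediate levels.  Returns NULL if a level
 * could not be allocated.
 */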
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we didn't run this because the pmd was missing;
	 * the *pmd may already be established and in turn it may also
	 * be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_sem held; it will release mmap_sem before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool *mmap_changing,
					      bool zeropage)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	struct hstate *h;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (zeropage) {
		up_read(&dst_mm->mmap_sem);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_vma(dst_mm, dst_start);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;
		/*
		 * Check that the vma is registered in uffd; this is
		 * required to enforce the VM_MAYWRITE check done at
		 * uffd registration time.
		 */
		if (!dst_vma->vm_userfaultfd_ctx.ctx)
			goto out_unlock;

		if (dst_start < dst_vma->vm_start ||
		    dst_start + len > dst_vma->vm_end)
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
		    (len - copied) & (vma_hpagesize - 1)))
		goto out_unlock;

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	h = hstate_vma(dst_vma);

	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);
		VM_BUG_ON(dst_addr & ~huge_page_mask(h));

		/*
		 * Serialize via hugetlb_fault_mutex
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(h, mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
						dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		vm_alloc_shared = vm_shared;

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						pages_per_huge_page(h), true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			down_read(&dst_mm->mmap_sem);
			/*
			 * If memory mappings are changing because of a
			 * non-cooperative operation (e.g. mremap) running
			 * in parallel, bail out and ask the user to retry
			 * later.
			 */
			if (mmap_changing && READ_ONCE(*mmap_changing)) {
				err = -EAGAIN;
				break;
			}

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings.  See the routine
		 * restore_reserve_on_error for details.  Unfortunately, we
		 * cannot call restore_reserve_on_error now as it would
		 * require holding mmap_sem.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation.  If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations.  When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map.  In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed.  This is the desired
		 * behavior.  However, when a huge page is allocated for an
		 * address without a reservation a reservation entry is added
		 * to the reservation map, and PagePrivate will not be set.
		 * When the page is freed, the global reserve count will NOT
		 * be incremented and it will appear as though we have leaked
		 * a reserved page.  In this case, set PagePrivate so that
		 * the global reserve count will be incremented to match the
		 * reservation map entry which was created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated.  dst_vma could
		 * be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      bool *mmap_changing,
				      bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */

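/*
 * Fill a single pte, dispatching to the anonymous or shmem implementation
 * depending on whether the destination vma is private or shared, and to
 * the copy or zeropage variant depending on @zeropage.
 */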
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage)
{
	ssize_t err;

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}

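/*
 * Common implementation behind mcopy_atomic() and mfill_zeropage():
 * validate the destination range, then fill it one page at a time.
 * Returns the number of bytes filled, or a negative error if nothing
 * was copied.
 */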
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage,
					      bool *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and ask
	 * the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared and that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		goto out_unlock;
	/*
	 * Check that the vma is registered in uffd; this is required
	 * to enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, mmap_changing,
					      zeropage);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved into the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as a THP, don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, zeropage);
		cond_resched();

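		/*
		 * -ENOENT means the copy from userspace faulted while
		 * mmap_sem was held.  Drop mmap_sem, do the copy without
		 * the lock and retry with the pre-filled page.
		 */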
		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_page(page);
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

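/*
 * Atomically copy @len bytes from the caller's address @src_start into the
 * userfaultfd-registered range starting at @dst_start of @dst_mm.
 */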
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
			      mmap_changing);
}

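/*
 * Fill the range [start, start + len) of @dst_mm with zeroed memory; the
 * zeropage counterpart of mcopy_atomic().
 */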
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
}
630