// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set the soft-dirty bit so userspace can notice
	 * that the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
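
/*
 * Illustrative note (not part of the original file): the soft-dirty state
 * preserved above is what userspace observes through the pagemap interface,
 * per Documentation/admin-guide/mm/soft-dirty.rst. A rough sketch of the
 * round trip:
 *
 *	echo 4 > /proc/<pid>/clear_refs		// clear soft-dirty bits
 *	// ... the task mremap()s a range ...
 *	// read /proc/<pid>/pagemap: moved ptes report bit 55 (soft-dirty) set
 */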

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
{
	/*
	 * If we have the only reference, swap the refcount to -1. This
	 * will prevent other concurrent references by get_vma() for SPFs.
	 */
	return atomic_cmpxchg_acquire(&vma->vm_ref_count, 1, -1) == 1;
}

/*
 * Restore the VMA reference count to 1 after a fast mremap.
 */
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
	int old = atomic_xchg_release(&vma->vm_ref_count, 1);

	/*
	 * This should only be called after a corresponding,
	 * successful trylock_vma_ref_count().
	 */
	VM_BUG_ON_VMA(old != -1, vma);
}
#else	/* !CONFIG_SPECULATIVE_PAGE_FAULT */
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
{
	return true;
}
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
}
#endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We hold both the exclusive mmap_lock and the rmap_lock at this
	 * point and cannot block. If we cannot immediately take exclusive
	 * ownership of the VMA, fall back to move_ptes().
	 */
	if (!trylock_vma_ref_count(vma))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	unlock_vma_ref_count(vma);
	return true;
}
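
/*
 * Illustrative sketch (not part of the original file) of the execve()
 * overlap case the WARN_ON_ONCE() above guards against, assuming
 * PMD_SIZE == 2MB and made-up addresses: shift_arg_pages() moves a stack
 * starting at 0x7f0000210000 down by exactly 2MB. Pages between
 * 0x7f0000210000 and 0x7f0000400000 are first moved pte-by-pte. When
 * old_addr reaches the PMD-aligned 0x7f0000400000, the destination PMD
 * (covering 0x7f0000200000) is the source's original first PMD: its page
 * table is now empty, but the PMD entry still points to it, so
 * !pmd_none(*new_pmd) holds and this fast path declines.
 */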
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

#ifdef CONFIG_HAVE_MOVE_PUD
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We hold both the exclusive mmap_lock and the rmap_lock at this
	 * point and cannot block. If we cannot immediately take exclusive
	 * ownership of the VMA, fall back to move_ptes().
	 */
	if (!trylock_vma_ref_count(vma))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	unlock_vma_ref_count(vma);
	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
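
/*
 * Worked example (illustrative, not part of the original file), assuming
 * NORMAL_PMD and PMD_SIZE == 0x200000 (2MB):
 *
 *	old_addr = 0x13ff000, old_end = 0x1800000, new_addr = 0x2600000
 *	next   = (0x13ff000 + 0x200000) & PMD_MASK = 0x1400000
 *	extent = 0x1400000 - 0x13ff000            = 0x1000
 *
 * Neither clamp shrinks it further (0x401000 remains to old_end, and
 * 0x200000 remains to the destination boundary), so one 4kB step brings
 * old_addr to a PMD boundary; later iterations can then return full
 * PMD_SIZE extents.
 */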

/*
 * Attempts to speed up the move by moving the entry at the level
 * corresponding to pgt_entry. Returns true if the move was successful,
 * else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	if (!len)
		return 0;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If the extent is PUD-sized, try to speed up the move by
		 * moving at the PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			pud_t *old_pud, *new_pud;

			old_pud = get_old_pud(vma->vm_mm, old_addr);
			if (!old_pud)
				continue;
			new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
			if (!new_pud)
				break;
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed up the move
			 * by moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}
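
/*
 * Return-value note (illustrative, not part of the original file): if the
 * loop above bails out early (e.g. an allocation fails) with old_addr
 * still short of old_end, then
 *
 *	len + old_addr - old_end == len - (old_end - old_addr)
 *
 * i.e. the number of bytes actually moved; move_vma() compares this
 * against old_len to detect a partial move and roll back.
 */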

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(),
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	/*
	 * new_vma is returned protected by copy_vma, to prevent speculative
	 * page faults from being handled in the destination area before we
	 * move the ptes. Now we must also protect the source VMA, since we
	 * don't want pages to be mapped behind our back while we are copying
	 * the PTEs.
	 */
	if (vma != new_vma)
		vm_write_begin(vma);

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since the page tables are still there,
		 * and then proceed to unmap the new area instead of the old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		if (vma != new_vma)
			vm_write_end(vma);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
		if (vma != new_vma)
			vm_write_end(vma);
	}
	vm_write_end(new_vma);

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the pfn tracking code that pages have moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		if (vm_flags & VM_ACCOUNT) {
			/* Always put back VM_ACCOUNT since we won't unmap */
			vma->vm_flags |= VM_ACCOUNT;

			vm_acct_memory(new_len >> PAGE_SHIFT);
		}

		/*
		 * VMAs can actually be merged back together in copy_vma
		 * calling vma_merge. This can happen with anonymous vmas
		 * which have not yet been faulted, so if we were to consider
		 * this VMA split we'll end up adding VM_ACCOUNT on the
		 * next VMA, which is completely unrelated if this VMA
		 * was re-merged.
		 */
		if (split && new_vma == vma)
			split = 0;

		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/* Because we won't unmap we don't need to touch locked_vm */
		goto out;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}
out:
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}
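
/*
 * Accounting sketch (illustrative, not part of the original file): if the
 * source vma spans [0x1000, 0x9000) and we moved old_len = 0x2000 starting
 * at old_addr = 0x4000, then excess = 0x8000 - 0x2000 = 0x6000 and
 * split = 1, since do_munmap() leaves a piece on each side of the hole;
 * both remaining pieces get VM_ACCOUNT restored above.
 */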

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;

		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;

		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
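
/*
 * Overflow-check note (illustrative, not part of the original file): the
 * "pgoff + (new_len >> PAGE_SHIFT) < pgoff" test above relies on unsigned
 * wraparound. E.g. with 4kB pages, pgoff = 0xfffffffffffffff0 plus a
 * new_len of 0x20 pages wraps past zero, making the sum compare smaller
 * than pgoff, so the growth is rejected with -EINVAL.
 */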

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute if the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst case is when both vmas (at new_addr and old_addr) get
	 * split in 3 before unmapping.
	 * That means 2 more maps (1 for each) on top of the ones we already
	 * hold. Check whether the current map count plus 2 still leaves us
	 * 4 maps below the threshold, otherwise return -ENOMEM here to be
	 * safer.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages.
	 * __do_munmap() does all the needed commit accounting, and
	 * downgrades the mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				  &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len extends exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;

		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
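
/*
 * Userspace usage sketch (illustrative, not part of the original file):
 * growing an anonymous mapping, letting the kernel relocate it if it
 * cannot be expanded in place. This exercises the expand/move paths above.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 */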