// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"

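/*
 * Walk the existing page tables and return a pointer to the PUD entry
 * covering @addr, or NULL if any level is missing. This is used on the
 * source side of a move, so nothing is allocated here; get_old_pmd()
 * below does the same one level further down.
 */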
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

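/*
 * Allocate any missing page table levels needed to hold a PUD (and, in
 * alloc_new_pmd(), a PMD) entry for @addr on the destination side of the
 * move. Returns NULL if an allocation fails.
 */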
static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

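/*
 * Take/drop the rmap locks covering this VMA: the file's i_mmap_rwsem for
 * file-backed mappings and the anon_vma lock for anonymous ones. See the
 * comment in move_ptes() for when callers need to hold them.
 */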
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

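/*
 * Move the PTEs covering [old_addr, old_end) under old_pmd over to the
 * corresponding slots under new_pmd, preserving soft-dirty state and
 * flushing the TLB for any present entries before the page table locks
 * are dropped.
 */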
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

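/*
 * Architectures may provide their own arch_supports_page_table_move();
 * this generic fallback only allows whole-table moves when the arch has
 * opted in via HAVE_MOVE_PMD/HAVE_MOVE_PUD.
 */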
#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
{
	/*
	 * If we have the only reference, swap the refcount to -1. This
	 * will prevent other concurrent references by get_vma() for SPFs.
	 */
	return atomic_cmpxchg_acquire(&vma->file_ref_count, 0, -1) == 0;
}

/*
 * Restore the VMA reference count after a fast mremap.
 */
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
	int old = atomic_xchg_release(&vma->file_ref_count, 0);

	/*
	 * This should only be called after a corresponding,
	 * successful trylock_vma_ref_count().
	 */
	VM_BUG_ON_VMA(old != -1, vma);
}
#else /* !CONFIG_SPECULATIVE_PAGE_FAULT */
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
{
	return true;
}
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
}
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We hold both exclusive mmap_lock and rmap_lock at this point and
	 * cannot block. If we cannot immediately take exclusive ownership
	 * of the VMA, fall back to move_ptes().
	 */
	if (!trylock_vma_ref_count(vma))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	unlock_vma_ref_count(vma);
	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We hold both exclusive mmap_lock and rmap_lock at this point and
	 * cannot block. If we cannot immediately take exclusive ownership
	 * of the VMA, fall back to move_ptes().
	 */
	if (!trylock_vma_ref_count(vma))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	unlock_vma_ref_count(vma);
	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

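/*
 * Move a PUD-sized transparent huge page mapping by transferring the PUD
 * entry itself instead of splitting it and moving the pages one by one.
 */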
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

/*
 * Attempts to speed up the move by moving the entry at the level
 * corresponding to pgt_entry. Returns true if the move was successful,
 * else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

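/*
 * Move the page tables for [old_addr, old_addr + len) in vma over to
 * new_addr in new_vma, using the largest extents possible (PUD, then PMD,
 * then individual PTEs). Returns how many bytes were moved, which may be
 * less than len if a destination page table could not be allocated.
 */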
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;

	if (!len)
		return 0;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving
		 * at the PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);

		old_pud = get_old_pud(vma->vm_mm, old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
					       old_pud, new_pud, need_rmap_locks);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {

			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}

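/*
 * Move an existing mapping: set up new_vma at new_addr, move the page
 * tables across, fix up memory accounting and locked_vm, and (unless
 * MREMAP_DONTUNMAP was requested) unmap the old range. Returns the new
 * address on success or a negative error code.
 */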
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err = 0;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
		if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
			return -ENOMEM;
	}

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
			vm_unacct_memory(new_len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the pfn tracking code that the pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

#ifndef CONFIG_SPECULATIVE_PAGE_FAULT
		/*
		 * anon_vma links of the old vma are no longer needed after its
		 * page table has been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
		    vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);
#endif

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(old_len >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}

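/*
 * Look up and sanity-check the VMA at @addr for resizing old_len to
 * new_len bytes: reject hugetlb mappings and ranges crossing VMA
 * boundaries, enforce mlock and address-space limits, and charge the
 * extra pages (returned in *p) for VM_ACCOUNT mappings. Returns the VMA
 * or an ERR_PTR().
 */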
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long pgoff;

	vma = vma_lookup(mm, addr);
	if (!vma)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
	    (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;

		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
			   (new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;

		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

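/*
 * Handle the MREMAP_FIXED/MREMAP_DONTUNMAP paths of mremap(): validate
 * the destination range, unmap whatever needs to go, and move the
 * mapping to new_addr via move_vma().
 */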
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute if the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst-case scenario is when both vmas (new_addr and old_addr)
	 * get split in 3 before unmapping.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether current map count plus 2 still leads us to 4 maps below
	 * the threshold, otherwise return -ENOMEM here to be more safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
	    !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

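/*
 * Can this VMA grow in place by @delta bytes, i.e. without overflowing,
 * running into the next VMA, or violating the constraints checked by
 * get_unmapped_area()?
 */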
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
	    (!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}