1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/mm/madvise.c
4 *
5 * Copyright (C) 1999 Linus Torvalds
6 * Copyright (C) 2002 Christoph Hellwig
7 */
8
9 #include <linux/mman.h>
10 #include <linux/pagemap.h>
11 #include <linux/syscalls.h>
12 #include <linux/mempolicy.h>
13 #include <linux/page-isolation.h>
14 #include <linux/page_idle.h>
15 #include <linux/userfaultfd_k.h>
16 #include <linux/hugetlb.h>
17 #include <linux/falloc.h>
18 #include <linux/fadvise.h>
19 #include <linux/sched.h>
20 #include <linux/sched/mm.h>
21 #include <linux/mm_inline.h>
22 #include <linux/string.h>
23 #include <linux/uio.h>
24 #include <linux/ksm.h>
25 #include <linux/fs.h>
26 #include <linux/file.h>
27 #include <linux/blkdev.h>
28 #include <linux/backing-dev.h>
29 #include <linux/pagewalk.h>
30 #include <linux/swap.h>
31 #include <linux/swapops.h>
32 #include <linux/shmem_fs.h>
33 #include <linux/mmu_notifier.h>
34 #include <trace/hooks/mm.h>
35
36 #include <asm/tlb.h>
37
38 #include "internal.h"
39
40 struct madvise_walk_private {
41 struct mmu_gather *tlb;
42 bool pageout;
43 bool can_pageout_file;
44 };
45
46 /*
47 * Any behaviour which results in changes to the vma->vm_flags needs to
48 * take mmap_lock for writing. Others, which simply traverse vmas, need
49 * to only take it for reading.
50 */
51 static int madvise_need_mmap_write(int behavior)
52 {
53 switch (behavior) {
54 case MADV_REMOVE:
55 case MADV_WILLNEED:
56 case MADV_DONTNEED:
57 case MADV_COLD:
58 case MADV_PAGEOUT:
59 case MADV_FREE:
60 case MADV_POPULATE_READ:
61 case MADV_POPULATE_WRITE:
62 return 0;
63 default:
64 /* be safe, default to 1. list exceptions explicitly */
65 return 1;
66 }
67 }
68
69 #ifdef CONFIG_ANON_VMA_NAME
70 struct anon_vma_name *anon_vma_name_alloc(const char *name)
71 {
72 struct anon_vma_name *anon_name;
73 size_t count;
74
75 /* Add 1 for NUL terminator at the end of the anon_name->name */
76 count = strlen(name) + 1;
77 anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
78 if (anon_name) {
79 kref_init(&anon_name->kref);
80 memcpy(anon_name->name, name, count);
81 }
82
83 return anon_name;
84 }
85
86 void anon_vma_name_free(struct kref *kref)
87 {
88 struct anon_vma_name *anon_name =
89 container_of(kref, struct anon_vma_name, kref);
90 kfree(anon_name);
91 }
92
93 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
94 {
95 mmap_assert_locked(vma->vm_mm);
96
97 if (vma->vm_file)
98 return NULL;
99
100 return vma->anon_name;
101 }
102
103 /* mmap_lock should be write-locked */
104 static int replace_anon_vma_name(struct vm_area_struct *vma,
105 struct anon_vma_name *anon_name)
106 {
107 struct anon_vma_name *orig_name = anon_vma_name(vma);
108
109 if (!anon_name) {
110 vma->anon_name = NULL;
111 anon_vma_name_put(orig_name);
112 return 0;
113 }
114
115 if (anon_vma_name_eq(orig_name, anon_name))
116 return 0;
117
118 vma->anon_name = anon_vma_name_reuse(anon_name);
119 anon_vma_name_put(orig_name);
120
121 return 0;
122 }
123 #else /* CONFIG_ANON_VMA_NAME */
124 static int replace_anon_vma_name(struct vm_area_struct *vma,
125 struct anon_vma_name *anon_name)
126 {
127 if (anon_name)
128 return -EINVAL;
129
130 return 0;
131 }
132 #endif /* CONFIG_ANON_VMA_NAME */
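/*
 * Illustrative sketch, not part of this file: the anon_vma_name helpers above
 * back the userspace naming interface. Assuming the usual
 * prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) wiring, a caller would name an
 * anonymous mapping roughly like this:
 *
 *	char *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *	      (unsigned long)buf, 1 << 20, (unsigned long)"my-heap");
 *
 * The name then shows up in /proc/<pid>/maps as "[anon:my-heap]".
 */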
133 /*
134 * Update the vm_flags on a region of a vma, splitting it or merging it as
135 * necessary. Must be called with mmap_lock held for writing.
136 * Caller should ensure anon_name stability by raising its refcount even when
137 * anon_name belongs to a valid vma because this function might free that vma.
138 */
139 static int madvise_update_vma(struct vm_area_struct *vma,
140 struct vm_area_struct **prev, unsigned long start,
141 unsigned long end, unsigned long new_flags,
142 struct anon_vma_name *anon_name)
143 {
144 struct mm_struct *mm = vma->vm_mm;
145 int error;
146 pgoff_t pgoff;
147
148 if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
149 *prev = vma;
150 return 0;
151 }
152
153 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
154 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
155 vma->vm_file, pgoff, vma_policy(vma),
156 vma->vm_userfaultfd_ctx, anon_name);
157 if (*prev) {
158 vma = *prev;
159 goto success;
160 }
161
162 *prev = vma;
163
164 if (start != vma->vm_start) {
165 if (unlikely(mm->map_count >= sysctl_max_map_count))
166 return -ENOMEM;
167 error = __split_vma(mm, vma, start, 1);
168 if (error)
169 return error;
170 }
171
172 if (end != vma->vm_end) {
173 if (unlikely(mm->map_count >= sysctl_max_map_count))
174 return -ENOMEM;
175 error = __split_vma(mm, vma, end, 0);
176 if (error)
177 return error;
178 }
179
180 success:
181 /*
182 * vm_flags is protected by the mmap_lock held in write mode.
183 */
184 vma->vm_flags = new_flags;
185 if (!vma->vm_file) {
186 error = replace_anon_vma_name(vma, anon_name);
187 if (error)
188 return error;
189 }
190
191 return 0;
192 }
193
194 #ifdef CONFIG_SWAP
195 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
196 unsigned long end, struct mm_walk *walk)
197 {
198 pte_t *orig_pte;
199 struct vm_area_struct *vma = walk->private;
200 unsigned long index;
201
202 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
203 return 0;
204
205 for (index = start; index != end; index += PAGE_SIZE) {
206 pte_t pte;
207 swp_entry_t entry;
208 struct page *page;
209 spinlock_t *ptl;
210
211 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
212 pte = *(orig_pte + ((index - start) / PAGE_SIZE));
213 pte_unmap_unlock(orig_pte, ptl);
214
215 if (pte_present(pte) || pte_none(pte))
216 continue;
217 entry = pte_to_swp_entry(pte);
218 if (unlikely(non_swap_entry(entry)))
219 continue;
220
221 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
222 vma, index, false);
223 if (page)
224 put_page(page);
225 }
226
227 return 0;
228 }
229
230 static const struct mm_walk_ops swapin_walk_ops = {
231 .pmd_entry = swapin_walk_pmd_entry,
232 };
233
234 static void force_shm_swapin_readahead(struct vm_area_struct *vma,
235 unsigned long start, unsigned long end,
236 struct address_space *mapping)
237 {
238 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
239 pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
240 struct page *page;
241
242 rcu_read_lock();
243 xas_for_each(&xas, page, end_index) {
244 swp_entry_t swap;
245
246 if (!xa_is_value(page))
247 continue;
248 xas_pause(&xas);
249 rcu_read_unlock();
250
251 swap = radix_to_swp_entry(page);
252 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
253 NULL, 0, false);
254 if (page)
255 put_page(page);
256
257 rcu_read_lock();
258 }
259 rcu_read_unlock();
260
261 lru_add_drain(); /* Push any new pages onto the LRU now */
262 }
263 #endif /* CONFIG_SWAP */
264
265 /*
266 * Schedule all required I/O operations. Do not wait for completion.
267 */
268 static long madvise_willneed(struct vm_area_struct *vma,
269 struct vm_area_struct **prev,
270 unsigned long start, unsigned long end)
271 {
272 struct mm_struct *mm = vma->vm_mm;
273 struct file *file = vma->vm_file;
274 loff_t offset;
275
276 *prev = vma;
277 #ifdef CONFIG_SWAP
278 if (!file) {
279 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
280 lru_add_drain(); /* Push any new pages onto the LRU now */
281 return 0;
282 }
283
284 if (shmem_mapping(file->f_mapping)) {
285 force_shm_swapin_readahead(vma, start, end,
286 file->f_mapping);
287 return 0;
288 }
289 #else
290 if (!file)
291 return -EBADF;
292 #endif
293
294 if (IS_DAX(file_inode(file))) {
295 /* no bad return value, but ignore advice */
296 return 0;
297 }
298
299 /*
300 * Filesystem's fadvise may need to take various locks. We need to
301 * explicitly grab a reference because the vma (and hence the
302 * vma's reference to the file) can go away as soon as we drop
303 * mmap_lock.
304 */
305 *prev = NULL; /* tell sys_madvise we drop mmap_lock */
306 get_file(file);
307 offset = (loff_t)(start - vma->vm_start)
308 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
309 mmap_read_unlock(mm);
310 vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
311 fput(file);
312 mmap_read_lock(mm);
313 return 0;
314 }
315
316 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
317 unsigned long addr, unsigned long end,
318 struct mm_walk *walk)
319 {
320 struct madvise_walk_private *private = walk->private;
321 struct mmu_gather *tlb = private->tlb;
322 bool pageout = private->pageout;
323 bool pageout_anon_only = pageout && !private->can_pageout_file;
324 struct mm_struct *mm = tlb->mm;
325 struct vm_area_struct *vma = walk->vma;
326 pte_t *orig_pte, *pte, ptent;
327 spinlock_t *ptl;
328 struct page *page = NULL;
329 LIST_HEAD(page_list);
330 bool allow_shared = false;
331 bool abort_madvise = false;
332
333 trace_android_vh_madvise_cold_or_pageout_abort(vma, &abort_madvise);
334 if (fatal_signal_pending(current) || abort_madvise)
335 return -EINTR;
336
337 trace_android_vh_madvise_cold_or_pageout(vma, &allow_shared);
338 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
339 if (pmd_trans_huge(*pmd)) {
340 pmd_t orig_pmd;
341 unsigned long next = pmd_addr_end(addr, end);
342
343 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
344 ptl = pmd_trans_huge_lock(pmd, vma);
345 if (!ptl)
346 return 0;
347
348 orig_pmd = *pmd;
349 if (is_huge_zero_pmd(orig_pmd))
350 goto huge_unlock;
351
352 if (unlikely(!pmd_present(orig_pmd))) {
353 VM_BUG_ON(thp_migration_supported() &&
354 !is_pmd_migration_entry(orig_pmd));
355 goto huge_unlock;
356 }
357
358 page = pmd_page(orig_pmd);
359
360 /* Do not interfere with other mappings of this page */
361 if (page_mapcount(page) != 1)
362 goto huge_unlock;
363
364 if (pageout_anon_only && !PageAnon(page))
365 goto huge_unlock;
366
367 if (next - addr != HPAGE_PMD_SIZE) {
368 int err;
369
370 get_page(page);
371 spin_unlock(ptl);
372 lock_page(page);
373 err = split_huge_page(page);
374 unlock_page(page);
375 put_page(page);
376 if (!err)
377 goto regular_page;
378 return 0;
379 }
380
381 if (pmd_young(orig_pmd)) {
382 pmdp_invalidate(vma, addr, pmd);
383 orig_pmd = pmd_mkold(orig_pmd);
384
385 set_pmd_at(mm, addr, pmd, orig_pmd);
386 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
387 }
388
389 ClearPageReferenced(page);
390 test_and_clear_page_young(page);
391 if (pageout) {
392 if (!isolate_lru_page(page)) {
393 if (PageUnevictable(page))
394 putback_lru_page(page);
395 else
396 list_add(&page->lru, &page_list);
397 }
398 } else
399 deactivate_page(page);
400 huge_unlock:
401 spin_unlock(ptl);
402 if (pageout)
403 reclaim_pages(&page_list);
404 return 0;
405 }
406
407 regular_page:
408 if (pmd_trans_unstable(pmd))
409 return 0;
410 #endif
411 tlb_change_page_size(tlb, PAGE_SIZE);
412 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
413 flush_tlb_batched_pending(mm);
414 arch_enter_lazy_mmu_mode();
415 for (; addr < end; pte++, addr += PAGE_SIZE) {
416 ptent = *pte;
417
418 if (pte_none(ptent))
419 continue;
420
421 if (!pte_present(ptent))
422 continue;
423
424 page = vm_normal_page(vma, addr, ptent);
425 if (!page)
426 continue;
427
428 /*
429 * Creating a THP page is expensive, so split it only if we
430 * are sure it's worth it. Split it if we are the only owner.
431 */
432 if (PageTransCompound(page)) {
433 if (page_mapcount(page) != 1)
434 break;
435 if (pageout_anon_only && !PageAnon(page))
436 break;
437 get_page(page);
438 if (!trylock_page(page)) {
439 put_page(page);
440 break;
441 }
442 pte_unmap_unlock(orig_pte, ptl);
443 if (split_huge_page(page)) {
444 unlock_page(page);
445 put_page(page);
446 pte_offset_map_lock(mm, pmd, addr, &ptl);
447 break;
448 }
449 unlock_page(page);
450 put_page(page);
451 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
452 pte--;
453 addr -= PAGE_SIZE;
454 continue;
455 }
456
457 /*
458 * Do not interfere with other mappings of this page, and
459 * skip non-LRU pages.
460 */
461 if (!allow_shared && (!PageLRU(page) || page_mapcount(page) != 1))
462 continue;
463
464 if (pageout_anon_only && !PageAnon(page))
465 continue;
466
467 VM_BUG_ON_PAGE(PageTransCompound(page), page);
468
469 if (pte_young(ptent)) {
470 ptent = ptep_get_and_clear_full(mm, addr, pte,
471 tlb->fullmm);
472 ptent = pte_mkold(ptent);
473 set_pte_at(mm, addr, pte, ptent);
474 tlb_remove_tlb_entry(tlb, pte, addr);
475 }
476
477 /*
478 * We are deactivating a page to accelerate its reclaim.
479 * The VM cannot reclaim the page unless we clear PG_young.
480 * As a side effect, this confuses idle-page tracking,
481 * which will miss the recent reference history.
482 */
483 ClearPageReferenced(page);
484 test_and_clear_page_young(page);
485 if (pageout) {
486 if (!isolate_lru_page(page)) {
487 if (PageUnevictable(page))
488 putback_lru_page(page);
489 else
490 list_add(&page->lru, &page_list);
491 }
492 } else
493 deactivate_page(page);
494 }
495
496 arch_leave_lazy_mmu_mode();
497 pte_unmap_unlock(orig_pte, ptl);
498 if (pageout)
499 reclaim_pages(&page_list);
500 cond_resched();
501
502 return 0;
503 }
504
505 static const struct mm_walk_ops cold_walk_ops = {
506 .pmd_entry = madvise_cold_or_pageout_pte_range,
507 };
508
509 static void madvise_cold_page_range(struct mmu_gather *tlb,
510 struct vm_area_struct *vma,
511 unsigned long addr, unsigned long end)
512 {
513 struct madvise_walk_private walk_private = {
514 .pageout = false,
515 .tlb = tlb,
516 };
517
518 tlb_start_vma(tlb, vma);
519 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
520 tlb_end_vma(tlb, vma);
521 }
522
523 static long madvise_cold(struct vm_area_struct *vma,
524 struct vm_area_struct **prev,
525 unsigned long start_addr, unsigned long end_addr)
526 {
527 struct mm_struct *mm = vma->vm_mm;
528 struct mmu_gather tlb;
529
530 *prev = vma;
531 if (!can_madv_lru_vma(vma))
532 return -EINVAL;
533
534 lru_add_drain();
535 tlb_gather_mmu(&tlb, mm);
536 madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
537 tlb_finish_mmu(&tlb);
538
539 return 0;
540 }
541
542 static void madvise_pageout_page_range(struct mmu_gather *tlb,
543 struct vm_area_struct *vma,
544 unsigned long addr, unsigned long end,
545 bool can_pageout_file)
546 {
547 struct madvise_walk_private walk_private = {
548 .pageout = true,
549 .tlb = tlb,
550 .can_pageout_file = can_pageout_file,
551 };
552
553 tlb_start_vma(tlb, vma);
554 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
555 tlb_end_vma(tlb, vma);
556 }
557
558 static inline bool can_do_file_pageout(struct vm_area_struct *vma)
559 {
560 if (!vma->vm_file)
561 return false;
562 /*
563 * paging out pagecache only for non-anonymous mappings that correspond
564 * to the files the calling process could (if it tried) open for writing;
565 * otherwise we'd be including shared non-exclusive mappings, which
566 * opens a side channel.
567 */
568 return inode_owner_or_capable(&init_user_ns,
569 file_inode(vma->vm_file)) ||
570 file_permission(vma->vm_file, MAY_WRITE) == 0;
571 }
572
573 static long madvise_pageout(struct vm_area_struct *vma,
574 struct vm_area_struct **prev,
575 unsigned long start_addr, unsigned long end_addr)
576 {
577 struct mm_struct *mm = vma->vm_mm;
578 struct mmu_gather tlb;
579 bool can_pageout_file;
580
581 *prev = vma;
582 if (!can_madv_lru_vma(vma))
583 return -EINVAL;
584
585 /*
586 * If the VMA belongs to a private file mapping, there can be private
587 * dirty pages which can be paged out even if this process is neither
588 * the owner of nor write-capable on the file. Cache the file access check
589 * here and use it later during the page walk.
590 */
591 can_pageout_file = can_do_file_pageout(vma);
592
593 lru_add_drain();
594 tlb_gather_mmu(&tlb, mm);
595 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr, can_pageout_file);
596 tlb_finish_mmu(&tlb);
597
598 return 0;
599 }
600
601 static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
602 unsigned long end, struct mm_walk *walk)
603
604 {
605 struct mmu_gather *tlb = walk->private;
606 struct mm_struct *mm = tlb->mm;
607 struct vm_area_struct *vma = walk->vma;
608 spinlock_t *ptl;
609 pte_t *orig_pte, *pte, ptent;
610 struct page *page;
611 int nr_swap = 0;
612 unsigned long next;
613
614 next = pmd_addr_end(addr, end);
615 if (pmd_trans_huge(*pmd))
616 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
617 goto next;
618
619 if (pmd_trans_unstable(pmd))
620 return 0;
621
622 tlb_change_page_size(tlb, PAGE_SIZE);
623 orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
624 flush_tlb_batched_pending(mm);
625 arch_enter_lazy_mmu_mode();
626 for (; addr != end; pte++, addr += PAGE_SIZE) {
627 ptent = *pte;
628
629 if (pte_none(ptent))
630 continue;
631 /*
632 * If the pte holds a swap entry, just clear the page table
633 * entry to prevent a swap-in, which is more expensive than
634 * (page allocation + zeroing).
635 */
636 if (!pte_present(ptent)) {
637 swp_entry_t entry;
638
639 entry = pte_to_swp_entry(ptent);
640 if (non_swap_entry(entry))
641 continue;
642 nr_swap--;
643 free_swap_and_cache(entry);
644 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
645 continue;
646 }
647
648 page = vm_normal_page(vma, addr, ptent);
649 if (!page)
650 continue;
651
652 /*
653 * If pmd isn't transhuge but the page is THP and
654 * is owned by only this process, split it and
655 * deactivate all pages.
656 */
657 if (PageTransCompound(page)) {
658 if (page_mapcount(page) != 1)
659 goto out;
660 get_page(page);
661 if (!trylock_page(page)) {
662 put_page(page);
663 goto out;
664 }
665 pte_unmap_unlock(orig_pte, ptl);
666 if (split_huge_page(page)) {
667 unlock_page(page);
668 put_page(page);
669 pte_offset_map_lock(mm, pmd, addr, &ptl);
670 goto out;
671 }
672 unlock_page(page);
673 put_page(page);
674 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
675 pte--;
676 addr -= PAGE_SIZE;
677 continue;
678 }
679
680 VM_BUG_ON_PAGE(PageTransCompound(page), page);
681
682 if (PageSwapCache(page) || PageDirty(page)) {
683 if (!trylock_page(page))
684 continue;
685 /*
686 * If the page is shared with others, we cannot clear
687 * its PG_dirty bit.
688 */
689 if (page_mapcount(page) != 1) {
690 unlock_page(page);
691 continue;
692 }
693
694 if (PageSwapCache(page) && !try_to_free_swap(page)) {
695 unlock_page(page);
696 continue;
697 }
698
699 ClearPageDirty(page);
700 unlock_page(page);
701 }
702
703 if (pte_young(ptent) || pte_dirty(ptent)) {
704 /*
705 * Some architectures (e.g. PPC) don't update the TLB
706 * with set_pte_at() and tlb_remove_tlb_entry(), so for
707 * portability, re-install the pte as old and clean
708 * after clearing it.
709 */
710 ptent = ptep_get_and_clear_full(mm, addr, pte,
711 tlb->fullmm);
712
713 ptent = pte_mkold(ptent);
714 ptent = pte_mkclean(ptent);
715 set_pte_at(mm, addr, pte, ptent);
716 tlb_remove_tlb_entry(tlb, pte, addr);
717 }
718 mark_page_lazyfree(page);
719 }
720 out:
721 if (nr_swap) {
722 if (current->mm == mm)
723 sync_mm_rss(mm);
724
725 add_mm_counter(mm, MM_SWAPENTS, nr_swap);
726 }
727 arch_leave_lazy_mmu_mode();
728 pte_unmap_unlock(orig_pte, ptl);
729 cond_resched();
730 next:
731 return 0;
732 }
733
734 static const struct mm_walk_ops madvise_free_walk_ops = {
735 .pmd_entry = madvise_free_pte_range,
736 };
737
738 static int madvise_free_single_vma(struct vm_area_struct *vma,
739 unsigned long start_addr, unsigned long end_addr)
740 {
741 struct mm_struct *mm = vma->vm_mm;
742 struct mmu_notifier_range range;
743 struct mmu_gather tlb;
744
745 /* MADV_FREE works for only anon vma at the moment */
746 if (!vma_is_anonymous(vma))
747 return -EINVAL;
748
749 range.start = max(vma->vm_start, start_addr);
750 if (range.start >= vma->vm_end)
751 return -EINVAL;
752 range.end = min(vma->vm_end, end_addr);
753 if (range.end <= vma->vm_start)
754 return -EINVAL;
755 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
756 range.start, range.end);
757
758 lru_add_drain();
759 tlb_gather_mmu(&tlb, mm);
760 update_hiwater_rss(mm);
761
762 mmu_notifier_invalidate_range_start(&range);
763 tlb_start_vma(&tlb, vma);
764 walk_page_range(vma->vm_mm, range.start, range.end,
765 &madvise_free_walk_ops, &tlb);
766 tlb_end_vma(&tlb, vma);
767 mmu_notifier_invalidate_range_end(&range);
768 tlb_finish_mmu(&tlb);
769
770 return 0;
771 }
772
773 /*
774 * Application no longer needs these pages. If the pages are dirty,
775 * it's OK to just throw them away. The app will be more careful about
776 * data it wants to keep. Be sure to free swap resources too. The
777 * zap_page_range call sets things up for shrink_active_list to actually free
778 * these pages later if no one else has touched them in the meantime,
779 * although we could add these pages to a global reuse list for
780 * shrink_active_list to pick up before reclaiming other pages.
781 *
782 * NB: This interface discards data rather than pushes it out to swap,
783 * as some implementations do. This has performance implications for
784 * applications like large transactional databases which want to discard
785 * pages in anonymous maps after committing to backing store the data
786 * that was kept in them. There is no reason to write this data out to
787 * the swap area if the application is discarding it.
788 *
789 * An interface that causes the system to free clean pages and flush
790 * dirty pages is already available as msync(MS_INVALIDATE).
791 */
792 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
793 unsigned long start, unsigned long end)
794 {
795 zap_page_range(vma, start, end - start);
796 return 0;
797 }
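/*
 * Illustrative sketch, not part of this file: a userspace allocator returning
 * a page-aligned chunk of an anonymous private mapping to the kernel would
 * typically do something like:
 *
 *	if (madvise(chunk, chunk_size, MADV_DONTNEED))
 *		perror("madvise(MADV_DONTNEED)");
 *
 * The pages stay mapped but their contents are discarded; the next touch
 * faults in zero-filled pages. MADV_FREE is the lazier variant: the pages
 * are only reclaimed under memory pressure and keep their contents until
 * then.
 */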
798
799 static long madvise_dontneed_free(struct vm_area_struct *vma,
800 struct vm_area_struct **prev,
801 unsigned long start, unsigned long end,
802 int behavior)
803 {
804 struct mm_struct *mm = vma->vm_mm;
805
806 *prev = vma;
807 if (!can_madv_lru_vma(vma))
808 return -EINVAL;
809
810 if (!userfaultfd_remove(vma, start, end)) {
811 *prev = NULL; /* mmap_lock has been dropped, prev is stale */
812
813 mmap_read_lock(mm);
814 vma = find_vma(mm, start);
815 if (!vma)
816 return -ENOMEM;
817 if (start < vma->vm_start) {
818 /*
819 * This "vma" under revalidation is the one
820 * with the lowest vma->vm_start where start
821 * is also < vma->vm_end. If start <
822 * vma->vm_start it means a hole materialized
823 * in the user address space within the
824 * virtual range passed to MADV_DONTNEED
825 * or MADV_FREE.
826 */
827 return -ENOMEM;
828 }
829 if (!can_madv_lru_vma(vma))
830 return -EINVAL;
831 if (end > vma->vm_end) {
832 /*
833 * Don't fail if end > vma->vm_end. If the old
834 * vma was split while the mmap_lock was
835 * released the effect of the concurrent
836 * operation may not cause madvise() to
837 * have an undefined result. There may be an
838 * adjacent next vma that we'll walk
839 * next. userfaultfd_remove() will generate an
840 * UFFD_EVENT_REMOVE repetition on the
841 * end-vma->vm_end range, but the manager can
842 * handle a repetition fine.
843 */
844 end = vma->vm_end;
845 }
846 VM_WARN_ON(start >= end);
847 }
848
849 if (behavior == MADV_DONTNEED)
850 return madvise_dontneed_single_vma(vma, start, end);
851 else if (behavior == MADV_FREE)
852 return madvise_free_single_vma(vma, start, end);
853 else
854 return -EINVAL;
855 }
856
857 static long madvise_populate(struct vm_area_struct *vma,
858 struct vm_area_struct **prev,
859 unsigned long start, unsigned long end,
860 int behavior)
861 {
862 const bool write = behavior == MADV_POPULATE_WRITE;
863 struct mm_struct *mm = vma->vm_mm;
864 unsigned long tmp_end;
865 int locked = 1;
866 long pages;
867
868 *prev = vma;
869
870 while (start < end) {
871 /*
872 * We might have temporarily dropped the lock. For example,
873 * our VMA might have been split.
874 */
875 if (!vma || start >= vma->vm_end) {
876 vma = find_vma(mm, start);
877 if (!vma || start < vma->vm_start)
878 return -ENOMEM;
879 }
880
881 tmp_end = min_t(unsigned long, end, vma->vm_end);
882 /* Populate (prefault) page tables readable/writable. */
883 pages = faultin_vma_page_range(vma, start, tmp_end, write,
884 &locked);
885 if (!locked) {
886 mmap_read_lock(mm);
887 locked = 1;
888 *prev = NULL;
889 vma = NULL;
890 }
891 if (pages < 0) {
892 switch (pages) {
893 case -EINTR:
894 return -EINTR;
895 case -EINVAL: /* Incompatible mappings / permissions. */
896 return -EINVAL;
897 case -EHWPOISON:
898 return -EHWPOISON;
899 case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
900 return -EFAULT;
901 default:
902 pr_warn_once("%s: unhandled return value: %ld\n",
903 __func__, pages);
904 fallthrough;
905 case -ENOMEM:
906 return -ENOMEM;
907 }
908 }
909 start += pages * PAGE_SIZE;
910 }
911 return 0;
912 }
913
914 /*
915 * Application wants to free up the pages and associated backing store.
916 * This is effectively punching a hole into the middle of a file.
917 */
918 static long madvise_remove(struct vm_area_struct *vma,
919 struct vm_area_struct **prev,
920 unsigned long start, unsigned long end)
921 {
922 loff_t offset;
923 int error;
924 struct file *f;
925 struct mm_struct *mm = vma->vm_mm;
926
927 *prev = NULL; /* tell sys_madvise we drop mmap_lock */
928
929 if (vma->vm_flags & VM_LOCKED)
930 return -EINVAL;
931
932 f = vma->vm_file;
933
934 if (!f || !f->f_mapping || !f->f_mapping->host) {
935 return -EINVAL;
936 }
937
938 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
939 return -EACCES;
940
941 offset = (loff_t)(start - vma->vm_start)
942 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
943
944 /*
945 * Filesystem's fallocate may need to take i_rwsem. We need to
946 * explicitly grab a reference because the vma (and hence the
947 * vma's reference to the file) can go away as soon as we drop
948 * mmap_lock.
949 */
950 get_file(f);
951 if (userfaultfd_remove(vma, start, end)) {
952 /* mmap_lock was not released by userfaultfd_remove() */
953 mmap_read_unlock(mm);
954 }
955 error = vfs_fallocate(f,
956 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
957 offset, end - start);
958 fput(f);
959 mmap_read_lock(mm);
960 return error;
961 }
962
963 /*
964 * Apply a madvise behavior to a region of a vma. madvise_update_vma
965 * will handle splitting a vm area into separate areas, each area with its own
966 * behavior.
967 */
968 static int madvise_vma_behavior(struct vm_area_struct *vma,
969 struct vm_area_struct **prev,
970 unsigned long start, unsigned long end,
971 unsigned long behavior)
972 {
973 int error;
974 struct anon_vma_name *anon_name;
975 unsigned long new_flags = vma->vm_flags;
976
977 switch (behavior) {
978 case MADV_REMOVE:
979 return madvise_remove(vma, prev, start, end);
980 case MADV_WILLNEED:
981 return madvise_willneed(vma, prev, start, end);
982 case MADV_COLD:
983 return madvise_cold(vma, prev, start, end);
984 case MADV_PAGEOUT:
985 return madvise_pageout(vma, prev, start, end);
986 case MADV_FREE:
987 case MADV_DONTNEED:
988 return madvise_dontneed_free(vma, prev, start, end, behavior);
989 case MADV_POPULATE_READ:
990 case MADV_POPULATE_WRITE:
991 return madvise_populate(vma, prev, start, end, behavior);
992 case MADV_NORMAL:
993 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
994 break;
995 case MADV_SEQUENTIAL:
996 new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
997 break;
998 case MADV_RANDOM:
999 new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
1000 break;
1001 case MADV_DONTFORK:
1002 new_flags |= VM_DONTCOPY;
1003 break;
1004 case MADV_DOFORK:
1005 if (vma->vm_flags & VM_IO)
1006 return -EINVAL;
1007 new_flags &= ~VM_DONTCOPY;
1008 break;
1009 case MADV_WIPEONFORK:
1010 /* MADV_WIPEONFORK is only supported on anonymous memory. */
1011 if (vma->vm_file || vma->vm_flags & VM_SHARED)
1012 return -EINVAL;
1013 new_flags |= VM_WIPEONFORK;
1014 break;
1015 case MADV_KEEPONFORK:
1016 new_flags &= ~VM_WIPEONFORK;
1017 break;
1018 case MADV_DONTDUMP:
1019 new_flags |= VM_DONTDUMP;
1020 break;
1021 case MADV_DODUMP:
1022 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
1023 return -EINVAL;
1024 new_flags &= ~VM_DONTDUMP;
1025 break;
1026 case MADV_MERGEABLE:
1027 case MADV_UNMERGEABLE:
1028 error = ksm_madvise(vma, start, end, behavior, &new_flags);
1029 if (error)
1030 goto out;
1031 break;
1032 case MADV_HUGEPAGE:
1033 case MADV_NOHUGEPAGE:
1034 error = hugepage_madvise(vma, &new_flags, behavior);
1035 if (error)
1036 goto out;
1037 break;
1038 }
1039
1040 anon_name = anon_vma_name(vma);
1041 anon_vma_name_get(anon_name);
1042 error = madvise_update_vma(vma, prev, start, end, new_flags,
1043 anon_name);
1044 anon_vma_name_put(anon_name);
1045
1046 out:
1047 /*
1048 * madvise() returns EAGAIN if kernel resources, such as
1049 * slab, are temporarily unavailable.
1050 */
1051 if (error == -ENOMEM)
1052 error = -EAGAIN;
1053 return error;
1054 }
1055
1056 #ifdef CONFIG_MEMORY_FAILURE
1057 /*
1058 * Error injection support for memory error handling.
1059 */
1060 static int madvise_inject_error(int behavior,
1061 unsigned long start, unsigned long end)
1062 {
1063 unsigned long size;
1064
1065 if (!capable(CAP_SYS_ADMIN))
1066 return -EPERM;
1067
1068
1069 for (; start < end; start += size) {
1070 unsigned long pfn;
1071 struct page *page;
1072 int ret;
1073
1074 ret = get_user_pages_fast(start, 1, 0, &page);
1075 if (ret != 1)
1076 return ret;
1077 pfn = page_to_pfn(page);
1078
1079 /*
1080 * When soft offlining hugepages, after migrating the page
1081 * we dissolve it, therefore in the second loop "page" will
1082 * no longer be a compound page.
1083 */
1084 size = page_size(compound_head(page));
1085
1086 if (behavior == MADV_SOFT_OFFLINE) {
1087 pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
1088 pfn, start);
1089 ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
1090 } else {
1091 pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
1092 pfn, start);
1093 ret = memory_failure(pfn, MF_COUNT_INCREASED);
1094 if (ret == -EOPNOTSUPP)
1095 ret = 0;
1096 }
1097
1098 if (ret)
1099 return ret;
1100 }
1101
1102 return 0;
1103 }
1104 #endif
1105
1106 static bool
1107 madvise_behavior_valid(int behavior)
1108 {
1109 switch (behavior) {
1110 case MADV_DOFORK:
1111 case MADV_DONTFORK:
1112 case MADV_NORMAL:
1113 case MADV_SEQUENTIAL:
1114 case MADV_RANDOM:
1115 case MADV_REMOVE:
1116 case MADV_WILLNEED:
1117 case MADV_DONTNEED:
1118 case MADV_FREE:
1119 case MADV_COLD:
1120 case MADV_PAGEOUT:
1121 case MADV_POPULATE_READ:
1122 case MADV_POPULATE_WRITE:
1123 #ifdef CONFIG_KSM
1124 case MADV_MERGEABLE:
1125 case MADV_UNMERGEABLE:
1126 #endif
1127 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1128 case MADV_HUGEPAGE:
1129 case MADV_NOHUGEPAGE:
1130 #endif
1131 case MADV_DONTDUMP:
1132 case MADV_DODUMP:
1133 case MADV_WIPEONFORK:
1134 case MADV_KEEPONFORK:
1135 #ifdef CONFIG_MEMORY_FAILURE
1136 case MADV_SOFT_OFFLINE:
1137 case MADV_HWPOISON:
1138 #endif
1139 return true;
1140
1141 default:
1142 return false;
1143 }
1144 }
1145
1146 static bool
1147 process_madvise_behavior_valid(int behavior)
1148 {
1149 switch (behavior) {
1150 case MADV_COLD:
1151 case MADV_PAGEOUT:
1152 case MADV_WILLNEED:
1153 return true;
1154 default:
1155 return false;
1156 }
1157 }
1158
1159 /*
1160 * Walk the vmas in range [start,end), and call the visit function on each one.
1161 * The visit function will get start and end parameters that cover the overlap
1162 * between the current vma and the original range. Any unmapped regions in the
1163 * original range will result in this function returning -ENOMEM while still
1164 * calling the visit function on all of the existing vmas in the range.
1165 * Must be called with the mmap_lock held for reading or writing.
1166 */
1167 static
1168 int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
1169 unsigned long end, unsigned long arg,
1170 int (*visit)(struct vm_area_struct *vma,
1171 struct vm_area_struct **prev, unsigned long start,
1172 unsigned long end, unsigned long arg))
1173 {
1174 struct vm_area_struct *vma;
1175 struct vm_area_struct *prev;
1176 unsigned long tmp;
1177 int unmapped_error = 0;
1178
1179 /*
1180 * If the interval [start,end) covers some unmapped address
1181 * ranges, just ignore them, but return -ENOMEM at the end.
1182 * This differs from the way mlock etc. handle it.
1183 */
1184 vma = find_vma_prev(mm, start, &prev);
1185 if (vma && start > vma->vm_start)
1186 prev = vma;
1187
1188 for (;;) {
1189 int error;
1190
1191 /* Still start < end. */
1192 if (!vma)
1193 return -ENOMEM;
1194
1195 /* Here start < (end|vma->vm_end). */
1196 if (start < vma->vm_start) {
1197 unmapped_error = -ENOMEM;
1198 start = vma->vm_start;
1199 if (start >= end)
1200 break;
1201 }
1202
1203 /* Here vma->vm_start <= start < (end|vma->vm_end) */
1204 tmp = vma->vm_end;
1205 if (end < tmp)
1206 tmp = end;
1207
1208 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
1209 error = visit(vma, &prev, start, tmp, arg);
1210 if (error)
1211 return error;
1212 start = tmp;
1213 if (prev && start < prev->vm_end)
1214 start = prev->vm_end;
1215 if (start >= end)
1216 break;
1217 if (prev)
1218 vma = prev->vm_next;
1219 else /* madvise_remove dropped mmap_lock */
1220 vma = find_vma(mm, start);
1221 }
1222
1223 return unmapped_error;
1224 }
1225
1226 #ifdef CONFIG_ANON_VMA_NAME
1227 static int madvise_vma_anon_name(struct vm_area_struct *vma,
1228 struct vm_area_struct **prev,
1229 unsigned long start, unsigned long end,
1230 unsigned long anon_name)
1231 {
1232 int error;
1233
1234 /* Only anonymous mappings can be named */
1235 if (vma->vm_file)
1236 return -EBADF;
1237
1238 error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
1239 (struct anon_vma_name *)anon_name);
1240
1241 /*
1242 * madvise() returns EAGAIN if kernel resources, such as
1243 * slab, are temporarily unavailable.
1244 */
1245 if (error == -ENOMEM)
1246 error = -EAGAIN;
1247 return error;
1248 }
1249
1250 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
1251 unsigned long len_in, struct anon_vma_name *anon_name)
1252 {
1253 unsigned long end;
1254 unsigned long len;
1255
1256 if (start & ~PAGE_MASK)
1257 return -EINVAL;
1258 len = (len_in + ~PAGE_MASK) & PAGE_MASK;
1259
1260 /* Check to see whether len was rounded up from small -ve to zero */
1261 if (len_in && !len)
1262 return -EINVAL;
1263
1264 end = start + len;
1265 if (end < start)
1266 return -EINVAL;
1267
1268 if (end == start)
1269 return 0;
1270
1271 return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
1272 madvise_vma_anon_name);
1273 }
1274 #endif /* CONFIG_ANON_VMA_NAME */
1275 /*
1276 * The madvise(2) system call.
1277 *
1278 * Applications can use madvise() to advise the kernel how it should
1279 * handle paging I/O in this VM area. The idea is to help the kernel
1280 * use appropriate read-ahead and caching techniques. The information
1281 * provided is advisory only, and can be safely disregarded by the
1282 * kernel without affecting the correct operation of the application.
1283 *
1284 * behavior values:
1285 * MADV_NORMAL - the default behavior is to read clusters. This
1286 * results in some read-ahead and read-behind.
1287 * MADV_RANDOM - the system should read the minimum amount of data
1288 * on any access, since it is unlikely that the appli-
1289 * cation will need more than what it asks for.
1290 * MADV_SEQUENTIAL - pages in the given range will probably be accessed
1291 * once, so they can be aggressively read ahead, and
1292 * can be freed soon after they are accessed.
1293 * MADV_WILLNEED - the application is notifying the system to read
1294 * some pages ahead.
1295 * MADV_DONTNEED - the application is finished with the given range,
1296 * so the kernel can free resources associated with it.
1297 * MADV_FREE - the application marks pages in the given range as lazy free,
1298 * where actual purges are postponed until memory pressure happens.
1299 * MADV_REMOVE - the application wants to free up the given range of
1300 * pages and associated backing store.
1301 * MADV_DONTFORK - omit this area from child's address space when forking:
1302 * typically, to avoid COWing pages pinned by get_user_pages().
1303 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
1304 * MADV_WIPEONFORK - present the child process with zero-filled memory in this
1305 * range after a fork.
1306 * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
1307 * MADV_HWPOISON - trigger memory error handler as if the given memory range
1308 * were corrupted by unrecoverable hardware memory failure.
1309 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
1310 * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
1311 * this area with pages of identical content from other such areas.
1312 * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
1313 * MADV_HUGEPAGE - the application wants to back the given range by transparent
1314 * huge pages in the future. Existing pages might be coalesced and
1315 * new pages might be allocated as THP.
1316 * MADV_NOHUGEPAGE - mark the given range as not worth being backed by
1317 * transparent huge pages so the existing pages will not be
1318 * coalesced into THP and new pages will not be allocated as THP.
1319 * MADV_DONTDUMP - the application wants to prevent pages in the given range
1320 * from being included in its core dump.
1321 * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
1322 * MADV_COLD - the application is not expected to use this memory soon,
1323 * deactivate pages in this range so that they can be reclaimed
1324 * easily if memory pressure happens.
1325 * MADV_PAGEOUT - the application is not expected to use this memory soon,
1326 * page out the pages in this range immediately.
1327 * MADV_POPULATE_READ - populate (prefault) page tables readable by
1328 * triggering read faults if required
1329 * MADV_POPULATE_WRITE - populate (prefault) page tables writable by
1330 * triggering write faults if required
1331 *
1332 * return values:
1333 * zero - success
1334 * -EINVAL - start + len < 0, start is not page-aligned,
1335 * "behavior" is not a valid value, or application
1336 * is attempting to release locked or shared pages,
1337 * or the specified address range includes file, Huge TLB,
1338 * MAP_SHARED or VM_PFNMAP range.
1339 * -ENOMEM - addresses in the specified range are not currently
1340 * mapped, or are outside the AS of the process.
1341 * -EIO - an I/O error occurred while paging in data.
1342 * -EBADF - map exists, but area maps something that isn't a file.
1343 * -EAGAIN - a kernel resource was temporarily unavailable.
1344 */
1345 int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
1346 {
1347 unsigned long end;
1348 int error;
1349 int write;
1350 size_t len;
1351 struct blk_plug plug;
1352
1353 start = untagged_addr(start);
1354
1355 if (!madvise_behavior_valid(behavior))
1356 return -EINVAL;
1357
1358 if (!PAGE_ALIGNED(start))
1359 return -EINVAL;
1360 len = PAGE_ALIGN(len_in);
1361
1362 /* Check to see whether len was rounded up from small -ve to zero */
1363 if (len_in && !len)
1364 return -EINVAL;
1365
1366 end = start + len;
1367 if (end < start)
1368 return -EINVAL;
1369
1370 if (end == start)
1371 return 0;
1372
1373 #ifdef CONFIG_MEMORY_FAILURE
1374 if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
1375 return madvise_inject_error(behavior, start, start + len_in);
1376 #endif
1377
1378 write = madvise_need_mmap_write(behavior);
1379 if (write) {
1380 if (mmap_write_lock_killable(mm))
1381 return -EINTR;
1382 } else {
1383 mmap_read_lock(mm);
1384 }
1385
1386 blk_start_plug(&plug);
1387 error = madvise_walk_vmas(mm, start, end, behavior,
1388 madvise_vma_behavior);
1389 blk_finish_plug(&plug);
1390 if (write)
1391 mmap_write_unlock(mm);
1392 else
1393 mmap_read_unlock(mm);
1394
1395 return error;
1396 }
1397
1398 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
1399 {
1400 return do_madvise(current->mm, start, len_in, behavior);
1401 }
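/*
 * Illustrative sketch, not part of this file: typical advisory use of the
 * syscall above on a file mapping that will be streamed through once:
 *
 *	void *map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(map, st.st_size, MADV_SEQUENTIAL);	// aggressive readahead
 *	madvise(map, st.st_size, MADV_WILLNEED);	// schedule I/O now
 *
 * Both hints may be ignored by the kernel without affecting correctness.
 */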
1402
1403 SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
1404 size_t, vlen, int, behavior, unsigned int, flags)
1405 {
1406 ssize_t ret;
1407 struct iovec iovstack[UIO_FASTIOV], iovec;
1408 struct iovec *iov = iovstack;
1409 struct iov_iter iter;
1410 struct pid *pid;
1411 struct task_struct *task;
1412 struct mm_struct *mm;
1413 size_t total_len;
1414 unsigned int f_flags;
1415
1416 if (flags != 0) {
1417 ret = -EINVAL;
1418 goto out;
1419 }
1420
1421 ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
1422 if (ret < 0)
1423 goto out;
1424
1425 pid = pidfd_get_pid(pidfd, &f_flags);
1426 if (IS_ERR(pid)) {
1427 ret = PTR_ERR(pid);
1428 goto free_iov;
1429 }
1430
1431 task = get_pid_task(pid, PIDTYPE_PID);
1432 if (!task) {
1433 ret = -ESRCH;
1434 goto put_pid;
1435 }
1436
1437 if (!process_madvise_behavior_valid(behavior)) {
1438 ret = -EINVAL;
1439 goto release_task;
1440 }
1441
1442 /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
1443 mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
1444 if (IS_ERR_OR_NULL(mm)) {
1445 ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
1446 goto release_task;
1447 }
1448
1449 /*
1450 * Require CAP_SYS_NICE for influencing process performance. Note that
1451 * only non-destructive hints are currently supported.
1452 */
1453 if (!capable(CAP_SYS_NICE)) {
1454 ret = -EPERM;
1455 goto release_mm;
1456 }
1457
1458 total_len = iov_iter_count(&iter);
1459
1460 while (iov_iter_count(&iter)) {
1461 iovec = iov_iter_iovec(&iter);
1462 ret = do_madvise(mm, (unsigned long)iovec.iov_base,
1463 iovec.iov_len, behavior);
1464 if (ret < 0)
1465 break;
1466 iov_iter_advance(&iter, iovec.iov_len);
1467 }
1468
1469 ret = (total_len - iov_iter_count(&iter)) ? : ret;
1470
1471 release_mm:
1472 mmput(mm);
1473 release_task:
1474 put_task_struct(task);
1475 put_pid:
1476 put_pid(pid);
1477 free_iov:
1478 kfree(iov);
1479 out:
1480 return ret;
1481 }
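/*
 * Illustrative sketch, not part of this file: process_madvise() is typically
 * driven by a userspace memory manager acting on another task. Assuming a
 * libc without a dedicated wrapper, the call would look roughly like:
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	struct iovec vec = { .iov_base = addr, .iov_len = len };
 *	ssize_t done = syscall(SYS_process_madvise, pidfd, &vec, 1,
 *			       MADV_PAGEOUT, 0);
 *
 * The caller needs PTRACE_MODE_READ access to the target and CAP_SYS_NICE,
 * as checked above, and only the non-destructive hints accepted by
 * process_madvise_behavior_valid() (MADV_COLD, MADV_PAGEOUT, MADV_WILLNEED)
 * are allowed.
 */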
1482