// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mlock.c
 *
 * (C) Copyright 1995 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pgsize_migration.h>
#include <linux/pagevec.h>
#include <linux/pagewalk.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>
#include <linux/page_size_compat.h>

#include "internal.h"

struct mlock_fbatch {
        local_lock_t lock;
        struct folio_batch fbatch;
};

static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

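/*
 * can_do_mlock - may the current task lock memory?  True if there is a
 * non-zero RLIMIT_MEMLOCK budget, or if the task has CAP_IPC_LOCK.
 */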
bool can_do_mlock(void)
{
        if (rlimit(RLIMIT_MEMLOCK) != 0)
                return true;
        if (capable(CAP_IPC_LOCK))
                return true;
        return false;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked folios are marked with the PG_mlocked flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
 * will be ostensibly placed on the LRU "unevictable" list (actually no such
 * list exists), rather than the [in]active lists.  PG_unevictable is set to
 * indicate the unevictable state.
 */

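/*
 * __mlock_folio - move an already-on-LRU folio to the unevictable state.
 * Isolates the folio, relocks the lruvec, then either bumps mlock_count
 * (already unevictable), rescues it back to the evictable lists (its
 * PG_mlocked was cleared meanwhile), or culls it to unevictable.
 * Returns the lruvec, still locked, for the caller to reuse.
 */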
static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
{
        /* There is nothing more we can do while it's off LRU */
        if (!folio_test_clear_lru(folio))
                return lruvec;

        lruvec = folio_lruvec_relock_irq(folio, lruvec);

        if (unlikely(folio_evictable(folio))) {
                /*
                 * This is a little surprising, but quite possible: PG_mlocked
                 * must have got cleared already by another CPU.  Could this
                 * folio be unevictable?  I'm not sure, but move it now if so.
                 */
                if (folio_test_unevictable(folio)) {
                        lruvec_del_folio(lruvec, folio);
                        folio_clear_unevictable(folio);
                        lruvec_add_folio(lruvec, folio);

                        __count_vm_events(UNEVICTABLE_PGRESCUED,
                                          folio_nr_pages(folio));
                }
                goto out;
        }

        if (folio_test_unevictable(folio)) {
                if (folio_test_mlocked(folio))
                        folio->mlock_count++;
                goto out;
        }

        lruvec_del_folio(lruvec, folio);
        folio_clear_active(folio);
        folio_set_unevictable(folio);
        folio->mlock_count = !!folio_test_mlocked(folio);
        lruvec_add_folio(lruvec, folio);
        __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
        folio_set_lru(folio);
        return lruvec;
}

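/*
 * __mlock_new_folio - as __mlock_folio(), but for a folio not yet on the
 * LRU: nothing to isolate, just mark it unevictable and add it.
 */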
static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
{
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

        lruvec = folio_lruvec_relock_irq(folio, lruvec);

        /* As above, this is a little surprising, but possible */
        if (unlikely(folio_evictable(folio)))
                goto out;

        folio_set_unevictable(folio);
        folio->mlock_count = !!folio_test_mlocked(folio);
        __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
        lruvec_add_folio(lruvec, folio);
        folio_set_lru(folio);
        return lruvec;
}

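/*
 * __munlock_folio - drop one mlock reference: decrement mlock_count and,
 * on the last munlock, clear PG_mlocked, fix the NR_MLOCK statistics and
 * move the folio back to the evictable lists if it now qualifies.
 */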
static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
{
        int nr_pages = folio_nr_pages(folio);
        bool isolated = false;

        if (!folio_test_clear_lru(folio))
                goto munlock;

        isolated = true;
        lruvec = folio_lruvec_relock_irq(folio, lruvec);

        if (folio_test_unevictable(folio)) {
                /* Then mlock_count is maintained, but might undercount */
                if (folio->mlock_count)
                        folio->mlock_count--;
                if (folio->mlock_count)
                        goto out;
        }
        /* else assume that was the last mlock: reclaim will fix it if not */

munlock:
        if (folio_test_clear_mlocked(folio)) {
                __zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
                if (isolated || !folio_test_unevictable(folio))
                        __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
                else
                        __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
        }

        /* folio_evictable() has to be checked *after* clearing Mlocked */
        if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
                lruvec_del_folio(lruvec, folio);
                folio_clear_unevictable(folio);
                lruvec_add_folio(lruvec, folio);
                __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        }
out:
        if (isolated)
                folio_set_lru(folio);
        return lruvec;
}

/*
 * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
 */
#define LRU_FOLIO 0x1
#define NEW_FOLIO 0x2
static inline struct folio *mlock_lru(struct folio *folio)
{
        return (struct folio *)((unsigned long)folio + LRU_FOLIO);
}

static inline struct folio *mlock_new(struct folio *folio)
{
        return (struct folio *)((unsigned long)folio + NEW_FOLIO);
}

/*
 * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
 * make use of such folio pointer flags in future, but for now just keep it for
 * mlock.  We could use three separate folio batches instead, but one feels
 * better (munlocking a full folio batch does not need to drain mlocking folio
 * batches first).
 */
static void mlock_folio_batch(struct folio_batch *fbatch)
{
        struct lruvec *lruvec = NULL;
        unsigned long mlock;
        struct folio *folio;
        int i;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                folio = fbatch->folios[i];
                mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
                folio = (struct folio *)((unsigned long)folio - mlock);
                fbatch->folios[i] = folio;

                if (mlock & LRU_FOLIO)
                        lruvec = __mlock_folio(folio, lruvec);
                else if (mlock & NEW_FOLIO)
                        lruvec = __mlock_new_folio(folio, lruvec);
                else
                        lruvec = __munlock_folio(folio, lruvec);
        }

        if (lruvec)
                unlock_page_lruvec_irq(lruvec);
        folios_put(fbatch);
}

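/*
 * mlock_drain_local - flush this CPU's pending mlock folio batch.
 * Must be called on the local CPU; the local_lock excludes the folio
 * batch's other users on this CPU.
 */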
void mlock_drain_local(void)
{
        struct folio_batch *fbatch;

        local_lock(&mlock_fbatch.lock);
        fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
        if (folio_batch_count(fbatch))
                mlock_folio_batch(fbatch);
        local_unlock(&mlock_fbatch.lock);
}

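/*
 * mlock_drain_remote - flush the mlock folio batch of another CPU.
 * Only safe once that CPU is offline, hence the WARN_ON_ONCE() below.
 */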
void mlock_drain_remote(int cpu)
{
        struct folio_batch *fbatch;

        WARN_ON_ONCE(cpu_online(cpu));
        fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
        if (folio_batch_count(fbatch))
                mlock_folio_batch(fbatch);
}

bool need_mlock_drain(int cpu)
{
        return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
}

/**
 * mlock_folio - mlock a folio already on (or temporarily off) LRU
 * @folio: folio to be mlocked.
 */
void mlock_folio(struct folio *folio)
{
        struct folio_batch *fbatch;

        local_lock(&mlock_fbatch.lock);
        fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);

        if (!folio_test_set_mlocked(folio)) {
                int nr_pages = folio_nr_pages(folio);

                zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
                __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
        }

        folio_get(folio);
        if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
            !folio_may_be_lru_cached(folio) || lru_cache_disabled())
                mlock_folio_batch(fbatch);
        local_unlock(&mlock_fbatch.lock);
}

/**
 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
 * @folio: folio to be mlocked, either normal or a THP head.
 */
void mlock_new_folio(struct folio *folio)
{
        struct folio_batch *fbatch;
        int nr_pages = folio_nr_pages(folio);

        local_lock(&mlock_fbatch.lock);
        fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
        folio_set_mlocked(folio);

        zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
        __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

        folio_get(folio);
        if (!folio_batch_add(fbatch, mlock_new(folio)) ||
            !folio_may_be_lru_cached(folio) || lru_cache_disabled())
                mlock_folio_batch(fbatch);
        local_unlock(&mlock_fbatch.lock);
}

/**
 * munlock_folio - munlock a folio
 * @folio: folio to be munlocked, either normal or a THP head.
 */
void munlock_folio(struct folio *folio)
{
        struct folio_batch *fbatch;

        local_lock(&mlock_fbatch.lock);
        fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
        /*
         * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
         * which will check whether the folio is multiply mlocked.
         */
        folio_get(folio);
        if (!folio_batch_add(fbatch, folio) ||
            !folio_may_be_lru_cached(folio) || lru_cache_disabled())
                mlock_folio_batch(fbatch);
        local_unlock(&mlock_fbatch.lock);
}

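/*
 * folio_mlock_step - how many of the PTEs in [addr, end) map this folio.
 * A small folio always takes a step of 1; for a large folio, batch over
 * its contiguously-mapped PTEs so the caller handles it in one go.
 */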
static inline unsigned int folio_mlock_step(struct folio *folio,
                pte_t *pte, unsigned long addr, unsigned long end)
{
        const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
        unsigned int count = (end - addr) >> PAGE_SHIFT;
        pte_t ptent = ptep_get(pte);

        if (!folio_test_large(folio))
                return 1;

        return folio_pte_batch(folio, addr, pte, ptent, count, fpb_flags, NULL,
                               NULL, NULL);
}

static inline bool allow_mlock_munlock(struct folio *folio,
                struct vm_area_struct *vma, unsigned long start,
                unsigned long end, unsigned int step)
{
        /*
         * For munlock, allow munlocking a large folio that is only
         * partially mapped to the VMA: the folio may have been mlocked
         * before the VMA was split.
         *
         * Under memory pressure such a large folio can be split, and
         * its pages that are no longer in a VM_LOCKED VMA can then be
         * reclaimed.
         */
        if (!(vma->vm_flags & VM_LOCKED))
                return true;

        /* folio_within_range() cannot take KSM, but any small folio is OK */
        if (!folio_test_large(folio))
                return true;

        /* folio not in range [start, end), skip mlock */
        if (!folio_within_range(folio, vma, start, end))
                return false;

        /* folio is not fully mapped, skip mlock */
        if (step != folio_nr_pages(folio))
                return false;

        return true;
}

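/*
 * mlock_pte_range - pagewalk callback: depending on VM_LOCKED, mlock or
 * munlock each normal folio mapped by the PMD or by the PTEs in range.
 */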
static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
                           unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        spinlock_t *ptl;
        pte_t *start_pte, *pte;
        pte_t ptent;
        struct folio *folio;
        unsigned int step = 1;
        unsigned long start = addr;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                if (!pmd_present(*pmd))
                        goto out;
                if (is_huge_zero_pmd(*pmd))
                        goto out;
                folio = pmd_folio(*pmd);
                if (vma->vm_flags & VM_LOCKED)
                        mlock_folio(folio);
                else
                        munlock_folio(folio);
                goto out;
        }

        start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (!start_pte) {
                walk->action = ACTION_AGAIN;
                return 0;
        }

        for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = ptep_get(pte);
                if (!pte_present(ptent))
                        continue;
                folio = vm_normal_folio(vma, addr, ptent);
                if (!folio || folio_is_zone_device(folio))
                        continue;

                step = folio_mlock_step(folio, pte, addr, end);
                if (!allow_mlock_munlock(folio, vma, start, end, step))
                        goto next_entry;

                if (vma->vm_flags & VM_LOCKED)
                        mlock_folio(folio);
                else
                        munlock_folio(folio);

next_entry:
                pte += step - 1;
                addr += (step - 1) << PAGE_SHIFT;
        }
        pte_unmap(start_pte);
out:
        spin_unlock(ptl);
        cond_resched();
        return 0;
}

/*
 * mlock_vma_pages_range() - mlock any pages already in the range,
 * or munlock all pages in the range.
 * @vma - vma containing range to be mlock()ed or munlock()ed
 * @start - start address in @vma of the range
 * @end - end of range in @vma
 * @newflags - the new set of flags for @vma.
 *
 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
 */
static void mlock_vma_pages_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end, vm_flags_t newflags)
{
        static const struct mm_walk_ops mlock_walk_ops = {
                .pmd_entry = mlock_pte_range,
                .walk_lock = PGWALK_WRLOCK_VERIFY,
        };

        /*
         * There is a slight chance that concurrent page migration,
         * or page reclaim finding a page of this now-VM_LOCKED vma,
         * will call mlock_vma_folio() and raise page's mlock_count:
         * double counting, leaving the page unevictable indefinitely.
         * Communicate this danger to mlock_vma_folio() with VM_IO,
         * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
         * mmap_lock is held in write mode here, so this weird
         * combination should not be visible to other mmap_lock users;
         * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
         */
        if (newflags & VM_LOCKED)
                newflags |= VM_IO;
        vma_start_write(vma);
        vm_flags_reset_once(vma, newflags);

        lru_add_drain();
        walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
        lru_add_drain();

        if (newflags & VM_IO) {
                newflags &= ~VM_IO;
                vm_flags_reset_once(vma, newflags);
        }
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
               struct vm_area_struct **prev, unsigned long start,
               unsigned long end, vm_flags_t newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        int nr_pages;
        int ret = 0;
        vm_flags_t oldflags = vma->vm_flags;

        if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
            is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
            vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE))
                /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
                goto out;

        vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        /*
         * Keep track of amount of locked VM.
         */
        nr_pages = (end - start) >> PAGE_SHIFT;
        if (!(newflags & VM_LOCKED))
                nr_pages = -nr_pages;
        else if (oldflags & VM_LOCKED)
                nr_pages = 0;
        mm->locked_vm += nr_pages;

        /*
         * vm_flags is protected by the mmap_lock held in write mode.
         * It's okay if try_to_unmap_one unmaps a page just after we
         * set VM_LOCKED, populate_vma_page_range will bring it back.
         */
        if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
                /* No work to do, and mlocking twice would be wrong */
                vma_start_write(vma);
                vm_flags_reset(vma, newflags);
        } else {
                mlock_vma_pages_range(vma, start, end, newflags);
        }
out:
        *prev = vma;
        return ret;
}

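/*
 * apply_vma_lock_flags - apply the VM_LOCKED bits in @flags to every VMA
 * in [start, start + len), splitting or merging VMAs as needed.  Fails
 * with -ENOMEM if the range contains unmapped gaps.
 */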
static int apply_vma_lock_flags(unsigned long start, size_t len,
                                vm_flags_t flags)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct *vma, *prev;
        VMA_ITERATOR(vmi, current->mm, start);

        VM_BUG_ON(__offset_in_page_log(start));
        VM_BUG_ON(len != __PAGE_ALIGN(len));
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
        vma = vma_iter_load(&vmi);
        if (!vma)
                return -ENOMEM;

        prev = vma_prev(&vmi);
        if (start > vma->vm_start)
                prev = vma;

        nstart = start;
        tmp = vma->vm_start;
        for_each_vma_range(vmi, vma, end) {
                int error;
                vm_flags_t newflags;

                if (vma->vm_start != tmp)
                        return -ENOMEM;

                newflags = vma->vm_flags & ~VM_LOCKED_MASK;
                newflags |= flags;
                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
                if (error)
                        return error;
                tmp = vma_iter_end(&vmi);
                nstart = tmp;
        }

        if (tmp < end)
                return -ENOMEM;

        return 0;
}


/*
 * Walk the VMAs overlapping [start, start + len) and sum the sizes of
 * the already-mlocked ranges within it.  Note that the deferred-locking
 * case, mlock2(..., MLOCK_ONFAULT), is also counted.
 * Return value: the number of previously mlocked pages in the range.
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
                unsigned long start, size_t len)
{
        struct vm_area_struct *vma;
        unsigned long count = 0;
        unsigned long end;
        VMA_ITERATOR(vmi, mm, start);

        /* Don't overflow past ULONG_MAX */
        if (unlikely(ULONG_MAX - len < start))
                end = ULONG_MAX;
        else
                end = start + len;

        for_each_vma_range(vmi, vma, end) {
                if (vma->vm_flags & VM_LOCKED) {
                        if (start > vma->vm_start)
                                count -= (start - vma->vm_start);
                        if (end < vma->vm_end) {
                                count += end - vma->vm_start;
                                break;
                        }
                        count += vma->vm_end - vma->vm_start;
                }
        }

        return count >> PAGE_SHIFT;
}

/*
 * Convert a get_user_pages() return value to the corresponding POSIX
 * mlock() error code.
 */
static int __mlock_posix_error_return(long retval)
{
        if (retval == -EFAULT)
                retval = -ENOMEM;
        else if (retval == -ENOMEM)
                retval = -EAGAIN;
        return retval;
}

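/*
 * do_mlock - common implementation of mlock() and mlock2(): check the
 * RLIMIT_MEMLOCK budget (unless the caller has CAP_IPC_LOCK), apply the
 * new VMA flags, then fault the pages in via __mm_populate().
 */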
static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
        unsigned long locked;
        unsigned long lock_limit;
        int error = -ENOMEM;

        start = untagged_addr(start);

        if (!can_do_mlock())
                return -EPERM;

        len = __PAGE_ALIGN(len + (__offset_in_page(start)));
        start &= __PAGE_MASK;

        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
        locked = len >> PAGE_SHIFT;

        if (mmap_write_lock_killable(current->mm))
                return -EINTR;

        locked += current->mm->locked_vm;
        if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
                /*
                 * The requested region may intersect areas that are already
                 * mlocked and hence already counted in mm->locked_vm; that
                 * overlap must not be counted again as a new increment, so
                 * subtract it from the locked count here.
                 */
                locked -= count_mm_mlocked_page_nr(current->mm,
                                start, len);
        }

        /* check against resource limits */
        if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
                error = apply_vma_lock_flags(start, len, flags);

        mmap_write_unlock(current->mm);
        if (error)
                return error;

        error = __mm_populate(start, len, 0);
        if (error)
                return __mlock_posix_error_return(error);
        return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
        return do_mlock(start, len, VM_LOCKED);
}

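/*
 * mlock2() adds the MLOCK_ONFAULT flag: pages already resident are
 * mlocked immediately, while the rest of the range is only marked
 * VM_LOCKONFAULT, so nonresident pages become locked as page faults
 * populate them, rather than being faulted in here.
 */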
SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
        vm_flags_t vm_flags = VM_LOCKED;

        if (flags & ~MLOCK_ONFAULT)
                return -EINVAL;

        if (flags & MLOCK_ONFAULT)
                vm_flags |= VM_LOCKONFAULT;

        return do_mlock(start, len, vm_flags);
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
        int ret;

        start = untagged_addr(start);

        len = __PAGE_ALIGN(len + (__offset_in_page(start)));
        start &= __PAGE_MASK;

        if (mmap_write_lock_killable(current->mm))
                return -EINTR;
        ret = apply_vma_lock_flags(start, len, 0);
        mmap_write_unlock(current->mm);

        return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
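/*
 * For example: after mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT),
 * a second call of mlockall(MCL_FUTURE) clears VM_LOCKONFAULT from
 * mm->def_flags and, since MCL_CURRENT is absent, leaves the flags of
 * all existing VMAs untouched.
 */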
static int apply_mlockall_flags(int flags)
{
        VMA_ITERATOR(vmi, current->mm, 0);
        struct vm_area_struct *vma, *prev = NULL;
        vm_flags_t to_add = 0;

        current->mm->def_flags &= ~VM_LOCKED_MASK;
        if (flags & MCL_FUTURE) {
                current->mm->def_flags |= VM_LOCKED;

                if (flags & MCL_ONFAULT)
                        current->mm->def_flags |= VM_LOCKONFAULT;

                if (!(flags & MCL_CURRENT))
                        goto out;
        }

        if (flags & MCL_CURRENT) {
                to_add |= VM_LOCKED;
                if (flags & MCL_ONFAULT)
                        to_add |= VM_LOCKONFAULT;
        }

        for_each_vma(vmi, vma) {
                int error;
                vm_flags_t newflags;

                newflags = vma->vm_flags & ~VM_LOCKED_MASK;
                newflags |= to_add;

                error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
                                    newflags);
                /* Ignore errors, but prev needs fixing up. */
                if (error)
                        prev = vma;
                cond_resched();
        }
out:
        return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
        unsigned long lock_limit;
        int ret;

        if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
            flags == MCL_ONFAULT)
                return -EINVAL;

        if (!can_do_mlock())
                return -EPERM;

        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;

        if (mmap_write_lock_killable(current->mm))
                return -EINTR;

        ret = -ENOMEM;
        if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
            capable(CAP_IPC_LOCK))
                ret = apply_mlockall_flags(flags);
        mmap_write_unlock(current->mm);
        if (!ret && (flags & MCL_CURRENT))
                mm_populate(0, TASK_SIZE);

        return ret;
}

SYSCALL_DEFINE0(munlockall)
{
        int ret;

        if (mmap_write_lock_killable(current->mm))
                return -EINTR;
        ret = apply_mlockall_flags(0);
        mmap_write_unlock(current->mm);
        return ret;
}

/*
 * Objects whose lifetime differs from that of a process (SHM_LOCK and
 * SHM_HUGETLB shm segments) get accounted against the user (via ucounts)
 * instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct ucounts *ucounts)
{
        unsigned long lock_limit, locked;
        long memlock;
        int allowed = 0;

        locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        if (lock_limit != RLIM_INFINITY)
                lock_limit >>= PAGE_SHIFT;
        spin_lock(&shmlock_user_lock);
        memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

        if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
                dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
                goto out;
        }
        if (!get_ucounts(ucounts)) {
                dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
                allowed = 0;
                goto out;
        }
        allowed = 1;
out:
        spin_unlock(&shmlock_user_lock);
        return allowed;
}

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
        spin_lock(&shmlock_user_lock);
        dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        spin_unlock(&shmlock_user_lock);
        put_ucounts(ucounts);
}