1 /*
2  *	linux/mm/mlock.c
3  *
4  *  (C) Copyright 1995 Linus Torvalds
5  *  (C) Copyright 2002 Christoph Hellwig
6  */
7 
8 #include <linux/capability.h>
9 #include <linux/mman.h>
10 #include <linux/mm.h>
11 #include <linux/swap.h>
12 #include <linux/swapops.h>
13 #include <linux/pagemap.h>
14 #include <linux/pagevec.h>
15 #include <linux/mempolicy.h>
16 #include <linux/syscalls.h>
17 #include <linux/sched.h>
18 #include <linux/export.h>
19 #include <linux/rmap.h>
20 #include <linux/mmzone.h>
21 #include <linux/hugetlb.h>
22 #include <linux/memcontrol.h>
23 #include <linux/mm_inline.h>
24 
25 #include "internal.h"
26 
27 int can_do_mlock(void)
28 {
29 	if (rlimit(RLIMIT_MEMLOCK) != 0)
30 		return 1;
31 	if (capable(CAP_IPC_LOCK))
32 		return 1;
33 	return 0;
34 }
35 EXPORT_SYMBOL(can_do_mlock);
36 
37 /*
38  * Mlocked pages are marked with PageMlocked() flag for efficient testing
39  * in vmscan and, possibly, the fault path; and to support semi-accurate
40  * statistics.
41  *
42  * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
43  * be placed on the LRU "unevictable" list, rather than the [in]active lists.
44  * The unevictable list is an LRU sibling list to the [in]active lists.
45  * PageUnevictable is set to indicate the unevictable state.
46  *
47  * When lazy mlocking via vmscan, it is important to ensure that the
48  * vma's VM_LOCKED status is not concurrently being modified, otherwise we
49  * may have mlocked a page that is being munlocked. So lazy mlock must take
50  * the mmap_sem for read, and verify that the vma really is locked
51  * (see mm/rmap.c).
52  */
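
/*
 * A minimal, simplified sketch of the lazy-mlock check described above, as
 * done by the rmap walkers (see mm/rmap.c for the real code; the trylock
 * and the VM_LOCKED re-check under mmap_sem are the essential parts):
 *
 *	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 *		if (vma->vm_flags & VM_LOCKED)
 *			mlock_vma_page(page);
 *		up_read(&vma->vm_mm->mmap_sem);
 *	}
 */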
53 
54 /*
55  *  LRU accounting for clear_page_mlock()
56  */
57 void clear_page_mlock(struct page *page)
58 {
59 	if (!TestClearPageMlocked(page))
60 		return;
61 
62 	mod_zone_page_state(page_zone(page), NR_MLOCK,
63 			    -hpage_nr_pages(page));
64 	count_vm_event(UNEVICTABLE_PGCLEARED);
65 	if (!isolate_lru_page(page)) {
66 		putback_lru_page(page);
67 	} else {
68 		/*
69 		 * We lost the race. The page already moved to the evictable list.
70 		 */
71 		if (PageUnevictable(page))
72 			count_vm_event(UNEVICTABLE_PGSTRANDED);
73 	}
74 }
75 
76 /*
77  * Mark page as mlocked if not already.
78  * If page on LRU, isolate and putback to move to unevictable list.
79  */
80 void mlock_vma_page(struct page *page)
81 {
82 	/* Serialize with page migration */
83 	BUG_ON(!PageLocked(page));
84 
85 	if (!TestSetPageMlocked(page)) {
86 		mod_zone_page_state(page_zone(page), NR_MLOCK,
87 				    hpage_nr_pages(page));
88 		count_vm_event(UNEVICTABLE_PGMLOCKED);
89 		if (!isolate_lru_page(page))
90 			putback_lru_page(page);
91 	}
92 }
93 
94 /*
95  * Isolate a page from LRU with optional get_page() pin.
96  * Assumes lru_lock already held and page already pinned.
97  */
98 static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
99 {
100 	if (PageLRU(page)) {
101 		struct lruvec *lruvec;
102 
103 		lruvec = mem_cgroup_page_lruvec(page, page_zone(page));
104 		if (getpage)
105 			get_page(page);
106 		ClearPageLRU(page);
107 		del_page_from_lru_list(page, lruvec, page_lru(page));
108 		return true;
109 	}
110 
111 	return false;
112 }
113 
114 /*
115  * Finish munlock after successful page isolation
116  *
117  * Page must be locked. This is a wrapper for try_to_munlock()
118  * and putback_lru_page() with munlock accounting.
119  */
120 static void __munlock_isolated_page(struct page *page)
121 {
122 	int ret = SWAP_AGAIN;
123 
124 	/*
125 	 * Optimization: if the page was mapped just once, that's our mapping
126 	 * and we don't need to check all the other vmas.
127 	 */
128 	if (page_mapcount(page) > 1)
129 		ret = try_to_munlock(page);
130 
131 	/* Did try_to_munlock() succeed or punt? */
132 	if (ret != SWAP_MLOCK)
133 		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
134 
135 	putback_lru_page(page);
136 }
137 
138 /*
139  * Accounting for page isolation fail during munlock
140  *
141  * Performs accounting when page isolation fails in munlock. There is nothing
142  * else to do because it means some other task has already removed the page
143  * from the LRU. putback_lru_page() will take care of removing the page from
144  * the unevictable list, if necessary. vmscan [page_referenced()] will move
145  * the page back to the unevictable list if some other vma has it mlocked.
146  */
147 static void __munlock_isolation_failed(struct page *page)
148 {
149 	if (PageUnevictable(page))
150 		__count_vm_event(UNEVICTABLE_PGSTRANDED);
151 	else
152 		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
153 }
154 
155 /**
156  * munlock_vma_page - munlock a vma page
157  * @page - page to be unlocked, either a normal page or THP page head
158  *
159  * returns the size of the page as a page mask (0 for normal page,
160  *         HPAGE_PMD_NR - 1 for THP head page)
161  *
162  * called from munlock()/munmap() path with page supposedly on the LRU.
163  * When we munlock a page, because the vma where we found the page is being
164  * munlock()ed or munmap()ed, we want to check whether other vmas hold the
165  * page locked so that we can leave it on the unevictable lru list and not
166  * bother vmscan with it.  However, to walk the page's rmap list in
167  * try_to_munlock() we must isolate the page from the LRU.  If some other
168  * task has removed the page from the LRU, we won't be able to do that.
169  * So we clear the PageMlocked as we might not get another chance.  If we
170  * can't isolate the page, we leave it for putback_lru_page() and vmscan
171  * [page_referenced()/try_to_unmap()] to deal with.
172  */
173 unsigned int munlock_vma_page(struct page *page)
174 {
175 	unsigned int nr_pages;
176 	struct zone *zone = page_zone(page);
177 
178 	/* For try_to_munlock() and to serialize with page migration */
179 	BUG_ON(!PageLocked(page));
180 
181 	/*
182 	 * Serialize with any parallel __split_huge_page_refcount() which
183 	 * might otherwise copy PageMlocked to part of the tail pages before
184 	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
185 	 */
186 	spin_lock_irq(&zone->lru_lock);
187 
188 	nr_pages = hpage_nr_pages(page);
189 	if (!TestClearPageMlocked(page))
190 		goto unlock_out;
191 
192 	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
193 
194 	if (__munlock_isolate_lru_page(page, true)) {
195 		spin_unlock_irq(&zone->lru_lock);
196 		__munlock_isolated_page(page);
197 		goto out;
198 	}
199 	__munlock_isolation_failed(page);
200 
201 unlock_out:
202 	spin_unlock_irq(&zone->lru_lock);
203 
204 out:
205 	return nr_pages - 1;
206 }
207 
208 /**
209  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
210  * @vma:   target vma
211  * @start: start address
212  * @end:   end address
213  * @nonblocking:
214  *
215  * This takes care of making the pages present too.
216  *
217  * return 0 on success, negative error code on error.
218  *
219  * vma->vm_mm->mmap_sem must be held.
220  *
221  * If @nonblocking is NULL, it may be held for read or write and will
222  * be unperturbed.
223  *
224  * If @nonblocking is non-NULL, it must be held for read only and may be
225  * released.  If it's released, *@nonblocking will be set to 0.
226  */
227 long __mlock_vma_pages_range(struct vm_area_struct *vma,
228 		unsigned long start, unsigned long end, int *nonblocking)
229 {
230 	struct mm_struct *mm = vma->vm_mm;
231 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
232 	int gup_flags;
233 
234 	VM_BUG_ON(start & ~PAGE_MASK);
235 	VM_BUG_ON(end   & ~PAGE_MASK);
236 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
237 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
238 	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
239 
240 	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
241 	/*
242 	 * We want to touch writable mappings with a write fault in order
243 	 * to break COW, except for shared mappings because these don't COW
244 	 * and we would not want to dirty them for nothing.
245 	 */
246 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
247 		gup_flags |= FOLL_WRITE;
248 
249 	/*
250 	 * We want mlock to succeed for regions that have any permissions
251 	 * other than PROT_NONE.
252 	 */
253 	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
254 		gup_flags |= FOLL_FORCE;
255 
256 	/*
257 	 * We made sure addr is within a VMA, so the following will
258 	 * not result in a stack expansion that recurses back here.
259 	 */
260 	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
261 				NULL, NULL, nonblocking);
262 }
263 
264 /*
265  * convert get_user_pages() return value to posix mlock() error
266  */
267 static int __mlock_posix_error_return(long retval)
268 {
269 	if (retval == -EFAULT)
270 		retval = -ENOMEM;
271 	else if (retval == -ENOMEM)
272 		retval = -EAGAIN;
273 	return retval;
274 }
275 
276 /*
277  * Prepare page for fast batched LRU putback via __putback_lru_fast()
278  *
279  * The fast path is available only for evictable pages with a single mapping.
280  * Then we can bypass the per-cpu pvec and get better performance.
281  * When mapcount > 1 we need try_to_munlock(), which can fail.
282  * When !page_evictable(), we need the full redo logic of putback_lru_page()
283  * to avoid leaving an evictable page on the unevictable list.
284  *
285  * In case of success, @page is added to @pvec and @pgrescued is incremented
286  * if the page was previously unevictable. @page is also unlocked.
287  */
288 static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
289 		int *pgrescued)
290 {
291 	VM_BUG_ON_PAGE(PageLRU(page), page);
292 	VM_BUG_ON_PAGE(!PageLocked(page), page);
293 
294 	if (page_mapcount(page) <= 1 && page_evictable(page)) {
295 		pagevec_add(pvec, page);
296 		if (TestClearPageUnevictable(page))
297 			(*pgrescued)++;
298 		unlock_page(page);
299 		return true;
300 	}
301 
302 	return false;
303 }
304 
305 /*
306  * Putback multiple evictable pages to the LRU
307  *
308  * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
309  * the pages might have meanwhile become unevictable but that is OK.
310  */
311 static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
312 {
313 	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
314 	/*
315 	 * __pagevec_lru_add() calls release_pages() so we don't call
316 	 * put_page() explicitly
317 	 */
318 	__pagevec_lru_add(pvec);
319 	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
320 }
321 
322 /*
323  * Munlock a batch of pages from the same zone
324  *
325  * The work is split to two main phases. First phase clears the Mlocked flag
326  * and attempts to isolate the pages, all under a single zone lru lock.
327  * The second phase finishes the munlock only for pages where isolation
328  * succeeded.
329  *
330  * Note that the pagevec may be modified during the process.
331  */
332 static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
333 {
334 	int i;
335 	int nr = pagevec_count(pvec);
336 	int delta_munlocked = -nr;
337 	struct pagevec pvec_putback;
338 	int pgrescued = 0;
339 
340 	pagevec_init(&pvec_putback, 0);
341 
342 	/* Phase 1: page isolation */
343 	spin_lock_irq(&zone->lru_lock);
344 	for (i = 0; i < nr; i++) {
345 		struct page *page = pvec->pages[i];
346 
347 		if (TestClearPageMlocked(page)) {
348 			/*
349 			 * We already have pin from follow_page_mask()
350 			 * so we can spare the get_page() here.
351 			 */
352 			if (__munlock_isolate_lru_page(page, false))
353 				continue;
354 			else
355 				__munlock_isolation_failed(page);
356 		} else {
357 			delta_munlocked++;
358 		}
359 
360 		/*
361 		 * We won't be munlocking this page in the next phase
362 		 * but we still need to release the follow_page_mask()
363 		 * pin. We cannot do it under lru_lock however. If it's
364 		 * the last pin, __page_cache_release() would deadlock.
365 		 */
366 		pagevec_add(&pvec_putback, pvec->pages[i]);
367 		pvec->pages[i] = NULL;
368 	}
369 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
370 	spin_unlock_irq(&zone->lru_lock);
371 
372 	/* Now we can release pins of pages that we are not munlocking */
373 	pagevec_release(&pvec_putback);
374 
375 	/* Phase 2: page munlock */
376 	for (i = 0; i < nr; i++) {
377 		struct page *page = pvec->pages[i];
378 
379 		if (page) {
380 			lock_page(page);
381 			if (!__putback_lru_fast_prepare(page, &pvec_putback,
382 					&pgrescued)) {
383 				/*
384 				 * Slow path. We don't want to lose the last
385 				 * pin before unlock_page()
386 				 */
387 				get_page(page); /* for putback_lru_page() */
388 				__munlock_isolated_page(page);
389 				unlock_page(page);
390 				put_page(page); /* from follow_page_mask() */
391 			}
392 		}
393 	}
394 
395 	/*
396 	 * Phase 3: page putback for pages that qualified for the fast path
397 	 * This will also call put_page() to return pin from follow_page_mask()
398 	 */
399 	if (pagevec_count(&pvec_putback))
400 		__putback_lru_fast(&pvec_putback, pgrescued);
401 }
402 
403 /*
404  * Fill up pagevec for __munlock_pagevec using pte walk
405  *
406  * The function expects that the struct page corresponding to @start address is
407  * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
408  *
409  * The rest of @pvec is filled by subsequent pages within the same pmd and same
410  * zone, as long as the pte's are present and vm_normal_page() succeeds. These
411  * pages also get pinned.
412  *
413  * Returns the address of the next page that should be scanned. This equals
414  * @start + PAGE_SIZE when no page could be added by the pte walk.
415  */
416 static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
417 		struct vm_area_struct *vma, int zoneid,	unsigned long start,
418 		unsigned long end)
419 {
420 	pte_t *pte;
421 	spinlock_t *ptl;
422 
423 	/*
424 	 * Initialize pte walk starting at the already pinned page where we
425 	 * are sure that there is a pte, as it was pinned under the same
426 	 * mmap_sem write op.
427 	 */
428 	pte = get_locked_pte(vma->vm_mm, start,	&ptl);
429 	/* Make sure we do not cross the page table boundary */
430 	end = pgd_addr_end(start, end);
431 	end = pud_addr_end(start, end);
432 	end = pmd_addr_end(start, end);
433 
434 	/* The page next to the pinned page is the first we will try to get */
435 	start += PAGE_SIZE;
436 	while (start < end) {
437 		struct page *page = NULL;
438 		pte++;
439 		if (pte_present(*pte))
440 			page = vm_normal_page(vma, start, *pte);
441 		/*
442 		 * Break if page could not be obtained or the page's node+zone does not
443 		 * match
444 		 */
445 		if (!page || page_zone_id(page) != zoneid)
446 			break;
447 
448 		get_page(page);
449 		/*
450 		 * Increase the address that will be returned *before* the
451 		 * eventual break due to pvec becoming full by adding the page
452 		 */
453 		start += PAGE_SIZE;
454 		if (pagevec_add(pvec, page) == 0)
455 			break;
456 	}
457 	pte_unmap_unlock(pte, ptl);
458 	return start;
459 }
460 
461 /*
462  * munlock_vma_pages_range() - munlock all pages in the vma range.
463  * @vma - vma containing range to be munlock()ed.
464  * @start - start address in @vma of the range
465  * @end - end of range in @vma.
466  *
467  *  For mremap(), munmap() and exit().
468  *
469  * Called with @vma VM_LOCKED.
470  *
471  * Returns with VM_LOCKED cleared.  Callers must be prepared to
472  * deal with this.
473  *
474  * We don't save and restore VM_LOCKED here because pages are
475  * still on lru.  In unmap path, pages might be scanned by reclaim
476  * and re-mlocked by try_to_{munlock|unmap} before we unmap and
477  * free them.  This will result in freeing mlocked pages.
478  */
479 void munlock_vma_pages_range(struct vm_area_struct *vma,
480 			     unsigned long start, unsigned long end)
481 {
482 	vma->vm_flags &= ~VM_LOCKED;
483 
484 	while (start < end) {
485 		struct page *page = NULL;
486 		unsigned int page_mask;
487 		unsigned long page_increm;
488 		struct pagevec pvec;
489 		struct zone *zone;
490 		int zoneid;
491 
492 		pagevec_init(&pvec, 0);
493 		/*
494 		 * Although FOLL_DUMP is intended for get_dump_page(),
495 		 * it just so happens that its special treatment of the
496 		 * ZERO_PAGE (returning an error instead of doing get_page)
497 		 * suits munlock very well (and if somehow an abnormal page
498 		 * has sneaked into the range, we won't oops here: great).
499 		 */
500 		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
501 				&page_mask);
502 
503 		if (page && !IS_ERR(page)) {
504 			if (PageTransHuge(page)) {
505 				lock_page(page);
506 				/*
507 				 * Any THP page found by follow_page_mask() may
508 				 * have gotten split before reaching
509 				 * munlock_vma_page(), so we need to recompute
510 				 * the page_mask here.
511 				 */
512 				page_mask = munlock_vma_page(page);
513 				unlock_page(page);
514 				put_page(page); /* follow_page_mask() */
515 			} else {
516 				/*
517 				 * Non-huge pages are handled in batches via
518 				 * pagevec. The pin from follow_page_mask()
519 				 * prevents them from being collapsed into a THP.
520 				 */
521 				pagevec_add(&pvec, page);
522 				zone = page_zone(page);
523 				zoneid = page_zone_id(page);
524 
525 				/*
526 				 * Try to fill the rest of pagevec using fast
527 				 * pte walk. This will also update start to
528 				 * the next page to process. Then munlock the
529 				 * pagevec.
530 				 */
531 				start = __munlock_pagevec_fill(&pvec, vma,
532 						zoneid, start, end);
533 				__munlock_pagevec(&pvec, zone);
534 				goto next;
535 			}
536 		}
537 		/* It's a bug to munlock in the middle of a THP page */
538 		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
539 		page_increm = 1 + page_mask;
540 		start += page_increm * PAGE_SIZE;
541 next:
542 		cond_resched();
543 	}
544 }
545 
546 /*
547  * mlock_fixup  - handle mlock[all]/munlock[all] requests.
548  *
549  * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
550  * munlock is a no-op.  However, for some special vmas, we go ahead and
551  * populate the ptes.
552  *
553  * For vmas that pass the filters, merge/split as appropriate.
554  */
555 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
556 	unsigned long start, unsigned long end, vm_flags_t newflags)
557 {
558 	struct mm_struct *mm = vma->vm_mm;
559 	pgoff_t pgoff;
560 	int nr_pages;
561 	int ret = 0;
562 	int lock = !!(newflags & VM_LOCKED);
563 
564 	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
565 	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
566 		goto out;	/* don't set VM_LOCKED,  don't count */
567 
568 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
569 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
570 			  vma->vm_file, pgoff, vma_policy(vma),
571 			  vma_get_anon_name(vma));
572 	if (*prev) {
573 		vma = *prev;
574 		goto success;
575 	}
576 
577 	if (start != vma->vm_start) {
578 		ret = split_vma(mm, vma, start, 1);
579 		if (ret)
580 			goto out;
581 	}
582 
583 	if (end != vma->vm_end) {
584 		ret = split_vma(mm, vma, end, 0);
585 		if (ret)
586 			goto out;
587 	}
588 
589 success:
590 	/*
591 	 * Keep track of amount of locked VM.
592 	 */
593 	nr_pages = (end - start) >> PAGE_SHIFT;
594 	if (!lock)
595 		nr_pages = -nr_pages;
596 	mm->locked_vm += nr_pages;
597 
598 	/*
599 	 * vm_flags is protected by the mmap_sem held in write mode.
600 	 * It's okay if try_to_unmap_one unmaps a page just after we
601 	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
602 	 */
603 
604 	if (lock)
605 		vma->vm_flags = newflags;
606 	else
607 		munlock_vma_pages_range(vma, start, end);
608 
609 out:
610 	*prev = vma;
611 	return ret;
612 }
613 
614 static int do_mlock(unsigned long start, size_t len, int on)
615 {
616 	unsigned long nstart, end, tmp;
617 	struct vm_area_struct * vma, * prev;
618 	int error;
619 
620 	VM_BUG_ON(start & ~PAGE_MASK);
621 	VM_BUG_ON(len != PAGE_ALIGN(len));
622 	end = start + len;
623 	if (end < start)
624 		return -EINVAL;
625 	if (end == start)
626 		return 0;
627 	vma = find_vma(current->mm, start);
628 	if (!vma || vma->vm_start > start)
629 		return -ENOMEM;
630 
631 	prev = vma->vm_prev;
632 	if (start > vma->vm_start)
633 		prev = vma;
634 
635 	for (nstart = start ; ; ) {
636 		vm_flags_t newflags;
637 
638 		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
639 
640 		newflags = vma->vm_flags & ~VM_LOCKED;
641 		if (on)
642 			newflags |= VM_LOCKED;
643 
644 		tmp = vma->vm_end;
645 		if (tmp > end)
646 			tmp = end;
647 		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
648 		if (error)
649 			break;
650 		nstart = tmp;
651 		if (nstart < prev->vm_end)
652 			nstart = prev->vm_end;
653 		if (nstart >= end)
654 			break;
655 
656 		vma = prev->vm_next;
657 		if (!vma || vma->vm_start != nstart) {
658 			error = -ENOMEM;
659 			break;
660 		}
661 	}
662 	return error;
663 }
664 
665 /*
666  * __mm_populate - populate and/or mlock pages within a range of address space.
667  *
668  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
669  * flags. VMAs must be already marked with the desired vm_flags, and
670  * mmap_sem must not be held.
671  */
672 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
673 {
674 	struct mm_struct *mm = current->mm;
675 	unsigned long end, nstart, nend;
676 	struct vm_area_struct *vma = NULL;
677 	int locked = 0;
678 	long ret = 0;
679 
680 	VM_BUG_ON(start & ~PAGE_MASK);
681 	VM_BUG_ON(len != PAGE_ALIGN(len));
682 	end = start + len;
683 
684 	for (nstart = start; nstart < end; nstart = nend) {
685 		/*
686 		 * We want to fault in pages for [nstart; end) address range.
687 		 * Find first corresponding VMA.
688 		 */
689 		if (!locked) {
690 			locked = 1;
691 			down_read(&mm->mmap_sem);
692 			vma = find_vma(mm, nstart);
693 		} else if (nstart >= vma->vm_end)
694 			vma = vma->vm_next;
695 		if (!vma || vma->vm_start >= end)
696 			break;
697 		/*
698 		 * Set [nstart; nend) to intersection of desired address
699 		 * range with the first VMA. Also, skip undesirable VMA types.
700 		 */
701 		nend = min(end, vma->vm_end);
702 		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
703 			continue;
704 		if (nstart < vma->vm_start)
705 			nstart = vma->vm_start;
706 		/*
707 		 * Now fault in a range of pages. __mlock_vma_pages_range()
708 		 * double checks the vma flags, so that it won't mlock pages
709 		 * if the vma was already munlocked.
710 		 */
711 		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
712 		if (ret < 0) {
713 			if (ignore_errors) {
714 				ret = 0;
715 				continue;	/* continue at next VMA */
716 			}
717 			ret = __mlock_posix_error_return(ret);
718 			break;
719 		}
720 		nend = nstart + ret * PAGE_SIZE;
721 		ret = 0;
722 	}
723 	if (locked)
724 		up_read(&mm->mmap_sem);
725 	return ret;	/* 0 or negative error code */
726 }
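
/*
 * Illustrative userspace sketch (not kernel code): the MAP_POPULATE and
 * MAP_LOCKED mmap flags mentioned above reach __mm_populate() via
 * mm_populate() once the mapping has been set up, e.g.:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
 */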
727 
728 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
729 {
730 	unsigned long locked;
731 	unsigned long lock_limit;
732 	int error = -ENOMEM;
733 
734 	if (!can_do_mlock())
735 		return -EPERM;
736 
737 	lru_add_drain_all();	/* flush pagevec */
738 
739 	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
740 	start &= PAGE_MASK;
741 
742 	lock_limit = rlimit(RLIMIT_MEMLOCK);
743 	lock_limit >>= PAGE_SHIFT;
744 	locked = len >> PAGE_SHIFT;
745 
746 	down_write(&current->mm->mmap_sem);
747 
748 	locked += current->mm->locked_vm;
749 
750 	/* check against resource limits */
751 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
752 		error = do_mlock(start, len, 1);
753 
754 	up_write(&current->mm->mmap_sem);
755 	if (!error)
756 		error = __mm_populate(start, len, 0);
757 	return error;
758 }
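
/*
 * Illustrative userspace sketch (not kernel code; buf/length are just
 * placeholders) of the error semantics implemented above:
 *
 *	if (mlock(buf, length) != 0) {
 *		// EPERM:  no CAP_IPC_LOCK and RLIMIT_MEMLOCK is 0
 *		// ENOMEM: locking would exceed RLIMIT_MEMLOCK
 *		// EAGAIN: some or all of the range could not be locked
 *		perror("mlock");
 *	}
 */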
759 
760 SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
761 {
762 	int ret;
763 
764 	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
765 	start &= PAGE_MASK;
766 
767 	down_write(&current->mm->mmap_sem);
768 	ret = do_mlock(start, len, 0);
769 	up_write(&current->mm->mmap_sem);
770 
771 	return ret;
772 }
773 
774 static int do_mlockall(int flags)
775 {
776 	struct vm_area_struct * vma, * prev = NULL;
777 
778 	if (flags & MCL_FUTURE)
779 		current->mm->def_flags |= VM_LOCKED;
780 	else
781 		current->mm->def_flags &= ~VM_LOCKED;
782 	if (flags == MCL_FUTURE)
783 		goto out;
784 
785 	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
786 		vm_flags_t newflags;
787 
788 		newflags = vma->vm_flags & ~VM_LOCKED;
789 		if (flags & MCL_CURRENT)
790 			newflags |= VM_LOCKED;
791 
792 		/* Ignore errors */
793 		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
794 		cond_resched_rcu_qs();
795 	}
796 out:
797 	return 0;
798 }
799 
800 SYSCALL_DEFINE1(mlockall, int, flags)
801 {
802 	unsigned long lock_limit;
803 	int ret = -EINVAL;
804 
805 	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
806 		goto out;
807 
808 	ret = -EPERM;
809 	if (!can_do_mlock())
810 		goto out;
811 
812 	if (flags & MCL_CURRENT)
813 		lru_add_drain_all();	/* flush pagevec */
814 
815 	lock_limit = rlimit(RLIMIT_MEMLOCK);
816 	lock_limit >>= PAGE_SHIFT;
817 
818 	ret = -ENOMEM;
819 	down_write(&current->mm->mmap_sem);
820 
821 	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
822 	    capable(CAP_IPC_LOCK))
823 		ret = do_mlockall(flags);
824 	up_write(&current->mm->mmap_sem);
825 	if (!ret && (flags & MCL_CURRENT))
826 		mm_populate(0, TASK_SIZE);
827 out:
828 	return ret;
829 }
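
/*
 * Illustrative userspace sketch (not kernel code):
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *		perror("mlockall");
 *
 * MCL_CURRENT populates and locks all existing mappings right away (via
 * mm_populate() above), while MCL_FUTURE sets VM_LOCKED in mm->def_flags
 * so that future mappings are created locked.
 */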
830 
831 SYSCALL_DEFINE0(munlockall)
832 {
833 	int ret;
834 
835 	down_write(&current->mm->mmap_sem);
836 	ret = do_mlockall(0);
837 	up_write(&current->mm->mmap_sem);
838 	return ret;
839 }
840 
841 /*
842  * Objects with a different lifetime than processes (SHM_LOCK and SHM_HUGETLB
843  * shm segments) get accounted against the user_struct instead.
844  */
845 static DEFINE_SPINLOCK(shmlock_user_lock);
846 
847 int user_shm_lock(size_t size, struct user_struct *user)
848 {
849 	unsigned long lock_limit, locked;
850 	int allowed = 0;
851 
852 	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
853 	lock_limit = rlimit(RLIMIT_MEMLOCK);
854 	if (lock_limit == RLIM_INFINITY)
855 		allowed = 1;
856 	lock_limit >>= PAGE_SHIFT;
857 	spin_lock(&shmlock_user_lock);
858 	if (!allowed &&
859 	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
860 		goto out;
861 	get_uid(user);
862 	user->locked_shm += locked;
863 	allowed = 1;
864 out:
865 	spin_unlock(&shmlock_user_lock);
866 	return allowed;
867 }
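
/*
 * Illustrative userspace sketch (not kernel code): locking a SysV shared
 * memory segment is what gets charged here, against a user_struct rather
 * than a process:
 *
 *	int id = shmget(IPC_PRIVATE, length, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	// accounted via user_shm_lock()
 */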
868 
869 void user_shm_unlock(size_t size, struct user_struct *user)
870 {
871 	spin_lock(&shmlock_user_lock);
872 	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
873 	spin_unlock(&shmlock_user_lock);
874 	free_uid(user);
875 }
876