Lines Matching +full:phase +full:- +full:locked

1 // SPDX-License-Identifier: GPL-2.0
41 * in vmscan and, possibly, the fault path; and to support semi-accurate
52 * the mmap_lock for read, and verify that the vma really is locked
67 mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); in clear_page_mlock()
131 * Page must be locked. This is a wrapper for try_to_munlock()
170 * munlock_vma_page - munlock a vma page
174 * HPAGE_PMD_NR - 1 for THP head page)
179 * page locked so that we can leave it on the unevictable lru list and not
202 spin_lock_irq(&pgdat->lru_lock); in munlock_vma_page()
205 /* Potentially, PTE-mapped THP: do not skip the remaining PTEs */ in munlock_vma_page()
211 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); in munlock_vma_page()
214 spin_unlock_irq(&pgdat->lru_lock); in munlock_vma_page()
221 spin_unlock_irq(&pgdat->lru_lock); in munlock_vma_page()
224 return nr_pages - 1; in munlock_vma_page()
232 if (retval == -EFAULT) in __mlock_posix_error_return()
233 retval = -ENOMEM; in __mlock_posix_error_return()
234 else if (retval == -ENOMEM) in __mlock_posix_error_return()
235 retval = -EAGAIN; in __mlock_posix_error_return()
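
A standalone restatement of the translation above, with the POSIX rationale spelled out. This is an illustrative sketch, not kernel code; the helper name merely mirrors the kernel's.

#include <errno.h>

/*
 * mlock(2)/munlock(2) report ENOMEM when part of the range is not
 * mapped and EAGAIN when the memory could not be locked, so the
 * internal -EFAULT/-ENOMEM results are remapped before returning
 * to userspace.
 */
static int mlock_posix_error_return(int retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;	/* range not fully mapped */
	else if (retval == -ENOMEM)
		retval = -EAGAIN;	/* pages could not be locked now */
	return retval;
}
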
243 * Then we can bypass the per-cpu pvec and get better performance.
271 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
288 * The work is split into two main phases. The first phase clears the Mlocked flag
290 * The second phase finishes the munlock only for pages where isolation
299 int delta_munlocked = -nr; in __munlock_pagevec()
305 /* Phase 1: page isolation */ in __munlock_pagevec()
306 spin_lock_irq(&zone->zone_pgdat->lru_lock); in __munlock_pagevec()
308 struct page *page = pvec->pages[i]; in __munlock_pagevec()
324 * We won't be munlocking this page in the next phase in __munlock_pagevec()
329 pagevec_add(&pvec_putback, pvec->pages[i]); in __munlock_pagevec()
330 pvec->pages[i] = NULL; in __munlock_pagevec()
333 spin_unlock_irq(&zone->zone_pgdat->lru_lock); in __munlock_pagevec()
338 /* Phase 2: page munlock */ in __munlock_pagevec()
340 struct page *page = pvec->pages[i]; in __munlock_pagevec()
359 * Phase 3: page putback for pages that qualified for the fast path in __munlock_pagevec()
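
Only the lines matching the search are shown, so the overall shape of __munlock_pagevec() may be easier to see as a reduced sketch of the same batching pattern: do the cheap flag-clearing and isolation for the whole batch under one lock, drop the lock for the expensive per-page work, then put back whatever was skipped. Everything below (struct item, try_isolate(), and so on) is illustrative, not kernel API.

#include <pthread.h>
#include <stdbool.h>

struct item {
	bool isolated;			/* decided in phase 1, under the lock */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static bool try_isolate(struct item *it) { (void)it; return true; }	/* detach from shared list */
static void finish_one(struct item *it)  { (void)it; }			/* expensive, lock not held */
static void put_back(struct item *it)    { (void)it; }			/* hand item back to its list */

static void process_batch(struct item *batch[], int n)
{
	int i;

	/* Phase 1: clear flags and isolate, all under a single lock */
	pthread_mutex_lock(&list_lock);
	for (i = 0; i < n; i++)
		batch[i]->isolated = try_isolate(batch[i]);
	pthread_mutex_unlock(&list_lock);

	/* Phase 2: finish only the items that were isolated */
	for (i = 0; i < n; i++)
		if (batch[i]->isolated)
			finish_one(batch[i]);

	/* Phase 3: put back the items skipped in phase 2 */
	for (i = 0; i < n; i++)
		if (!batch[i]->isolated)
			put_back(batch[i]);
}
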
370 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
391 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
413 * Do not use pagevec for PTE-mapped THP, in __munlock_pagevec_fill()
433 * munlock_vma_pages_range() - munlock all pages in the vma range.
434 * @vma - vma containing range to be munlock()ed.
435 * @start - start address in @vma of the range
436 * @end - end of range in @vma.
447 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
453 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in munlock_vma_pages_range()
489 * Non-huge pages are handled in batches via in munlock_vma_pages_range()
516 * mlock_fixup - handle mlock[all]/munlock[all] requests.
518 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
519 * munlock is a no-op. However, for some special vmas, we go ahead and
527 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
532 vm_flags_t old_flags = vma->vm_flags; in mlock_fixup()
534 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || in mlock_fixup()
535 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || in mlock_fixup()
540 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mlock_fixup()
541 *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, in mlock_fixup()
542 vma->vm_file, pgoff, vma_policy(vma), in mlock_fixup()
543 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); in mlock_fixup()
549 if (start != vma->vm_start) { in mlock_fixup()
555 if (end != vma->vm_end) { in mlock_fixup()
563 * Keep track of amount of locked VM. in mlock_fixup()
565 nr_pages = (end - start) >> PAGE_SHIFT; in mlock_fixup()
567 nr_pages = -nr_pages; in mlock_fixup()
570 mm->locked_vm += nr_pages; in mlock_fixup()
579 vma->vm_flags = newflags; in mlock_fixup()
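
The locked_vm bookkeeping visible in mlock_fixup() above, pulled together as a standalone sketch: the affected range is converted to pages, and the count is added when VM_LOCKED is being set or subtracted when it is being cleared. PAGE_SHIFT and the locking flag below are illustrative assumptions, not the kernel's exact interface.

#include <stdbool.h>

#define PAGE_SHIFT	12	/* assumption: 4 KiB pages */

static long locked_vm_delta(unsigned long start, unsigned long end, bool locking)
{
	long nr_pages = (long)((end - start) >> PAGE_SHIFT);

	/* mm->locked_vm += nr_pages when locking, -= when unlocking */
	return locking ? nr_pages : -nr_pages;
}
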
599 return -EINVAL; in apply_vma_lock_flags()
602 vma = find_vma(current->mm, start); in apply_vma_lock_flags()
603 if (!vma || vma->vm_start > start) in apply_vma_lock_flags()
604 return -ENOMEM; in apply_vma_lock_flags()
606 prev = vma->vm_prev; in apply_vma_lock_flags()
607 if (start > vma->vm_start) in apply_vma_lock_flags()
611 vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in apply_vma_lock_flags()
615 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ in apply_vma_lock_flags()
616 tmp = vma->vm_end; in apply_vma_lock_flags()
623 if (nstart < prev->vm_end) in apply_vma_lock_flags()
624 nstart = prev->vm_end; in apply_vma_lock_flags()
628 vma = prev->vm_next; in apply_vma_lock_flags()
629 if (!vma || vma->vm_start != nstart) { in apply_vma_lock_flags()
630 error = -ENOMEM; in apply_vma_lock_flags()
651 mm = current->mm; in count_mm_mlocked_page_nr()
655 vma = mm->mmap; in count_mm_mlocked_page_nr()
657 for (; vma ; vma = vma->vm_next) { in count_mm_mlocked_page_nr()
658 if (start >= vma->vm_end) in count_mm_mlocked_page_nr()
660 if (start + len <= vma->vm_start) in count_mm_mlocked_page_nr()
662 if (vma->vm_flags & VM_LOCKED) { in count_mm_mlocked_page_nr()
663 if (start > vma->vm_start) in count_mm_mlocked_page_nr()
664 count -= (start - vma->vm_start); in count_mm_mlocked_page_nr()
665 if (start + len < vma->vm_end) { in count_mm_mlocked_page_nr()
666 count += start + len - vma->vm_start; in count_mm_mlocked_page_nr()
669 count += vma->vm_end - vma->vm_start; in count_mm_mlocked_page_nr()
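
The per-vma arithmetic in count_mm_mlocked_page_nr() above amounts to intersecting the requested range [start, start + len) with each VM_LOCKED vma and summing the overlaps. A standalone sketch of that intersection (struct range and overlap_bytes() are illustrative, not kernel API):

/* Half-open byte range [start, end). */
struct range {
	unsigned long start;
	unsigned long end;
};

/* Bytes of overlap between the mlock request and one VM_LOCKED vma. */
static unsigned long overlap_bytes(struct range req, struct range vma)
{
	unsigned long lo = req.start > vma.start ? req.start : vma.start;
	unsigned long hi = req.end < vma.end ? req.end : vma.end;

	return hi > lo ? hi - lo : 0;
}
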
678 unsigned long locked; in do_mlock() local
680 int error = -ENOMEM; in do_mlock()
685 return -EPERM; in do_mlock()
692 locked = len >> PAGE_SHIFT; in do_mlock()
694 if (mmap_write_lock_killable(current->mm)) in do_mlock()
695 return -EINTR; in do_mlock()
697 locked += current->mm->locked_vm; in do_mlock()
698 if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) { in do_mlock()
701 * previously mlocked areas; the part already in "mm->locked_vm" in do_mlock()
703 * and adjust locked count if necessary. in do_mlock()
705 locked -= count_mm_mlocked_page_nr(current->mm, in do_mlock()
710 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) in do_mlock()
713 mmap_write_unlock(current->mm); in do_mlock()
733 return -EINVAL; in SYSCALL_DEFINE3()
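
From userspace, the do_mlock() path above is reached through mlock(2). A minimal usage example with the limit-related errors noted; this is an illustration for context, not part of the kernel sources.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;		/* 1 MiB */
	void *buf = malloc(len);

	if (!buf)
		return 1;

	/*
	 * do_mlock() charges the page-rounded range against RLIMIT_MEMLOCK
	 * unless the caller has CAP_IPC_LOCK: over the limit it fails with
	 * ENOMEM, and with EPERM when the limit is zero and the capability
	 * is missing.
	 */
	if (mlock(buf, len) != 0) {
		fprintf(stderr, "mlock: %s\n", strerror(errno));
		free(buf);
		return 1;
	}

	/* ... work on memory that must stay resident ... */

	munlock(buf, len);
	free(buf);
	return 0;
}
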
750 if (mmap_write_lock_killable(current->mm)) in SYSCALL_DEFINE2()
751 return -EINTR; in SYSCALL_DEFINE2()
753 mmap_write_unlock(current->mm); in SYSCALL_DEFINE2()
760 * and translate into the appropriate modifications to mm->def_flags and/or the
766 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
773 current->mm->def_flags &= VM_LOCKED_CLEAR_MASK; in apply_mlockall_flags()
775 current->mm->def_flags |= VM_LOCKED; in apply_mlockall_flags()
778 current->mm->def_flags |= VM_LOCKONFAULT; in apply_mlockall_flags()
790 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { in apply_mlockall_flags()
793 newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in apply_mlockall_flags()
797 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); in apply_mlockall_flags()
811 return -EINVAL; in SYSCALL_DEFINE1()
814 return -EPERM; in SYSCALL_DEFINE1()
819 if (mmap_write_lock_killable(current->mm)) in SYSCALL_DEFINE1()
820 return -EINTR; in SYSCALL_DEFINE1()
822 ret = -ENOMEM; in SYSCALL_DEFINE1()
823 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || in SYSCALL_DEFINE1()
826 mmap_write_unlock(current->mm); in SYSCALL_DEFINE1()
837 if (mmap_write_lock_killable(current->mm)) in SYSCALL_DEFINE0()
838 return -EINTR; in SYSCALL_DEFINE0()
840 mmap_write_unlock(current->mm); in SYSCALL_DEFINE0()
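
The mlockall()/munlockall() fragments above have a direct userspace counterpart. A small usage example; flag semantics follow mlockall(2) and the def_flags handling shown in apply_mlockall_flags():

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/*
	 * MCL_CURRENT locks everything currently mapped; MCL_FUTURE sets
	 * VM_LOCKED in mm->def_flags so later mappings are locked as they
	 * are created, which is what apply_mlockall_flags() above does.
	 */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		fprintf(stderr, "mlockall: %s\n", strerror(errno));
		return 1;
	}

	/* ... latency-sensitive work that must not be paged out ... */

	munlockall();		/* clears VM_LOCKED/VM_LOCKONFAULT again */
	return 0;
}
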
852 unsigned long lock_limit, locked; in user_shm_lock() local
855 locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in user_shm_lock()
862 locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK)) in user_shm_lock()
865 user->locked_shm += locked; in user_shm_lock()
875 user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in user_shm_unlock()
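
The size-to-pages rounding used by user_shm_lock()/user_shm_unlock() above, as a worked example: a byte count is rounded up to whole pages before being charged to (or released from) the per-user locked_shm total. The PAGE_SIZE/PAGE_SHIFT values below are an assumption (4 KiB pages).

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned long size_to_locked_pages(unsigned long size)
{
	/* 1 byte -> 1 page, 4096 -> 1 page, 4097 -> 2 pages */
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}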