Lines matching references to the identifier "start" in the kernel's mm/mlock.c, grouped by function; the leading numbers are line numbers in that file.

__munlock_pagevec_fill() ("start" is a function argument):
    376  unsigned long start, unsigned long end)
    386  pte = get_locked_pte(vma->vm_mm, start, &ptl);
    388  end = pgd_addr_end(start, end);
    389  end = p4d_addr_end(start, end);
    390  end = pud_addr_end(start, end);
    391  end = pmd_addr_end(start, end);
    394  start += PAGE_SIZE;
    395  while (start < end) {
    399  page = vm_normal_page(vma, start, *pte);
    419  start += PAGE_SIZE;
    424  return start;
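
The four successive *_addr_end() calls at lines 388-391 clamp "end" so the pte++ walk that follows never crosses a page-table boundary at any level. A minimal userspace sketch of that clamping idea, with a single hypothetical 2 MiB boundary standing in for the kernel's per-level sizes:

    #include <stdio.h>

    /* BOUNDARY_SIZE is an assumption for illustration: 2 MiB, a typical
     * PMD span on x86-64. Each kernel pXd_addr_end() macro clamps the
     * same way at its own level's size. */
    #define BOUNDARY_SIZE (1UL << 21)
    #define BOUNDARY_MASK (~(BOUNDARY_SIZE - 1))

    static unsigned long addr_end(unsigned long start, unsigned long end)
    {
        unsigned long boundary = (start + BOUNDARY_SIZE) & BOUNDARY_MASK;

        /* The "- 1" comparisons mirror the kernel macros and stay
         * correct even when end wraps to 0 at the top of the space. */
        return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
        /* end is pulled back from 4 MiB to the next 2 MiB boundary */
        printf("%#lx\n", addr_end(0x1ff000, 0x400000)); /* 0x200000 */
        return 0;
    }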

munlock_vma_pages_range() ("start" is a function argument):
    446  unsigned long start, unsigned long end)
    450  while (start < end) {
    465  page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
    497  start = __munlock_pagevec_fill(&pvec, vma,
    498          zone, start, end);
    504  start += page_increm * PAGE_SIZE;
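
The loop shape here: probe one page with follow_page() (line 465), batch the pages after it via __munlock_pagevec_fill() (lines 497-498), then advance "start" by however many pages were consumed (line 504; more than one when a huge page is hit). A stripped-down sketch of that advance pattern, with a hypothetical process_batch() standing in for the real per-page work:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Hypothetical stand-in: report how many pages were consumed at
     * "start". The kernel returns a larger count for a huge page. */
    static unsigned long process_batch(unsigned long start, unsigned long end)
    {
        (void)start;
        (void)end;
        return 1;
    }

    int main(void)
    {
        unsigned long start = 0x1000, end = 0x5000;

        while (start < end) {                 /* same bounds test as line 450 */
            unsigned long page_increm = process_batch(start, end);

            start += page_increm * PAGE_SIZE; /* same advance as line 504 */
        }
        printf("walk ended at %#lx\n", start);
        return 0;
    }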

mlock_fixup() ("start" is a function argument):
    520  unsigned long start, unsigned long end, vm_flags_t newflags)
    535  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
    536  *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
    544  if (start != vma->vm_start) {
    545  ret = split_vma(mm, vma, start, 1);
    560  nr_pages = (end - start) >> PAGE_SHIFT;
    576  munlock_vma_pages_range(vma, start, end);
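
Line 535 computes the file page offset at "start" before attempting a merge: the VMA's base pgoff plus the number of whole pages between vm_start and start. A small sketch of that arithmetic, using an illustrative struct rather than the kernel's vm_area_struct:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Illustrative stand-in for the two vm_area_struct fields used. */
    struct vma_sketch {
        unsigned long vm_start;  /* VMA start address */
        unsigned long vm_pgoff;  /* file offset of vm_start, in pages */
    };

    static unsigned long pgoff_at(const struct vma_sketch *vma,
                                  unsigned long start)
    {
        /* same expression as line 535 */
        return vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
    }

    int main(void)
    {
        struct vma_sketch vma = { .vm_start = 0x400000, .vm_pgoff = 10 };

        /* three pages into the VMA -> file page 13 */
        printf("%lu\n", pgoff_at(&vma, 0x403000));
        return 0;
    }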

apply_vma_lock_flags() ("start" is a function argument):
    583  static int apply_vma_lock_flags(unsigned long start, size_t len,
    590  VM_BUG_ON(offset_in_page(start));
    592  end = start + len;
    593  if (end < start)
    595  if (end == start)
    597  vma = find_vma(current->mm, start);
    598  if (!vma || vma->vm_start > start)
    602  if (start > vma->vm_start)
    605  for (nstart = start ; ; ) {
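
Lines 592-595 validate the requested range with unsigned arithmetic: if start + len wraps, end comes out below start (the kernel rejects the range); if len is zero, end equals start and there is nothing to do. A sketch of the same checks:

    #include <stdio.h>

    static int validate_range(unsigned long start, unsigned long len)
    {
        unsigned long end = start + len;

        if (end < start)
            return -1;   /* wrapped around: invalid range */
        if (end == start)
            return 1;    /* empty range: trivially done */
        return 0;        /* a real range to process */
    }

    int main(void)
    {
        printf("%d\n", validate_range(~0UL - 4095, 8192)); /* -1: wraps */
        printf("%d\n", validate_range(0x1000, 0));         /*  1: empty */
        printf("%d\n", validate_range(0x1000, 4096));      /*  0: valid */
        return 0;
    }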

count_mm_mlocked_page_nr() ("start" is a function argument):
    640  unsigned long start, size_t len)
    648  vma = find_vma(mm, start);
    653  if (start >= vma->vm_end)
    655  if (start + len <= vma->vm_start)
    658  if (start > vma->vm_start)
    659  count -= (start - vma->vm_start);
    660  if (start + len < vma->vm_end) {
    661  count += start + len - vma->vm_start;
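
Lines 658-661 trim the query range [start, start + len) against one VMA: subtract the head overhang, then add either the part up to start + len (the range ends inside the VMA) or the whole VMA span. The net effect is min(start + len, vm_end) - max(start, vm_start) bytes. A sketch for a single VMA (the kernel accumulates this over every VMA the range spans); the transient wrap below zero is harmless because a later add restores the unsigned value:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* How many pages of [start, start + len) overlap one VMA
     * [vm_start, vm_end)? Mirrors the trim-then-add order of
     * lines 658-661. */
    static unsigned long overlap_pages(unsigned long vm_start,
                                       unsigned long vm_end,
                                       unsigned long start,
                                       unsigned long len)
    {
        unsigned long count = 0;

        if (start > vm_start)
            count -= start - vm_start;        /* trim the head (line 659) */
        if (start + len < vm_end)
            count += start + len - vm_start;  /* ends inside (line 661) */
        else
            count += vm_end - vm_start;       /* covers the VMA's tail */

        return count >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* VMA [0x1000, 0x6000), query [0x2000, 0x5000) -> 3 pages */
        printf("%lu\n", overlap_pages(0x1000, 0x6000, 0x2000, 0x3000));
        return 0;
    }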

do_mlock() ("start" is a function argument):
    671  static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
    677  start = untagged_addr(start);
    682  len = PAGE_ALIGN(len + (offset_in_page(start)));
    683  start &= PAGE_MASK;
    701  start, len);
    706  error = apply_vma_lock_flags(start, len, flags);
    712  error = __mm_populate(start, len, 0);
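
Lines 682-683 are why callers need not page-align mlock() arguments: the kernel widens len by start's offset into its page, rounds len up to a page multiple, then rounds start down, so every page the byte range touches is covered. The same arithmetic in userspace, querying the page size with sysconf():

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
        unsigned long page_mask = ~(page_size - 1);

        unsigned long start = 0x1234, len = 100;

        /* PAGE_ALIGN(len + offset_in_page(start)), then start &= PAGE_MASK */
        len = (len + (start & ~page_mask) + page_size - 1) & page_mask;
        start &= page_mask;

        /* with 4 KiB pages: start=0x1000 len=0x1000 */
        printf("start=%#lx len=%#lx\n", start, len);
        return 0;
    }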

SYSCALL_DEFINE2(mlock) ("start" is a syscall argument):
    718  SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
    720  return do_mlock(start, len, VM_LOCKED);

SYSCALL_DEFINE3(mlock2) ("start" is a syscall argument):
    723  SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
    733  return do_mlock(start, len, vm_flags);

SYSCALL_DEFINE2(munlock) ("start" is a syscall argument):
    736  SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
    740  start = untagged_addr(start);
    742  len = PAGE_ALIGN(len + (offset_in_page(start)));
    743  start &= PAGE_MASK;
    747  ret = apply_vma_lock_flags(start, len, 0);
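
From userspace, the three entry points above look like this. A sketch assuming glibc 2.27+ for the mlock2() wrapper; locking is subject to RLIMIT_MEMLOCK unless the process has CAP_IPC_LOCK:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 1 << 20;           /* 1 MiB */
        char *buf = malloc(len);

        if (!buf)
            return 1;

        /* MLOCK_ONFAULT locks pages as they are faulted in; plain
         * mlock() populates and locks the whole range up front. */
        if (mlock2(buf, len, MLOCK_ONFAULT) != 0 && mlock(buf, len) != 0) {
            perror("mlock");
            free(buf);
            return 1;
        }

        memset(buf, 0, len);    /* touch the pages: now resident and locked */

        if (munlock(buf, len) != 0) /* same rounding rules as lines 742-743 */
            perror("munlock");
        free(buf);
        return 0;
    }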