Lines matching references to "start" (mm/gup.c)

780 unsigned long start, unsigned long nr_pages, in __get_user_pages() argument
791 start = untagged_addr(start); in __get_user_pages()
809 if (!vma || start >= vma->vm_end) { in __get_user_pages()
810 vma = find_extend_vma(mm, start); in __get_user_pages()
811 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
812 ret = get_gate_page(mm, start & PAGE_MASK, in __get_user_pages()
827 &start, &nr_pages, i, in __get_user_pages()
843 page = follow_page_mask(vma, start, foll_flags, &ctx); in __get_user_pages()
845 ret = faultin_page(tsk, vma, start, &foll_flags, in __get_user_pages()
873 flush_anon_page(vma, page, start); in __get_user_pages()
882 page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); in __get_user_pages()
886 start += page_increm * PAGE_SIZE; in __get_user_pages()
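
The lines above are the core pinning loop in __get_user_pages(): start is untagged once on entry (line 791) and then advanced a whole number of pages per iteration (lines 882/886). ctx.page_mask is 0 for a base page and covers the sub-page index for a compound page, so page_increm is the number of base pages left between start and the end of the page that follow_page_mask() just returned. A minimal user-space sketch of that arithmetic; PAGE_SHIFT and the huge-page order below are illustrative stand-ins, not the kernel's definitions:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Number of base pages __get_user_pages() advances in one step:
     * from 'start' to the end of the (possibly compound) page that
     * follow_page_mask() returned. page_mask is 0 for a base page. */
    static unsigned long page_increm(unsigned long start, unsigned long page_mask)
    {
            return 1 + (~(start >> PAGE_SHIFT) & page_mask);
    }

    int main(void)
    {
            unsigned long pmd_mask = (1UL << 9) - 1;  /* 2 MiB page = 512 base pages */

            /* Base page: advance exactly one page. */
            printf("%lu\n", page_increm(0x1000, 0));                          /* 1 */
            /* start five base pages into a 2 MiB page: skip to its end. */
            printf("%lu\n", page_increm(0x200000 + 5 * PAGE_SIZE, pmd_mask)); /* 507 */
            return 0;
    }
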
1000 unsigned long start, in __get_user_pages_locked() argument
1023 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1056 start += ret << PAGE_SHIFT; in __get_user_pages_locked()
1066 ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, in __get_user_pages_locked()
1080 start += PAGE_SIZE; in __get_user_pages_locked()
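
__get_user_pages_locked() (lines 1000-1080) wraps the loop above so mmap_sem can be dropped and re-taken around faults: after each batch it advances start past the pages already pinned (line 1056), then retries exactly one page with FOLL_TRIED and bumps start by a single page (lines 1066/1080) before going around again. The control flow, reduced to a user-space stand-in where pin_batch() is hypothetical and merely pretends to stop early:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Hypothetical stand-in for __get_user_pages(): pins up to nr pages
     * from 'start' but may stop early, returning how many it managed. */
    static long pin_batch(unsigned long start, unsigned long nr)
    {
            return nr > 3 ? 3 : (long)nr;   /* pretend to stop after 3 pages */
    }

    int main(void)
    {
            unsigned long start = 0x10000, nr_pages = 8, pinned = 0;

            while (nr_pages) {
                    long ret = pin_batch(start, nr_pages);  /* batch attempt */

                    pinned += ret;
                    nr_pages -= ret;
                    start += ret << PAGE_SHIFT;     /* skip what we already hold */
                    if (!nr_pages)
                            break;

                    /* The kernel faults in and retries exactly one page here
                     * (FOLL_TRIED), then continues with the rest of the range. */
                    pinned++;
                    nr_pages--;
                    start += PAGE_SIZE;
            }
            printf("pinned %lu pages, next start %#lx\n", pinned, start);
            return 0;
    }
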
1150 unsigned long start, unsigned long nr_pages, in get_user_pages_remote() argument
1163 return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, in get_user_pages_remote()
1189 unsigned long start, unsigned long end, int *nonblocking) in populate_vma_page_range() argument
1192 unsigned long nr_pages = (end - start) / PAGE_SIZE; in populate_vma_page_range()
1195 VM_BUG_ON(start & ~PAGE_MASK); in populate_vma_page_range()
1197 VM_BUG_ON_VMA(start < vma->vm_start, vma); in populate_vma_page_range()
1223 return __get_user_pages(current, mm, start, nr_pages, gup_flags, in populate_vma_page_range()
1234 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) in __mm_populate() argument
1242 end = start + len; in __mm_populate()
1244 for (nstart = start; nstart < end; nstart = nend) { in __mm_populate()
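
__mm_populate() (lines 1234-1244) is the mlock()/MAP_POPULATE back end: it walks [start, start + len) one VMA at a time, clamping each piece to the VMA that contains it and handing the piece to populate_vma_page_range(). The same range-chopping shape in a self-contained sketch; the VMA table and find_vma_stub() are made-up stand-ins for the real VMA tree and find_vma():

    #include <stdio.h>

    /* Made-up stand-in for a few VMAs: [vm_start, vm_end) pairs. */
    struct vma { unsigned long vm_start, vm_end; };

    static const struct vma vmas[] = {
            { 0x10000, 0x14000 }, { 0x14000, 0x20000 }, { 0x30000, 0x40000 },
    };

    /* find_vma()-like lookup: first region whose end lies above addr. */
    static const struct vma *find_vma_stub(unsigned long addr)
    {
            for (unsigned int i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
                    if (addr < vmas[i].vm_end)
                            return &vmas[i];
            return NULL;
    }

    int main(void)
    {
            unsigned long start = 0x12000, len = 0x1c000;
            unsigned long end = start + len, nstart, nend;

            /* Same shape as the loop at line 1244: chop [start, end) into
             * per-VMA pieces and hand each piece to the populate helper. */
            for (nstart = start; nstart < end; nstart = nend) {
                    const struct vma *vma = find_vma_stub(nstart);

                    if (!vma || vma->vm_start >= end)
                            break;
                    nend = end < vma->vm_end ? end : vma->vm_end;
                    printf("populate [%#lx, %#lx) in vma [%#lx, %#lx)\n",
                           nstart > vma->vm_start ? nstart : vma->vm_start, nend,
                           vma->vm_start, vma->vm_end);
            }
            return 0;
    }
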
1317 struct mm_struct *mm, unsigned long start, in __get_user_pages_locked() argument
1335 vma = find_vma(mm, start); in __get_user_pages_locked()
1345 pages[i] = virt_to_page(start); in __get_user_pages_locked()
1351 start = (start + PAGE_SIZE) & PAGE_MASK; in __get_user_pages_locked()
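
The second __get_user_pages_locked() definition (line 1317 onward) is the !CONFIG_MMU build: there are no page tables to walk, so each address is converted directly with virt_to_page() (line 1345) and start is bumped to the next page boundary (line 1351). Note the rounding: an unaligned start lands on the following page boundary rather than moving a full PAGE_SIZE forward, as this stand-alone check shows (PAGE_SHIFT is an illustrative stand-in):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long a = 0x3000;   /* aligned: moves one full page    */
            unsigned long b = 0x3234;   /* unaligned: next boundary, not +4K */

            printf("%#lx -> %#lx\n", a, (a + PAGE_SIZE) & PAGE_MASK);  /* 0x4000 */
            printf("%#lx -> %#lx\n", b, (b + PAGE_SIZE) & PAGE_MASK);  /* 0x4000 */
            return 0;
    }
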
1435 unsigned long start, in check_and_migrate_cma_pages() argument
1507 nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages, in check_and_migrate_cma_pages()
1522 unsigned long start, in check_and_migrate_cma_pages() argument
1538 unsigned long start, in __gup_longterm_locked() argument
1562 rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, in __gup_longterm_locked()
1577 rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages, in __gup_longterm_locked()
1589 unsigned long start, in __gup_longterm_locked() argument
1595 return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, in __gup_longterm_locked()
1607 long get_user_pages(unsigned long start, unsigned long nr_pages, in get_user_pages() argument
1611 return __gup_longterm_locked(current, current->mm, start, nr_pages, in get_user_pages()
1637 long get_user_pages_locked(unsigned long start, unsigned long nr_pages, in get_user_pages_locked() argument
1650 return __get_user_pages_locked(current, current->mm, start, nr_pages, in get_user_pages_locked()
1671 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, in get_user_pages_unlocked() argument
1688 ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, in get_user_pages_unlocked()
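
get_user_pages(), get_user_pages_locked() and get_user_pages_unlocked() (lines 1607-1688) all hand the same (start, nr_pages) pair down to __get_user_pages_locked(); they differ only in how mmap_sem is managed around the call. A rough caller-side sketch, assuming the v5.4-era prototype long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas). This is kernel-context code (it only builds in-tree), the surrounding helper is hypothetical, and the caller is assumed to hold mmap_sem for read:

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Hypothetical helper: pin 'nr' pages of a user buffer for writing,
     * use them, then drop the references with put_page(). */
    static int pin_user_buffer(unsigned long uaddr, unsigned long nr)
    {
            struct page **pages;
            long pinned, i;

            pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = get_user_pages(uaddr & PAGE_MASK, nr, FOLL_WRITE, pages, NULL);
            if (pinned < 0) {
                    kfree(pages);
                    return pinned;
            }

            /* ... access the pinned pages here ... */

            for (i = 0; i < pinned; i++)
                    put_page(pages[i]);
            kfree(pages);
            return pinned == nr ? 0 : -EFAULT;
    }
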
2305 static bool gup_fast_permitted(unsigned long start, unsigned long end) in gup_fast_permitted() argument
2320 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument
2327 start = untagged_addr(start) & PAGE_MASK; in __get_user_pages_fast()
2329 end = start + len; in __get_user_pages_fast()
2331 if (end <= start) in __get_user_pages_fast()
2333 if (unlikely(!access_ok((void __user *)start, len))) in __get_user_pages_fast()
2349 gup_fast_permitted(start, end)) { in __get_user_pages_fast()
2351 gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr); in __get_user_pages_fast()
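
__get_user_pages_fast() (lines 2320-2351) walks the page tables without taking mmap_sem and without faulting; before the walk it sanitises start (lines 2327-2333): strip the address tag, page-align, compute end, and bail out if the range is empty or wraps, all before access_ok(). The untag-and-wrap check in isolation (64-bit build assumed); untag_stub() is a stand-in for untagged_addr(), modelled here as sign-extension from bit 55, which is how the arm64 version behaves:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Stand-in for untagged_addr(): sign-extend from bit 55 so a tagged
     * user pointer (non-zero top byte) collapses back to the real VA. */
    static unsigned long untag_stub(unsigned long addr)
    {
            return (unsigned long)((long)(addr << 8) >> 8);
    }

    /* Mirrors the checks at lines 2327-2333 / 2407-2414. */
    static int range_ok(unsigned long start, int nr_pages)
    {
            unsigned long len, end;

            start = untag_stub(start) & PAGE_MASK;
            len = (unsigned long)nr_pages << PAGE_SHIFT;
            end = start + len;

            return end > start;     /* reject empty and wrapping ranges */
    }

    int main(void)
    {
            printf("%d\n", range_ok(0x00007f0012345678UL, 16));  /* 1: fine  */
            printf("%d\n", range_ok(0xffffffffffff0000UL, 32));  /* 0: wraps */
            printf("%d\n", range_ok(0x0000700000000000UL, 0));   /* 0: empty */
            return 0;
    }
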
2359 static int __gup_longterm_unlocked(unsigned long start, int nr_pages, in __gup_longterm_unlocked() argument
2371 start, nr_pages, in __gup_longterm_unlocked()
2375 ret = get_user_pages_unlocked(start, nr_pages, in __gup_longterm_unlocked()
2398 int get_user_pages_fast(unsigned long start, int nr_pages, in get_user_pages_fast() argument
2407 start = untagged_addr(start) & PAGE_MASK; in get_user_pages_fast()
2408 addr = start; in get_user_pages_fast()
2410 end = start + len; in get_user_pages_fast()
2412 if (end <= start) in get_user_pages_fast()
2414 if (unlikely(!access_ok((void __user *)start, len))) in get_user_pages_fast()
2418 gup_fast_permitted(start, end)) { in get_user_pages_fast()
2427 start += nr << PAGE_SHIFT; in get_user_pages_fast()
2430 ret = __gup_longterm_unlocked(start, nr_pages - nr, in get_user_pages_fast()
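
get_user_pages_fast() (lines 2398-2430) tries the lockless page-table walk first; if that covers only part of the range, lines 2427/2430 move start past the pages the fast path pinned and hand the remaining count to the slow path via __gup_longterm_unlocked(). The split arithmetic on its own; fast_pin_stub() is a hypothetical stand-in for the gup_pgd_range() walk:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Hypothetical stand-in for the lockless walk: pretend it manages to
     * pin only the first 5 pages before hitting something it can't handle. */
    static int fast_pin_stub(unsigned long start, int nr_pages)
    {
            return nr_pages < 5 ? nr_pages : 5;
    }

    int main(void)
    {
            unsigned long start = 0x7f0000000000UL;
            int nr_pages = 12, nr, ret;

            nr = fast_pin_stub(start, nr_pages);    /* lockless attempt */

            if (nr < nr_pages) {
                    /* Slow path picks up exactly where the fast path stopped. */
                    start += (unsigned long)nr << PAGE_SHIFT;
                    ret = nr_pages - nr;            /* pages still to pin */
                    printf("fast: %d, slow path from %#lx for %d pages\n",
                           nr, start, ret);
            } else {
                    printf("fast path pinned all %d pages\n", nr);
            }
            return 0;
    }
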