Lines Matching refs:pages
267 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
279 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
284 struct page *page = compound_head(pages[index]); in unpin_user_pages_dirty_lock()
321 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
338 unpin_user_page(pages[index]); in unpin_user_pages()
1029 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1041 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1062 pages ? &pages[i] : NULL); in __get_user_pages()
1074 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1128 if (pages) { in __get_user_pages()
1129 pages[i] = page; in __get_user_pages()
1256 struct page **pages, in __get_user_pages_locked() argument
1283 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1289 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1320 if (likely(pages)) in __get_user_pages_locked()
1321 pages += ret; in __get_user_pages_locked()
1350 pages, NULL, locked); in __get_user_pages_locked()
1366 if (likely(pages)) in __get_user_pages_locked()
1367 pages++; in __get_user_pages_locked()
1501 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
1527 if (pages) { in __get_user_pages_locked()
1528 pages[i] = virt_to_page(start); in __get_user_pages_locked()
1529 if (pages[i]) in __get_user_pages_locked()
1530 get_page(pages[i]); in __get_user_pages_locked()
1600 struct page **pages, in check_and_migrate_cma_pages() argument
1619 head = compound_head(pages[i]); in check_and_migrate_cma_pages()
1663 unpin_user_pages(pages, nr_pages); in check_and_migrate_cma_pages()
1666 put_page(pages[i]); in check_and_migrate_cma_pages()
1678 ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in check_and_migrate_cma_pages()
1695 struct page **pages, in check_and_migrate_cma_pages() argument
1710 struct page **pages, in __gup_longterm_locked() argument
1719 if (!pages) in __gup_longterm_locked()
1732 rc = __get_user_pages_locked(mm, start, nr_pages, pages, in __gup_longterm_locked()
1741 unpin_user_pages(pages, rc); in __gup_longterm_locked()
1744 put_page(pages[i]); in __gup_longterm_locked()
1749 rc = check_and_migrate_cma_pages(mm, start, rc, pages, in __gup_longterm_locked()
1763 struct page **pages, in __gup_longterm_locked() argument
1767 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __gup_longterm_locked()
1794 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
1811 return __gup_longterm_locked(mm, start, nr_pages, pages, in __get_user_pages_remote()
1816 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __get_user_pages_remote()
1883 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1890 pages, vmas, locked); in get_user_pages_remote()
1897 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1905 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
1929 unsigned int gup_flags, struct page **pages, in get_user_pages() argument
1936 pages, vmas, gup_flags | FOLL_TOUCH); in get_user_pages()
1973 unsigned int gup_flags, struct page **pages, in get_user_pages_locked() argument
1992 pages, NULL, locked, in get_user_pages_locked()
2013 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2029 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, in get_user_pages_unlocked()
2129 struct page **pages) in undo_dev_pagemap() argument
2132 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2144 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
2171 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2205 pages[*nr] = page; in gup_pte_range()
2230 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
2239 struct page **pages, int *nr) in __gup_device_huge() argument
2249 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2253 pages[*nr] = page; in __gup_device_huge()
2255 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2269 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2275 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pmd()
2279 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pmd()
2287 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2293 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pud()
2297 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pud()
2305 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2313 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2321 unsigned long end, struct page **pages) in record_subpages() argument
2326 pages[nr++] = page++; in record_subpages()
2341 struct page **pages, int *nr) in gup_hugepte() argument
2362 refs = record_subpages(page, addr, end, pages + *nr); in gup_hugepte()
2380 struct page **pages, int *nr) in gup_huge_pd() argument
2389 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2398 struct page **pages, int *nr) in gup_huge_pd() argument
2406 struct page **pages, int *nr) in gup_huge_pmd() argument
2418 pages, nr); in gup_huge_pmd()
2422 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pmd()
2440 struct page **pages, int *nr) in gup_huge_pud() argument
2452 pages, nr); in gup_huge_pud()
2456 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pud()
2474 struct page **pages, int *nr) in gup_huge_pgd() argument
2485 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pgd()
2502 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
2526 pages, nr)) in gup_pmd_range()
2535 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
2537 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) in gup_pmd_range()
2545 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
2559 pages, nr)) in gup_pud_range()
2563 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
2565 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) in gup_pud_range()
2573 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
2588 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
2590 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
2598 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2612 pages, nr)) in gup_pgd_range()
2616 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
2618 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
2624 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2641 unsigned int gup_flags, struct page **pages) in __gup_longterm_unlocked() argument
2653 pages, NULL, gup_flags); in __gup_longterm_unlocked()
2657 pages, gup_flags); in __gup_longterm_unlocked()
2666 struct page **pages) in lockless_pages_from_mm() argument
2694 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); in lockless_pages_from_mm()
2703 unpin_user_pages(pages, nr_pinned); in lockless_pages_from_mm()
2713 struct page **pages) in internal_get_user_pages_fast() argument
2737 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); in internal_get_user_pages_fast()
2743 pages += nr_pinned; in internal_get_user_pages_fast()
2745 pages); in internal_get_user_pages_fast()
2779 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
2792 pages); in get_user_pages_fast_only()
2824 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
2836 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
2857 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
2864 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
2875 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast_only() argument
2891 pages); in pin_user_pages_fast_only()
2929 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
2938 pages, vmas, locked); in pin_user_pages_remote()
2961 unsigned int gup_flags, struct page **pages, in pin_user_pages() argument
2970 pages, vmas, gup_flags); in pin_user_pages()
2980 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
2987 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); in pin_user_pages_unlocked()
2997 unsigned int gup_flags, struct page **pages, in pin_user_pages_locked() argument
3015 pages, NULL, locked, in pin_user_pages_locked()
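
The references above trace the pages array from the pin_user_pages*() entry points, through the lockless fast path in gup_pgd_range()/gup_pte_range(), and back out via unpin_user_pages_dirty_lock(). As a rough illustration of how a caller typically drives that cycle, here is a minimal sketch in kernel C; the function demo_pin_cycle() and its parameters user_addr/nr are hypothetical placeholders, and error handling is reduced to the essentials.

#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Illustrative sketch only: pin a user buffer for long-term access via
 * the fast path, then dirty and release the pages in one call.
 * Assumes process context and a valid user mapping at user_addr.
 */
static int demo_pin_cycle(unsigned long user_addr, int nr)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Tries the lockless walk first, falling back to the slow path. */
	pinned = pin_user_pages_fast(user_addr, nr,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}

	/* ... pages[0..pinned-1] are safe to access or DMA into here ... */

	/* Mark dirty and drop the pins via unpin_user_pages_dirty_lock(). */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	kfree(pages);
	return 0;
}

Note that pin_user_pages_fast() may pin fewer pages than requested; a real caller would compare pinned against nr before using the buffer.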