Lines Matching full:pages
83 * So now that the head page is stable, recheck that the pages still in try_get_compound_head()
234 * Pages that were pinned via pin_user_pages*() must be released via either
236 * that such pages can be separately tracked and uniquely handled. In
246 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
247 * @pages: array of pages to be maybe marked dirty, and definitely released.
248 * @npages: number of pages in the @pages array.
249 * @make_dirty: whether to mark the pages dirty
254 * For each page in the @pages array, make that page (or its head page, if a
256 * listed as clean. In any case, releases all pages using unpin_user_page(),
267 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
273 * TODO: this can be optimized for huge pages: if a series of pages is in unpin_user_pages_dirty_lock()
279 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
284 struct page *page = compound_head(pages[index]); in unpin_user_pages_dirty_lock()
313 * unpin_user_pages() - release an array of gup-pinned pages.
314 * @pages: array of pages to be marked dirty and released.
315 * @npages: number of pages in the @pages array.
317 * For each page in the @pages array, release the page using unpin_user_page().
321 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
326 * If this WARN_ON() fires, then the system *might* be leaking pages (by in unpin_user_pages()
333 * TODO: this can be optimized for huge pages: if a series of pages is in unpin_user_pages()
338 unpin_user_page(pages[index]); in unpin_user_pages()
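
For reference, a minimal caller-side sketch of the pin/dirty/unpin pattern documented above. It is not part of gup.c; the helper name and the simulated device write are hypothetical, and it assumes the ~v5.10-era pin_user_pages_fast()/unpin_user_pages_dirty_lock() signatures.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/highmem.h>

/* Pin a user buffer, let a (simulated) device write into it, then release. */
static int demo_pin_fill_release(unsigned long user_addr, unsigned long nr_pages)
{
        struct page **pages;
        long pinned, i;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* FOLL_PIN is implied by pin_user_pages_fast(); FOLL_WRITE because
         * the "device" will write into the pages. */
        pinned = pin_user_pages_fast(user_addr, nr_pages, FOLL_WRITE, pages);
        if (pinned < 0) {
                kfree(pages);
                return pinned;
        }

        /* Stand-in for the DMA transfer: scribble on each pinned page. */
        for (i = 0; i < pinned; i++) {
                void *kaddr = kmap(pages[i]);

                memset(kaddr, 0xaa, 1);
                kunmap(pages[i]);
        }

        /*
         * The pages were written to, so mark them dirty while unpinning.
         * A read-only transfer would pass make_dirty == false, or simply
         * call unpin_user_pages(pages, pinned).
         */
        unpin_user_pages_dirty_lock(pages, pinned, true);
        kfree(pages);
        return 0;
}
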
348 * has touched so far, we don't want to allocate unnecessary pages or in no_page_table()
454 * Only return device mapping pages in the FOLL_GET or FOLL_PIN in follow_page_pte()
465 /* Avoid special (like zero) pages in core dumps */ in follow_page_pte()
535 lru_add_drain(); /* push cached pages to LRU */ in follow_page_pte()
747 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
819 /* user gate pages are read-only */ in get_gate_page()
872 /* mlock all present pages, but do not fault in new pages */ in faultin_page()
942 * Anon pages in shared mappings are surprising: now in check_vma_flags()
968 * __get_user_pages() - pin user pages in memory
971 * @nr_pages: number of pages from start to pin
973 * @pages: array that receives pointers to the pages pinned.
975 * only intends to ensure the pages are faulted in.
980 * Returns either number of pages pinned (which may be less than the
984 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
985 * -- If nr_pages is >0, and some pages were pinned, returns the number of
986 * pages pinned. Again, this may be less than nr_pages.
989 * The caller is responsible for releasing returned @pages, via put_page().
1029 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1041 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1062 pages ? &pages[i] : NULL); in __get_user_pages()
1074 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1092 * If we have a pending SIGKILL, don't keep faulting pages and in __get_user_pages()
1128 if (pages) { in __get_user_pages()
1129 pages[i] = page; in __get_user_pages()
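
The return-value contract documented above for __get_user_pages() also governs the public wrappers. Below is a sketch of a caller that tolerates a short result and drops its FOLL_GET references with put_page(); the helper is hypothetical and uses the get_user_pages() wrapper, which operates on current->mm and expects mmap_lock to be held.

#include <linux/mm.h>
#include <linux/sched.h>

static long demo_gup_partial(unsigned long start, unsigned long nr_pages,
                             struct page **pages)
{
        long got, i;

        mmap_read_lock(current->mm);
        got = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
        mmap_read_unlock(current->mm);

        if (got < 0)            /* nothing was pinned: plain -errno */
                return got;

        /* got may be anywhere in [1, nr_pages]; only pages[0..got-1] are valid. */

        /* When done with them, every returned page must be released. */
        for (i = 0; i < got; i++)
                put_page(pages[i]);
        return got;
}
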
1256 struct page **pages, in __get_user_pages_locked() argument
1276 * is to set FOLL_GET if the caller wants pages[] filled in (but has in __get_user_pages_locked()
1280 * FOLL_PIN always expects pages to be non-null, but no need to assert in __get_user_pages_locked()
1283 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1289 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1318 * For the prefault case (!pages) we only update counts. in __get_user_pages_locked()
1320 if (likely(pages)) in __get_user_pages_locked()
1321 pages += ret; in __get_user_pages_locked()
1350 pages, NULL, locked); in __get_user_pages_locked()
1366 if (likely(pages)) in __get_user_pages_locked()
1367 pages++; in __get_user_pages_locked()
1382 * populate_vma_page_range() - populate a range of pages in the vma.
1388 * This takes care of mlocking the pages too if VM_LOCKED is set.
1390 * Return either number of pages pinned in the vma, or a negative error
1441 * __mm_populate - populate and/or mlock pages within a range of address space.
1459 * We want to fault in pages for [nstart; end) address range. in __mm_populate()
1480 * Now fault in a range of pages. populate_vma_page_range() in __mm_populate()
1481 * double checks the vma flags, so that it won't mlock pages in __mm_populate()
1501 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
1527 if (pages) { in __get_user_pages_locked()
1528 pages[i] = virt_to_page(start); in __get_user_pages_locked()
1529 if (pages[i]) in __get_user_pages_locked()
1530 get_page(pages[i]); in __get_user_pages_locked()
1600 struct page **pages, in check_and_migrate_cma_pages() argument
1619 head = compound_head(pages[i]); in check_and_migrate_cma_pages()
1652 * If list is empty, and no isolation errors, means that all pages are in check_and_migrate_cma_pages()
1663 unpin_user_pages(pages, nr_pages); in check_and_migrate_cma_pages()
1666 put_page(pages[i]); in check_and_migrate_cma_pages()
1677 /* We unpinned pages before migration, pin them again */ in check_and_migrate_cma_pages()
1678 ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in check_and_migrate_cma_pages()
1686 * check again because pages were unpinned, and we also might have in check_and_migrate_cma_pages()
1687 * had isolation errors and need more pages to migrate. in check_and_migrate_cma_pages()
1695 struct page **pages, in check_and_migrate_cma_pages() argument
1710 struct page **pages, in __gup_longterm_locked() argument
1719 if (!pages) in __gup_longterm_locked()
1732 rc = __get_user_pages_locked(mm, start, nr_pages, pages, in __gup_longterm_locked()
1741 unpin_user_pages(pages, rc); in __gup_longterm_locked()
1744 put_page(pages[i]); in __gup_longterm_locked()
1749 rc = check_and_migrate_cma_pages(mm, start, rc, pages, in __gup_longterm_locked()
1763 struct page **pages, in __gup_longterm_locked() argument
1767 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __gup_longterm_locked()
1794 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
1811 return __gup_longterm_locked(mm, start, nr_pages, pages, in __get_user_pages_remote()
1816 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __get_user_pages_remote()
1822 * get_user_pages_remote() - pin user pages in memory
1825 * @nr_pages: number of pages from start to pin
1827 * @pages: array that receives pointers to the pages pinned.
1829 * only intends to ensure the pages are faulted in.
1836 * Returns either number of pages pinned (which may be less than the
1840 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1841 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1842 * pages pinned. Again, this may be less than nr_pages.
1844 * The caller is responsible for releasing returned @pages, via put_page().
1870 * via the user virtual addresses. The pages may be submitted for
1883 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1890 pages, vmas, locked); in get_user_pages_remote()
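
A sketch of the remote case, roughly what a debugger-style reader of another task's memory would do. The helper and its caller-supplied buffer are hypothetical, error handling is reduced to the essentials, and it assumes the mm-first get_user_pages_remote() signature shown above.

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/highmem.h>

/* Copy one page out of another task's address space. */
static long demo_read_remote_page(struct mm_struct *mm, unsigned long addr,
                                  void *buf)
{
        struct page *page;
        int locked = 1;
        long got;
        void *kaddr;

        if (!mmget_not_zero(mm))        /* keep the target mm alive */
                return -ESRCH;

        mmap_read_lock(mm);
        got = get_user_pages_remote(mm, addr, 1, FOLL_FORCE, &page, NULL,
                                    &locked);
        if (locked)             /* the lock may be dropped while faulting */
                mmap_read_unlock(mm);
        mmput(mm);

        if (got <= 0)
                return got ? got : -EFAULT;

        kaddr = kmap(page);
        memcpy(buf, kaddr, PAGE_SIZE);
        kunmap(page);
        put_page(page);         /* drop the FOLL_GET reference */
        return 0;
}
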
1897 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1905 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
1913 * get_user_pages() - pin user pages in memory
1915 * @nr_pages: number of pages from start to pin
1917 * @pages: array that receives pointers to the pages pinned.
1919 * only intends to ensure the pages are faulted in.
1929 unsigned int gup_flags, struct page **pages, in get_user_pages() argument
1936 pages, vmas, gup_flags | FOLL_TOUCH); in get_user_pages()
1945 * get_user_pages(mm, ..., pages, NULL);
1953 * get_user_pages_locked(mm, ..., pages, &locked);
1958 * @nr_pages: number of pages from start to pin
1960 * @pages: array that receives pointers to the pages pinned.
1962 * only intends to ensure the pages are faulted in.
1973 unsigned int gup_flags, struct page **pages, in get_user_pages_locked() argument
1992 pages, NULL, locked, in get_user_pages_locked()
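
The calling convention sketched in the comment above, filled out with the mmap_lock helpers. The exported function operates on current->mm, so only the lock calls name it explicitly; the helper name and FOLL_WRITE are illustrative.

#include <linux/mm.h>
#include <linux/sched.h>

static long demo_gup_locked(unsigned long start, unsigned long nr_pages,
                            struct page **pages)
{
        int locked = 1;
        long ret;

        mmap_read_lock(current->mm);
        /* ... other work that needs mmap_lock can go here ... */
        ret = get_user_pages_locked(start, nr_pages, FOLL_WRITE, pages,
                                    &locked);
        /*
         * get_user_pages_locked() may drop mmap_lock while faulting and
         * reports that by clearing "locked"; in that case the caller must
         * not unlock again.
         */
        if (locked)
                mmap_read_unlock(current->mm);
        return ret;
}
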
2001 * get_user_pages(mm, ..., pages, NULL);
2006 * get_user_pages_unlocked(mm, ..., pages);
2013 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2029 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, in get_user_pages_unlocked()
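
The equivalent using get_user_pages_unlocked(), which takes and drops mmap_lock internally; note the pages-before-flags argument order. The helper name is illustrative.

#include <linux/mm.h>

static long demo_gup_unlocked(unsigned long start, unsigned long nr_pages,
                              struct page **pages)
{
        /* One call replaces the lock / get_user_pages / unlock sequence. */
        return get_user_pages_unlocked(start, nr_pages, pages, FOLL_WRITE);
}
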
2040 * get_user_pages_fast attempts to pin user pages by walking the page
2042 * protected from page table pages being freed from under it, and should
2047 * pages are freed. This is unsuitable for architectures that do not need
2050 * Another way to achieve this is to batch up page table containing pages
2052 * pages. Disabling interrupts will allow the fast_gup walker to both block
2060 * free pages containing page tables or TLB flushing requires IPI broadcast.
2129 struct page **pages) in undo_dev_pagemap() argument
2132 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2144 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
2171 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2205 pages[*nr] = page; in gup_pte_range()
2226 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2230 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
2239 struct page **pages, int *nr) in __gup_device_huge() argument
2249 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2253 pages[*nr] = page; in __gup_device_huge()
2255 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2269 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2275 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pmd()
2279 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pmd()
2287 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2293 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pud()
2297 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pud()
2305 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2313 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2321 unsigned long end, struct page **pages) in record_subpages() argument
2326 pages[nr++] = page++; in record_subpages()
2341 struct page **pages, int *nr) in gup_hugepte() argument
2362 refs = record_subpages(page, addr, end, pages + *nr); in gup_hugepte()
2380 struct page **pages, int *nr) in gup_huge_pd() argument
2389 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2398 struct page **pages, int *nr) in gup_huge_pd() argument
2406 struct page **pages, int *nr) in gup_huge_pmd() argument
2418 pages, nr); in gup_huge_pmd()
2422 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pmd()
2440 struct page **pages, int *nr) in gup_huge_pud() argument
2452 pages, nr); in gup_huge_pud()
2456 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pud()
2474 struct page **pages, int *nr) in gup_huge_pgd() argument
2485 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pgd()
2502 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
2526 pages, nr)) in gup_pmd_range()
2535 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
2537 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) in gup_pmd_range()
2545 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
2559 pages, nr)) in gup_pud_range()
2563 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
2565 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) in gup_pud_range()
2573 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
2588 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
2590 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
2598 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2612 pages, nr)) in gup_pgd_range()
2616 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
2618 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
2624 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2641 unsigned int gup_flags, struct page **pages) in __gup_longterm_unlocked() argument
2653 pages, NULL, gup_flags); in __gup_longterm_unlocked()
2657 pages, gup_flags); in __gup_longterm_unlocked()
2666 struct page **pages) in lockless_pages_from_mm() argument
2686 * With interrupts disabled, we block page table pages from being freed in lockless_pages_from_mm()
2694 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); in lockless_pages_from_mm()
2698 * When pinning pages for DMA there could be a concurrent write protect in lockless_pages_from_mm()
2703 unpin_user_pages(pages, nr_pinned); in lockless_pages_from_mm()
2713 struct page **pages) in internal_get_user_pages_fast() argument
2737 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); in internal_get_user_pages_fast()
2741 /* Slow path: try to get the remaining pages with get_user_pages */ in internal_get_user_pages_fast()
2743 pages += nr_pinned; in internal_get_user_pages_fast()
2745 pages); in internal_get_user_pages_fast()
2748 * The caller has to unpin the pages we already pinned so in internal_get_user_pages_fast()
2759 * get_user_pages_fast_only() - pin user pages in memory
2761 * @nr_pages: number of pages from start to pin
2763 * @pages: array that receives pointers to the pages pinned.
2769 * number of pages pinned, 0 if no pages were pinned.
2772 * pages pinned.
2779 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
2792 pages); in get_user_pages_fast_only()
2808 * get_user_pages_fast() - pin user pages in memory
2810 * @nr_pages: number of pages from start to pin
2812 * @pages: array that receives pointers to the pages pinned.
2815 * Attempt to pin user pages in memory without taking mm->mmap_lock.
2819 * Returns number of pages pinned. This may be fewer than the number requested.
2820 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2824 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
2836 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
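
A usage sketch for the fast path; the helper is hypothetical, and gup_flags of 0 requests read-only access. The caller takes no locks, since get_user_pages_fast() falls back to the locked slow path internally when it has to fault.

#include <linux/mm.h>

static int demo_gup_fast(unsigned long user_addr)
{
        struct page *pages[2];
        int got, i;

        got = get_user_pages_fast(user_addr, 2, 0, pages);
        if (got <= 0)
                return got ? got : -EFAULT;

        for (i = 0; i < got; i++) {
                /* ... use the page, e.g. kmap() and copy ... */
                put_page(pages[i]);     /* gup_fast takes FOLL_GET references */
        }
        return 0;
}
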
2841 * pin_user_pages_fast() - pin user pages in memory without taking locks
2844 * @nr_pages: number of pages from start to pin
2846 * @pages: array that receives pointers to the pages pinned.
2853 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2857 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
2864 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
2875 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast_only() argument
2891 pages); in pin_user_pages_fast_only()
2905 * pin_user_pages_remote() - pin pages of a remote process
2909 * @nr_pages: number of pages from start to pin
2911 * @pages: array that receives pointers to the pages pinned.
2913 * only intends to ensure the pages are faulted in.
2924 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2929 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
2938 pages, vmas, locked); in pin_user_pages_remote()
2943 * pin_user_pages() - pin user pages in memory for use by other devices
2946 * @nr_pages: number of pages from start to pin
2948 * @pages: array that receives pointers to the pages pinned.
2950 * only intends to ensure the pages are faulted in.
2957 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2961 unsigned int gup_flags, struct page **pages, in pin_user_pages() argument
2970 pages, vmas, gup_flags); in pin_user_pages()
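
A sketch of the long-term pin a device driver might take to back a DMA mapping. The helper is hypothetical; FOLL_LONGTERM is what routes the request through the CMA-migration path listed above, and the caller later releases the buffer with unpin_user_pages() plus kvfree().

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>

static long demo_longterm_pin(unsigned long user_addr, unsigned long nr_pages,
                              struct page ***pagesp)
{
        struct page **pages;
        long pinned;

        pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        mmap_read_lock(current->mm);
        /*
         * FOLL_LONGTERM: the pin may be held indefinitely, so GUP first
         * migrates the pages out of CMA/movable regions.
         */
        pinned = pin_user_pages(user_addr, nr_pages,
                                FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
        mmap_read_unlock(current->mm);

        if (pinned < 0) {
                kvfree(pages);
                return pinned;
        }

        *pagesp = pages;
        return pinned;          /* may be fewer than nr_pages */
}
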
2980 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
2987 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); in pin_user_pages_unlocked()
2997 unsigned int gup_flags, struct page **pages, in pin_user_pages_locked() argument
3015 pages, NULL, locked, in pin_user_pages_locked()