// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
        struct dev_pagemap *pgmap;
        unsigned int page_mask;
};

static void hpage_pincount_add(struct page *page, int refs)
{
        VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
        VM_BUG_ON_PAGE(page != compound_head(page), page);

        atomic_add(refs, compound_pincount_ptr(page));
}

static void hpage_pincount_sub(struct page *page, int refs)
{
        VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
        VM_BUG_ON_PAGE(page != compound_head(page), page);

        atomic_sub(refs, compound_pincount_ptr(page));
}

/* Equivalent to calling put_page() @refs times. */
static void put_page_refs(struct page *page, int refs)
{
#ifdef CONFIG_DEBUG_VM
        if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
                return;
#endif

        /*
         * Calling put_page() for each ref is unnecessarily slow. Only the last
         * ref needs a put_page().
         */
        if (refs > 1)
                page_ref_sub(page, refs - 1);
        put_page(page);
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
        struct page *head = compound_head(page);

        if (WARN_ON_ONCE(page_ref_count(head) < 0))
                return NULL;
        if (unlikely(!page_cache_add_speculative(head, refs)))
                return NULL;

        /*
         * At this point we have a stable reference to the head page; but it
         * could be that between the compound_head() lookup and the refcount
         * increment, the compound page was split, in which case we'd end up
         * holding a reference on a page that has nothing to do with the page
         * we were given anymore.
         * So now that the head page is stable, recheck that the pages still
         * belong together.
         */
        if (unlikely(compound_head(page) != head)) {
                put_page_refs(head, refs);
                return NULL;
        }

        return head;
}

/**
 * try_grab_compound_head() - attempt to elevate a page's refcount, by a
 * flags-dependent amount.
 *
 * Even though the name includes "compound_head", this function is still
 * appropriate for callers that have a non-compound @page to get.
 *
 * @page: pointer to page to be grabbed
 * @refs: the value to (effectively) add to the page's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 * FOLL_GET: page's refcount will be incremented by @refs.
 *
 * FOLL_PIN on compound pages that are > two pages long: page's refcount will
 * be incremented by @refs, and page[2].hpage_pinned_refcount will be
 * incremented by @refs.
 *
 * FOLL_PIN on normal pages, or compound pages that are two pages long:
 * page's refcount will be incremented by @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: head page (with refcount appropriately incremented) for success, or
 * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
 * considered failure, and furthermore, a likely bug in the caller, so a warning
 * is also emitted.
 */
__maybe_unused struct page *try_grab_compound_head(struct page *page,
                                                   int refs, unsigned int flags)
{
        if (flags & FOLL_GET)
                return try_get_compound_head(page, refs);
        else if (flags & FOLL_PIN) {
                /*
                 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
                 * right zone, so fail and let the caller fall back to the slow
                 * path.
                 */
                if (unlikely((flags & FOLL_LONGTERM) &&
                             !is_pinnable_page(page)))
                        return NULL;

                /*
                 * CAUTION: Don't use compound_head() on the page before this
                 * point, the result won't be stable.
                 */
                page = try_get_compound_head(page, refs);
                if (!page)
                        return NULL;

                /*
                 * When pinning a compound page of order > 1 (which is what
                 * hpage_pincount_available() checks for), use an exact count to
                 * track it, via hpage_pincount_add/_sub().
                 *
                 * However, be sure to *also* increment the normal page refcount
                 * field at least once, so that the page really is pinned.
                 * That's why the refcount from the earlier
                 * try_get_compound_head() is left intact.
                 */
                if (hpage_pincount_available(page))
                        hpage_pincount_add(page, refs);
                else
                        page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));

                mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
                                    refs);

                return page;
        }

        WARN_ON_ONCE(1);
        return NULL;
}
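
/*
 * Illustrative arithmetic (added commentary, not part of the original
 * source): with GUP_PIN_COUNTING_BIAS == 1024 (1U << 10), a single
 * FOLL_PIN grab (refs == 1) in try_grab_compound_head() works out as:
 *
 *      order 0 or 1 page:    page refcount += 1024
 *      order >= 2 compound:  head page refcount += 1, and
 *                            page[2].hpage_pinned_refcount += 1
 *
 * Either way the refcount is visibly elevated, which is what lets
 * page_maybe_dma_pinned() detect pins, and put_compound_head() below
 * subtracts exactly the same amounts.
 */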

static void put_compound_head(struct page *page, int refs, unsigned int flags)
{
        if (flags & FOLL_PIN) {
                mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
                                    refs);

                if (hpage_pincount_available(page))
                        hpage_pincount_sub(page, refs);
                else
                        refs *= GUP_PIN_COUNTING_BIAS;
        }

        put_page_refs(page, refs);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
 *
 * @page: pointer to page to be grabbed
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases: please see the try_grab_compound_head() documentation, with
 * "refs=1".
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
        WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));

        if (flags & FOLL_GET)
                return try_get_page(page);
        else if (flags & FOLL_PIN) {
                int refs = 1;

                page = compound_head(page);

                if (WARN_ON_ONCE(page_ref_count(page) <= 0))
                        return false;

                if (hpage_pincount_available(page))
                        hpage_pincount_add(page, 1);
                else
                        refs = GUP_PIN_COUNTING_BIAS;

                /*
                 * Similar to try_grab_compound_head(): even if using the
                 * hpage_pincount_add/_sub() routines, be sure to
                 * *also* increment the normal page refcount field at least
                 * once, so that the page really is pinned.
                 */
                page_ref_add(page, refs);

                mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
        }

        return true;
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
        put_compound_head(compound_head(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);
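
/*
 * Example usage (an illustrative sketch, not taken from the original
 * source; error handling elided, user_addr is a hypothetical page-aligned
 * user address):
 *
 *      struct page *pages[16];
 *      long i, n;
 *
 *      n = pin_user_pages_fast(user_addr, 16, FOLL_WRITE, pages);
 *      if (n > 0) {
 *              ... DMA into the pinned pages, wait for completion ...
 *              for (i = 0; i < n; i++)
 *                      unpin_user_page(pages[i]);
 *      }
 *
 * For the common "release and maybe dirty" pattern, see
 * unpin_user_pages_dirty_lock() below.
 */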

static inline void compound_range_next(unsigned long i, unsigned long npages,
                                       struct page **list, struct page **head,
                                       unsigned int *ntails)
{
        struct page *next, *page;
        unsigned int nr = 1;

        if (i >= npages)
                return;

        next = *list + i;
        page = compound_head(next);
        if (PageCompound(page) && compound_order(page) >= 1)
                nr = min_t(unsigned int,
                           page + compound_nr(page) - next, npages - i);

        *head = page;
        *ntails = nr;
}

#define for_each_compound_range(__i, __list, __npages, __head, __ntails) \
        for (__i = 0, \
             compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \
             __i < __npages; __i += __ntails, \
             compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))

static inline void compound_next(unsigned long i, unsigned long npages,
                                 struct page **list, struct page **head,
                                 unsigned int *ntails)
{
        struct page *page;
        unsigned int nr;

        if (i >= npages)
                return;

        page = compound_head(list[i]);
        for (nr = i + 1; nr < npages; nr++) {
                if (compound_head(list[nr]) != page)
                        break;
        }

        *head = page;
        *ntails = nr - i;
}

#define for_each_compound_head(__i, __list, __npages, __head, __ntails) \
        for (__i = 0, \
             compound_next(__i, __npages, __list, &(__head), &(__ntails)); \
             __i < __npages; __i += __ntails, \
             compound_next(__i, __npages, __list, &(__head), &(__ntails)))
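
/*
 * Illustrative example (added commentary, not part of the original
 * source): suppose pages[0..4] holds four consecutive subpages of one
 * compound page followed by a single order-0 page. Then
 * for_each_compound_head() performs two iterations instead of five:
 *
 *      iteration 1: head = the compound head, ntails = 4
 *      iteration 2: head = the order-0 page,  ntails = 1
 *
 * so the unpin paths below can drop several references with one atomic
 * operation per compound page rather than one per subpage.
 */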

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages: array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                                 bool make_dirty)
{
        unsigned long index;
        struct page *head;
        unsigned int ntails;

        if (!make_dirty) {
                unpin_user_pages(pages, npages);
                return;
        }

        for_each_compound_head(index, pages, npages, head, ntails) {
                /*
                 * Checking PageDirty at this point may race with
                 * clear_page_dirty_for_io(), but that's OK. Two key
                 * cases:
                 *
                 * 1) This code sees the page as already dirty, so it
                 * skips the call to set_page_dirty(). That could happen
                 * because clear_page_dirty_for_io() called
                 * page_mkclean(), followed by set_page_dirty().
                 * However, now the page is going to get written back,
                 * which meets the original intention of setting it
                 * dirty, so all is well: clear_page_dirty_for_io() goes
                 * on to call TestClearPageDirty(), and write the page
                 * back.
                 *
                 * 2) This code sees the page as clean, so it calls
                 * set_page_dirty(). The page stays dirty, despite being
                 * written back, so it gets written back again in the
                 * next writeback cycle. This is harmless.
                 */
                if (!PageDirty(head))
                        set_page_dirty_lock(head);
                put_compound_head(head, ntails, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
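
/*
 * Example usage (an illustrative sketch, not taken from the original
 * source): a driver completing a device write into previously pinned
 * user buffers would typically do:
 *
 *      unpin_user_pages_dirty_lock(pages, npages, true);
 *
 * while a device read (user memory was only a source, never modified)
 * would pass false, making the call equivalent to
 * unpin_user_pages(pages, npages).
 */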

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page: the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that range.
 *
 * For the page ranges defined by [page .. page+npages - 1], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
                                      bool make_dirty)
{
        unsigned long index;
        struct page *head;
        unsigned int ntails;

        for_each_compound_range(index, &page, npages, head, ntails) {
                if (make_dirty && !PageDirty(head))
                        set_page_dirty_lock(head);
                put_compound_head(head, ntails, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages: array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
        unsigned long index;
        struct page *head;
        unsigned int ntails;

        /*
         * If this WARN_ON() fires, then the system *might* be leaking pages (by
         * leaving them pinned), but probably not. More likely, gup/pup returned
         * a hard -ERRNO error to the caller, who erroneously passed it here.
         */
        if (WARN_ON(IS_ERR_VALUE(npages)))
                return;

        for_each_compound_head(index, pages, npages, head, ntails)
                put_compound_head(head, ntails, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_pages);

/*
 * Set the MMF_HAS_PINNED flag if it is not set yet; once set, it stays set for
 * the mm's lifecycle. Avoid setting the bit unless necessary, or it might cause
 * write cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
        if (!test_bit(MMF_HAS_PINNED, mm_flags))
                set_bit(MMF_HAS_PINNED, mm_flags);
}
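
/*
 * Note (added commentary, not part of the original source): the unlocked
 * test_bit() above is a benign race. Two tasks may both observe the bit
 * clear and both call set_bit(), which is harmless because the bit is
 * set-only for the mm's lifetime; the point of the test is merely that the
 * common already-set case avoids a dirtying atomic write to mm->flags.
 */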

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
                                  unsigned int flags)
{
        /*
         * When core dumping an enormous anonymous area that nobody
         * has touched so far, we don't want to allocate unnecessary pages or
         * page tables. Return error instead of NULL to skip handle_mm_fault,
         * then get_dump_page() will return NULL to leave a hole in the dump.
         * But we can only make this optimization where a hole would surely
         * be zero-filled if handle_mm_fault() actually did handle it.
         */
        if ((flags & FOLL_DUMP) &&
            (vma_is_anonymous(vma) || !vma->vm_ops->fault))
                return ERR_PTR(-EFAULT);
        return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
                          pte_t *pte, unsigned int flags)
{
        /* No page to get reference */
        if (flags & (FOLL_GET | FOLL_PIN))
                return -EFAULT;

        if (flags & FOLL_TOUCH) {
                pte_t entry = *pte;

                if (flags & FOLL_WRITE)
                        entry = pte_mkdirty(entry);
                entry = pte_mkyoung(entry);

                if (!pte_same(*pte, entry)) {
                        set_pte_at(vma->vm_mm, address, pte, entry);
                        update_mmu_cache(vma, address, pte);
                }
        }

        /* Proper page table entry exists, but no corresponding struct page */
        return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
        return pte_write(pte) ||
                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
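
/*
 * Illustrative truth table (added commentary, not part of the original
 * source) for can_follow_write_pte():
 *
 *      pte_write()  (FOLL_FORCE && FOLL_COW && pte_dirty())  ->  result
 *           1                    anything                        true
 *           0                       1                            true
 *           0                       0                            false
 *
 * i.e. an unwritable pte may only be followed for a write once
 * faultin_page() has set FOLL_COW after breaking COW, and the pte has been
 * dirtied by that COW cycle.
 */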

static struct page *follow_page_pte(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd, unsigned int flags,
                struct dev_pagemap **pgmap)
{
        struct mm_struct *mm = vma->vm_mm;
        struct page *page;
        spinlock_t *ptl;
        pte_t *ptep, pte;
        int ret;

        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
        if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                         (FOLL_PIN | FOLL_GET)))
                return ERR_PTR(-EINVAL);

        /*
         * Consider PTE-level hugetlb, like contiguous-PTE hugetlb on
         * the ARM64 architecture.
         */
        if (is_vm_hugetlb_page(vma)) {
                page = follow_huge_pmd_pte(vma, address, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }

retry:
        if (unlikely(pmd_bad(*pmd)))
                return no_page_table(vma, flags);

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!pte_present(pte)) {
                swp_entry_t entry;
                /*
                 * KSM's break_ksm() relies upon recognizing a ksm page
                 * even while it is being migrated, so for that case we
                 * need migration_entry_wait().
                 */
                if (likely(!(flags & FOLL_MIGRATION)))
                        goto no_page;
                if (pte_none(pte))
                        goto no_page;
                entry = pte_to_swp_entry(pte);
                if (!is_migration_entry(entry))
                        goto no_page;
                pte_unmap_unlock(ptep, ptl);
                migration_entry_wait(mm, pmd, address);
                goto retry;
        }
        if ((flags & FOLL_NUMA) && pte_protnone(pte))
                goto no_page;
        if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
                pte_unmap_unlock(ptep, ptl);
                return NULL;
        }

        page = vm_normal_page(vma, address, pte);
        if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
                /*
                 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
                 * case since they are only valid while holding the pgmap
                 * reference.
                 */
                *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
                if (*pgmap)
                        page = pte_page(pte);
                else
                        goto no_page;
        } else if (unlikely(!page)) {
                if (flags & FOLL_DUMP) {
                        /* Avoid special (like zero) pages in core dumps */
                        page = ERR_PTR(-EFAULT);
                        goto out;
                }

                if (is_zero_pfn(pte_pfn(pte))) {
                        page = pte_page(pte);
                } else {
                        ret = follow_pfn_pte(vma, address, ptep, flags);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }

        /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
        if (unlikely(!try_grab_page(page, flags))) {
                page = ERR_PTR(-ENOMEM);
                goto out;
        }
        /*
         * We need to make the page accessible if and only if we are going
         * to access its content (the FOLL_PIN case). Please see
         * Documentation/core-api/pin_user_pages.rst for details.
         */
        if (flags & FOLL_PIN) {
                ret = arch_make_page_accessible(page);
                if (ret) {
                        unpin_user_page(page);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
                        set_page_dirty(page);
                /*
                 * pte_mkyoung() would be more correct here, but atomic care
                 * is needed to avoid losing the dirty bit: it is easier to use
                 * mark_page_accessed().
                 */
                mark_page_accessed(page);
        }
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                /* Do not mlock pte-mapped THP */
                if (PageTransCompound(page))
                        goto out;

                /*
                 * The preliminary mapping check is mainly to avoid the
                 * pointless overhead of lock_page on the ZERO_PAGE
                 * which might bounce very badly if there is contention.
                 *
                 * If the page is already locked, we don't need to
                 * handle it now - vmscan will handle it later if and
                 * when it attempts to reclaim the page.
                 */
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();        /* push cached pages to LRU */
                        /*
                         * Because we lock page here, and migration is
                         * blocked by the pte's page reference, and we
                         * know the page is still mapped, we don't even
                         * need to check for file-cache page truncation.
                         */
                        mlock_vma_page(page);
                        unlock_page(page);
                }
        }
out:
        pte_unmap_unlock(ptep, ptl);
        return page;
no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
                return NULL;
        return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                                    unsigned long address, pud_t *pudp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pmd_t *pmd, pmdval;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pmd = pmd_offset(pudp, address);
        /*
         * The READ_ONCE() will stabilize the pmdval in a register or
         * on the stack so that it will stop changing under the code.
         */
        pmdval = READ_ONCE(*pmd);
        if (pmd_none(pmdval))
                return no_page_table(vma, flags);
        if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
                page = follow_huge_pmd_pte(vma, address, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pmd_val(pmdval)), flags,
                                      PMD_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
retry:
        if (!pmd_present(pmdval)) {
                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);
                VM_BUG_ON(thp_migration_supported() &&
                          !is_pmd_migration_entry(pmdval));
                if (is_pmd_migration_entry(pmdval))
                        pmd_migration_entry_wait(mm, pmd);
                pmdval = READ_ONCE(*pmd);
                /*
                 * MADV_DONTNEED may convert the pmd to null because
                 * mmap_lock is held in read mode
                 */
                if (pmd_none(pmdval))
                        return no_page_table(vma, flags);
                goto retry;
        }
        if (pmd_devmap(pmdval)) {
                ptl = pmd_lock(mm, pmd);
                page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
        }
        if (likely(!pmd_trans_huge(pmdval)))
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

        if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
                return no_page_table(vma, flags);

retry_locked:
        ptl = pmd_lock(mm, pmd);
        if (unlikely(pmd_none(*pmd))) {
                spin_unlock(ptl);
                return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_present(*pmd))) {
                spin_unlock(ptl);
                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);
                pmd_migration_entry_wait(mm, pmd);
                goto retry_locked;
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        if (flags & FOLL_SPLIT_PMD) {
                int ret;
                page = pmd_page(*pmd);
                if (is_huge_zero_page(page)) {
                        spin_unlock(ptl);
                        ret = 0;
                        split_huge_pmd(vma, pmd, address);
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
                } else {
                        spin_unlock(ptl);
                        split_huge_pmd(vma, pmd, address);
                        ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
                }

                return ret ? ERR_PTR(ret) :
                        follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        page = follow_trans_huge_pmd(vma, address, pmd, flags);
        spin_unlock(ptl);
        ctx->page_mask = HPAGE_PMD_NR - 1;
        return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
                                    unsigned long address, p4d_t *p4dp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pud_t *pud;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pud = pud_offset(p4dp, address);
        if (pud_none(*pud))
                return no_page_table(vma, flags);
        if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
                page = follow_huge_pud(mm, address, pud, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pud_val(*pud)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pud_val(*pud)), flags,
                                      PUD_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (pud_devmap(*pud)) {
                ptl = pud_lock(mm, pud);
                page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
        }
        if (unlikely(pud_bad(*pud)))
                return no_page_table(vma, flags);

        return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
                                    unsigned long address, pgd_t *pgdp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        p4d_t *p4d;
        struct page *page;

        p4d = p4d_offset(pgdp, address);
        if (p4d_none(*p4d))
                return no_page_table(vma, flags);
        BUILD_BUG_ON(p4d_huge(*p4d));
        if (unlikely(p4d_bad(*p4d)))
                return no_page_table(vma, flags);

        if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(p4d_val(*p4d)), flags,
                                      P4D_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
                                     unsigned long address, unsigned int flags,
                                     struct follow_page_context *ctx)
{
        pgd_t *pgd;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        ctx->page_mask = 0;

        /* make this handle hugepd */
        page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
        if (!IS_ERR(page)) {
                WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
                return page;
        }

        pgd = pgd_offset(mm, address);

        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);

        if (pgd_huge(*pgd)) {
                page = follow_huge_pgd(mm, address, pgd, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pgd_val(*pgd)), flags,
                                      PGDIR_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }

        return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                         unsigned int foll_flags)
{
        struct follow_page_context ctx = { NULL };
        struct page *page;

        if (vma_is_secretmem(vma))
                return NULL;

        page = follow_page_mask(vma, address, foll_flags, &ctx);
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
        return page;
}
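
/*
 * Example usage (an illustrative sketch, not taken from the original
 * source; in the style of callers such as KSM): follow_page() must be
 * called with the mmap_lock held, e.g.:
 *
 *      mmap_read_lock(mm);
 *      vma = find_vma(mm, addr);
 *      if (vma && addr >= vma->vm_start)
 *              page = follow_page(vma, addr, FOLL_GET);
 *      mmap_read_unlock(mm);
 *
 * NULL means no page is mapped at addr; an ERR_PTR() means a mapping
 * exists but has no struct page, or an error occurred (see
 * follow_page_mask() above). With FOLL_GET, a non-error result must
 * eventually be released with put_page().
 */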

static int get_gate_page(struct mm_struct *mm, unsigned long address,
                unsigned int gup_flags, struct vm_area_struct **vma,
                struct page **page)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret = -EFAULT;

        /* user gate pages are read-only */
        if (gup_flags & FOLL_WRITE)
                return -EFAULT;
        if (address > TASK_SIZE)
                pgd = pgd_offset_k(address);
        else
                pgd = pgd_offset_gate(mm, address);
        if (pgd_none(*pgd))
                return -EFAULT;
        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d))
                return -EFAULT;
        pud = pud_offset(p4d, address);
        if (pud_none(*pud))
                return -EFAULT;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return -EFAULT;
        VM_BUG_ON(pmd_trans_huge(*pmd));
        pte = pte_offset_map(pmd, address);
        if (pte_none(*pte))
                goto unmap;
        *vma = get_gate_vma(mm);
        if (!page)
                goto out;
        *page = vm_normal_page(*vma, address, *pte);
        if (!*page) {
                if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
                        goto unmap;
                *page = pte_page(*pte);
        }
        if (unlikely(!try_grab_page(*page, gup_flags))) {
                ret = -ENOMEM;
                goto unmap;
        }
out:
        ret = 0;
unmap:
        pte_unmap(pte);
        return ret;
}

/*
 * mmap_lock must be held on entry. If @locked != NULL and *@flags
 * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
 * is, *@locked will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int *flags, int *locked)
{
        unsigned int fault_flags = 0;
        vm_fault_t ret;

        /* mlock all present pages, but do not fault in new pages */
        if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
                return -ENOENT;
        if (*flags & FOLL_NOFAULT)
                return -EFAULT;
        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (*flags & FOLL_REMOTE)
                fault_flags |= FAULT_FLAG_REMOTE;
        if (locked)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (*flags & FOLL_NOWAIT)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
        if (*flags & FOLL_TRIED) {
                /*
                 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
                 * can co-exist
                 */
                fault_flags |= FAULT_FLAG_TRIED;
        }

        ret = handle_mm_fault(vma, address, fault_flags, NULL);
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, *flags);

                if (err)
                        return err;
                BUG();
        }

        if (ret & VM_FAULT_RETRY) {
                if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                        *locked = 0;
                return -EBUSY;
        }

        /*
         * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
         * necessary, even if maybe_mkwrite decided not to set pte_write. We
         * can thus safely do subsequent page lookups as if they were reads.
         * But only do so when looping for pte_write is futile: in some cases
         * userspace may also be wanting to write to the gotten user page,
         * which a read fault here might prevent (a readonly page might get
         * reCOWed by userspace write).
         */
        if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
                *flags |= FOLL_COW;
        return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
        vm_flags_t vm_flags = vma->vm_flags;
        int write = (gup_flags & FOLL_WRITE);
        int foreign = (gup_flags & FOLL_REMOTE);

        if (vm_flags & (VM_IO | VM_PFNMAP))
                return -EFAULT;

        if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
                return -EFAULT;

        if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
                return -EOPNOTSUPP;

        if (vma_is_secretmem(vma))
                return -EFAULT;

        if (write) {
                if (!(vm_flags & VM_WRITE)) {
                        if (!(gup_flags & FOLL_FORCE))
                                return -EFAULT;
                        /*
                         * We used to let the write,force case do COW in a
                         * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
                         * set a breakpoint in a read-only mapping of an
                         * executable, without corrupting the file (yet only
                         * when that file had been opened for writing!).
                         * Anon pages in shared mappings are surprising: now
                         * just reject it.
                         */
                        if (!is_cow_mapping(vm_flags))
                                return -EFAULT;
                }
        } else if (!(vm_flags & VM_READ)) {
                if (!(gup_flags & FOLL_FORCE))
                        return -EFAULT;
                /*
                 * Is there actually any vma we can reach here which does not
                 * have VM_MAYREAD set?
                 */
                if (!(vm_flags & VM_MAYREAD))
                        return -EFAULT;
        }
        /*
         * gups are always data accesses, not instruction
         * fetches, so execute=false here
         */
        if (!arch_vma_access_permitted(vma, write, false, foreign))
                return -EFAULT;
        return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm: mm_struct of target mm
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @gup_flags: flags modifying pin behaviour
 * @pages: array that receives pointers to the pages pinned.
 *         Should be at least nr_pages long. Or NULL, if caller
 *         only intends to ensure the pages are faulted in.
 * @vmas: array of pointers to vmas corresponding to each page.
 *        Or NULL if the caller does not require them.
 * @locked: whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_lock is held.
 *
 * Must be called with mmap_lock held. It may be released. See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
 * released by an up_read(). That can happen if @gup_flags does not
 * have FOLL_NOWAIT.
 *
 * A caller using such a combination of @locked and @gup_flags
 * must therefore hold the mmap_lock for reading only, and recognize
 * when it's been released. Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked)
{
        long ret = 0, i = 0;
        struct vm_area_struct *vma = NULL;
        struct follow_page_context ctx = { NULL };

        if (!nr_pages)
                return 0;

        start = untagged_addr(start);

        VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

        /*
         * If FOLL_FORCE is set then do not force a full fault as the hinting
         * fault information is unrelated to the reference behaviour of a task
         * using the address space
         */
        if (!(gup_flags & FOLL_FORCE))
                gup_flags |= FOLL_NUMA;

        do {
                struct page *page;
                unsigned int foll_flags = gup_flags;
                unsigned int page_increm;

                /* first iteration or cross vma bound */
                if (!vma || start >= vma->vm_end) {
                        vma = find_extend_vma(mm, start);
                        if (!vma && in_gate_area(mm, start)) {
                                ret = get_gate_page(mm, start & PAGE_MASK,
                                                gup_flags, &vma,
                                                pages ? &pages[i] : NULL);
                                if (ret)
                                        goto out;
                                ctx.page_mask = 0;
                                goto next_page;
                        }

                        if (!vma) {
                                ret = -EFAULT;
                                goto out;
                        }
                        ret = check_vma_flags(vma, gup_flags);
                        if (ret)
                                goto out;

                        if (is_vm_hugetlb_page(vma)) {
                                i = follow_hugetlb_page(mm, vma, pages, vmas,
                                                &start, &nr_pages, i,
                                                gup_flags, locked);
                                if (locked && *locked == 0) {
                                        /*
                                         * We've got a VM_FAULT_RETRY
                                         * and we've lost mmap_lock.
                                         * We must stop here.
                                         */
                                        BUG_ON(gup_flags & FOLL_NOWAIT);
                                        goto out;
                                }
                                continue;
                        }
                }
retry:
                /*
                 * If we have a pending SIGKILL, don't keep faulting pages and
                 * potentially allocating memory.
                 */
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
                cond_resched();

                page = follow_page_mask(vma, start, foll_flags, &ctx);
                if (!page) {
                        ret = faultin_page(vma, start, &foll_flags, locked);
                        switch (ret) {
                        case 0:
                                goto retry;
                        case -EBUSY:
                                ret = 0;
                                fallthrough;
                        case -EFAULT:
                        case -ENOMEM:
                        case -EHWPOISON:
                                goto out;
                        case -ENOENT:
                                goto next_page;
                        }
                        BUG();
                } else if (PTR_ERR(page) == -EEXIST) {
                        /*
                         * Proper page table entry exists, but no corresponding
                         * struct page.
                         */
                        goto next_page;
                } else if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
                if (pages) {
                        pages[i] = page;
                        flush_anon_page(vma, page, start);
                        flush_dcache_page(page);
                        ctx.page_mask = 0;
                }
next_page:
                if (vmas) {
                        vmas[i] = vma;
                        ctx.page_mask = 0;
                }
                page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
                if (page_increm > nr_pages)
                        page_increm = nr_pages;
                i += page_increm;
                start += page_increm * PAGE_SIZE;
                nr_pages -= page_increm;
        } while (nr_pages);
out:
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
        return i ? i : ret;
}
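
/*
 * Illustrative caller pattern (added commentary, not part of the original
 * source): since __get_user_pages() may return fewer pages than requested,
 * internal callers either loop or treat a short count as an error,
 * roughly:
 *
 *      long got = __get_user_pages(mm, start, nr, gup_flags, pages,
 *                                  NULL, &locked);
 *      if (got < 0)
 *              return got;     (hard error, nothing to release)
 *      if (got < nr)
 *              (partial: release the pages obtained, then retry or fail)
 *
 * __get_user_pages_locked() below wraps exactly this kind of retry loop
 * around the VM_FAULT_RETRY case.
 */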

static bool vma_permits_fault(struct vm_area_struct *vma,
                              unsigned int fault_flags)
{
        bool write = !!(fault_flags & FAULT_FLAG_WRITE);
        bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
        vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

        if (!(vm_flags & vma->vm_flags))
                return false;

        /*
         * The architecture might have a hardware protection
         * mechanism other than read/write that can deny access.
         *
         * gup always represents data access, not instruction
         * fetches, so execute=false here:
         */
        if (!arch_vma_access_permitted(vma, write, false, foreign))
                return false;

        return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm: mm_struct of target mm
 * @address: user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
 *            does not allow retry. If NULL, the caller must guarantee
 *            that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it does not
 * have the same semantics wrt the @mm->mmap_lock as does filemap_fault().
 */
int fixup_user_fault(struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags,
                     bool *unlocked)
{
        struct vm_area_struct *vma;
        vm_fault_t ret;

        address = untagged_addr(address);

        if (unlocked)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                return -EFAULT;

        if (!vma_permits_fault(vma, fault_flags))
                return -EFAULT;

        if ((fault_flags & FAULT_FLAG_KILLABLE) &&
            fatal_signal_pending(current))
                return -EINTR;

        ret = handle_mm_fault(vma, address, fault_flags, NULL);
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, 0);

                if (err)
                        return err;
                BUG();
        }

        if (ret & VM_FAULT_RETRY) {
                mmap_read_lock(mm);
                *unlocked = true;
                fault_flags |= FAULT_FLAG_TRIED;
                goto retry;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
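
/*
 * Example usage (an illustrative sketch, modelled loosely on the futex
 * code, not taken from the original source): resolve a fault that was hit
 * while accessing user memory with page faults disabled:
 *
 *      for (;;) {
 *              pagefault_disable();
 *              ret = __get_user(val, uaddr);
 *              pagefault_enable();
 *              if (!ret)
 *                      break;
 *              mmap_read_lock(mm);
 *              ret = fixup_user_fault(mm, (unsigned long)uaddr, 0,
 *                                     &unlocked);
 *              mmap_read_unlock(mm);
 *              if (ret)
 *                      return ret;
 *      }
 *
 * (A write access would pass FAULT_FLAG_WRITE instead of 0.)
 */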

/*
 * Please note that this function, unlike __get_user_pages(), will not
 * return 0 for nr_pages > 0 without FOLL_NOWAIT
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long nr_pages,
                                                    struct page **pages,
                                                    struct vm_area_struct **vmas,
                                                    int *locked,
                                                    unsigned int flags)
{
        long ret, pages_done;
        bool lock_dropped;

        if (locked) {
                /* if VM_FAULT_RETRY can be returned, vmas become invalid */
                BUG_ON(vmas);
                /* check caller initialized locked */
                BUG_ON(*locked != 1);
        }

        if (flags & FOLL_PIN)
                mm_set_has_pinned_flag(&mm->flags);

        /*
         * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
         * is to set FOLL_GET if the caller wants pages[] filled in (but has
         * carelessly failed to specify FOLL_GET), so keep doing that, but only
         * for FOLL_GET, not for the newer FOLL_PIN.
         *
         * FOLL_PIN always expects pages to be non-null, but no need to assert
         * that here, as any failures will be obvious enough.
         */
        if (pages && !(flags & FOLL_PIN))
                flags |= FOLL_GET;

        pages_done = 0;
        lock_dropped = false;
        for (;;) {
                ret = __get_user_pages(mm, start, nr_pages, flags, pages,
                                       vmas, locked);
                if (!locked)
                        /* VM_FAULT_RETRY couldn't trigger, bypass */
                        return ret;

                /* VM_FAULT_RETRY cannot return errors */
                if (!*locked) {
                        BUG_ON(ret < 0);
                        BUG_ON(ret >= nr_pages);
                }

                if (ret > 0) {
                        nr_pages -= ret;
                        pages_done += ret;
                        if (!nr_pages)
                                break;
                }
                if (*locked) {
                        /*
                         * VM_FAULT_RETRY didn't trigger or it was a
                         * FOLL_NOWAIT.
                         */
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }
                /*
                 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
                 * For the prefault case (!pages) we only update counts.
                 */
                if (likely(pages))
                        pages += ret;
                start += ret << PAGE_SHIFT;
                lock_dropped = true;

retry:
                /*
                 * Repeat on the address that fired VM_FAULT_RETRY
                 * with both FAULT_FLAG_ALLOW_RETRY and
                 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
                 * by fatal signals, so we need to check it before we
                 * start trying again otherwise it can loop forever.
                 */

                if (fatal_signal_pending(current)) {
                        if (!pages_done)
                                pages_done = -EINTR;
                        break;
                }

                ret = mmap_read_lock_killable(mm);
                if (ret) {
                        BUG_ON(ret > 0);
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }

                *locked = 1;
                ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
                                       pages, NULL, locked);
                if (!*locked) {
                        /* Continue to retry until we succeeded */
                        BUG_ON(ret != 0);
                        goto retry;
                }
                if (ret != 1) {
                        BUG_ON(ret > 1);
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }
                nr_pages--;
                pages_done++;
                if (!nr_pages)
                        break;
                if (likely(pages))
                        pages++;
                start += PAGE_SIZE;
        }
        if (lock_dropped && *locked) {
                /*
                 * We must let the caller know we temporarily dropped the lock
                 * and so the critical section protected by it was lost.
                 */
                mmap_read_unlock(mm);
                *locked = 0;
        }
        return pages_done;
}

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma: target vma
 * @start: start address
 * @end: end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be
 * released. If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *locked)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;

        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
        VM_BUG_ON_VMA(start < vma->vm_start, vma);
        VM_BUG_ON_VMA(end > vma->vm_end, vma);
        mmap_assert_locked(mm);

        gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
        if (vma->vm_flags & VM_LOCKONFAULT)
                gup_flags &= ~FOLL_POPULATE;
        /*
         * We want to touch writable mappings with a write fault in order
         * to break COW, except for shared mappings because these don't COW
         * and we would not want to dirty them for nothing.
         */
        if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
                gup_flags |= FOLL_WRITE;

        /*
         * We want mlock to succeed for regions that have any permissions
         * other than PROT_NONE.
         */
        if (vma_is_accessible(vma))
                gup_flags |= FOLL_FORCE;

        /*
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
        return __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
}
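
/*
 * Illustrative note (added commentary, not part of the original source):
 * this is the workhorse behind mlock()-style population. __mm_populate()
 * below, for each VMA intersecting the request, does essentially:
 *
 *      ret = populate_vma_page_range(vma, nstart, nend, &locked);
 *
 * faulting the pages in (and mlocking them if VM_LOCKED) without
 * collecting them anywhere, since NULL is passed for both @pages and
 * @vmas.
 */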
1532
1533 /*
1534 * faultin_vma_page_range() - populate (prefault) page tables inside the
1535 * given VMA range readable/writable
1536 *
1537 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1538 *
1539 * @vma: target vma
1540 * @start: start address
1541 * @end: end address
1542 * @write: whether to prefault readable or writable
1543 * @locked: whether the mmap_lock is still held
1544 *
1545 * Returns either number of processed pages in the vma, or a negative error
1546 * code on error (see __get_user_pages()).
1547 *
1548 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
1549 * covered by the VMA.
1550 *
1551 * If @locked is NULL, it may be held for read or write and will be unperturbed.
1552 *
1553 * If @locked is non-NULL, it must held for read only and may be released. If
1554 * it's released, *@locked will be set to 0.
1555 */
faultin_vma_page_range(struct vm_area_struct * vma,unsigned long start,unsigned long end,bool write,int * locked)1556 long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
1557 unsigned long end, bool write, int *locked)
1558 {
1559 struct mm_struct *mm = vma->vm_mm;
1560 unsigned long nr_pages = (end - start) / PAGE_SIZE;
1561 int gup_flags;
1562
1563 VM_BUG_ON(!PAGE_ALIGNED(start));
1564 VM_BUG_ON(!PAGE_ALIGNED(end));
1565 VM_BUG_ON_VMA(start < vma->vm_start, vma);
1566 VM_BUG_ON_VMA(end > vma->vm_end, vma);
1567 mmap_assert_locked(mm);
1568
1569 /*
1570 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1571 * the page dirty with FOLL_WRITE -- which doesn't make a
1572 * difference with !FOLL_FORCE, because the page is writable
1573 * in the page table.
1574 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1575 * a poisoned page.
1576 * FOLL_POPULATE: Always populate memory with VM_LOCKONFAULT.
1577 * !FOLL_FORCE: Require proper access permissions.
1578 */
1579 gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK | FOLL_HWPOISON;
1580 if (write)
1581 gup_flags |= FOLL_WRITE;
1582
1583 /*
1584 * We want to report -EINVAL instead of -EFAULT for any permission
1585 * problems or incompatible mappings.
1586 */
1587 if (check_vma_flags(vma, gup_flags))
1588 return -EINVAL;
1589
1590 return __get_user_pages(mm, start, nr_pages, gup_flags,
1591 NULL, NULL, locked);
1592 }
1593
1594 /*
1595 * __mm_populate - populate and/or mlock pages within a range of address space.
1596 *
1597 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1598 * flags. VMAs must be already marked with the desired vm_flags, and
1599 * mmap_lock must not be held.
1600 */
__mm_populate(unsigned long start,unsigned long len,int ignore_errors)1601 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1602 {
1603 struct mm_struct *mm = current->mm;
1604 unsigned long end, nstart, nend;
1605 struct vm_area_struct *vma = NULL;
1606 int locked = 0;
1607 long ret = 0;
1608
1609 end = start + len;
1610
1611 for (nstart = start; nstart < end; nstart = nend) {
1612 /*
1613 * We want to fault in pages for [nstart; end) address range.
1614 * Find first corresponding VMA.
1615 */
1616 if (!locked) {
1617 locked = 1;
1618 mmap_read_lock(mm);
1619 vma = find_vma(mm, nstart);
1620 } else if (nstart >= vma->vm_end)
1621 vma = vma->vm_next;
1622 if (!vma || vma->vm_start >= end)
1623 break;
1624 /*
1625 * Set [nstart; nend) to intersection of desired address
1626 * range with the first VMA. Also, skip undesirable VMA types.
1627 */
1628 nend = min(end, vma->vm_end);
1629 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1630 continue;
1631 if (nstart < vma->vm_start)
1632 nstart = vma->vm_start;
1633 /*
1634 * Now fault in a range of pages. populate_vma_page_range()
1635 * double checks the vma flags, so that it won't mlock pages
1636 * if the vma was already munlocked.
1637 */
1638 ret = populate_vma_page_range(vma, nstart, nend, &locked);
1639 if (ret < 0) {
1640 if (ignore_errors) {
1641 ret = 0;
1642 continue; /* continue at next VMA */
1643 }
1644 break;
1645 }
1646 nend = nstart + ret * PAGE_SIZE;
1647 ret = 0;
1648 }
1649 if (locked)
1650 mmap_read_unlock(mm);
1651 return ret; /* 0 or negative error code */
1652 }
1653 #else /* CONFIG_MMU */
__get_user_pages_locked(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,struct page ** pages,struct vm_area_struct ** vmas,int * locked,unsigned int foll_flags)1654 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
1655 unsigned long nr_pages, struct page **pages,
1656 struct vm_area_struct **vmas, int *locked,
1657 unsigned int foll_flags)
1658 {
1659 struct vm_area_struct *vma;
1660 unsigned long vm_flags;
1661 long i;
1662
1663 /* calculate required read or write permissions.
1664 * If FOLL_FORCE is set, we only require the "MAY" flags.
1665 */
1666 vm_flags = (foll_flags & FOLL_WRITE) ?
1667 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1668 vm_flags &= (foll_flags & FOLL_FORCE) ?
1669 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1670
1671 for (i = 0; i < nr_pages; i++) {
1672 vma = find_vma(mm, start);
1673 if (!vma)
1674 goto finish_or_fault;
1675
1676 /* protect what we can, including chardevs */
1677 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1678 !(vm_flags & vma->vm_flags))
1679 goto finish_or_fault;
1680
1681 if (pages) {
1682 pages[i] = virt_to_page(start);
1683 if (pages[i])
1684 get_page(pages[i]);
1685 }
1686 if (vmas)
1687 vmas[i] = vma;
1688 start = (start + PAGE_SIZE) & PAGE_MASK;
1689 }
1690
1691 return i;
1692
1693 finish_or_fault:
1694 return i ? : -EFAULT;
1695 }
1696 #endif /* !CONFIG_MMU */
1697
1698 /**
1699 * fault_in_writeable - fault in userspace address range for writing
1700 * @uaddr: start of address range
1701 * @size: size of address range
1702 *
1703 * Returns the number of bytes not faulted in (like copy_to_user() and
1704 * copy_from_user()).
1705 */
fault_in_writeable(char __user * uaddr,size_t size)1706 size_t fault_in_writeable(char __user *uaddr, size_t size)
1707 {
1708 char __user *start = uaddr, *end;
1709
1710 if (unlikely(size == 0))
1711 return 0;
1712 if (!PAGE_ALIGNED(uaddr)) {
1713 if (unlikely(__put_user(0, uaddr) != 0))
1714 return size;
1715 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
1716 }
1717 end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
1718 if (unlikely(end < start))
1719 end = NULL;
1720 while (uaddr != end) {
1721 if (unlikely(__put_user(0, uaddr) != 0))
1722 goto out;
1723 uaddr += PAGE_SIZE;
1724 }
1725
1726 out:
1727 if (size > uaddr - start)
1728 return size - (uaddr - start);
1729 return 0;
1730 }
1731 EXPORT_SYMBOL(fault_in_writeable);
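/*
 * Illustrative sketch (not taken from an in-tree caller; all names other
 * than fault_in_writeable() are hypothetical): callers typically pair this
 * with a copy that is not allowed to fault, retrying until either the copy
 * completes or faulting in makes no further progress:
 *
 *	while (done < len) {
 *		left = my_nofault_copy_to_user(ubuf + done, kbuf + done,
 *					       len - done);
 *		done += (len - done) - left;
 *		if (!left)
 *			break;
 *		if (fault_in_writeable(ubuf + done, left) == left)
 *			return -EFAULT;
 *	}
 */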
1732
1733 /**
1734 * fault_in_subpage_writeable - fault in an address range for writing
1735 * @uaddr: start of address range
1736 * @size: size of address range
1737 *
1738 * Fault in a user address range for writing while checking for permissions at
1739 * sub-page granularity (e.g. arm64 MTE). This function should be used when
1740 * the caller cannot guarantee forward progress of a copy_to_user() loop.
1741 *
1742 * Returns the number of bytes not faulted in (like copy_to_user() and
1743 * copy_from_user()).
1744 */
1745 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
1746 {
1747 size_t faulted_in;
1748
1749 /*
1750 * Attempt faulting in at page granularity first for page table
1751 * permission checking. The arch-specific probe_subpage_writeable()
1752 * functions may not check for this.
1753 */
1754 faulted_in = size - fault_in_writeable(uaddr, size);
1755 if (faulted_in)
1756 faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
1757
1758 return size - faulted_in;
1759 }
1760 EXPORT_SYMBOL(fault_in_subpage_writeable);
1761
1762 /*
1763 * fault_in_safe_writeable - fault in an address range for writing
1764 * @uaddr: start of address range
1765 * @size: length of address range
1766 *
1767 * Faults in an address range for writing. This is primarily useful when we
1768 * already know that some or all of the pages in the address range aren't in
1769 * memory.
1770 *
1771 * Unlike fault_in_writeable(), this function is non-destructive.
1772 *
1773 * Note that we don't pin or otherwise hold the pages referenced that we fault
1774 * in. There's no guarantee that they'll stay in memory for any duration of
1775 * time.
1776 *
1777 * Returns the number of bytes not faulted in, like copy_to_user() and
1778 * copy_from_user().
1779 */
1780 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
1781 {
1782 unsigned long start = (unsigned long)uaddr, end;
1783 struct mm_struct *mm = current->mm;
1784 bool unlocked = false;
1785
1786 if (unlikely(size == 0))
1787 return 0;
1788 end = PAGE_ALIGN(start + size);
1789 if (end < start)
1790 end = 0;
1791
1792 mmap_read_lock(mm);
1793 do {
1794 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
1795 break;
1796 start = (start + PAGE_SIZE) & PAGE_MASK;
1797 } while (start != end);
1798 mmap_read_unlock(mm);
1799
1800 if (size > (unsigned long)uaddr - start)
1801 return size - ((unsigned long)uaddr - start);
1802 return 0;
1803 }
1804 EXPORT_SYMBOL(fault_in_safe_writeable);
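/*
 * Illustrative sketch (hypothetical caller): because the probe above uses
 * fixup_user_fault() rather than writing to the buffer, it is safe even when
 * the destination must keep its current contents until the real copy runs:
 *
 *	if (fault_in_safe_writeable(ubuf, len) == len)
 *		return -EFAULT;
 *	... then retry the copy, as in the fault_in_writeable() sketch ...
 */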
1805
1806 /**
1807 * fault_in_readable - fault in userspace address range for reading
1808 * @uaddr: start of user address range
1809 * @size: size of user address range
1810 *
1811 * Returns the number of bytes not faulted in (like copy_to_user() and
1812 * copy_from_user()).
1813 */
1814 size_t fault_in_readable(const char __user *uaddr, size_t size)
1815 {
1816 const char __user *start = uaddr, *end;
1817 volatile char c;
1818
1819 if (unlikely(size == 0))
1820 return 0;
1821 if (!PAGE_ALIGNED(uaddr)) {
1822 if (unlikely(__get_user(c, uaddr) != 0))
1823 return size;
1824 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
1825 }
1826 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
1827 if (unlikely(end < start))
1828 end = NULL;
1829 while (uaddr != end) {
1830 if (unlikely(__get_user(c, uaddr) != 0))
1831 goto out;
1832 uaddr += PAGE_SIZE;
1833 }
1834
1835 out:
1836 (void)c;
1837 if (size > uaddr - start)
1838 return size - (uaddr - start);
1839 return 0;
1840 }
1841 EXPORT_SYMBOL(fault_in_readable);
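/*
 * Illustrative sketch (hypothetical caller), mirroring the write-side
 * pattern for a copy *from* user space that must not fault:
 *
 *	left = my_nofault_copy_from_user(kbuf, ubuf, len);
 *	if (left && fault_in_readable(ubuf + (len - left), left) == left)
 *		return -EFAULT;
 *	... retry the copy for the remaining bytes ...
 */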
1842
1843 /**
1844 * get_dump_page() - pin user page in memory while writing it to core dump
1845 * @addr: user address
1846 *
1847 * Returns struct page pointer of user page pinned for dump,
1848 * to be freed afterwards by put_page().
1849 *
1850 * Returns NULL on any kind of failure - a hole must then be inserted into
1851 * the corefile, to preserve alignment with its headers; and also returns
1852 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1853 * allowing a hole to be left in the corefile to save disk space.
1854 *
1855 * Called without mmap_lock (takes and releases the mmap_lock by itself).
1856 */
1857 #ifdef CONFIG_ELF_CORE
1858 struct page *get_dump_page(unsigned long addr)
1859 {
1860 struct mm_struct *mm = current->mm;
1861 struct page *page;
1862 int locked = 1;
1863 int ret;
1864
1865 if (mmap_read_lock_killable(mm))
1866 return NULL;
1867 ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1868 FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1869 if (locked)
1870 mmap_read_unlock(mm);
1871 return (ret == 1) ? page : NULL;
1872 }
1873 #endif /* CONFIG_ELF_CORE */
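/*
 * Illustrative sketch (hypothetical dumper loop): a NULL return is not an
 * error and must become a hole in the corefile:
 *
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		struct page *page = get_dump_page(addr);
 *
 *		if (page) {
 *			write_page_to_corefile(page);
 *			put_page(page);
 *		} else {
 *			emit_corefile_hole(PAGE_SIZE);
 *		}
 *	}
 */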
1874
1875 #ifdef CONFIG_MIGRATION
1876 /*
1877  * Check whether all pages are pinnable. If so, return the number of pages. If
1878  * some pages are not pinnable, migrate them and unpin all of the pages. Return
1879  * zero if pages were migrated, or if some pages could not be isolated. Return
1880  * a negative error if migration fails.
1881 */
1882 static long check_and_migrate_movable_pages(unsigned long nr_pages,
1883 struct page **pages,
1884 unsigned int gup_flags)
1885 {
1886 unsigned long i;
1887 unsigned long isolation_error_count = 0;
1888 bool drain_allow = true;
1889 LIST_HEAD(movable_page_list);
1890 long ret = 0;
1891 struct page *prev_head = NULL;
1892 struct page *head;
1893 struct migration_target_control mtc = {
1894 .nid = NUMA_NO_NODE,
1895 .gfp_mask = GFP_USER | __GFP_NOWARN,
1896 };
1897
1898 for (i = 0; i < nr_pages; i++) {
1899 head = compound_head(pages[i]);
1900 if (head == prev_head)
1901 continue;
1902 prev_head = head;
1903 /*
1904 		 * Since we are going to be pinning these entries, try to
1905 		 * migrate any page that is not pinnable out of the way first.
1906 */
1907 if (!is_pinnable_page(head)) {
1908 if (PageHuge(head)) {
1909 if (isolate_hugetlb(head, &movable_page_list))
1910 isolation_error_count++;
1911 } else {
1912 if (!PageLRU(head) && drain_allow) {
1913 lru_add_drain_all();
1914 drain_allow = false;
1915 }
1916
1917 if (isolate_lru_page(head)) {
1918 isolation_error_count++;
1919 continue;
1920 }
1921 list_add_tail(&head->lru, &movable_page_list);
1922 mod_node_page_state(page_pgdat(head),
1923 NR_ISOLATED_ANON +
1924 page_is_file_lru(head),
1925 thp_nr_pages(head));
1926 }
1927 }
1928 }
1929
1930 /*
1931 	 * If the list is empty and there were no isolation errors, all pages
1932 	 * are in the correct zone.
1933 */
1934 if (list_empty(&movable_page_list) && !isolation_error_count)
1935 return nr_pages;
1936
1937 if (gup_flags & FOLL_PIN) {
1938 unpin_user_pages(pages, nr_pages);
1939 } else {
1940 for (i = 0; i < nr_pages; i++)
1941 put_page(pages[i]);
1942 }
1943 if (!list_empty(&movable_page_list)) {
1944 ret = migrate_pages(&movable_page_list, alloc_migration_target,
1945 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1946 MR_LONGTERM_PIN, NULL);
1947 if (ret && !list_empty(&movable_page_list))
1948 putback_movable_pages(&movable_page_list);
1949 }
1950
1951 return ret > 0 ? -ENOMEM : ret;
1952 }
1953 #else
1954 static long check_and_migrate_movable_pages(unsigned long nr_pages,
1955 struct page **pages,
1956 unsigned int gup_flags)
1957 {
1958 return nr_pages;
1959 }
1960 #endif /* CONFIG_MIGRATION */
1961
1962 /*
1963 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1964 * allows us to process the FOLL_LONGTERM flag.
1965 */
1966 static long __gup_longterm_locked(struct mm_struct *mm,
1967 unsigned long start,
1968 unsigned long nr_pages,
1969 struct page **pages,
1970 struct vm_area_struct **vmas,
1971 unsigned int gup_flags)
1972 {
1973 unsigned int flags;
1974 long rc;
1975
1976 if (!(gup_flags & FOLL_LONGTERM))
1977 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1978 NULL, gup_flags);
1979 flags = memalloc_pin_save();
1980 do {
1981 rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1982 NULL, gup_flags);
1983 if (rc <= 0)
1984 break;
1985 rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
1986 } while (!rc);
1987 memalloc_pin_restore(flags);
1988
1989 return rc;
1990 }
1991
1992 static bool is_valid_gup_flags(unsigned int gup_flags)
1993 {
1994 /*
1995 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1996 * never directly by the caller, so enforce that with an assertion:
1997 */
1998 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1999 return false;
2000 /*
2001 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
2002 	 * that is, FOLL_LONGTERM is a specific, more restrictive case of
2003 	 * FOLL_PIN.
2004 */
2005 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2006 return false;
2007
2008 return true;
2009 }
2010
2011 #ifdef CONFIG_MMU
2012 static long __get_user_pages_remote(struct mm_struct *mm,
2013 unsigned long start, unsigned long nr_pages,
2014 unsigned int gup_flags, struct page **pages,
2015 struct vm_area_struct **vmas, int *locked)
2016 {
2017 /*
2018 * Parts of FOLL_LONGTERM behavior are incompatible with
2019 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2020 * vmas. However, this only comes up if locked is set, and there are
2021 * callers that do request FOLL_LONGTERM, but do not set locked. So,
2022 * allow what we can.
2023 */
2024 if (gup_flags & FOLL_LONGTERM) {
2025 if (WARN_ON_ONCE(locked))
2026 return -EINVAL;
2027 /*
2028 * This will check the vmas (even if our vmas arg is NULL)
2029 * and return -ENOTSUPP if DAX isn't allowed in this case:
2030 */
2031 return __gup_longterm_locked(mm, start, nr_pages, pages,
2032 vmas, gup_flags | FOLL_TOUCH |
2033 FOLL_REMOTE);
2034 }
2035
2036 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
2037 locked,
2038 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
2039 }
2040
2041 /**
2042 * get_user_pages_remote() - pin user pages in memory
2043 * @mm: mm_struct of target mm
2044 * @start: starting user address
2045 * @nr_pages: number of pages from start to pin
2046 * @gup_flags: flags modifying lookup behaviour
2047 * @pages: array that receives pointers to the pages pinned.
2048 * Should be at least nr_pages long. Or NULL, if caller
2049 * only intends to ensure the pages are faulted in.
2050 * @vmas: array of pointers to vmas corresponding to each page.
2051 * Or NULL if the caller does not require them.
2052 * @locked: pointer to lock flag indicating whether lock is held and
2053 * subsequently whether VM_FAULT_RETRY functionality can be
2054 * utilised. Lock must initially be held.
2055 *
2056 * Returns either number of pages pinned (which may be less than the
2057 * number requested), or an error. Details about the return value:
2058 *
2059 * -- If nr_pages is 0, returns 0.
2060 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2061 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2062 * pages pinned. Again, this may be less than nr_pages.
2063 *
2064 * The caller is responsible for releasing returned @pages, via put_page().
2065 *
2066 * @vmas are valid only as long as mmap_lock is held.
2067 *
2068 * Must be called with mmap_lock held for read or write.
2069 *
2070 * get_user_pages_remote walks a process's page tables and takes a reference
2071 * to each struct page that each user address corresponds to at a given
2072 * instant. That is, it takes the page that would be accessed if a user
2073 * thread accesses the given user virtual address at that instant.
2074 *
2075 * This does not guarantee that the page exists in the user mappings when
2076 * get_user_pages_remote returns, and there may even be a completely different
2077 * page there in some cases (eg. if mmapped pagecache has been invalidated
2078  * and subsequently re-faulted). However, it does guarantee that the page
2079 * won't be freed completely. And mostly callers simply care that the page
2080 * contains data that was valid *at some point in time*. Typically, an IO
2081 * or similar operation cannot guarantee anything stronger anyway because
2082 * locks can't be held over the syscall boundary.
2083 *
2084 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2085 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2086 * be called after the page is finished with, and before put_page is called.
2087 *
2088 * get_user_pages_remote is typically used for fewer-copy IO operations,
2089 * to get a handle on the memory by some means other than accesses
2090 * via the user virtual addresses. The pages may be submitted for
2091 * DMA to devices or accessed via their kernel linear mapping (via the
2092 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
2093 *
2094 * See also get_user_pages_fast, for performance critical applications.
2095 *
2096 * get_user_pages_remote should be phased out in favor of
2097 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
2098 * should use get_user_pages_remote because it cannot pass
2099 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2100 */
2101 long get_user_pages_remote(struct mm_struct *mm,
2102 unsigned long start, unsigned long nr_pages,
2103 unsigned int gup_flags, struct page **pages,
2104 struct vm_area_struct **vmas, int *locked)
2105 {
2106 if (!is_valid_gup_flags(gup_flags))
2107 return -EINVAL;
2108
2109 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
2110 pages, vmas, locked);
2111 }
2112 EXPORT_SYMBOL(get_user_pages_remote);
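/*
 * Illustrative sketch (hypothetical caller) of the remote protocol: the
 * target mm's mmap_lock is held across the call, and each page is released
 * with put_page() once the access is finished:
 *
 *	mmap_read_lock(mm);
 *	ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page,
 *				    NULL, NULL);
 *	mmap_read_unlock(mm);
 *	if (ret == 1) {
 *		... access the page, e.g. via kmap_local_page() or DMA ...
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */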
2113
2114 #else /* CONFIG_MMU */
2115 long get_user_pages_remote(struct mm_struct *mm,
2116 unsigned long start, unsigned long nr_pages,
2117 unsigned int gup_flags, struct page **pages,
2118 struct vm_area_struct **vmas, int *locked)
2119 {
2120 return 0;
2121 }
2122
2123 static long __get_user_pages_remote(struct mm_struct *mm,
2124 unsigned long start, unsigned long nr_pages,
2125 unsigned int gup_flags, struct page **pages,
2126 struct vm_area_struct **vmas, int *locked)
2127 {
2128 return 0;
2129 }
2130 #endif /* !CONFIG_MMU */
2131
2132 /**
2133 * get_user_pages() - pin user pages in memory
2134 * @start: starting user address
2135 * @nr_pages: number of pages from start to pin
2136 * @gup_flags: flags modifying lookup behaviour
2137 * @pages: array that receives pointers to the pages pinned.
2138 * Should be at least nr_pages long. Or NULL, if caller
2139 * only intends to ensure the pages are faulted in.
2140 * @vmas: array of pointers to vmas corresponding to each page.
2141 * Or NULL if the caller does not require them.
2142 *
2143 * This is the same as get_user_pages_remote(), just with a less-flexible
2144 * calling convention where we assume that the mm being operated on belongs to
2145 * the current task, and doesn't allow passing of a locked parameter. We also
2146 * obviously don't pass FOLL_REMOTE in here.
2147 */
2148 long get_user_pages(unsigned long start, unsigned long nr_pages,
2149 unsigned int gup_flags, struct page **pages,
2150 struct vm_area_struct **vmas)
2151 {
2152 if (!is_valid_gup_flags(gup_flags))
2153 return -EINVAL;
2154
2155 return __gup_longterm_locked(current->mm, start, nr_pages,
2156 pages, vmas, gup_flags | FOLL_TOUCH);
2157 }
2158 EXPORT_SYMBOL(get_user_pages);
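/*
 * Illustrative sketch (hypothetical caller): pin pages in the current
 * task's address space, with the usual dirty-and-release sequence:
 *
 *	mmap_read_lock(current->mm);
 *	ret = get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	for (i = 0; i < ret; i++) {
 *		... access pages[i] ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */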
2159
2160 /**
2161 * get_user_pages_locked() - variant of get_user_pages()
2162 *
2163 * @start: starting user address
2164 * @nr_pages: number of pages from start to pin
2165 * @gup_flags: flags modifying lookup behaviour
2166 * @pages: array that receives pointers to the pages pinned.
2167 * Should be at least nr_pages long. Or NULL, if caller
2168 * only intends to ensure the pages are faulted in.
2169 * @locked: pointer to lock flag indicating whether lock is held and
2170 * subsequently whether VM_FAULT_RETRY functionality can be
2171 * utilised. Lock must initially be held.
2172 *
2173 * It is suitable to replace the form:
2174 *
2175 * mmap_read_lock(mm);
2176 * do_something()
2177  *      get_user_pages(..., pages, NULL);
2178 * mmap_read_unlock(mm);
2179 *
2180 * to:
2181 *
2182 * int locked = 1;
2183 * mmap_read_lock(mm);
2184 * do_something()
2185  *      get_user_pages_locked(..., pages, &locked);
2186 * if (locked)
2187 * mmap_read_unlock(mm);
2188 *
2189 * We can leverage the VM_FAULT_RETRY functionality in the page fault
2190 * paths better by using either get_user_pages_locked() or
2191 * get_user_pages_unlocked().
2192 *
2193 */
2194 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
2195 unsigned int gup_flags, struct page **pages,
2196 int *locked)
2197 {
2198 /*
2199 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2200 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2201 * vmas. As there are no users of this flag in this call we simply
2202 * disallow this option for now.
2203 */
2204 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2205 return -EINVAL;
2206 /*
2207 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2208 * never directly by the caller, so enforce that:
2209 */
2210 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2211 return -EINVAL;
2212
2213 return __get_user_pages_locked(current->mm, start, nr_pages,
2214 pages, NULL, locked,
2215 gup_flags | FOLL_TOUCH);
2216 }
2217 EXPORT_SYMBOL(get_user_pages_locked);
2218
2219 /*
2220 * get_user_pages_unlocked() is suitable to replace the form:
2221 *
2222 * mmap_read_lock(mm);
2223  *      get_user_pages(..., pages, NULL);
2224 * mmap_read_unlock(mm);
2225 *
2226 * with:
2227 *
2228  *      get_user_pages_unlocked(..., pages);
2229 *
2230 * It is functionally equivalent to get_user_pages_fast so
2231 * get_user_pages_fast should be used instead if specific gup_flags
2232 * (e.g. FOLL_FORCE) are not required.
2233 */
2234 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2235 struct page **pages, unsigned int gup_flags)
2236 {
2237 struct mm_struct *mm = current->mm;
2238 int locked = 1;
2239 long ret;
2240
2241 /*
2242 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2243 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2244 * vmas. As there are no users of this flag in this call we simply
2245 * disallow this option for now.
2246 */
2247 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2248 return -EINVAL;
2249
2250 mmap_read_lock(mm);
2251 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
2252 &locked, gup_flags | FOLL_TOUCH);
2253 if (locked)
2254 mmap_read_unlock(mm);
2255 return ret;
2256 }
2257 EXPORT_SYMBOL(get_user_pages_unlocked);
2258
2259 /*
2260 * Fast GUP
2261 *
2262 * get_user_pages_fast attempts to pin user pages by walking the page
2263 * tables directly and avoids taking locks. Thus the walker needs to be
2264 * protected from page table pages being freed from under it, and should
2265 * block any THP splits.
2266 *
2267 * One way to achieve this is to have the walker disable interrupts, and
2268 * rely on IPIs from the TLB flushing code blocking before the page table
2269 * pages are freed. This is unsuitable for architectures that do not need
2270 * to broadcast an IPI when invalidating TLBs.
2271 *
2272  * Another way to achieve this is to batch up the pages containing page
2273  * tables belonging to more than one mm_user, then rcu_sched a callback to
2274  * free those pages. Disabling interrupts allows the fast_gup walker to block
2275  * both the rcu_sched callback and the IPI that we broadcast for splitting
2276  * THPs (which is a relatively rare event). The code below adopts this strategy.
2277 *
2278 * Before activating this code, please be aware that the following assumptions
2279 * are currently made:
2280 *
2281 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2282 * free pages containing page tables or TLB flushing requires IPI broadcast.
2283 *
2284 * *) ptes can be read atomically by the architecture.
2285 *
2286 * *) access_ok is sufficient to validate userspace address ranges.
2287 *
2288 * The last two assumptions can be relaxed by the addition of helper functions.
2289 *
2290 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2291 */
2292 #ifdef CONFIG_HAVE_FAST_GUP
2293
2294 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2295 unsigned int flags,
2296 struct page **pages)
2297 {
2298 while ((*nr) - nr_start) {
2299 struct page *page = pages[--(*nr)];
2300
2301 ClearPageReferenced(page);
2302 if (flags & FOLL_PIN)
2303 unpin_user_page(page);
2304 else
2305 put_page(page);
2306 }
2307 }
2308
2309 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2310 /*
2311 * Fast-gup relies on pte change detection to avoid concurrent pgtable
2312 * operations.
2313 *
2314 * To pin the page, fast-gup needs to do below in order:
2315 * (1) pin the page (by prefetching pte), then (2) check pte not changed.
2316 *
2317 * For the rest of pgtable operations where pgtable updates can be racy
2318 * with fast-gup, we need to do (1) clear pte, then (2) check whether page
2319 * is pinned.
2320 *
2321 * Above will work for all pte-level operations, including THP split.
2322 *
2323 * For THP collapse, it's a bit more complicated because fast-gup may be
2324 * walking a pgtable page that is being freed (pte is still valid but pmd
2325 * can be cleared already). To avoid race in such condition, we need to
2326 * also check pmd here to make sure pmd doesn't change (corresponds to
2327 * pmdp_collapse_flush() in the THP collapse code path).
2328 */
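/*
 * In shorthand (illustrative only, eliding the real helpers), fast-gup
 * does:
 *
 *	pte = ptep_get_lockless(ptep);
 *	pin the page;				(1)
 *	if (pte_val(pte) != pte_val(*ptep))	(2)
 *		back off;
 *
 * while a racing pgtable update performs the mirror-image ordering:
 *
 *	clear the pte;				(1)
 *	if the page is pinned			(2)
 *		handle the concurrent pin;
 */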
2329 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2330 unsigned long end, unsigned int flags,
2331 struct page **pages, int *nr)
2332 {
2333 struct dev_pagemap *pgmap = NULL;
2334 int nr_start = *nr, ret = 0;
2335 pte_t *ptep, *ptem;
2336
2337 ptem = ptep = pte_offset_map(&pmd, addr);
2338 do {
2339 pte_t pte = ptep_get_lockless(ptep);
2340 struct page *head, *page;
2341
2342 /*
2343 * Similar to the PMD case below, NUMA hinting must take slow
2344 * path using the pte_protnone check.
2345 */
2346 if (pte_protnone(pte))
2347 goto pte_unmap;
2348
2349 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2350 goto pte_unmap;
2351
2352 if (pte_devmap(pte)) {
2353 if (unlikely(flags & FOLL_LONGTERM))
2354 goto pte_unmap;
2355
2356 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2357 if (unlikely(!pgmap)) {
2358 undo_dev_pagemap(nr, nr_start, flags, pages);
2359 goto pte_unmap;
2360 }
2361 } else if (pte_special(pte))
2362 goto pte_unmap;
2363
2364 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2365 page = pte_page(pte);
2366
2367 head = try_grab_compound_head(page, 1, flags);
2368 if (!head)
2369 goto pte_unmap;
2370
2371 if (unlikely(page_is_secretmem(page))) {
2372 put_compound_head(head, 1, flags);
2373 goto pte_unmap;
2374 }
2375
2376 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
2377 unlikely(pte_val(pte) != pte_val(*ptep))) {
2378 put_compound_head(head, 1, flags);
2379 goto pte_unmap;
2380 }
2381
2382 VM_BUG_ON_PAGE(compound_head(page) != head, page);
2383
2384 /*
2385 * We need to make the page accessible if and only if we are
2386 * going to access its content (the FOLL_PIN case). Please
2387 * see Documentation/core-api/pin_user_pages.rst for
2388 * details.
2389 */
2390 if (flags & FOLL_PIN) {
2391 ret = arch_make_page_accessible(page);
2392 if (ret) {
2393 unpin_user_page(page);
2394 goto pte_unmap;
2395 }
2396 }
2397 SetPageReferenced(page);
2398 pages[*nr] = page;
2399 (*nr)++;
2400
2401 } while (ptep++, addr += PAGE_SIZE, addr != end);
2402
2403 ret = 1;
2404
2405 pte_unmap:
2406 if (pgmap)
2407 put_dev_pagemap(pgmap);
2408 pte_unmap(ptem);
2409 return ret;
2410 }
2411 #else
2412
2413 /*
2414 * If we can't determine whether or not a pte is special, then fail immediately
2415 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2416 * to be special.
2417 *
2418 * For a futex to be placed on a THP tail page, get_futex_key requires a
2419 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2420 * useful to have gup_huge_pmd even if we can't operate on ptes.
2421 */
2422 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2423 unsigned long end, unsigned int flags,
2424 struct page **pages, int *nr)
2425 {
2426 return 0;
2427 }
2428 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2429
2430 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2431 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2432 unsigned long end, unsigned int flags,
2433 struct page **pages, int *nr)
2434 {
2435 int nr_start = *nr;
2436 struct dev_pagemap *pgmap = NULL;
2437 int ret = 1;
2438
2439 do {
2440 struct page *page = pfn_to_page(pfn);
2441
2442 pgmap = get_dev_pagemap(pfn, pgmap);
2443 if (unlikely(!pgmap)) {
2444 undo_dev_pagemap(nr, nr_start, flags, pages);
2445 ret = 0;
2446 break;
2447 }
2448 SetPageReferenced(page);
2449 pages[*nr] = page;
2450 if (unlikely(!try_grab_page(page, flags))) {
2451 undo_dev_pagemap(nr, nr_start, flags, pages);
2452 ret = 0;
2453 break;
2454 }
2455 (*nr)++;
2456 pfn++;
2457 } while (addr += PAGE_SIZE, addr != end);
2458
2459 put_dev_pagemap(pgmap);
2460 return ret;
2461 }
2462
2463 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2464 unsigned long end, unsigned int flags,
2465 struct page **pages, int *nr)
2466 {
2467 unsigned long fault_pfn;
2468 int nr_start = *nr;
2469
2470 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2471 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2472 return 0;
2473
2474 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2475 undo_dev_pagemap(nr, nr_start, flags, pages);
2476 return 0;
2477 }
2478 return 1;
2479 }
2480
2481 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2482 unsigned long end, unsigned int flags,
2483 struct page **pages, int *nr)
2484 {
2485 unsigned long fault_pfn;
2486 int nr_start = *nr;
2487
2488 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2489 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2490 return 0;
2491
2492 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2493 undo_dev_pagemap(nr, nr_start, flags, pages);
2494 return 0;
2495 }
2496 return 1;
2497 }
2498 #else
2499 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2500 unsigned long end, unsigned int flags,
2501 struct page **pages, int *nr)
2502 {
2503 BUILD_BUG();
2504 return 0;
2505 }
2506
2507 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2508 unsigned long end, unsigned int flags,
2509 struct page **pages, int *nr)
2510 {
2511 BUILD_BUG();
2512 return 0;
2513 }
2514 #endif
2515
2516 static int record_subpages(struct page *page, unsigned long addr,
2517 unsigned long end, struct page **pages)
2518 {
2519 int nr;
2520
2521 for (nr = 0; addr != end; addr += PAGE_SIZE)
2522 pages[nr++] = page++;
2523
2524 return nr;
2525 }
2526
2527 #ifdef CONFIG_ARCH_HAS_HUGEPD
2528 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2529 unsigned long sz)
2530 {
2531 unsigned long __boundary = (addr + sz) & ~(sz-1);
2532 return (__boundary - 1 < end - 1) ? __boundary : end;
2533 }
2534
2535 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2536 unsigned long end, unsigned int flags,
2537 struct page **pages, int *nr)
2538 {
2539 unsigned long pte_end;
2540 struct page *head, *page;
2541 pte_t pte;
2542 int refs;
2543
2544 pte_end = (addr + sz) & ~(sz-1);
2545 if (pte_end < end)
2546 end = pte_end;
2547
2548 pte = huge_ptep_get(ptep);
2549
2550 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2551 return 0;
2552
2553 /* hugepages are never "special" */
2554 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2555
2556 head = pte_page(pte);
2557 page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
2558 refs = record_subpages(page, addr, end, pages + *nr);
2559
2560 head = try_grab_compound_head(head, refs, flags);
2561 if (!head)
2562 return 0;
2563
2564 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2565 put_compound_head(head, refs, flags);
2566 return 0;
2567 }
2568
2569 *nr += refs;
2570 SetPageReferenced(head);
2571 return 1;
2572 }
2573
2574 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2575 unsigned int pdshift, unsigned long end, unsigned int flags,
2576 struct page **pages, int *nr)
2577 {
2578 pte_t *ptep;
2579 unsigned long sz = 1UL << hugepd_shift(hugepd);
2580 unsigned long next;
2581
2582 ptep = hugepte_offset(hugepd, addr, pdshift);
2583 do {
2584 next = hugepte_addr_end(addr, end, sz);
2585 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2586 return 0;
2587 } while (ptep++, addr = next, addr != end);
2588
2589 return 1;
2590 }
2591 #else
2592 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2593 unsigned int pdshift, unsigned long end, unsigned int flags,
2594 struct page **pages, int *nr)
2595 {
2596 return 0;
2597 }
2598 #endif /* CONFIG_ARCH_HAS_HUGEPD */
2599
2600 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2601 unsigned long end, unsigned int flags,
2602 struct page **pages, int *nr)
2603 {
2604 struct page *head, *page;
2605 int refs;
2606
2607 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2608 return 0;
2609
2610 if (pmd_devmap(orig)) {
2611 if (unlikely(flags & FOLL_LONGTERM))
2612 return 0;
2613 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2614 pages, nr);
2615 }
2616
2617 page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2618 refs = record_subpages(page, addr, end, pages + *nr);
2619
2620 head = try_grab_compound_head(pmd_page(orig), refs, flags);
2621 if (!head)
2622 return 0;
2623
2624 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2625 put_compound_head(head, refs, flags);
2626 return 0;
2627 }
2628
2629 *nr += refs;
2630 SetPageReferenced(head);
2631 return 1;
2632 }
2633
2634 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2635 unsigned long end, unsigned int flags,
2636 struct page **pages, int *nr)
2637 {
2638 struct page *head, *page;
2639 int refs;
2640
2641 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2642 return 0;
2643
2644 if (pud_devmap(orig)) {
2645 if (unlikely(flags & FOLL_LONGTERM))
2646 return 0;
2647 return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2648 pages, nr);
2649 }
2650
2651 page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2652 refs = record_subpages(page, addr, end, pages + *nr);
2653
2654 head = try_grab_compound_head(pud_page(orig), refs, flags);
2655 if (!head)
2656 return 0;
2657
2658 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2659 put_compound_head(head, refs, flags);
2660 return 0;
2661 }
2662
2663 *nr += refs;
2664 SetPageReferenced(head);
2665 return 1;
2666 }
2667
2668 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2669 unsigned long end, unsigned int flags,
2670 struct page **pages, int *nr)
2671 {
2672 int refs;
2673 struct page *head, *page;
2674
2675 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2676 return 0;
2677
2678 BUILD_BUG_ON(pgd_devmap(orig));
2679
2680 page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2681 refs = record_subpages(page, addr, end, pages + *nr);
2682
2683 head = try_grab_compound_head(pgd_page(orig), refs, flags);
2684 if (!head)
2685 return 0;
2686
2687 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2688 put_compound_head(head, refs, flags);
2689 return 0;
2690 }
2691
2692 *nr += refs;
2693 SetPageReferenced(head);
2694 return 1;
2695 }
2696
2697 static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2698 unsigned int flags, struct page **pages, int *nr)
2699 {
2700 unsigned long next;
2701 pmd_t *pmdp;
2702
2703 pmdp = pmd_offset_lockless(pudp, pud, addr);
2704 do {
2705 pmd_t pmd = READ_ONCE(*pmdp);
2706
2707 next = pmd_addr_end(addr, end);
2708 if (!pmd_present(pmd))
2709 return 0;
2710
2711 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2712 pmd_devmap(pmd))) {
2713 /*
2714 * NUMA hinting faults need to be handled in the GUP
2715 * slowpath for accounting purposes and so that they
2716 * can be serialised against THP migration.
2717 */
2718 if (pmd_protnone(pmd))
2719 return 0;
2720
2721 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2722 pages, nr))
2723 return 0;
2724
2725 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2726 /*
2727 			 * Architectures may use different formats for the
2728 			 * hugetlbfs pmd and the THP pmd.
2729 */
2730 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2731 PMD_SHIFT, next, flags, pages, nr))
2732 return 0;
2733 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
2734 return 0;
2735 } while (pmdp++, addr = next, addr != end);
2736
2737 return 1;
2738 }
2739
2740 static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2741 unsigned int flags, struct page **pages, int *nr)
2742 {
2743 unsigned long next;
2744 pud_t *pudp;
2745
2746 pudp = pud_offset_lockless(p4dp, p4d, addr);
2747 do {
2748 pud_t pud = READ_ONCE(*pudp);
2749
2750 next = pud_addr_end(addr, end);
2751 if (unlikely(!pud_present(pud)))
2752 return 0;
2753 if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
2754 if (!gup_huge_pud(pud, pudp, addr, next, flags,
2755 pages, nr))
2756 return 0;
2757 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2758 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2759 PUD_SHIFT, next, flags, pages, nr))
2760 return 0;
2761 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2762 return 0;
2763 } while (pudp++, addr = next, addr != end);
2764
2765 return 1;
2766 }
2767
2768 static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2769 unsigned int flags, struct page **pages, int *nr)
2770 {
2771 unsigned long next;
2772 p4d_t *p4dp;
2773
2774 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2775 do {
2776 p4d_t p4d = READ_ONCE(*p4dp);
2777
2778 next = p4d_addr_end(addr, end);
2779 if (p4d_none(p4d))
2780 return 0;
2781 BUILD_BUG_ON(p4d_huge(p4d));
2782 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2783 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2784 P4D_SHIFT, next, flags, pages, nr))
2785 return 0;
2786 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2787 return 0;
2788 } while (p4dp++, addr = next, addr != end);
2789
2790 return 1;
2791 }
2792
2793 static void gup_pgd_range(unsigned long addr, unsigned long end,
2794 unsigned int flags, struct page **pages, int *nr)
2795 {
2796 unsigned long next;
2797 pgd_t *pgdp;
2798
2799 pgdp = pgd_offset(current->mm, addr);
2800 do {
2801 pgd_t pgd = READ_ONCE(*pgdp);
2802
2803 next = pgd_addr_end(addr, end);
2804 if (pgd_none(pgd))
2805 return;
2806 if (unlikely(pgd_huge(pgd))) {
2807 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2808 pages, nr))
2809 return;
2810 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2811 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2812 PGDIR_SHIFT, next, flags, pages, nr))
2813 return;
2814 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2815 return;
2816 } while (pgdp++, addr = next, addr != end);
2817 }
2818 #else
2819 static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2820 unsigned int flags, struct page **pages, int *nr)
2821 {
2822 }
2823 #endif /* CONFIG_HAVE_FAST_GUP */
2824
2825 #ifndef gup_fast_permitted
2826 /*
2827 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2828 * we need to fall back to the slow version:
2829 */
2830 static bool gup_fast_permitted(unsigned long start, unsigned long end)
2831 {
2832 return true;
2833 }
2834 #endif
2835
2836 static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2837 unsigned int gup_flags, struct page **pages)
2838 {
2839 int ret;
2840
2841 /*
2842 * FIXME: FOLL_LONGTERM does not work with
2843 * get_user_pages_unlocked() (see comments in that function)
2844 */
2845 if (gup_flags & FOLL_LONGTERM) {
2846 mmap_read_lock(current->mm);
2847 ret = __gup_longterm_locked(current->mm,
2848 start, nr_pages,
2849 pages, NULL, gup_flags);
2850 mmap_read_unlock(current->mm);
2851 } else {
2852 ret = get_user_pages_unlocked(start, nr_pages,
2853 pages, gup_flags);
2854 }
2855
2856 return ret;
2857 }
2858
2859 static unsigned long lockless_pages_from_mm(unsigned long start,
2860 unsigned long end,
2861 unsigned int gup_flags,
2862 struct page **pages)
2863 {
2864 unsigned long flags;
2865 int nr_pinned = 0;
2866 unsigned seq;
2867
2868 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2869 !gup_fast_permitted(start, end))
2870 return 0;
2871
2872 if (gup_flags & FOLL_PIN) {
2873 		seq = raw_read_seqcount(&current->mm->write_protect_seq);
2874 if (seq & 1)
2875 return 0;
2876 }
2877
2878 /*
2879 * Disable interrupts. The nested form is used, in order to allow full,
2880 * general purpose use of this routine.
2881 *
2882 * With interrupts disabled, we block page table pages from being freed
2883 * from under us. See struct mmu_table_batch comments in
2884 * include/asm-generic/tlb.h for more details.
2885 *
2886 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2887 * that come from THPs splitting.
2888 */
2889 local_irq_save(flags);
2890 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2891 local_irq_restore(flags);
2892
2893 /*
2894 * When pinning pages for DMA there could be a concurrent write protect
2895 * from fork() via copy_page_range(), in this case always fail fast GUP.
2896 */
2897 if (gup_flags & FOLL_PIN) {
2898 		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2899 unpin_user_pages(pages, nr_pinned);
2900 return 0;
2901 }
2902 }
2903 return nr_pinned;
2904 }
2905
2906 static int internal_get_user_pages_fast(unsigned long start,
2907 unsigned long nr_pages,
2908 unsigned int gup_flags,
2909 struct page **pages)
2910 {
2911 unsigned long len, end;
2912 unsigned long nr_pinned;
2913 int ret;
2914
2915 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2916 FOLL_FORCE | FOLL_PIN | FOLL_GET |
2917 FOLL_FAST_ONLY | FOLL_NOFAULT)))
2918 return -EINVAL;
2919
2920 if (gup_flags & FOLL_PIN)
2921 		mm_set_has_pinned_flag(&current->mm->flags);
2922
2923 if (!(gup_flags & FOLL_FAST_ONLY))
2924 		might_lock_read(&current->mm->mmap_lock);
2925
2926 start = untagged_addr(start) & PAGE_MASK;
2927 len = nr_pages << PAGE_SHIFT;
2928 if (check_add_overflow(start, len, &end))
2929 return 0;
2930 if (unlikely(!access_ok((void __user *)start, len)))
2931 return -EFAULT;
2932
2933 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2934 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2935 return nr_pinned;
2936
2937 /* Slow path: try to get the remaining pages with get_user_pages */
2938 start += nr_pinned << PAGE_SHIFT;
2939 pages += nr_pinned;
2940 ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2941 pages);
2942 if (ret < 0) {
2943 /*
2944 		 * The caller still has to unpin the pages we already pinned,
2945 		 * so returning -errno (and losing the count) is not an option.
2946 */
2947 if (nr_pinned)
2948 return nr_pinned;
2949 return ret;
2950 }
2951 return ret + nr_pinned;
2952 }
2953
2954 /**
2955 * get_user_pages_fast_only() - pin user pages in memory
2956 * @start: starting user address
2957 * @nr_pages: number of pages from start to pin
2958 * @gup_flags: flags modifying pin behaviour
2959 * @pages: array that receives pointers to the pages pinned.
2960 * Should be at least nr_pages long.
2961 *
2962 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2963 * the regular GUP.
2964 * Note a difference with get_user_pages_fast: this always returns the
2965 * number of pages pinned, 0 if no pages were pinned.
2966 *
2967 * If the architecture does not support this function, simply return with no
2968 * pages pinned.
2969 *
2970 * Careful, careful! COW breaking can go either way, so a non-write
2971 * access can get ambiguous page results. If you call this function without
2972 * 'write' set, you'd better be sure that you're ok with that ambiguity.
2973 */
2974 int get_user_pages_fast_only(unsigned long start, int nr_pages,
2975 unsigned int gup_flags, struct page **pages)
2976 {
2977 int nr_pinned;
2978 /*
2979 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2980 * because gup fast is always a "pin with a +1 page refcount" request.
2981 *
2982 * FOLL_FAST_ONLY is required in order to match the API description of
2983 * this routine: no fall back to regular ("slow") GUP.
2984 */
2985 gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
2986
2987 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2988 pages);
2989
2990 /*
2991 * As specified in the API description above, this routine is not
2992 * allowed to return negative values. However, the common core
2993 * routine internal_get_user_pages_fast() *can* return -errno.
2994 * Therefore, correct for that here:
2995 */
2996 if (nr_pinned < 0)
2997 nr_pinned = 0;
2998
2999 return nr_pinned;
3000 }
3001 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
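/*
 * Illustrative sketch (hypothetical caller): since there is no fallback to
 * slow GUP, this can be attempted from a context that must not sleep, with
 * the caller deferring to a sleepable path on failure:
 *
 *	if (get_user_pages_fast_only(addr, 1, FOLL_WRITE, &page) != 1)
 *		return -EAGAIN;		(retry from a sleepable context)
 */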
3002
3003 /**
3004 * get_user_pages_fast() - pin user pages in memory
3005 * @start: starting user address
3006 * @nr_pages: number of pages from start to pin
3007 * @gup_flags: flags modifying pin behaviour
3008 * @pages: array that receives pointers to the pages pinned.
3009 * Should be at least nr_pages long.
3010 *
3011 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3012 * If not successful, it will fall back to taking the lock and
3013 * calling get_user_pages().
3014 *
3015 * Returns number of pages pinned. This may be fewer than the number requested.
3016 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3017 * -errno.
3018 */
3019 int get_user_pages_fast(unsigned long start, int nr_pages,
3020 unsigned int gup_flags, struct page **pages)
3021 {
3022 if (!is_valid_gup_flags(gup_flags))
3023 return -EINVAL;
3024
3025 /*
3026 * The caller may or may not have explicitly set FOLL_GET; either way is
3027 * OK. However, internally (within mm/gup.c), gup fast variants must set
3028 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3029 * request.
3030 */
3031 gup_flags |= FOLL_GET;
3032 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3033 }
3034 EXPORT_SYMBOL_GPL(get_user_pages_fast);
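/*
 * Illustrative sketch (hypothetical caller), including the partial-success
 * case: pages that were pinned must be released even when fewer than the
 * requested number were obtained:
 *
 *	ret = get_user_pages_fast(start, nr, FOLL_WRITE, pages);
 *	if (ret < 0)
 *		return ret;
 *	... use pages[0 .. ret) ...
 *	for (i = 0; i < ret; i++)
 *		put_page(pages[i]);
 */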
3035
3036 /**
3037 * pin_user_pages_fast() - pin user pages in memory without taking locks
3038 *
3039 * @start: starting user address
3040 * @nr_pages: number of pages from start to pin
3041 * @gup_flags: flags modifying pin behaviour
3042 * @pages: array that receives pointers to the pages pinned.
3043 * Should be at least nr_pages long.
3044 *
3045 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3046 * get_user_pages_fast() for documentation on the function arguments, because
3047 * the arguments here are identical.
3048 *
3049 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3050 * see Documentation/core-api/pin_user_pages.rst for further details.
3051 */
3052 int pin_user_pages_fast(unsigned long start, int nr_pages,
3053 unsigned int gup_flags, struct page **pages)
3054 {
3055 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3056 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3057 return -EINVAL;
3058
3059 gup_flags |= FOLL_PIN;
3060 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3061 }
3062 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
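/*
 * Illustrative sketch (hypothetical DMA caller): FOLL_PIN pages are
 * released with the unpin_user_page*() family, never put_page():
 *
 *	ret = pin_user_pages_fast(start, nr, FOLL_WRITE, pages);
 *	if (ret > 0) {
 *		... set up and run DMA to/from pages[0 .. ret) ...
 *		unpin_user_pages_dirty_lock(pages, ret, true);
 *	}
 */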
3063
3064 /*
3065 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
3066 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
3067 *
3068 * The API rules are the same, too: no negative values may be returned.
3069 */
3070 int pin_user_pages_fast_only(unsigned long start, int nr_pages,
3071 unsigned int gup_flags, struct page **pages)
3072 {
3073 int nr_pinned;
3074
3075 /*
3076 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
3077 * rules require returning 0, rather than -errno:
3078 */
3079 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3080 return 0;
3081 /*
3082 * FOLL_FAST_ONLY is required in order to match the API description of
3083 * this routine: no fall back to regular ("slow") GUP.
3084 */
3085 gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
3086 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
3087 pages);
3088 /*
3089 * This routine is not allowed to return negative values. However,
3090 * internal_get_user_pages_fast() *can* return -errno. Therefore,
3091 * correct for that here:
3092 */
3093 if (nr_pinned < 0)
3094 nr_pinned = 0;
3095
3096 return nr_pinned;
3097 }
3098 EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
3099
3100 /**
3101 * pin_user_pages_remote() - pin pages of a remote process
3102 *
3103 * @mm: mm_struct of target mm
3104 * @start: starting user address
3105 * @nr_pages: number of pages from start to pin
3106 * @gup_flags: flags modifying lookup behaviour
3107 * @pages: array that receives pointers to the pages pinned.
3108 * Should be at least nr_pages long. Or NULL, if caller
3109 * only intends to ensure the pages are faulted in.
3110 * @vmas: array of pointers to vmas corresponding to each page.
3111 * Or NULL if the caller does not require them.
3112 * @locked: pointer to lock flag indicating whether lock is held and
3113 * subsequently whether VM_FAULT_RETRY functionality can be
3114 * utilised. Lock must initially be held.
3115 *
3116 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3117 * get_user_pages_remote() for documentation on the function arguments, because
3118 * the arguments here are identical.
3119 *
3120 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3121 * see Documentation/core-api/pin_user_pages.rst for details.
3122 */
3123 long pin_user_pages_remote(struct mm_struct *mm,
3124 unsigned long start, unsigned long nr_pages,
3125 unsigned int gup_flags, struct page **pages,
3126 struct vm_area_struct **vmas, int *locked)
3127 {
3128 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3129 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3130 return -EINVAL;
3131
3132 gup_flags |= FOLL_PIN;
3133 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
3134 pages, vmas, locked);
3135 }
3136 EXPORT_SYMBOL(pin_user_pages_remote);
3137
3138 /**
3139 * pin_user_pages() - pin user pages in memory for use by other devices
3140 *
3141 * @start: starting user address
3142 * @nr_pages: number of pages from start to pin
3143 * @gup_flags: flags modifying lookup behaviour
3144 * @pages: array that receives pointers to the pages pinned.
3145 * Should be at least nr_pages long. Or NULL, if caller
3146 * only intends to ensure the pages are faulted in.
3147 * @vmas: array of pointers to vmas corresponding to each page.
3148 * Or NULL if the caller does not require them.
3149 *
3150 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3151 * FOLL_PIN is set.
3152 *
3153 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3154 * see Documentation/core-api/pin_user_pages.rst for details.
3155 */
3156 long pin_user_pages(unsigned long start, unsigned long nr_pages,
3157 unsigned int gup_flags, struct page **pages,
3158 struct vm_area_struct **vmas)
3159 {
3160 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3161 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3162 return -EINVAL;
3163
3164 gup_flags |= FOLL_PIN;
3165 return __gup_longterm_locked(current->mm, start, nr_pages,
3166 pages, vmas, gup_flags);
3167 }
3168 EXPORT_SYMBOL(pin_user_pages);
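/*
 * Illustrative sketch (hypothetical caller): a long-lived pin, e.g. for an
 * RDMA memory registration, additionally passes FOLL_LONGTERM so that
 * unpinnable pages are migrated out of ZONE_MOVABLE and CMA regions first:
 *
 *	mmap_read_lock(current->mm);
 *	ret = pin_user_pages(start, nr, FOLL_WRITE | FOLL_LONGTERM,
 *			     pages, NULL);
 *	mmap_read_unlock(current->mm);
 */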
3169
3170 /*
3171 * pin_user_pages_unlocked() is the FOLL_PIN variant of
3172 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3173 * FOLL_PIN and rejects FOLL_GET.
3174 */
3175 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3176 struct page **pages, unsigned int gup_flags)
3177 {
3178 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3179 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3180 return -EINVAL;
3181
3182 gup_flags |= FOLL_PIN;
3183 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
3184 }
3185 EXPORT_SYMBOL(pin_user_pages_unlocked);
3186
3187 /*
3188 * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
3189 * Behavior is the same, except that this one sets FOLL_PIN and rejects
3190 * FOLL_GET.
3191 */
3192 long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
3193 unsigned int gup_flags, struct page **pages,
3194 int *locked)
3195 {
3196 /*
3197 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
3198 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
3199 * vmas. As there are no users of this flag in this call we simply
3200 * disallow this option for now.
3201 */
3202 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
3203 return -EINVAL;
3204
3205 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3206 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3207 return -EINVAL;
3208
3209 gup_flags |= FOLL_PIN;
3210 return __get_user_pages_locked(current->mm, start, nr_pages,
3211 pages, NULL, locked,
3212 gup_flags | FOLL_TOUCH);
3213 }
3214 EXPORT_SYMBOL(pin_user_pages_locked);
3215