1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/err.h>
4 #include <linux/spinlock.h>
5
6 #include <linux/hugetlb.h>
7 #include <linux/mm.h>
8 #include <linux/pagemap.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/swapops.h>
12
13 #include <linux/sched.h>
14 #include <linux/rwsem.h>
15 #include <asm/pgtable.h>
16
17 #include "internal.h"
18
19 static struct page *no_page_table(struct vm_area_struct *vma,
20 unsigned int flags)
21 {
22 /*
23 * When core dumping an enormous anonymous area that nobody
24 * has touched so far, we don't want to allocate unnecessary pages or
25 * page tables. Return error instead of NULL to skip handle_mm_fault,
26 * then get_dump_page() will return NULL to leave a hole in the dump.
27 * But we can only make this optimization where a hole would surely
28 * be zero-filled if handle_mm_fault() actually did handle it.
29 */
30 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
31 return ERR_PTR(-EFAULT);
32 return NULL;
33 }
34
35 /*
36 * FOLL_FORCE can write to even unwritable pte's, but only
37 * after we've gone through a COW cycle and they are dirty.
38 */
39 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
40 {
41 return pte_write(pte) ||
42 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
43 }
44
45 static struct page *follow_page_pte(struct vm_area_struct *vma,
46 unsigned long address, pmd_t *pmd, unsigned int flags)
47 {
48 struct mm_struct *mm = vma->vm_mm;
49 struct page *page;
50 spinlock_t *ptl;
51 pte_t *ptep, pte;
52
53 retry:
54 if (unlikely(pmd_bad(*pmd)))
55 return no_page_table(vma, flags);
56
57 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
58 pte = *ptep;
59 if (!pte_present(pte)) {
60 swp_entry_t entry;
61 /*
62 * KSM's break_ksm() relies upon recognizing a ksm page
63 * even while it is being migrated, so for that case we
64 * need migration_entry_wait().
65 */
66 if (likely(!(flags & FOLL_MIGRATION)))
67 goto no_page;
68 if (pte_none(pte) || pte_file(pte))
69 goto no_page;
70 entry = pte_to_swp_entry(pte);
71 if (!is_migration_entry(entry))
72 goto no_page;
73 pte_unmap_unlock(ptep, ptl);
74 migration_entry_wait(mm, pmd, address);
75 goto retry;
76 }
77 if ((flags & FOLL_NUMA) && pte_numa(pte))
78 goto no_page;
79 if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
80 pte_unmap_unlock(ptep, ptl);
81 return NULL;
82 }
83
84 page = vm_normal_page(vma, address, pte);
85 if (unlikely(!page)) {
86 if ((flags & FOLL_DUMP) ||
87 !is_zero_pfn(pte_pfn(pte)))
88 goto bad_page;
89 page = pte_page(pte);
90 }
91
92 if (flags & FOLL_GET)
93 get_page_foll(page);
94 if (flags & FOLL_TOUCH) {
95 if ((flags & FOLL_WRITE) &&
96 !pte_dirty(pte) && !PageDirty(page))
97 set_page_dirty(page);
98 /*
99 * pte_mkyoung() would be more correct here, but atomic care
100 * is needed to avoid losing the dirty bit: it is easier to use
101 * mark_page_accessed().
102 */
103 mark_page_accessed(page);
104 }
105 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
106 /*
107 * The preliminary mapping check is mainly to avoid the
108 * pointless overhead of lock_page on the ZERO_PAGE
109 * which might bounce very badly if there is contention.
110 *
111 * If the page is already locked, we don't need to
112 * handle it now - vmscan will handle it later if and
113 * when it attempts to reclaim the page.
114 */
115 if (page->mapping && trylock_page(page)) {
116 lru_add_drain(); /* push cached pages to LRU */
117 /*
118 * Because we lock page here, and migration is
119 * blocked by the pte's page reference, and we
120 * know the page is still mapped, we don't even
121 * need to check for file-cache page truncation.
122 */
123 mlock_vma_page(page);
124 unlock_page(page);
125 }
126 }
127 pte_unmap_unlock(ptep, ptl);
128 return page;
129 bad_page:
130 pte_unmap_unlock(ptep, ptl);
131 return ERR_PTR(-EFAULT);
132
133 no_page:
134 pte_unmap_unlock(ptep, ptl);
135 if (!pte_none(pte))
136 return NULL;
137 return no_page_table(vma, flags);
138 }
139
140 /**
141 * follow_page_mask - look up a page descriptor from a user-virtual address
142 * @vma: vm_area_struct mapping @address
143 * @address: virtual address to look up
144 * @flags: flags modifying lookup behaviour
145 * @page_mask: on output, *page_mask is set according to the size of the page
146 *
147 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
148 *
149 * Returns the mapped (struct page *), %NULL if no mapping exists, or
150 * an error pointer if there is a mapping to something not represented
151 * by a page descriptor (see also vm_normal_page()).
152 */
153 struct page *follow_page_mask(struct vm_area_struct *vma,
154 unsigned long address, unsigned int flags,
155 unsigned int *page_mask)
156 {
157 pgd_t *pgd;
158 pud_t *pud;
159 pmd_t *pmd;
160 spinlock_t *ptl;
161 struct page *page;
162 struct mm_struct *mm = vma->vm_mm;
163
164 *page_mask = 0;
165
166 page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
167 if (!IS_ERR(page)) {
168 BUG_ON(flags & FOLL_GET);
169 return page;
170 }
171
172 pgd = pgd_offset(mm, address);
173 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
174 return no_page_table(vma, flags);
175
176 pud = pud_offset(pgd, address);
177 if (pud_none(*pud))
178 return no_page_table(vma, flags);
179 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
180 page = follow_huge_pud(mm, address, pud, flags);
181 if (page)
182 return page;
183 return no_page_table(vma, flags);
184 }
185 if (unlikely(pud_bad(*pud)))
186 return no_page_table(vma, flags);
187
188 pmd = pmd_offset(pud, address);
189 if (pmd_none(*pmd))
190 return no_page_table(vma, flags);
191 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
192 page = follow_huge_pmd(mm, address, pmd, flags);
193 if (page)
194 return page;
195 return no_page_table(vma, flags);
196 }
197 if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
198 return no_page_table(vma, flags);
199 if (pmd_trans_huge(*pmd)) {
200 if (flags & FOLL_SPLIT) {
201 split_huge_page_pmd(vma, address, pmd);
202 return follow_page_pte(vma, address, pmd, flags);
203 }
204 ptl = pmd_lock(mm, pmd);
205 if (likely(pmd_trans_huge(*pmd))) {
206 if (unlikely(pmd_trans_splitting(*pmd))) {
207 spin_unlock(ptl);
208 wait_split_huge_page(vma->anon_vma, pmd);
209 } else {
210 page = follow_trans_huge_pmd(vma, address,
211 pmd, flags);
212 spin_unlock(ptl);
213 *page_mask = HPAGE_PMD_NR - 1;
214 return page;
215 }
216 } else
217 spin_unlock(ptl);
218 }
219 return follow_page_pte(vma, address, pmd, flags);
220 }
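
/*
 * Editorial sketch (not part of gup.c): most in-kernel callers do not care
 * about @page_mask and go through a thin wrapper instead.  The sketch below
 * shows the shape of such a wrapper, using only follow_page_mask() as
 * defined above; in kernels of this era the follow_page() helper in
 * <linux/mm.h> plays this role.  The function name here is hypothetical.
 */
#if 0
static inline struct page *follow_page_example(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;

	/* The mask output is discarded; only the looked-up page is wanted. */
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}
#endif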
221
222 static int get_gate_page(struct mm_struct *mm, unsigned long address,
223 unsigned int gup_flags, struct vm_area_struct **vma,
224 struct page **page)
225 {
226 pgd_t *pgd;
227 pud_t *pud;
228 pmd_t *pmd;
229 pte_t *pte;
230 int ret = -EFAULT;
231
232 /* user gate pages are read-only */
233 if (gup_flags & FOLL_WRITE)
234 return -EFAULT;
235 if (address > TASK_SIZE)
236 pgd = pgd_offset_k(address);
237 else
238 pgd = pgd_offset_gate(mm, address);
239 BUG_ON(pgd_none(*pgd));
240 pud = pud_offset(pgd, address);
241 BUG_ON(pud_none(*pud));
242 pmd = pmd_offset(pud, address);
243 if (pmd_none(*pmd))
244 return -EFAULT;
245 VM_BUG_ON(pmd_trans_huge(*pmd));
246 pte = pte_offset_map(pmd, address);
247 if (pte_none(*pte))
248 goto unmap;
249 *vma = get_gate_vma(mm);
250 if (!page)
251 goto out;
252 *page = vm_normal_page(*vma, address, *pte);
253 if (!*page) {
254 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
255 goto unmap;
256 *page = pte_page(*pte);
257 }
258 get_page(*page);
259 out:
260 ret = 0;
261 unmap:
262 pte_unmap(pte);
263 return ret;
264 }
265
266 /*
267 * mmap_sem must be held on entry. If @nonblocking != NULL and
268 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
269 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
270 */
271 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
272 unsigned long address, unsigned int *flags, int *nonblocking)
273 {
274 struct mm_struct *mm = vma->vm_mm;
275 unsigned int fault_flags = 0;
276 int ret;
277
278 if (*flags & FOLL_WRITE)
279 fault_flags |= FAULT_FLAG_WRITE;
280 if (nonblocking)
281 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
282 if (*flags & FOLL_NOWAIT)
283 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
284 if (*flags & FOLL_TRIED) {
285 VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
286 fault_flags |= FAULT_FLAG_TRIED;
287 }
288
289 ret = handle_mm_fault(mm, vma, address, fault_flags);
290 if (ret & VM_FAULT_ERROR) {
291 if (ret & VM_FAULT_OOM)
292 return -ENOMEM;
293 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
294 return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
295 if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
296 return -EFAULT;
297 BUG();
298 }
299
300 if (tsk) {
301 if (ret & VM_FAULT_MAJOR)
302 tsk->maj_flt++;
303 else
304 tsk->min_flt++;
305 }
306
307 if (ret & VM_FAULT_RETRY) {
308 if (nonblocking)
309 *nonblocking = 0;
310 return -EBUSY;
311 }
312
313 /*
314 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
315 * necessary, even if maybe_mkwrite decided not to set pte_write. We
316 * can thus safely do subsequent page lookups as if they were reads.
317 * But only do so when looping for pte_write is futile: in some cases
318 * userspace may also be wanting to write to the gotten user page,
319 * which a read fault here might prevent (a readonly page might get
320 * reCOWed by userspace write).
321 */
322 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
323 *flags |= FOLL_COW;
324 return 0;
325 }
326
327 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
328 {
329 vm_flags_t vm_flags = vma->vm_flags;
330
331 if (vm_flags & (VM_IO | VM_PFNMAP))
332 return -EFAULT;
333
334 if (gup_flags & FOLL_WRITE) {
335 if (!(vm_flags & VM_WRITE)) {
336 if (!(gup_flags & FOLL_FORCE))
337 return -EFAULT;
338 /*
339 * We used to let the write,force case do COW in a
340 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
341 * set a breakpoint in a read-only mapping of an
342 * executable, without corrupting the file (yet only
343 * when that file had been opened for writing!).
344 * Anon pages in shared mappings are surprising: now
345 * just reject it.
346 */
347 if (!is_cow_mapping(vm_flags)) {
348 WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
349 return -EFAULT;
350 }
351 }
352 } else if (!(vm_flags & VM_READ)) {
353 if (!(gup_flags & FOLL_FORCE))
354 return -EFAULT;
355 /*
356 * Is there actually any vma we can reach here which does not
357 * have VM_MAYREAD set?
358 */
359 if (!(vm_flags & VM_MAYREAD))
360 return -EFAULT;
361 }
362 return 0;
363 }
364
365 /**
366 * __get_user_pages() - pin user pages in memory
367 * @tsk: task_struct of target task
368 * @mm: mm_struct of target mm
369 * @start: starting user address
370 * @nr_pages: number of pages from start to pin
371 * @gup_flags: flags modifying pin behaviour
372 * @pages: array that receives pointers to the pages pinned.
373 * Should be at least nr_pages long. Or NULL, if caller
374 * only intends to ensure the pages are faulted in.
375 * @vmas: array of pointers to vmas corresponding to each page.
376 * Or NULL if the caller does not require them.
377 * @nonblocking: whether waiting for disk IO or mmap_sem contention
378 *
379 * Returns number of pages pinned. This may be fewer than the number
380 * requested. If nr_pages is 0 or negative, returns 0. If no pages
381 * were pinned, returns -errno. Each page returned must be released
382 * with a put_page() call when it is finished with. vmas will only
383 * remain valid while mmap_sem is held.
384 *
385 * Must be called with mmap_sem held. It may be released. See below.
386 *
387 * __get_user_pages walks a process's page tables and takes a reference to
388 * each struct page that each user address corresponds to at a given
389 * instant. That is, it takes the page that would be accessed if a user
390 * thread accesses the given user virtual address at that instant.
391 *
392 * This does not guarantee that the page exists in the user mappings when
393 * __get_user_pages returns, and there may even be a completely different
394 * page there in some cases (eg. if mmapped pagecache has been invalidated
395 * and subsequently re-faulted). However it does guarantee that the page
396 * won't be freed completely. And mostly callers simply care that the page
397 * contains data that was valid *at some point in time*. Typically, an IO
398 * or similar operation cannot guarantee anything stronger anyway because
399 * locks can't be held over the syscall boundary.
400 *
401 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
402 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
403 * appropriate) must be called after the page is finished with, and
404 * before put_page is called.
405 *
406 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
407 * or mmap_sem contention, and if waiting is needed to pin all pages,
408 * *@nonblocking will be set to 0. Further, if @gup_flags does not
409 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
410 * this case.
411 *
412 * A caller using such a combination of @nonblocking and @gup_flags
413 * must therefore hold the mmap_sem for reading only, and recognize
414 * when it's been released. Otherwise, it must be held for either
415 * reading or writing and will not be released.
416 *
417 * In most cases, get_user_pages or get_user_pages_fast should be used
418 * instead of __get_user_pages. __get_user_pages should be used only if
419 * you need some special @gup_flags.
420 */
421 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
422 unsigned long start, unsigned long nr_pages,
423 unsigned int gup_flags, struct page **pages,
424 struct vm_area_struct **vmas, int *nonblocking)
425 {
426 long i = 0;
427 unsigned int page_mask;
428 struct vm_area_struct *vma = NULL;
429
430 if (!nr_pages)
431 return 0;
432
433 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
434
435 /*
436 * If FOLL_FORCE is set then do not force a full fault as the hinting
437 * fault information is unrelated to the reference behaviour of a task
438 * using the address space
439 */
440 if (!(gup_flags & FOLL_FORCE))
441 gup_flags |= FOLL_NUMA;
442
443 do {
444 struct page *page;
445 unsigned int foll_flags = gup_flags;
446 unsigned int page_increm;
447
448 /* first iteration or cross vma bound */
449 if (!vma || start >= vma->vm_end) {
450 vma = find_extend_vma(mm, start);
451 if (!vma && in_gate_area(mm, start)) {
452 int ret;
453 ret = get_gate_page(mm, start & PAGE_MASK,
454 gup_flags, &vma,
455 pages ? &pages[i] : NULL);
456 if (ret)
457 return i ? : ret;
458 page_mask = 0;
459 goto next_page;
460 }
461
462 if (!vma || check_vma_flags(vma, gup_flags))
463 return i ? : -EFAULT;
464 if (is_vm_hugetlb_page(vma)) {
465 i = follow_hugetlb_page(mm, vma, pages, vmas,
466 &start, &nr_pages, i,
467 gup_flags);
468 continue;
469 }
470 }
471 retry:
472 /*
473 * If we have a pending SIGKILL, don't keep faulting pages and
474 * potentially allocating memory.
475 */
476 if (unlikely(fatal_signal_pending(current)))
477 return i ? i : -ERESTARTSYS;
478 cond_resched();
479 page = follow_page_mask(vma, start, foll_flags, &page_mask);
480 if (!page) {
481 int ret;
482 ret = faultin_page(tsk, vma, start, &foll_flags,
483 nonblocking);
484 switch (ret) {
485 case 0:
486 goto retry;
487 case -EFAULT:
488 case -ENOMEM:
489 case -EHWPOISON:
490 return i ? i : ret;
491 case -EBUSY:
492 return i;
493 case -ENOENT:
494 goto next_page;
495 }
496 BUG();
497 }
498 if (IS_ERR(page))
499 return i ? i : PTR_ERR(page);
500 if (pages) {
501 pages[i] = page;
502 flush_anon_page(vma, page, start);
503 flush_dcache_page(page);
504 page_mask = 0;
505 }
506 next_page:
507 if (vmas) {
508 vmas[i] = vma;
509 page_mask = 0;
510 }
511 page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
512 if (page_increm > nr_pages)
513 page_increm = nr_pages;
514 i += page_increm;
515 start += page_increm * PAGE_SIZE;
516 nr_pages -= page_increm;
517 } while (nr_pages);
518 return i;
519 }
520 EXPORT_SYMBOL(__get_user_pages);
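
/*
 * Editorial sketch (not part of gup.c): one way a caller might use the
 * @nonblocking protocol documented above.  All identifiers local to this
 * example are hypothetical.  The caller takes mmap_sem for reading; if
 * *locked has been cleared on return, __get_user_pages() already dropped
 * the semaphore and it must not be released again.
 */
#if 0
static long example_pin_with_retry_hint(unsigned long start,
		unsigned long nr_pages, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages(current, mm, start, nr_pages,
			       FOLL_TOUCH | FOLL_GET, pages, NULL, &locked);
	if (locked)
		up_read(&mm->mmap_sem);
	/* if !locked, mmap_sem was released by __get_user_pages() */
	return ret;
}
#endif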
521
522 /*
523 * fixup_user_fault() - manually resolve a user page fault
524 * @tsk: the task_struct to use for page fault accounting, or
525 * NULL if faults are not to be recorded.
526 * @mm: mm_struct of target mm
527 * @address: user address
528 * @fault_flags:flags to pass down to handle_mm_fault()
529 *
530 * This is meant to be called in the specific scenario where for locking reasons
531 * we try to access user memory in atomic context (within a pagefault_disable()
532 * section), this returns -EFAULT, and we want to resolve the user fault before
533 * trying again.
534 *
535 * Typically this is meant to be used by the futex code.
536 *
537 * The main difference with get_user_pages() is that this function will
538 * unconditionally call handle_mm_fault() which will in turn perform all the
539 * necessary SW fixup of the dirty and young bits in the PTE, while
540 * get_user_pages() only guarantees to update these in the struct page.
541 *
542 * This is important for some architectures where those bits also gate the
543 * access permission to the page because they are maintained in software. On
544 * such architectures, gup() will not be enough to make a subsequent access
545 * succeed.
546 *
547 * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
548 */
549 int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
550 unsigned long address, unsigned int fault_flags)
551 {
552 struct vm_area_struct *vma;
553 vm_flags_t vm_flags;
554 int ret;
555
556 vma = find_extend_vma(mm, address);
557 if (!vma || address < vma->vm_start)
558 return -EFAULT;
559
560 vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
561 if (!(vm_flags & vma->vm_flags))
562 return -EFAULT;
563
564 ret = handle_mm_fault(mm, vma, address, fault_flags);
565 if (ret & VM_FAULT_ERROR) {
566 if (ret & VM_FAULT_OOM)
567 return -ENOMEM;
568 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
569 return -EHWPOISON;
570 if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
571 return -EFAULT;
572 BUG();
573 }
574 if (tsk) {
575 if (ret & VM_FAULT_MAJOR)
576 tsk->maj_flt++;
577 else
578 tsk->min_flt++;
579 }
580 return 0;
581 }
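
/*
 * Editorial sketch (not part of gup.c): the futex-style pattern the comment
 * above describes.  An access made under pagefault_disable() fails with
 * -EFAULT, the fault is resolved with fixup_user_fault(), and the access is
 * retried.  Helper names are hypothetical and <linux/uaccess.h> is assumed
 * to be available for pagefault_disable()/__get_user().
 */
#if 0
static int example_read_user_u32(u32 __user *uaddr, u32 *val)
{
	int ret;

	for (;;) {
		pagefault_disable();
		ret = __get_user(*val, uaddr);
		pagefault_enable();
		if (!ret)
			return 0;

		down_read(&current->mm->mmap_sem);
		/* pass FAULT_FLAG_WRITE instead of 0 for a write access */
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr, 0);
		up_read(&current->mm->mmap_sem);
		if (ret)
			return ret;
	}
}
#endif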
582
583 /*
584 * get_user_pages() - pin user pages in memory
585 * @tsk: the task_struct to use for page fault accounting, or
586 * NULL if faults are not to be recorded.
587 * @mm: mm_struct of target mm
588 * @start: starting user address
589 * @nr_pages: number of pages from start to pin
590 * @write: whether pages will be written to by the caller
591 * @force: whether to force access even when user mapping is currently
592 * protected (but never forces write access to shared mapping).
593 * @pages: array that receives pointers to the pages pinned.
594 * Should be at least nr_pages long. Or NULL, if caller
595 * only intends to ensure the pages are faulted in.
596 * @vmas: array of pointers to vmas corresponding to each page.
597 * Or NULL if the caller does not require them.
598 *
599 * Returns number of pages pinned. This may be fewer than the number
600 * requested. If nr_pages is 0 or negative, returns 0. If no pages
601 * were pinned, returns -errno. Each page returned must be released
602 * with a put_page() call when it is finished with. vmas will only
603 * remain valid while mmap_sem is held.
604 *
605 * Must be called with mmap_sem held for read or write.
606 *
607 * get_user_pages walks a process's page tables and takes a reference to
608 * each struct page that each user address corresponds to at a given
609 * instant. That is, it takes the page that would be accessed if a user
610 * thread accesses the given user virtual address at that instant.
611 *
612 * This does not guarantee that the page exists in the user mappings when
613 * get_user_pages returns, and there may even be a completely different
614 * page there in some cases (eg. if mmapped pagecache has been invalidated
615 * and subsequently re-faulted). However it does guarantee that the page
616 * won't be freed completely. And mostly callers simply care that the page
617 * contains data that was valid *at some point in time*. Typically, an IO
618 * or similar operation cannot guarantee anything stronger anyway because
619 * locks can't be held over the syscall boundary.
620 *
621 * If write=0, the page must not be written to. If the page is written to,
622 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
623 * after the page is finished with, and before put_page is called.
624 *
625 * get_user_pages is typically used for fewer-copy IO operations, to get a
626 * handle on the memory by some means other than accesses via the user virtual
627 * addresses. The pages may be submitted for DMA to devices or accessed via
628 * their kernel linear mapping (via the kmap APIs). Care should be taken to
629 * use the correct cache flushing APIs.
630 *
631 * See also get_user_pages_fast, for performance critical applications.
632 */
633 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
634 unsigned long start, unsigned long nr_pages, int write,
635 int force, struct page **pages, struct vm_area_struct **vmas)
636 {
637 int flags = FOLL_TOUCH;
638
639 if (pages)
640 flags |= FOLL_GET;
641 if (write)
642 flags |= FOLL_WRITE;
643 if (force)
644 flags |= FOLL_FORCE;
645
646 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
647 NULL);
648 }
649 EXPORT_SYMBOL(get_user_pages);
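
/*
 * Editorial sketch (not part of gup.c): the pin -> use -> dirty -> release
 * pattern described in the comment above.  Identifiers are hypothetical; a
 * real driver would handle partial pins and do its I/O between pinning and
 * releasing the pages.
 */
#if 0
static long example_pin_for_io(unsigned long uaddr, unsigned long nr_pages,
		struct page **pages, int write)
{
	struct mm_struct *mm = current->mm;
	long pinned, i;

	down_read(&mm->mmap_sem);
	pinned = get_user_pages(current, mm, uaddr, nr_pages,
				write, 0, pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;

	/* ... access pages[0..pinned-1] via kmap() or DMA here ... */

	for (i = 0; i < pinned; i++) {
		if (write)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return pinned;
}
#endif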
650
651 /**
652 * get_dump_page() - pin user page in memory while writing it to core dump
653 * @addr: user address
654 *
655 * Returns struct page pointer of user page pinned for dump,
656 * to be freed afterwards by page_cache_release() or put_page().
657 *
658 * Returns NULL on any kind of failure - a hole must then be inserted into
659 * the corefile, to preserve alignment with its headers; and also returns
660 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
661 * allowing a hole to be left in the corefile to save diskspace.
662 *
663 * Called without mmap_sem, but after all other threads have been killed.
664 */
665 #ifdef CONFIG_ELF_CORE
666 struct page *get_dump_page(unsigned long addr)
667 {
668 struct vm_area_struct *vma;
669 struct page *page;
670
671 if (__get_user_pages(current, current->mm, addr, 1,
672 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
673 NULL) < 1)
674 return NULL;
675 flush_cache_page(vma, addr, page_to_pfn(page));
676 return page;
677 }
678 #endif /* CONFIG_ELF_CORE */
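
/*
 * Editorial sketch (not part of gup.c): how a corefile writer might consume
 * get_dump_page(), in the spirit of fs/binfmt_elf.c.  The dump_emit()/
 * dump_skip() helpers and the coredump_params argument are assumptions
 * taken from the coredump code of this era, and <linux/highmem.h> is
 * assumed for kmap().
 */
#if 0
static int example_dump_one_page(struct coredump_params *cprm,
		unsigned long addr)
{
	struct page *page = get_dump_page(addr);
	int ok;

	if (page) {
		void *kaddr = kmap(page);

		ok = dump_emit(cprm, kaddr, PAGE_SIZE);
		kunmap(page);
		page_cache_release(page);
	} else {
		/* No page pinned: leave a hole in the corefile. */
		ok = dump_skip(cprm, PAGE_SIZE);
	}
	return ok;
}
#endif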
679
680 /*
681 * Generic RCU Fast GUP
682 *
683 * get_user_pages_fast attempts to pin user pages by walking the page
684 * tables directly and avoids taking locks. Thus the walker needs to be
685 * protected from page table pages being freed from under it, and should
686 * block any THP splits.
687 *
688 * One way to achieve this is to have the walker disable interrupts, and
689 * rely on IPIs from the TLB flushing code blocking before the page table
690 * pages are freed. This is unsuitable for architectures that do not need
691 * to broadcast an IPI when invalidating TLBs.
692 *
693 * Another way to achieve this is to batch up page table containing pages
694 * belonging to more than one mm_user, then rcu_sched a callback to free those
695 * pages. Disabling interrupts will allow the fast_gup walker to both block
696 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
697 * (which is a relatively rare event). The code below adopts this strategy.
698 *
699 * Before activating this code, please be aware that the following assumptions
700 * are currently made:
701 *
702 * *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
703 * pages containing page tables.
704 *
705 * *) THP splits will broadcast an IPI, this can be achieved by overriding
706 * pmdp_splitting_flush.
707 *
708 * *) ptes can be read atomically by the architecture.
709 *
710 * *) access_ok is sufficient to validate userspace address ranges.
711 *
712 * The last two assumptions can be relaxed by the addition of helper functions.
713 *
714 * This code is based heavily on the PowerPC implementation by Nick Piggin.
715 */
716 #ifdef CONFIG_HAVE_GENERIC_RCU_GUP
717
718 #ifdef __HAVE_ARCH_PTE_SPECIAL
719 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
720 int write, struct page **pages, int *nr)
721 {
722 pte_t *ptep, *ptem;
723 int ret = 0;
724
725 ptem = ptep = pte_offset_map(&pmd, addr);
726 do {
727 /*
728 * In the line below we are assuming that the pte can be read
729 * atomically. If this is not the case for your architecture,
730 * please wrap this in a helper function!
731 *
732 * for an example see gup_get_pte in arch/x86/mm/gup.c
733 */
734 pte_t pte = ACCESS_ONCE(*ptep);
735 struct page *page;
736
737 /*
738 * Similar to the PMD case below, NUMA hinting must take slow
739 * path
740 */
741 if (!pte_present(pte) || pte_special(pte) ||
742 pte_numa(pte) || (write && !pte_write(pte)))
743 goto pte_unmap;
744
745 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
746 page = pte_page(pte);
747
748 if (!page_cache_get_speculative(page))
749 goto pte_unmap;
750
751 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
752 put_page(page);
753 goto pte_unmap;
754 }
755
756 pages[*nr] = page;
757 (*nr)++;
758
759 } while (ptep++, addr += PAGE_SIZE, addr != end);
760
761 ret = 1;
762
763 pte_unmap:
764 pte_unmap(ptem);
765 return ret;
766 }
767 #else
768
769 /*
770 * If we can't determine whether or not a pte is special, then fail immediately
771 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
772 * to be special.
773 *
774 * For a futex to be placed on a THP tail page, get_futex_key requires a
775 * __get_user_pages_fast implementation that can pin pages. Thus it's still
776 * useful to have gup_huge_pmd even if we can't operate on ptes.
777 */
778 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
779 int write, struct page **pages, int *nr)
780 {
781 return 0;
782 }
783 #endif /* __HAVE_ARCH_PTE_SPECIAL */
784
785 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
786 unsigned long end, int write, struct page **pages, int *nr)
787 {
788 struct page *head, *page, *tail;
789 int refs;
790
791 if (write && !pmd_write(orig))
792 return 0;
793
794 refs = 0;
795 head = pmd_page(orig);
796 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
797 tail = page;
798 do {
799 VM_BUG_ON_PAGE(compound_head(page) != head, page);
800 pages[*nr] = page;
801 (*nr)++;
802 page++;
803 refs++;
804 } while (addr += PAGE_SIZE, addr != end);
805
806 if (!page_cache_add_speculative(head, refs)) {
807 *nr -= refs;
808 return 0;
809 }
810
811 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
812 *nr -= refs;
813 while (refs--)
814 put_page(head);
815 return 0;
816 }
817
818 /*
819 * Any tail pages need their mapcount reference taken before we
820 * return. (This allows the THP code to bump their ref count when
821 * they are split into base pages).
822 */
823 while (refs--) {
824 if (PageTail(tail))
825 get_huge_page_tail(tail);
826 tail++;
827 }
828
829 return 1;
830 }
831
832 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
833 unsigned long end, int write, struct page **pages, int *nr)
834 {
835 struct page *head, *page, *tail;
836 int refs;
837
838 if (write && !pud_write(orig))
839 return 0;
840
841 refs = 0;
842 head = pud_page(orig);
843 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
844 tail = page;
845 do {
846 VM_BUG_ON_PAGE(compound_head(page) != head, page);
847 pages[*nr] = page;
848 (*nr)++;
849 page++;
850 refs++;
851 } while (addr += PAGE_SIZE, addr != end);
852
853 if (!page_cache_add_speculative(head, refs)) {
854 *nr -= refs;
855 return 0;
856 }
857
858 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
859 *nr -= refs;
860 while (refs--)
861 put_page(head);
862 return 0;
863 }
864
865 while (refs--) {
866 if (PageTail(tail))
867 get_huge_page_tail(tail);
868 tail++;
869 }
870
871 return 1;
872 }
873
874 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
875 int write, struct page **pages, int *nr)
876 {
877 unsigned long next;
878 pmd_t *pmdp;
879
880 pmdp = pmd_offset(&pud, addr);
881 do {
882 pmd_t pmd = ACCESS_ONCE(*pmdp);
883
884 next = pmd_addr_end(addr, end);
885 if (pmd_none(pmd) || pmd_trans_splitting(pmd))
886 return 0;
887
888 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
889 /*
890 * NUMA hinting faults need to be handled in the GUP
891 * slowpath for accounting purposes and so that they
892 * can be serialised against THP migration.
893 */
894 if (pmd_numa(pmd))
895 return 0;
896
897 if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
898 pages, nr))
899 return 0;
900
901 } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
902 return 0;
903 } while (pmdp++, addr = next, addr != end);
904
905 return 1;
906 }
907
908 static int gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end,
909 int write, struct page **pages, int *nr)
910 {
911 unsigned long next;
912 pud_t *pudp;
913
914 pudp = pud_offset(pgdp, addr);
915 do {
916 pud_t pud = ACCESS_ONCE(*pudp);
917
918 next = pud_addr_end(addr, end);
919 if (pud_none(pud))
920 return 0;
921 if (pud_huge(pud)) {
922 if (!gup_huge_pud(pud, pudp, addr, next, write,
923 pages, nr))
924 return 0;
925 } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
926 return 0;
927 } while (pudp++, addr = next, addr != end);
928
929 return 1;
930 }
931
932 /*
933 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
934 * the regular GUP. It will only return non-negative values.
935 */
936 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
937 struct page **pages)
938 {
939 struct mm_struct *mm = current->mm;
940 unsigned long addr, len, end;
941 unsigned long next, flags;
942 pgd_t *pgdp;
943 int nr = 0;
944
945 start &= PAGE_MASK;
946 addr = start;
947 len = (unsigned long) nr_pages << PAGE_SHIFT;
948 end = start + len;
949
950 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
951 start, len)))
952 return 0;
953
954 /*
955 * Disable interrupts. We use the nested form as we can already have
956 * interrupts disabled by get_futex_key.
957 *
958 * With interrupts disabled, we block page table pages from being
959 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
960 * for more details.
961 *
962 * We do not adopt an rcu_read_lock(.) here as we also want to
963 * block IPIs that come from THPs splitting.
964 */
965
966 local_irq_save(flags);
967 pgdp = pgd_offset(mm, addr);
968 do {
969 next = pgd_addr_end(addr, end);
970 if (pgd_none(*pgdp))
971 break;
972 else if (!gup_pud_range(pgdp, addr, next, write, pages, &nr))
973 break;
974 } while (pgdp++, addr = next, addr != end);
975 local_irq_restore(flags);
976
977 return nr;
978 }
979
980 /**
981 * get_user_pages_fast() - pin user pages in memory
982 * @start: starting user address
983 * @nr_pages: number of pages from start to pin
984 * @write: whether pages will be written to
985 * @pages: array that receives pointers to the pages pinned.
986 * Should be at least nr_pages long.
987 *
988 * Attempt to pin user pages in memory without taking mm->mmap_sem.
989 * If not successful, it will fall back to taking the lock and
990 * calling get_user_pages().
991 *
992 * Returns number of pages pinned. This may be fewer than the number
993 * requested. If nr_pages is 0 or negative, returns 0. If no pages
994 * were pinned, returns -errno.
995 */
996 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
997 struct page **pages)
998 {
999 struct mm_struct *mm = current->mm;
1000 int nr, ret;
1001
1002 start &= PAGE_MASK;
1003 nr = __get_user_pages_fast(start, nr_pages, write, pages);
1004 ret = nr;
1005
1006 if (nr < nr_pages) {
1007 /* Try to get the remaining pages with get_user_pages */
1008 start += nr << PAGE_SHIFT;
1009 pages += nr;
1010
1011 down_read(&mm->mmap_sem);
1012 ret = get_user_pages(current, mm, start,
1013 nr_pages - nr, write, 0, pages, NULL);
1014 up_read(&mm->mmap_sem);
1015
1016 /* Have to be a bit careful with return values */
1017 if (nr > 0) {
1018 if (ret < 0)
1019 ret = nr;
1020 else
1021 ret += nr;
1022 }
1023 }
1024
1025 return ret;
1026 }
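
/*
 * Editorial sketch (not part of gup.c): get_user_pages_fast() needs no
 * mmap_sem from its caller, which is what makes it attractive on hot
 * paths.  Identifiers below are hypothetical.
 */
#if 0
static int example_fast_pin(unsigned long uaddr, int nr_pages,
		struct page **pages, int write)
{
	int pinned, i;

	pinned = get_user_pages_fast(uaddr, nr_pages, write, pages);
	if (pinned < 0)
		return pinned;

	/* ... use pages[0..pinned-1] ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	return pinned == nr_pages ? 0 : -EFAULT;
}
#endif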
1027
1028 #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
1029