1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/err.h>
4 #include <linux/spinlock.h>
5 
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/rmap.h>
9 #include <linux/swap.h>
10 #include <linux/swapops.h>
11 
12 #include <linux/sched.h>
13 #include <linux/rwsem.h>
14 #include <linux/hugetlb.h>
15 
16 #include <asm/pgtable.h>
17 #include <asm/tlbflush.h>
18 
19 #include "internal.h"
20 
21 static struct page *no_page_table(struct vm_area_struct *vma,
22 		unsigned int flags)
23 {
24 	/*
25 	 * When core dumping an enormous anonymous area that nobody
26 	 * has touched so far, we don't want to allocate unnecessary pages or
27 	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
28 	 * then get_dump_page() will return NULL to leave a hole in the dump.
29 	 * But we can only make this optimization where a hole would surely
30 	 * be zero-filled if handle_mm_fault() actually did handle it.
31 	 */
32 	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
33 		return ERR_PTR(-EFAULT);
34 	return NULL;
35 }
36 
37 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
38 		pte_t *pte, unsigned int flags)
39 {
40 	/* No page to get reference */
41 	if (flags & FOLL_GET)
42 		return -EFAULT;
43 
44 	if (flags & FOLL_TOUCH) {
45 		pte_t entry = *pte;
46 
47 		if (flags & FOLL_WRITE)
48 			entry = pte_mkdirty(entry);
49 		entry = pte_mkyoung(entry);
50 
51 		if (!pte_same(*pte, entry)) {
52 			set_pte_at(vma->vm_mm, address, pte, entry);
53 			update_mmu_cache(vma, address, pte);
54 		}
55 	}
56 
57 	/* Proper page table entry exists, but no corresponding struct page */
58 	return -EEXIST;
59 }
60 
61 /*
62  * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
63  * but only after we've gone through a COW cycle and they are dirty.
64  */
65 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
66 {
67 	return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
68 }
69 
70 /*
71  * A (separate) COW fault might break the page the other way and
72  * get_user_pages() would return the page from what is now the wrong
73  * VM. So we need to force a COW break at GUP time even for reads.
74  */
75 static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
76 {
77 	return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
78 }
79 
80 static struct page *follow_page_pte(struct vm_area_struct *vma,
81 		unsigned long address, pmd_t *pmd, unsigned int flags)
82 {
83 	struct mm_struct *mm = vma->vm_mm;
84 	struct page *page;
85 	spinlock_t *ptl;
86 	pte_t *ptep, pte;
87 
88 retry:
89 	if (unlikely(pmd_bad(*pmd)))
90 		return no_page_table(vma, flags);
91 
92 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
93 	pte = *ptep;
94 	if (!pte_present(pte)) {
95 		swp_entry_t entry;
96 		/*
97 		 * KSM's break_ksm() relies upon recognizing a ksm page
98 		 * even while it is being migrated, so for that case we
99 		 * need migration_entry_wait().
100 		 */
101 		if (likely(!(flags & FOLL_MIGRATION)))
102 			goto no_page;
103 		if (pte_none(pte))
104 			goto no_page;
105 		entry = pte_to_swp_entry(pte);
106 		if (!is_migration_entry(entry))
107 			goto no_page;
108 		pte_unmap_unlock(ptep, ptl);
109 		migration_entry_wait(mm, pmd, address);
110 		goto retry;
111 	}
112 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
113 		goto no_page;
114 	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
115 		pte_unmap_unlock(ptep, ptl);
116 		return NULL;
117 	}
118 
119 	page = vm_normal_page(vma, address, pte);
120 	if (unlikely(!page)) {
121 		if (flags & FOLL_DUMP) {
122 			/* Avoid special (like zero) pages in core dumps */
123 			page = ERR_PTR(-EFAULT);
124 			goto out;
125 		}
126 
127 		if (is_zero_pfn(pte_pfn(pte))) {
128 			page = pte_page(pte);
129 		} else {
130 			int ret;
131 
132 			ret = follow_pfn_pte(vma, address, ptep, flags);
133 			page = ERR_PTR(ret);
134 			goto out;
135 		}
136 	}
137 
138 	if (flags & FOLL_GET) {
139 		if (unlikely(!try_get_page_foll(page))) {
140 			page = ERR_PTR(-ENOMEM);
141 			goto out;
142 		}
143 	}
144 	if (flags & FOLL_TOUCH) {
145 		if ((flags & FOLL_WRITE) &&
146 		    !pte_dirty(pte) && !PageDirty(page))
147 			set_page_dirty(page);
148 		/*
149 		 * pte_mkyoung() would be more correct here, but atomic care
150 		 * is needed to avoid losing the dirty bit: it is easier to use
151 		 * mark_page_accessed().
152 		 */
153 		mark_page_accessed(page);
154 	}
155 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
156 		/*
157 		 * The preliminary mapping check is mainly to avoid the
158 		 * pointless overhead of lock_page on the ZERO_PAGE
159 		 * which might bounce very badly if there is contention.
160 		 *
161 		 * If the page is already locked, we don't need to
162 		 * handle it now - vmscan will handle it later if and
163 		 * when it attempts to reclaim the page.
164 		 */
165 		if (page->mapping && trylock_page(page)) {
166 			lru_add_drain();  /* push cached pages to LRU */
167 			/*
168 			 * Because we lock page here, and migration is
169 			 * blocked by the pte's page reference, and we
170 			 * know the page is still mapped, we don't even
171 			 * need to check for file-cache page truncation.
172 			 */
173 			mlock_vma_page(page);
174 			unlock_page(page);
175 		}
176 	}
177 out:
178 	pte_unmap_unlock(ptep, ptl);
179 	return page;
180 no_page:
181 	pte_unmap_unlock(ptep, ptl);
182 	if (!pte_none(pte))
183 		return NULL;
184 	return no_page_table(vma, flags);
185 }
186 
187 /**
188  * follow_page_mask - look up a page descriptor from a user-virtual address
189  * @vma: vm_area_struct mapping @address
190  * @address: virtual address to look up
191  * @flags: flags modifying lookup behaviour
192  * @page_mask: on output, *page_mask is set according to the size of the page
193  *
194  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
195  *
196  * Returns the mapped (struct page *), %NULL if no mapping exists, or
197  * an error pointer if there is a mapping to something not represented
198  * by a page descriptor (see also vm_normal_page()).
199  */
200 struct page *follow_page_mask(struct vm_area_struct *vma,
201 			      unsigned long address, unsigned int flags,
202 			      unsigned int *page_mask)
203 {
204 	pgd_t *pgd;
205 	pud_t *pud;
206 	pmd_t *pmd;
207 	spinlock_t *ptl;
208 	struct page *page;
209 	struct mm_struct *mm = vma->vm_mm;
210 
211 	*page_mask = 0;
212 
213 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
214 	if (!IS_ERR(page)) {
215 		BUG_ON(flags & FOLL_GET);
216 		return page;
217 	}
218 
219 	pgd = pgd_offset(mm, address);
220 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
221 		return no_page_table(vma, flags);
222 
223 	pud = pud_offset(pgd, address);
224 	if (pud_none(*pud))
225 		return no_page_table(vma, flags);
226 	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
227 		page = follow_huge_pud(mm, address, pud, flags);
228 		if (page)
229 			return page;
230 		return no_page_table(vma, flags);
231 	}
232 	if (unlikely(pud_bad(*pud)))
233 		return no_page_table(vma, flags);
234 
235 	pmd = pmd_offset(pud, address);
236 	if (pmd_none(*pmd))
237 		return no_page_table(vma, flags);
238 	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
239 		page = follow_huge_pmd(mm, address, pmd, flags);
240 		if (page)
241 			return page;
242 		return no_page_table(vma, flags);
243 	}
244 	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
245 		return no_page_table(vma, flags);
246 	if (pmd_trans_huge(*pmd)) {
247 		if (flags & FOLL_SPLIT) {
248 			split_huge_page_pmd(vma, address, pmd);
249 			return follow_page_pte(vma, address, pmd, flags);
250 		}
251 		ptl = pmd_lock(mm, pmd);
252 		if (likely(pmd_trans_huge(*pmd))) {
253 			if (unlikely(pmd_trans_splitting(*pmd))) {
254 				spin_unlock(ptl);
255 				wait_split_huge_page(vma->anon_vma, pmd);
256 			} else {
257 				page = follow_trans_huge_pmd(vma, address,
258 							     pmd, flags);
259 				spin_unlock(ptl);
260 				*page_mask = HPAGE_PMD_NR - 1;
261 				return page;
262 			}
263 		} else
264 			spin_unlock(ptl);
265 	}
266 	return follow_page_pte(vma, address, pmd, flags);
267 }
268 
269 static int get_gate_page(struct mm_struct *mm, unsigned long address,
270 		unsigned int gup_flags, struct vm_area_struct **vma,
271 		struct page **page)
272 {
273 	pgd_t *pgd;
274 	pud_t *pud;
275 	pmd_t *pmd;
276 	pte_t *pte;
277 	int ret = -EFAULT;
278 
279 	/* user gate pages are read-only */
280 	if (gup_flags & FOLL_WRITE)
281 		return -EFAULT;
282 	if (address > TASK_SIZE)
283 		pgd = pgd_offset_k(address);
284 	else
285 		pgd = pgd_offset_gate(mm, address);
286 	BUG_ON(pgd_none(*pgd));
287 	pud = pud_offset(pgd, address);
288 	BUG_ON(pud_none(*pud));
289 	pmd = pmd_offset(pud, address);
290 	if (pmd_none(*pmd))
291 		return -EFAULT;
292 	VM_BUG_ON(pmd_trans_huge(*pmd));
293 	pte = pte_offset_map(pmd, address);
294 	if (pte_none(*pte))
295 		goto unmap;
296 	*vma = get_gate_vma(mm);
297 	if (!page)
298 		goto out;
299 	*page = vm_normal_page(*vma, address, *pte);
300 	if (!*page) {
301 		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
302 			goto unmap;
303 		*page = pte_page(*pte);
304 	}
305 	if (unlikely(!try_get_page(*page))) {
306 		ret = -ENOMEM;
307 		goto unmap;
308 	}
309 out:
310 	ret = 0;
311 unmap:
312 	pte_unmap(pte);
313 	return ret;
314 }
315 
316 /*
317  * mmap_sem must be held on entry.  If @nonblocking != NULL and
318  * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
319  * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
320  */
321 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
322 		unsigned long address, unsigned int *flags, int *nonblocking)
323 {
324 	struct mm_struct *mm = vma->vm_mm;
325 	unsigned int fault_flags = 0;
326 	int ret;
327 
328 	/* mlock all present pages, but do not fault in new pages */
329 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
330 		return -ENOENT;
331 	if (*flags & FOLL_WRITE)
332 		fault_flags |= FAULT_FLAG_WRITE;
333 	if (nonblocking)
334 		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
335 	if (*flags & FOLL_NOWAIT)
336 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
337 	if (*flags & FOLL_TRIED) {
338 		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
339 		fault_flags |= FAULT_FLAG_TRIED;
340 	}
341 
342 	ret = handle_mm_fault(mm, vma, address, fault_flags);
343 	if (ret & VM_FAULT_ERROR) {
344 		if (ret & VM_FAULT_OOM)
345 			return -ENOMEM;
346 		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
347 			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
348 		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
349 			return -EFAULT;
350 		BUG();
351 	}
352 
353 	if (tsk) {
354 		if (ret & VM_FAULT_MAJOR)
355 			tsk->maj_flt++;
356 		else
357 			tsk->min_flt++;
358 	}
359 
360 	if (ret & VM_FAULT_RETRY) {
361 		if (nonblocking)
362 			*nonblocking = 0;
363 		return -EBUSY;
364 	}
365 
366 	/*
367 	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
368 	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
369 	 * can thus safely do subsequent page lookups as if they were reads.
370 	 * But only do so when looping for pte_write is futile: in some cases
371 	 * userspace may also be wanting to write to the gotten user page,
372 	 * which a read fault here might prevent (a readonly page might get
373 	 * reCOWed by userspace write).
374 	 */
375 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
376 	        *flags |= FOLL_COW;
377 	return 0;
378 }
379 
380 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
381 {
382 	vm_flags_t vm_flags = vma->vm_flags;
383 
384 	if (vm_flags & (VM_IO | VM_PFNMAP))
385 		return -EFAULT;
386 
387 	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
388 		return -EFAULT;
389 
390 	if (gup_flags & FOLL_WRITE) {
391 		if (!(vm_flags & VM_WRITE)) {
392 			if (!(gup_flags & FOLL_FORCE))
393 				return -EFAULT;
394 			/*
395 			 * We used to let the write,force case do COW in a
396 			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
397 			 * set a breakpoint in a read-only mapping of an
398 			 * executable, without corrupting the file (yet only
399 			 * when that file had been opened for writing!).
400 			 * Anon pages in shared mappings are surprising: now
401 			 * just reject it.
402 			 */
403 			if (!is_cow_mapping(vm_flags)) {
404 				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
405 				return -EFAULT;
406 			}
407 		}
408 	} else if (!(vm_flags & VM_READ)) {
409 		if (!(gup_flags & FOLL_FORCE))
410 			return -EFAULT;
411 		/*
412 		 * Is there actually any vma we can reach here which does not
413 		 * have VM_MAYREAD set?
414 		 */
415 		if (!(vm_flags & VM_MAYREAD))
416 			return -EFAULT;
417 	}
418 	return 0;
419 }
420 
421 /**
422  * __get_user_pages() - pin user pages in memory
423  * @tsk:	task_struct of target task
424  * @mm:		mm_struct of target mm
425  * @start:	starting user address
426  * @nr_pages:	number of pages from start to pin
427  * @gup_flags:	flags modifying pin behaviour
428  * @pages:	array that receives pointers to the pages pinned.
429  *		Should be at least nr_pages long. Or NULL, if caller
430  *		only intends to ensure the pages are faulted in.
431  * @vmas:	array of pointers to vmas corresponding to each page.
432  *		Or NULL if the caller does not require them.
433  * @nonblocking: whether waiting for disk IO or mmap_sem contention
434  *
435  * Returns number of pages pinned. This may be fewer than the number
436  * requested. If nr_pages is 0 or negative, returns 0. If no pages
437  * were pinned, returns -errno. Each page returned must be released
438  * with a put_page() call when it is finished with. vmas will only
439  * remain valid while mmap_sem is held.
440  *
441  * Must be called with mmap_sem held.  It may be released.  See below.
442  *
443  * __get_user_pages walks a process's page tables and takes a reference to
444  * each struct page that each user address corresponds to at a given
445  * instant. That is, it takes the page that would be accessed if a user
446  * thread accesses the given user virtual address at that instant.
447  *
448  * This does not guarantee that the page exists in the user mappings when
449  * __get_user_pages returns, and there may even be a completely different
450  * page there in some cases (e.g. if mmapped pagecache has been invalidated
451  * and subsequently refaulted). However it does guarantee that the page
452  * won't be freed completely. And mostly callers simply care that the page
453  * contains data that was valid *at some point in time*. Typically, an IO
454  * or similar operation cannot guarantee anything stronger anyway because
455  * locks can't be held over the syscall boundary.
456  *
457  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
458  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
459  * appropriate) must be called after the page is finished with, and
460  * before put_page is called.
461  *
462  * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
463  * or mmap_sem contention, and if waiting is needed to pin all pages,
464  * *@nonblocking will be set to 0.  Further, if @gup_flags does not
465  * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
466  * this case.
467  *
468  * A caller using such a combination of @nonblocking and @gup_flags
469  * must therefore hold the mmap_sem for reading only, and recognize
470  * when it's been released.  Otherwise, it must be held for either
471  * reading or writing and will not be released.
472  *
473  * In most cases, get_user_pages or get_user_pages_fast should be used
474  * instead of __get_user_pages. __get_user_pages should be used only if
475  * you need some special @gup_flags.
476  */
477 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
478 		unsigned long start, unsigned long nr_pages,
479 		unsigned int gup_flags, struct page **pages,
480 		struct vm_area_struct **vmas, int *nonblocking)
481 {
482 	long i = 0;
483 	unsigned int page_mask;
484 	struct vm_area_struct *vma = NULL;
485 
486 	if (!nr_pages)
487 		return 0;
488 
489 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
490 
491 	/*
492 	 * If FOLL_FORCE is set then do not force a full fault as the hinting
493 	 * fault information is unrelated to the reference behaviour of a task
494 	 * using the address space
495 	 */
496 	if (!(gup_flags & FOLL_FORCE))
497 		gup_flags |= FOLL_NUMA;
498 
499 	do {
500 		struct page *page;
501 		unsigned int foll_flags = gup_flags;
502 		unsigned int page_increm;
503 
504 		/* first iteration or cross vma bound */
505 		if (!vma || start >= vma->vm_end) {
506 			vma = find_extend_vma(mm, start);
507 			if (!vma && in_gate_area(mm, start)) {
508 				int ret;
509 				ret = get_gate_page(mm, start & PAGE_MASK,
510 						gup_flags, &vma,
511 						pages ? &pages[i] : NULL);
512 				if (ret)
513 					return i ? : ret;
514 				page_mask = 0;
515 				goto next_page;
516 			}
517 
518 			if (!vma || check_vma_flags(vma, gup_flags))
519 				return i ? : -EFAULT;
520 			if (is_vm_hugetlb_page(vma)) {
521 				if (should_force_cow_break(vma, foll_flags))
522 					foll_flags |= FOLL_WRITE;
523 				i = follow_hugetlb_page(mm, vma, pages, vmas,
524 						&start, &nr_pages, i,
525 						foll_flags);
526 				continue;
527 			}
528 		}
529 
530 		if (should_force_cow_break(vma, foll_flags))
531 			foll_flags |= FOLL_WRITE;
532 
533 retry:
534 		/*
535 		 * If we have a pending SIGKILL, don't keep faulting pages and
536 		 * potentially allocating memory.
537 		 */
538 		if (unlikely(fatal_signal_pending(current)))
539 			return i ? i : -ERESTARTSYS;
540 		cond_resched();
541 		page = follow_page_mask(vma, start, foll_flags, &page_mask);
542 		if (!page) {
543 			int ret;
544 			ret = faultin_page(tsk, vma, start, &foll_flags,
545 					nonblocking);
546 			switch (ret) {
547 			case 0:
548 				goto retry;
549 			case -EFAULT:
550 			case -ENOMEM:
551 			case -EHWPOISON:
552 				return i ? i : ret;
553 			case -EBUSY:
554 				return i;
555 			case -ENOENT:
556 				goto next_page;
557 			}
558 			BUG();
559 		} else if (PTR_ERR(page) == -EEXIST) {
560 			/*
561 			 * Proper page table entry exists, but no corresponding
562 			 * struct page.
563 			 */
564 			goto next_page;
565 		} else if (IS_ERR(page)) {
566 			return i ? i : PTR_ERR(page);
567 		}
568 		if (pages) {
569 			pages[i] = page;
570 			flush_anon_page(vma, page, start);
571 			flush_dcache_page(page);
572 			page_mask = 0;
573 		}
574 next_page:
575 		if (vmas) {
576 			vmas[i] = vma;
577 			page_mask = 0;
578 		}
579 		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
580 		if (page_increm > nr_pages)
581 			page_increm = nr_pages;
582 		i += page_increm;
583 		start += page_increm * PAGE_SIZE;
584 		nr_pages -= page_increm;
585 	} while (nr_pages);
586 	return i;
587 }
588 EXPORT_SYMBOL(__get_user_pages);
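
/*
 * Illustrative sketch, not part of the original file: one way a caller
 * can use the @nonblocking protocol documented above.  The function
 * name example_pin_pages is made up; the __get_user_pages() call uses
 * the signature defined in this file.
 */
static long example_pin_pages(struct mm_struct *mm, unsigned long start,
			      unsigned long nr_pages, struct page **pages)
{
	int locked = 1;
	long got;

	down_read(&mm->mmap_sem);
	got = __get_user_pages(current, mm, start, nr_pages,
			       FOLL_WRITE | FOLL_GET | FOLL_TOUCH,
			       pages, NULL, &locked);
	/*
	 * Without FOLL_NOWAIT, __get_user_pages() releases mmap_sem itself
	 * when it clears *locked, so only unlock when it is still held.
	 */
	if (locked)
		up_read(&mm->mmap_sem);

	/* each page in pages[0..got) must later be released with put_page() */
	return got;
}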
589 
590 /*
591  * fixup_user_fault() - manually resolve a user page fault
592  * @tsk:	the task_struct to use for page fault accounting, or
593  *		NULL if faults are not to be recorded.
594  * @mm:		mm_struct of target mm
595  * @address:	user address
596  * @fault_flags: flags to pass down to handle_mm_fault()
597  *
598  * This is meant for the specific scenario where, for locking reasons, we
599  * try to access user memory in atomic context (within a pagefault_disable()
600  * section), that access fails with -EFAULT, and we want to resolve the user
601  * fault before trying again.
602  *
603  * Typically this is meant to be used by the futex code.
604  *
605  * The main difference with get_user_pages() is that this function will
606  * unconditionally call handle_mm_fault() which will in turn perform all the
607  * necessary SW fixup of the dirty and young bits in the PTE, while
608  * get_user_pages() only guarantees to update these in the struct page.
609  *
610  * This is important for some architectures where those bits also gate the
611  * access permission to the page because they are maintained in software.  On
612  * such architectures, gup() will not be enough to make a subsequent access
613  * succeed.
614  *
615  * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
616  */
617 int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
618 		     unsigned long address, unsigned int fault_flags)
619 {
620 	struct vm_area_struct *vma;
621 	vm_flags_t vm_flags;
622 	int ret;
623 
624 	vma = find_extend_vma(mm, address);
625 	if (!vma || address < vma->vm_start)
626 		return -EFAULT;
627 
628 	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
629 	if (!(vm_flags & vma->vm_flags))
630 		return -EFAULT;
631 
632 	ret = handle_mm_fault(mm, vma, address, fault_flags);
633 	if (ret & VM_FAULT_ERROR) {
634 		if (ret & VM_FAULT_OOM)
635 			return -ENOMEM;
636 		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
637 			return -EHWPOISON;
638 		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
639 			return -EFAULT;
640 		BUG();
641 	}
642 	if (tsk) {
643 		if (ret & VM_FAULT_MAJOR)
644 			tsk->maj_flt++;
645 		else
646 			tsk->min_flt++;
647 	}
648 	return 0;
649 }
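
/*
 * Illustrative sketch, not part of the original file: a futex-style
 * caller of fixup_user_fault() as described in the comment above.  The
 * atomic access was attempted under pagefault_disable(), failed with
 * -EFAULT, and is resolved here before retrying.  The function name
 * example_fault_in_writeable is made up.
 */
static int example_fault_in_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}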
650 
651 static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
652 						struct mm_struct *mm,
653 						unsigned long start,
654 						unsigned long nr_pages,
655 						struct page **pages,
656 						struct vm_area_struct **vmas,
657 						int *locked, bool notify_drop,
658 						unsigned int flags)
659 {
660 	long ret, pages_done;
661 	bool lock_dropped;
662 
663 	if (locked) {
664 		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
665 		BUG_ON(vmas);
666 		/* check caller initialized locked */
667 		BUG_ON(*locked != 1);
668 	}
669 
670 	if (pages)
671 		flags |= FOLL_GET;
672 
673 	pages_done = 0;
674 	lock_dropped = false;
675 	for (;;) {
676 		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
677 				       vmas, locked);
678 		if (!locked)
679 			/* VM_FAULT_RETRY couldn't trigger, bypass */
680 			return ret;
681 
682 		/* VM_FAULT_RETRY cannot return errors */
683 		if (!*locked) {
684 			BUG_ON(ret < 0);
685 			BUG_ON(ret >= nr_pages);
686 		}
687 
688 		if (!pages)
689 			/* If it's a prefault don't insist harder */
690 			return ret;
691 
692 		if (ret > 0) {
693 			nr_pages -= ret;
694 			pages_done += ret;
695 			if (!nr_pages)
696 				break;
697 		}
698 		if (*locked) {
699 			/* VM_FAULT_RETRY didn't trigger */
700 			if (!pages_done)
701 				pages_done = ret;
702 			break;
703 		}
704 		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
705 		pages += ret;
706 		start += ret << PAGE_SHIFT;
707 
708 		/*
709 		 * Repeat on the address that fired VM_FAULT_RETRY
710 		 * without FAULT_FLAG_ALLOW_RETRY but with
711 		 * FAULT_FLAG_TRIED.
712 		 */
713 		*locked = 1;
714 		lock_dropped = true;
715 		down_read(&mm->mmap_sem);
716 		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
717 				       pages, NULL, NULL);
718 		if (ret != 1) {
719 			BUG_ON(ret > 1);
720 			if (!pages_done)
721 				pages_done = ret;
722 			break;
723 		}
724 		nr_pages--;
725 		pages_done++;
726 		if (!nr_pages)
727 			break;
728 		pages++;
729 		start += PAGE_SIZE;
730 	}
731 	if (notify_drop && lock_dropped && *locked) {
732 		/*
733 		 * We must let the caller know we temporarily dropped the lock
734 		 * and so the critical section protected by it was lost.
735 		 */
736 		up_read(&mm->mmap_sem);
737 		*locked = 0;
738 	}
739 	return pages_done;
740 }
741 
742 /*
743  * We can leverage the VM_FAULT_RETRY functionality in the page fault
744  * paths better by using either get_user_pages_locked() or
745  * get_user_pages_unlocked().
746  *
747  * get_user_pages_locked() is suitable to replace the form:
748  *
749  *      down_read(&mm->mmap_sem);
750  *      do_something()
751  *      get_user_pages(tsk, mm, ..., pages, NULL);
752  *      up_read(&mm->mmap_sem);
753  *
754  *  to:
755  *
756  *      int locked = 1;
757  *      down_read(&mm->mmap_sem);
758  *      do_something()
759  *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
760  *      if (locked)
761  *          up_read(&mm->mmap_sem);
762  */
763 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
764 			   unsigned long start, unsigned long nr_pages,
765 			   unsigned int gup_flags, struct page **pages,
766 			   int *locked)
767 {
768 	return __get_user_pages_locked(tsk, mm, start, nr_pages,
769 				       pages, NULL, locked, true,
770 				       gup_flags | FOLL_TOUCH);
771 }
772 EXPORT_SYMBOL(get_user_pages_locked);
773 
774 /*
775  * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows passing
776  * additional gup_flags as the last parameter (like FOLL_HWPOISON).
777  *
778  * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
779  * caller if required (just like with __get_user_pages). FOLL_GET is set
780  * implicitly when @pages is non-NULL; FOLL_WRITE and FOLL_FORCE are not
781  * set implicitly and must be passed in @gup_flags by the caller when
782  * needed.
783  */
784 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
785 					       unsigned long start, unsigned long nr_pages,
786 					       struct page **pages, unsigned int gup_flags)
787 {
788 	long ret;
789 	int locked = 1;
790 
791 	down_read(&mm->mmap_sem);
792 	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
793 				      &locked, false, gup_flags);
794 	if (locked)
795 		up_read(&mm->mmap_sem);
796 	return ret;
797 }
798 EXPORT_SYMBOL(__get_user_pages_unlocked);
799 
800 /*
801  * get_user_pages_unlocked() is suitable to replace the form:
802  *
803  *      down_read(&mm->mmap_sem);
804  *      get_user_pages(tsk, mm, ..., pages, NULL);
805  *      up_read(&mm->mmap_sem);
806  *
807  *  with:
808  *
809  *      get_user_pages_unlocked(tsk, mm, ..., pages);
810  *
811  * It is functionally equivalent to get_user_pages_fast, so
812  * get_user_pages_fast should be used instead whenever the two parameters
813  * "tsk" and "mm" are respectively equal to current and current->mm,
814  * unless FOLL_FORCE is required (get_user_pages_fast has no way to
815  * pass it).
816  */
817 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
818 			     unsigned long start, unsigned long nr_pages,
819 			     struct page **pages, unsigned int gup_flags)
820 {
821 	return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
822 					 pages, gup_flags | FOLL_TOUCH);
823 }
824 EXPORT_SYMBOL(get_user_pages_unlocked);
825 
826 /*
827  * get_user_pages() - pin user pages in memory
828  * @tsk:	the task_struct to use for page fault accounting, or
829  *		NULL if faults are not to be recorded.
830  * @mm:		mm_struct of target mm
831  * @start:	starting user address
832  * @nr_pages:	number of pages from start to pin
833  * @gup_flags:	flags modifying pin behaviour: FOLL_WRITE if the pages will
834  *		be written to, FOLL_FORCE to force access even when the mapping
835  *		is protected (but never forces write access to a shared mapping).
836  * @pages:	array that receives pointers to the pages pinned.
837  *		Should be at least nr_pages long. Or NULL, if caller
838  *		only intends to ensure the pages are faulted in.
839  * @vmas:	array of pointers to vmas corresponding to each page.
840  *		Or NULL if the caller does not require them.
841  *
842  * Returns number of pages pinned. This may be fewer than the number
843  * requested. If nr_pages is 0 or negative, returns 0. If no pages
844  * were pinned, returns -errno. Each page returned must be released
845  * with a put_page() call when it is finished with. vmas will only
846  * remain valid while mmap_sem is held.
847  *
848  * Must be called with mmap_sem held for read or write.
849  *
850  * get_user_pages walks a process's page tables and takes a reference to
851  * each struct page that each user address corresponds to at a given
852  * instant. That is, it takes the page that would be accessed if a user
853  * thread accesses the given user virtual address at that instant.
854  *
855  * This does not guarantee that the page exists in the user mappings when
856  * get_user_pages returns, and there may even be a completely different
857  * page there in some cases (e.g. if mmapped pagecache has been invalidated
858  * and subsequently refaulted). However it does guarantee that the page
859  * won't be freed completely. And mostly callers simply care that the page
860  * contains data that was valid *at some point in time*. Typically, an IO
861  * or similar operation cannot guarantee anything stronger anyway because
862  * locks can't be held over the syscall boundary.
863  *
864  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If the
865  * page is written to, set_page_dirty (or set_page_dirty_lock, as appropriate)
866  * must be called after the page is finished with, and before put_page is called.
867  *
868  * get_user_pages is typically used for fewer-copy IO operations, to get a
869  * handle on the memory by some means other than accesses via the user virtual
870  * addresses. The pages may be submitted for DMA to devices or accessed via
871  * their kernel linear mapping (via the kmap APIs). Care should be taken to
872  * use the correct cache flushing APIs.
873  *
874  * See also get_user_pages_fast, for performance critical applications.
875  *
876  * get_user_pages should be phased out in favor of
877  * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
878  * should use get_user_pages because it cannot pass
879  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
880  */
881 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
882 		unsigned long start, unsigned long nr_pages,
883 		unsigned int gup_flags, struct page **pages,
884 		struct vm_area_struct **vmas)
885 {
886 	return __get_user_pages_locked(tsk, mm, start, nr_pages,
887 				       pages, vmas, NULL, false,
888 				       gup_flags | FOLL_TOUCH);
889 }
890 EXPORT_SYMBOL(get_user_pages);
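
/*
 * Illustrative sketch, not part of the original file: the pin / use /
 * dirty / release pattern described in the comment above.  The function
 * name example_pin_for_dma is made up; the get_user_pages() call uses
 * the signature defined in this file.
 */
static long example_pin_for_dma(unsigned long uaddr, unsigned long nr,
				struct page **pages)
{
	struct mm_struct *mm = current->mm;
	long got, i;

	down_read(&mm->mmap_sem);
	got = get_user_pages(current, mm, uaddr, nr, FOLL_WRITE, pages, NULL);
	up_read(&mm->mmap_sem);
	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... hand pages[0..got) to DMA, or access them via kmap() ... */

	for (i = 0; i < got; i++) {
		set_page_dirty_lock(pages[i]);	/* the pages were written to */
		put_page(pages[i]);
	}
	return got;
}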
891 
892 /**
893  * populate_vma_page_range() -  populate a range of pages in the vma.
894  * @vma:   target vma
895  * @start: start address
896  * @end:   end address
897  * @nonblocking: if non-NULL, the mmap_sem may be released; see below
898  *
899  * This takes care of mlocking the pages too if VM_LOCKED is set.
900  *
901  * return the number of pages faulted in, or a negative error code on error.
902  *
903  * vma->vm_mm->mmap_sem must be held.
904  *
905  * If @nonblocking is NULL, it may be held for read or write and will
906  * be unperturbed.
907  *
908  * If @nonblocking is non-NULL, it must be held for read only and may be
909  * released.  If it's released, *@nonblocking will be set to 0.
910  */
911 long populate_vma_page_range(struct vm_area_struct *vma,
912 		unsigned long start, unsigned long end, int *nonblocking)
913 {
914 	struct mm_struct *mm = vma->vm_mm;
915 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
916 	int gup_flags;
917 
918 	VM_BUG_ON(start & ~PAGE_MASK);
919 	VM_BUG_ON(end   & ~PAGE_MASK);
920 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
921 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
922 	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
923 
924 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
925 	if (vma->vm_flags & VM_LOCKONFAULT)
926 		gup_flags &= ~FOLL_POPULATE;
927 
928 	/*
929 	 * We want to touch writable mappings with a write fault in order
930 	 * to break COW, except for shared mappings because these don't COW
931 	 * and we would not want to dirty them for nothing.
932 	 */
933 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
934 		gup_flags |= FOLL_WRITE;
935 
936 	/*
937 	 * We want mlock to succeed for regions that have any permissions
938 	 * other than PROT_NONE.
939 	 */
940 	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
941 		gup_flags |= FOLL_FORCE;
942 
943 	/*
944 	 * We made sure addr is within a VMA, so the following will
945 	 * not result in a stack expansion that recurses back here.
946 	 */
947 	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
948 				NULL, NULL, nonblocking);
949 }
950 
951 /*
952  * __mm_populate - populate and/or mlock pages within a range of address space.
953  *
954  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
955  * flags. VMAs must be already marked with the desired vm_flags, and
956  * mmap_sem must not be held.
957  */
958 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
959 {
960 	struct mm_struct *mm = current->mm;
961 	unsigned long end, nstart, nend;
962 	struct vm_area_struct *vma = NULL;
963 	int locked = 0;
964 	long ret = 0;
965 
966 	end = start + len;
967 
968 	for (nstart = start; nstart < end; nstart = nend) {
969 		/*
970 		 * We want to fault in pages for [nstart; end) address range.
971 		 * Find first corresponding VMA.
972 		 */
973 		if (!locked) {
974 			locked = 1;
975 			down_read(&mm->mmap_sem);
976 			vma = find_vma(mm, nstart);
977 		} else if (nstart >= vma->vm_end)
978 			vma = vma->vm_next;
979 		if (!vma || vma->vm_start >= end)
980 			break;
981 		/*
982 		 * Set [nstart; nend) to intersection of desired address
983 		 * range with the first VMA. Also, skip undesirable VMA types.
984 		 */
985 		nend = min(end, vma->vm_end);
986 		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
987 			continue;
988 		if (nstart < vma->vm_start)
989 			nstart = vma->vm_start;
990 		/*
991 		 * Now fault in a range of pages. populate_vma_page_range()
992 		 * double checks the vma flags, so that it won't mlock pages
993 		 * if the vma was already munlocked.
994 		 */
995 		ret = populate_vma_page_range(vma, nstart, nend, &locked);
996 		if (ret < 0) {
997 			if (ignore_errors) {
998 				ret = 0;
999 				continue;	/* continue at next VMA */
1000 			}
1001 			break;
1002 		}
1003 		nend = nstart + ret * PAGE_SIZE;
1004 		ret = 0;
1005 	}
1006 	if (locked)
1007 		up_read(&mm->mmap_sem);
1008 	return ret;	/* 0 or negative error code */
1009 }
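
/*
 * Illustrative sketch, not part of the original file: an mlock()-style
 * path using __mm_populate() after the vmas have been marked VM_LOCKED,
 * as described in the comment above.  example_mlock_range is a made-up
 * name and the VM_LOCKED bookkeeping is deliberately omitted.
 */
static int example_mlock_range(unsigned long start, size_t len)
{
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	/*
	 * The real mlock() path first marks the affected vmas VM_LOCKED
	 * under mmap_sem held for write; that step is not shown here.
	 */

	/* fault in (and mlock) everything in the range, reporting errors */
	return __mm_populate(start, len, 0);
}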
1010 
1011 /**
1012  * get_dump_page() - pin user page in memory while writing it to core dump
1013  * @addr: user address
1014  *
1015  * Returns struct page pointer of user page pinned for dump,
1016  * to be freed afterwards by page_cache_release() or put_page().
1017  *
1018  * Returns NULL on any kind of failure - a hole must then be inserted into
1019  * the corefile, to preserve alignment with its headers; and also returns
1020  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1021  * allowing a hole to be left in the corefile to save diskspace.
1022  *
1023  * Called without mmap_sem, but after all other threads have been killed.
1024  */
1025 #ifdef CONFIG_ELF_CORE
1026 struct page *get_dump_page(unsigned long addr)
1027 {
1028 	struct vm_area_struct *vma;
1029 	struct page *page;
1030 
1031 	if (__get_user_pages(current, current->mm, addr, 1,
1032 			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
1033 			     NULL) < 1)
1034 		return NULL;
1035 	flush_cache_page(vma, addr, page_to_pfn(page));
1036 	return page;
1037 }
1038 #endif /* CONFIG_ELF_CORE */
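
/*
 * Illustrative sketch, not part of the original file: how a coredump
 * writer consumes get_dump_page(), leaving a hole in the core file when
 * NULL is returned.  dump_emit()/dump_skip() follow the fs/coredump.c
 * helpers; the function name example_dump_user_range is made up.
 */
#ifdef CONFIG_ELF_CORE
static int example_dump_user_range(struct coredump_params *cprm,
				   unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);
		int ok;

		if (page) {
			void *kaddr = kmap(page);

			ok = dump_emit(cprm, kaddr, PAGE_SIZE);
			kunmap(page);
			put_page(page);
		} else {
			/* leave a zero-filled hole in the core file */
			ok = dump_skip(cprm, PAGE_SIZE);
		}
		if (!ok)
			return 0;
	}
	return 1;
}
#endif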
1039 
1040 /*
1041  * Generic RCU Fast GUP
1042  *
1043  * get_user_pages_fast attempts to pin user pages by walking the page
1044  * tables directly and avoids taking locks. Thus the walker needs to be
1045  * protected from page table pages being freed from under it, and should
1046  * block any THP splits.
1047  *
1048  * One way to achieve this is to have the walker disable interrupts, and
1049  * rely on IPIs from the TLB flushing code blocking before the page table
1050  * pages are freed. This is unsuitable for architectures that do not need
1051  * to broadcast an IPI when invalidating TLBs.
1052  *
1053  * Another way to achieve this is to batch up the pages containing page tables
1054  * belonging to more than one mm_user, then rcu_sched a callback to free those
1055  * pages. Disabling interrupts will allow the fast_gup walker to both block
1056  * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
1057  * (which is a relatively rare event). The code below adopts this strategy.
1058  *
1059  * Before activating this code, please be aware that the following assumptions
1060  * are currently made:
1061  *
1062  *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
1063  *      pages containing page tables.
1064  *
1065  *  *) THP splits will broadcast an IPI, this can be achieved by overriding
1066  *      pmdp_splitting_flush.
1067  *
1068  *  *) ptes can be read atomically by the architecture.
1069  *
1070  *  *) access_ok is sufficient to validate userspace address ranges.
1071  *
1072  * The last two assumptions can be relaxed by the addition of helper functions.
1073  *
1074  * This code is based heavily on the PowerPC implementation by Nick Piggin.
1075  */
1076 #ifdef CONFIG_HAVE_GENERIC_RCU_GUP
1077 
1078 /*
1079  * Return the compound head page with ref appropriately incremented,
1080  * or NULL if that failed.
1081  */
1082 static inline struct page *try_get_compound_head(struct page *page, int refs)
1083 {
1084 	struct page *head = compound_head(page);
1085 	if (WARN_ON_ONCE(atomic_read(&head->_count) < 0))
1086 		return NULL;
1087 	if (unlikely(!page_cache_add_speculative(head, refs)))
1088 		return NULL;
1089 	return head;
1090 }
1091 
1092 #ifdef __HAVE_ARCH_PTE_SPECIAL
1093 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1094 			 int write, struct page **pages, int *nr)
1095 {
1096 	pte_t *ptep, *ptem;
1097 	int ret = 0;
1098 
1099 	ptem = ptep = pte_offset_map(&pmd, addr);
1100 	do {
1101 		/*
1102 		 * In the line below we are assuming that the pte can be read
1103 		 * atomically. If this is not the case for your architecture,
1104 		 * please wrap this in a helper function!
1105 		 *
1106 		 * for an example see gup_get_pte in arch/x86/mm/gup.c
1107 		 */
1108 		pte_t pte = READ_ONCE(*ptep);
1109 		struct page *page;
1110 
1111 		/*
1112 		 * Similar to the PMD case below, NUMA hinting must take slow
1113 		 * path using the pte_protnone check.
1114 		 */
1115 		if (!pte_present(pte) || pte_special(pte) ||
1116 			pte_protnone(pte) || (write && !pte_write(pte)))
1117 			goto pte_unmap;
1118 
1119 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1120 		page = pte_page(pte);
1121 
1122 		if (WARN_ON_ONCE(page_ref_count(page) < 0))
1123 			goto pte_unmap;
1124 
1125 		if (!page_cache_get_speculative(page))
1126 			goto pte_unmap;
1127 
1128 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
1129 			put_page(page);
1130 			goto pte_unmap;
1131 		}
1132 
1133 		pages[*nr] = page;
1134 		(*nr)++;
1135 
1136 	} while (ptep++, addr += PAGE_SIZE, addr != end);
1137 
1138 	ret = 1;
1139 
1140 pte_unmap:
1141 	pte_unmap(ptem);
1142 	return ret;
1143 }
1144 #else
1145 
1146 /*
1147  * If we can't determine whether or not a pte is special, then fail immediately
1148  * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
1149  * to be special.
1150  *
1151  * For a futex to be placed on a THP tail page, get_futex_key requires a
1152  * __get_user_pages_fast implementation that can pin pages. Thus it's still
1153  * useful to have gup_huge_pmd even if we can't operate on ptes.
1154  */
1155 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1156 			 int write, struct page **pages, int *nr)
1157 {
1158 	return 0;
1159 }
1160 #endif /* __HAVE_ARCH_PTE_SPECIAL */
1161 
1162 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
1163 		unsigned long end, int write, struct page **pages, int *nr)
1164 {
1165 	struct page *head, *page, *tail;
1166 	int refs;
1167 
1168 	if (write && !pmd_write(orig))
1169 		return 0;
1170 
1171 	refs = 0;
1172 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1173 	tail = page;
1174 	do {
1175 		pages[*nr] = page;
1176 		(*nr)++;
1177 		page++;
1178 		refs++;
1179 	} while (addr += PAGE_SIZE, addr != end);
1180 
1181 	head = try_get_compound_head(pmd_page(orig), refs);
1182 	if (!head) {
1183 		*nr -= refs;
1184 		return 0;
1185 	}
1186 
1187 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
1188 		*nr -= refs;
1189 		while (refs--)
1190 			put_page(head);
1191 		return 0;
1192 	}
1193 
1194 	/*
1195 	 * Any tail pages need their mapcount reference taken before we
1196 	 * return. (This allows the THP code to bump their ref count when
1197 	 * they are split into base pages).
1198 	 */
1199 	while (refs--) {
1200 		if (PageTail(tail))
1201 			get_huge_page_tail(tail);
1202 		tail++;
1203 	}
1204 
1205 	return 1;
1206 }
1207 
1208 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
1209 		unsigned long end, int write, struct page **pages, int *nr)
1210 {
1211 	struct page *head, *page, *tail;
1212 	int refs;
1213 
1214 	if (write && !pud_write(orig))
1215 		return 0;
1216 
1217 	refs = 0;
1218 	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
1219 	tail = page;
1220 	do {
1221 		pages[*nr] = page;
1222 		(*nr)++;
1223 		page++;
1224 		refs++;
1225 	} while (addr += PAGE_SIZE, addr != end);
1226 
1227 	head = try_get_compound_head(pud_page(orig), refs);
1228 	if (!head) {
1229 		*nr -= refs;
1230 		return 0;
1231 	}
1232 
1233 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
1234 		*nr -= refs;
1235 		while (refs--)
1236 			put_page(head);
1237 		return 0;
1238 	}
1239 
1240 	while (refs--) {
1241 		if (PageTail(tail))
1242 			get_huge_page_tail(tail);
1243 		tail++;
1244 	}
1245 
1246 	return 1;
1247 }
1248 
1249 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
1250 			unsigned long end, int write,
1251 			struct page **pages, int *nr)
1252 {
1253 	int refs;
1254 	struct page *head, *page, *tail;
1255 
1256 	if (write && !pgd_write(orig))
1257 		return 0;
1258 
1259 	refs = 0;
1260 	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
1261 	tail = page;
1262 	do {
1263 		pages[*nr] = page;
1264 		(*nr)++;
1265 		page++;
1266 		refs++;
1267 	} while (addr += PAGE_SIZE, addr != end);
1268 
1269 	head = try_get_compound_head(pgd_page(orig), refs);
1270 	if (!head) {
1271 		*nr -= refs;
1272 		return 0;
1273 	}
1274 
1275 	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
1276 		*nr -= refs;
1277 		while (refs--)
1278 			put_page(head);
1279 		return 0;
1280 	}
1281 
1282 	while (refs--) {
1283 		if (PageTail(tail))
1284 			get_huge_page_tail(tail);
1285 		tail++;
1286 	}
1287 
1288 	return 1;
1289 }
1290 
1291 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
1292 		int write, struct page **pages, int *nr)
1293 {
1294 	unsigned long next;
1295 	pmd_t *pmdp;
1296 
1297 	pmdp = pmd_offset(&pud, addr);
1298 	do {
1299 		pmd_t pmd = READ_ONCE(*pmdp);
1300 
1301 		next = pmd_addr_end(addr, end);
1302 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
1303 			return 0;
1304 
1305 		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
1306 			/*
1307 			 * NUMA hinting faults need to be handled in the GUP
1308 			 * slowpath for accounting purposes and so that they
1309 			 * can be serialised against THP migration.
1310 			 */
1311 			if (pmd_protnone(pmd))
1312 				return 0;
1313 
1314 			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
1315 				pages, nr))
1316 				return 0;
1317 
1318 		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
1319 			/*
1320			 * architectures may use a different format for
1321			 * hugetlbfs pmds than for THP pmds
1322 			 */
1323 			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
1324 					 PMD_SHIFT, next, write, pages, nr))
1325 				return 0;
1326 		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
1327 				return 0;
1328 	} while (pmdp++, addr = next, addr != end);
1329 
1330 	return 1;
1331 }
1332 
1333 static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
1334 			 int write, struct page **pages, int *nr)
1335 {
1336 	unsigned long next;
1337 	pud_t *pudp;
1338 
1339 	pudp = pud_offset(&pgd, addr);
1340 	do {
1341 		pud_t pud = READ_ONCE(*pudp);
1342 
1343 		next = pud_addr_end(addr, end);
1344 		if (pud_none(pud))
1345 			return 0;
1346 		if (unlikely(pud_huge(pud))) {
1347 			if (!gup_huge_pud(pud, pudp, addr, next, write,
1348 					  pages, nr))
1349 				return 0;
1350 		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
1351 			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
1352 					 PUD_SHIFT, next, write, pages, nr))
1353 				return 0;
1354 		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
1355 			return 0;
1356 	} while (pudp++, addr = next, addr != end);
1357 
1358 	return 1;
1359 }
1360 
1361 /*
1362  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
1363  * the regular GUP. It will only return non-negative values.
1364  *
1365  * Careful, careful! COW breaking can go either way, so a non-write
1366  * access can get ambiguous page results. If you call this function without
1367  * 'write' set, you'd better be sure that you're ok with that ambiguity.
1368  */
1369 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1370 			  struct page **pages)
1371 {
1372 	struct mm_struct *mm = current->mm;
1373 	unsigned long addr, len, end;
1374 	unsigned long next, flags;
1375 	pgd_t *pgdp;
1376 	int nr = 0;
1377 
1378 	start &= PAGE_MASK;
1379 	addr = start;
1380 	len = (unsigned long) nr_pages << PAGE_SHIFT;
1381 	end = start + len;
1382 
1383 	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
1384 					start, len)))
1385 		return 0;
1386 
1387 	/*
1388 	 * Disable interrupts.  We use the nested form as we can already have
1389 	 * interrupts disabled by get_futex_key.
1390 	 *
1391 	 * With interrupts disabled, we block page table pages from being
1392 	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
1393 	 * for more details.
1394 	 *
1395 	 * We do not adopt an rcu_read_lock(.) here as we also want to
1396 	 * block IPIs that come from THPs splitting.
1397 	 *
1398 	 * NOTE! We allow read-only gup_fast() here, but you'd better be
1399 	 * careful about possible COW pages. You'll get _a_ COW page, but
1400 	 * not necessarily the one you intended to get depending on what
1401 	 * COW event happens after this. COW may break the page copy in a
1402 	 * random direction.
1403 	 */
1404 
1405 	local_irq_save(flags);
1406 	pgdp = pgd_offset(mm, addr);
1407 	do {
1408 		pgd_t pgd = READ_ONCE(*pgdp);
1409 
1410 		next = pgd_addr_end(addr, end);
1411 		if (pgd_none(pgd))
1412 			break;
1413 		/*
1414 		 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
1415 		 * because get_user_pages() may need to cause an early COW in
1416 		 * order to avoid confusing the normal COW routines. So only
1417 		 * targets that are already writable are safe to do by just
1418 		 * looking at the page tables.
1419 		 */
1420 		if (unlikely(pgd_huge(pgd))) {
1421 			if (!gup_huge_pgd(pgd, pgdp, addr, next, 1,
1422 					  pages, &nr))
1423 				break;
1424 		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
1425 			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1426 					 PGDIR_SHIFT, next, 1, pages, &nr))
1427 				break;
1428 		} else if (!gup_pud_range(pgd, addr, next, 1, pages, &nr))
1429 			break;
1430 	} while (pgdp++, addr = next, addr != end);
1431 	local_irq_restore(flags);
1432 
1433 	return nr;
1434 }
1435 
1436 /**
1437  * get_user_pages_fast() - pin user pages in memory
1438  * @start:	starting user address
1439  * @nr_pages:	number of pages from start to pin
1440  * @write:	whether pages will be written to
1441  * @pages:	array that receives pointers to the pages pinned.
1442  *		Should be at least nr_pages long.
1443  *
1444  * Attempt to pin user pages in memory without taking mm->mmap_sem.
1445  * If not successful, it will fall back to taking the lock and
1446  * calling get_user_pages().
1447  *
1448  * Returns number of pages pinned. This may be fewer than the number
1449  * requested. If nr_pages is 0 or negative, returns 0. If no pages
1450  * were pinned, returns -errno.
1451  */
1452 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1453 			struct page **pages)
1454 {
1455 	struct mm_struct *mm = current->mm;
1456 	int nr, ret;
1457 
1458 	start &= PAGE_MASK;
1459 	nr = __get_user_pages_fast(start, nr_pages, write, pages);
1460 	ret = nr;
1461 
1462 	if (nr < nr_pages) {
1463 		/* Try to get the remaining pages with get_user_pages */
1464 		start += nr << PAGE_SHIFT;
1465 		pages += nr;
1466 
1467 		ret = get_user_pages_unlocked(current, mm, start,
1468 					      nr_pages - nr, pages,
1469 					      write ? FOLL_WRITE : 0);
1470 
1471 		/* Have to be a bit careful with return values */
1472 		if (nr > 0) {
1473 			if (ret < 0)
1474 				ret = nr;
1475 			else
1476 				ret += nr;
1477 		}
1478 	}
1479 
1480 	return ret;
1481 }
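
/*
 * Illustrative sketch, not part of the original file: a minimal
 * get_user_pages_fast() caller that treats a partial pin as failure.
 * The function name example_pin_fast is made up.
 */
static int example_pin_fast(unsigned long uaddr, int npages,
			    struct page **pages)
{
	int got, i;

	got = get_user_pages_fast(uaddr, npages, 1 /* write */, pages);
	if (got == npages)
		return 0;

	/* release whatever was pinned and report the failure */
	for (i = 0; i < got; i++)
		put_page(pages[i]);
	return got < 0 ? got : -EFAULT;
}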
1482 
1483 #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
1484