1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/sched/mm.h>
45 #include <linux/sched/coredump.h>
46 #include <linux/sched/numa_balancing.h>
47 #include <linux/sched/task.h>
48 #include <linux/hugetlb.h>
49 #include <linux/mman.h>
50 #include <linux/swap.h>
51 #include <linux/highmem.h>
52 #include <linux/pagemap.h>
53 #include <linux/memremap.h>
54 #include <linux/ksm.h>
55 #include <linux/rmap.h>
56 #include <linux/export.h>
57 #include <linux/delayacct.h>
58 #include <linux/init.h>
59 #include <linux/pfn_t.h>
60 #include <linux/writeback.h>
61 #include <linux/memcontrol.h>
62 #include <linux/mmu_notifier.h>
63 #include <linux/swapops.h>
64 #include <linux/elf.h>
65 #include <linux/gfp.h>
66 #include <linux/migrate.h>
67 #include <linux/string.h>
68 #include <linux/debugfs.h>
69 #include <linux/userfaultfd_k.h>
70 #include <linux/dax.h>
71 #include <linux/oom.h>
72 #include <linux/numa.h>
73 #include <linux/perf_event.h>
74 #include <linux/ptrace.h>
75 #include <linux/vmalloc.h>
76 #include <trace/hooks/mm.h>
77 
78 #include <trace/events/kmem.h>
79 
80 #include <asm/io.h>
81 #include <asm/mmu_context.h>
82 #include <asm/pgalloc.h>
83 #include <linux/uaccess.h>
84 #include <asm/tlb.h>
85 #include <asm/tlbflush.h>
86 
87 #include "pgalloc-track.h"
88 #include "internal.h"
89 
90 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
91 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
92 #endif
93 
94 #ifndef CONFIG_NUMA
95 unsigned long max_mapnr;
96 EXPORT_SYMBOL(max_mapnr);
97 
98 struct page *mem_map;
99 EXPORT_SYMBOL(mem_map);
100 #endif
101 
102 /*
103  * A number of key systems in x86 including ioremap() rely on the assumption
104  * that high_memory defines the upper bound on direct map memory, the end
105  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
106  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
107  * and ZONE_HIGHMEM.
108  */
109 void *high_memory;
110 EXPORT_SYMBOL(high_memory);
111 
112 /*
113  * Randomize the address space (stacks, mmaps, brk, etc.).
114  *
115  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
116  *   as ancient (libc5 based) binaries can segfault. )
117  */
118 int randomize_va_space __read_mostly =
119 #ifdef CONFIG_COMPAT_BRK
120 					1;
121 #else
122 					2;
123 #endif
124 
125 #ifndef arch_wants_old_prefaulted_pte
126 static inline bool arch_wants_old_prefaulted_pte(void)
127 {
128 	/*
129 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
130 	 * some architectures, even if it's performed in hardware. By
131 	 * default, "false" means prefaulted entries will be 'young'.
132 	 */
133 	return false;
134 }
135 #endif
136 
137 static int __init disable_randmaps(char *s)
138 {
139 	randomize_va_space = 0;
140 	return 1;
141 }
142 __setup("norandmaps", disable_randmaps);
143 
144 unsigned long zero_pfn __read_mostly;
145 EXPORT_SYMBOL(zero_pfn);
146 
147 unsigned long highest_memmap_pfn __read_mostly;
148 
149 /*
150  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
151  */
152 static int __init init_zero_pfn(void)
153 {
154 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
155 	return 0;
156 }
157 early_initcall(init_zero_pfn);
158 
159 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
160 {
161 	trace_rss_stat(mm, member, count);
162 }
163 EXPORT_SYMBOL_GPL(mm_trace_rss_stat);
164 
165 #if defined(SPLIT_RSS_COUNTING)
166 
167 void sync_mm_rss(struct mm_struct *mm)
168 {
169 	int i;
170 
171 	for (i = 0; i < NR_MM_COUNTERS; i++) {
172 		if (current->rss_stat.count[i]) {
173 			add_mm_counter(mm, i, current->rss_stat.count[i]);
174 			current->rss_stat.count[i] = 0;
175 		}
176 	}
177 	current->rss_stat.events = 0;
178 }
179 
180 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
181 {
182 	struct task_struct *task = current;
183 
184 	if (likely(task->mm == mm))
185 		task->rss_stat.count[member] += val;
186 	else
187 		add_mm_counter(mm, member, val);
188 }
189 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
190 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
191 
192 /* sync counter once per 64 page faults */
193 #define TASK_RSS_EVENTS_THRESH	(64)
194 static void check_sync_rss_stat(struct task_struct *task)
195 {
196 	if (unlikely(task != current))
197 		return;
198 	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
199 		sync_mm_rss(task->mm);
200 }
201 #else /* SPLIT_RSS_COUNTING */
202 
203 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
204 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
205 
206 static void check_sync_rss_stat(struct task_struct *task)
207 {
208 }
209 
210 #endif /* SPLIT_RSS_COUNTING */
211 
212 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
213 
214 struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)
215 {
216 	struct vm_area_struct *vma;
217 
218 	rcu_read_lock();
219 	vma = find_vma_from_tree(mm, addr);
220 
221 	/*
222 	 * atomic_inc_unless_negative() also protects from races with
223 	 * fast mremap.
224 	 *
225 	 * If there is a concurrent fast mremap, bail out since the entire
226 	 * PMD/PUD subtree may have been remapped.
227 	 *
228 	 * This is usually safe for conventional mremap since it takes the
229 	 * PTE locks as does SPF. However fast mremap only takes the lock
230 	 * at the PMD/PUD level which is ok as it is done with the mmap
231 	 * write lock held. But SPF, as the term implies, forgoes taking
232 	 * the mmap read lock and also cannot take the PTL lock at the
233 	 * larger PMD/PUD granularity, since that would introduce huge
234 	 * contention in the page fault path; so fall back to regular
235 	 * fault handling.
236 	 */
237 	if (vma) {
238 		if (vma->vm_start > addr ||
239 		    !atomic_inc_unless_negative(&vma->file_ref_count))
240 			vma = NULL;
241 	}
242 	rcu_read_unlock();
243 
244 	return vma;
245 }
246 
247 void put_vma(struct vm_area_struct *vma)
248 {
249 	int new_ref_count;
250 
251 	new_ref_count = atomic_dec_return(&vma->file_ref_count);
252 	if (new_ref_count < 0)
253 		vm_area_free_no_check(vma);
254 }
255 
256 #if ALLOC_SPLIT_PTLOCKS
257 static void wait_for_smp_sync(void *arg)
258 {
259 }
260 #endif
261 #endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */
262 
263 /*
264  * Note: this doesn't free the actual pages themselves. That
265  * has been handled earlier when unmapping all the memory regions.
266  */
267 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
268 			   unsigned long addr)
269 {
270 	pgtable_t token = pmd_pgtable(*pmd);
271 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
272 	/*
273 	 * Ensure page table destruction is blocked if __pte_map_lock managed
274 	 * to take this lock. Without this barrier tlb_remove_table_rcu can
275 	 * destroy ptl after __pte_map_lock locked it and during unlock would
276 	 * cause a use-after-free.
277 	 */
278 	spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
279 	spin_unlock(ptl);
280 #if ALLOC_SPLIT_PTLOCKS
281 	/*
282 	 * The __pte_map_lock can still be working on the ->ptl in its read-side
283 	 * critical section while ->ptl is freed, which results in a
284 	 * use-after-free. Synchronize using smp_call_function().
285 	 */
286 	smp_call_function(wait_for_smp_sync, NULL, 1);
287 #endif
288 #endif
289 	pmd_clear(pmd);
290 	pte_free_tlb(tlb, token, addr);
291 	mm_dec_nr_ptes(tlb->mm);
292 }
293 
294 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
295 				unsigned long addr, unsigned long end,
296 				unsigned long floor, unsigned long ceiling)
297 {
298 	pmd_t *pmd;
299 	unsigned long next;
300 	unsigned long start;
301 
302 	start = addr;
303 	pmd = pmd_offset(pud, addr);
304 	do {
305 		next = pmd_addr_end(addr, end);
306 		if (pmd_none_or_clear_bad(pmd))
307 			continue;
308 		free_pte_range(tlb, pmd, addr);
309 	} while (pmd++, addr = next, addr != end);
310 
311 	start &= PUD_MASK;
312 	if (start < floor)
313 		return;
314 	if (ceiling) {
315 		ceiling &= PUD_MASK;
316 		if (!ceiling)
317 			return;
318 	}
319 	if (end - 1 > ceiling - 1)
320 		return;
321 
322 	pmd = pmd_offset(pud, start);
323 	pud_clear(pud);
324 	pmd_free_tlb(tlb, pmd, start);
325 	mm_dec_nr_pmds(tlb->mm);
326 }
327 
328 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
329 				unsigned long addr, unsigned long end,
330 				unsigned long floor, unsigned long ceiling)
331 {
332 	pud_t *pud;
333 	unsigned long next;
334 	unsigned long start;
335 
336 	start = addr;
337 	pud = pud_offset(p4d, addr);
338 	do {
339 		next = pud_addr_end(addr, end);
340 		if (pud_none_or_clear_bad(pud))
341 			continue;
342 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
343 	} while (pud++, addr = next, addr != end);
344 
345 	start &= P4D_MASK;
346 	if (start < floor)
347 		return;
348 	if (ceiling) {
349 		ceiling &= P4D_MASK;
350 		if (!ceiling)
351 			return;
352 	}
353 	if (end - 1 > ceiling - 1)
354 		return;
355 
356 	pud = pud_offset(p4d, start);
357 	p4d_clear(p4d);
358 	pud_free_tlb(tlb, pud, start);
359 	mm_dec_nr_puds(tlb->mm);
360 }
361 
362 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
363 				unsigned long addr, unsigned long end,
364 				unsigned long floor, unsigned long ceiling)
365 {
366 	p4d_t *p4d;
367 	unsigned long next;
368 	unsigned long start;
369 
370 	start = addr;
371 	p4d = p4d_offset(pgd, addr);
372 	do {
373 		next = p4d_addr_end(addr, end);
374 		if (p4d_none_or_clear_bad(p4d))
375 			continue;
376 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
377 	} while (p4d++, addr = next, addr != end);
378 
379 	start &= PGDIR_MASK;
380 	if (start < floor)
381 		return;
382 	if (ceiling) {
383 		ceiling &= PGDIR_MASK;
384 		if (!ceiling)
385 			return;
386 	}
387 	if (end - 1 > ceiling - 1)
388 		return;
389 
390 	p4d = p4d_offset(pgd, start);
391 	pgd_clear(pgd);
392 	p4d_free_tlb(tlb, p4d, start);
393 }
394 
395 /*
396  * This function frees user-level page tables of a process.
397  */
398 void free_pgd_range(struct mmu_gather *tlb,
399 			unsigned long addr, unsigned long end,
400 			unsigned long floor, unsigned long ceiling)
401 {
402 	pgd_t *pgd;
403 	unsigned long next;
404 
405 	/*
406 	 * The next few lines have given us lots of grief...
407 	 *
408 	 * Why are we testing PMD* at this top level?  Because often
409 	 * there will be no work to do at all, and we'd prefer not to
410 	 * go all the way down to the bottom just to discover that.
411 	 *
412 	 * Why all these "- 1"s?  Because 0 represents both the bottom
413 	 * of the address space and the top of it (using -1 for the
414 	 * top wouldn't help much: the masks would do the wrong thing).
415 	 * The rule is that addr 0 and floor 0 refer to the bottom of
416 	 * the address space, but end 0 and ceiling 0 refer to the top
417 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
418 	 * that end 0 case should be mythical).
419 	 *
420 	 * Wherever addr is brought up or ceiling brought down, we must
421 	 * be careful to reject "the opposite 0" before it confuses the
422 	 * subsequent tests.  But what about where end is brought down
423 	 * by PMD_SIZE below? no, end can't go down to 0 there.
424 	 *
425 	 * Whereas we round start (addr) and ceiling down, by different
426 	 * masks at different levels, in order to test whether a table
427 	 * now has no other vmas using it, so can be freed, we don't
428 	 * bother to round floor or end up - the tests don't need that.
429 	 */
430 
431 	addr &= PMD_MASK;
432 	if (addr < floor) {
433 		addr += PMD_SIZE;
434 		if (!addr)
435 			return;
436 	}
437 	if (ceiling) {
438 		ceiling &= PMD_MASK;
439 		if (!ceiling)
440 			return;
441 	}
442 	if (end - 1 > ceiling - 1)
443 		end -= PMD_SIZE;
444 	if (addr > end - 1)
445 		return;
446 	/*
447 	 * We add page table cache pages with PAGE_SIZE
448 	 * (see pte_free_tlb()), so flush the TLB if we need to.
449 	 */
450 	tlb_change_page_size(tlb, PAGE_SIZE);
451 	pgd = pgd_offset(tlb->mm, addr);
452 	do {
453 		next = pgd_addr_end(addr, end);
454 		if (pgd_none_or_clear_bad(pgd))
455 			continue;
456 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
457 	} while (pgd++, addr = next, addr != end);
458 }
459 
460 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
461 		unsigned long floor, unsigned long ceiling)
462 {
463 	while (vma) {
464 		struct vm_area_struct *next = vma->vm_next;
465 		unsigned long addr = vma->vm_start;
466 
467 		/*
468 		 * Hide vma from rmap and truncate_pagecache before freeing
469 		 * pgtables
470 		 */
471 		unlink_anon_vmas(vma);
472 		unlink_file_vma(vma);
473 
474 		if (is_vm_hugetlb_page(vma)) {
475 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
476 				floor, next ? next->vm_start : ceiling);
477 		} else {
478 			/*
479 			 * Optimization: gather nearby vmas into one call down
480 			 */
481 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
482 			       && !is_vm_hugetlb_page(next)) {
483 				vma = next;
484 				next = vma->vm_next;
485 				unlink_anon_vmas(vma);
486 				unlink_file_vma(vma);
487 			}
488 			free_pgd_range(tlb, addr, vma->vm_end,
489 				floor, next ? next->vm_start : ceiling);
490 		}
491 		vma = next;
492 	}
493 }
494 
495 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
496 {
497 	spinlock_t *ptl;
498 	pgtable_t new = pte_alloc_one(mm);
499 	if (!new)
500 		return -ENOMEM;
501 
502 	/*
503 	 * Ensure all pte setup (eg. pte page lock and page clearing) are
504 	 * visible before the pte is made visible to other CPUs by being
505 	 * put into page tables.
506 	 *
507 	 * The other side of the story is the pointer chasing in the page
508 	 * table walking code (when walking the page table without locking;
509 	 * ie. most of the time). Fortunately, these data accesses consist
510 	 * of a chain of data-dependent loads, meaning most CPUs (alpha
511 	 * being the notable exception) will already guarantee loads are
512 	 * seen in-order. See the alpha page table accessors for the
513 	 * smp_rmb() barriers in page table walking code.
514 	 */
515 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
516 
517 	ptl = pmd_lock(mm, pmd);
518 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
519 		mm_inc_nr_ptes(mm);
520 		pmd_populate(mm, pmd, new);
521 		new = NULL;
522 	}
523 	spin_unlock(ptl);
524 	if (new)
525 		pte_free(mm, new);
526 	return 0;
527 }
528 
529 int __pte_alloc_kernel(pmd_t *pmd)
530 {
531 	pte_t *new = pte_alloc_one_kernel(&init_mm);
532 	if (!new)
533 		return -ENOMEM;
534 
535 	smp_wmb(); /* See comment in __pte_alloc */
536 
537 	spin_lock(&init_mm.page_table_lock);
538 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
539 		pmd_populate_kernel(&init_mm, pmd, new);
540 		new = NULL;
541 	}
542 	spin_unlock(&init_mm.page_table_lock);
543 	if (new)
544 		pte_free_kernel(&init_mm, new);
545 	return 0;
546 }
547 
548 static inline void init_rss_vec(int *rss)
549 {
550 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
551 }
552 
553 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
554 {
555 	int i;
556 
557 	if (current->mm == mm)
558 		sync_mm_rss(mm);
559 	for (i = 0; i < NR_MM_COUNTERS; i++)
560 		if (rss[i])
561 			add_mm_counter(mm, i, rss[i]);
562 }
563 
564 /*
565  * This function is called to print an error when a bad pte
566  * is found. For example, we might have a PFN-mapped pte in
567  * a region that doesn't allow it.
568  *
569  * The calling function must still handle the error.
570  */
571 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
572 			  pte_t pte, struct page *page)
573 {
574 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
575 	p4d_t *p4d = p4d_offset(pgd, addr);
576 	pud_t *pud = pud_offset(p4d, addr);
577 	pmd_t *pmd = pmd_offset(pud, addr);
578 	struct address_space *mapping;
579 	pgoff_t index;
580 	static unsigned long resume;
581 	static unsigned long nr_shown;
582 	static unsigned long nr_unshown;
583 
584 	/*
585 	 * Allow a burst of 60 reports, then keep quiet for that minute;
586 	 * or allow a steady drip of one report per second.
587 	 */
588 	if (nr_shown == 60) {
589 		if (time_before(jiffies, resume)) {
590 			nr_unshown++;
591 			return;
592 		}
593 		if (nr_unshown) {
594 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
595 				 nr_unshown);
596 			nr_unshown = 0;
597 		}
598 		nr_shown = 0;
599 	}
600 	if (nr_shown++ == 0)
601 		resume = jiffies + 60 * HZ;
602 
603 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
604 	index = linear_page_index(vma, addr);
605 
606 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
607 		 current->comm,
608 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
609 	if (page)
610 		dump_page(page, "bad pte");
611 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
612 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
613 	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
614 		 vma->vm_file,
615 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
616 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
617 		 mapping ? mapping->a_ops->readpage : NULL);
618 	dump_stack();
619 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
620 }
621 
622 /*
623  * vm_normal_page -- This function gets the "struct page" associated with a pte.
624  *
625  * "Special" mappings do not wish to be associated with a "struct page" (either
626  * it doesn't exist, or it exists but they don't want to touch it). In this
627  * case, NULL is returned here. "Normal" mappings do have a struct page.
628  *
629  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
630  * pte bit, in which case this function is trivial. Secondly, an architecture
631  * may not have a spare pte bit, which requires a more complicated scheme,
632  * described below.
633  *
634  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
635  * special mapping (even if there are underlying and valid "struct pages").
636  * COWed pages of a VM_PFNMAP are always normal.
637  *
638  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
639  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
640  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
641  * mapping will always honor the rule
642  *
643  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
644  *
645  * And for normal mappings this is false.
646  *
647  * This restricts such mappings to be a linear translation from virtual address
648  * to pfn. To get around this restriction, we allow arbitrary mappings so long
649  * as the vma is not a COW mapping; in that case, we know that all ptes are
650  * special (because none can have been COWed).
651  *
652  *
653  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
654  *
655  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
656  * page" backing, however the difference is that _all_ pages with a struct
657  * page (that is, those where pfn_valid is true) are refcounted and considered
658  * normal pages by the VM. The disadvantage is that pages are refcounted
659  * (which can be slower and simply not an option for some PFNMAP users). The
660  * advantage is that we don't have to follow the strict linearity rule of
661  * PFNMAP mappings in order to support COWable mappings.
662  *
663  */
664 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
665 			    pte_t pte)
666 {
667 	unsigned long pfn = pte_pfn(pte);
668 
669 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
670 		if (likely(!pte_special(pte)))
671 			goto check_pfn;
672 		if (vma->vm_ops && vma->vm_ops->find_special_page)
673 			return vma->vm_ops->find_special_page(vma, addr);
674 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
675 			return NULL;
676 		if (is_zero_pfn(pfn))
677 			return NULL;
678 		if (pte_devmap(pte))
679 			return NULL;
680 
681 		print_bad_pte(vma, addr, pte, NULL);
682 		return NULL;
683 	}
684 
685 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
686 
687 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
688 		if (vma->vm_flags & VM_MIXEDMAP) {
689 			if (!pfn_valid(pfn))
690 				return NULL;
691 			goto out;
692 		} else {
693 			unsigned long off;
694 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
695 			if (pfn == vma->vm_pgoff + off)
696 				return NULL;
697 			if (!is_cow_mapping(vma->vm_flags))
698 				return NULL;
699 		}
700 	}
701 
702 	if (is_zero_pfn(pfn))
703 		return NULL;
704 
705 check_pfn:
706 	if (unlikely(pfn > highest_memmap_pfn)) {
707 		print_bad_pte(vma, addr, pte, NULL);
708 		return NULL;
709 	}
710 
711 	/*
712 	 * NOTE! We still have PageReserved() pages in the page tables.
713 	 * eg. VDSO mappings can cause them to exist.
714 	 */
715 out:
716 	return pfn_to_page(pfn);
717 }
718 
719 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
720 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
721 				pmd_t pmd)
722 {
723 	unsigned long pfn = pmd_pfn(pmd);
724 
725 	/*
726 	 * There is no pmd_special() but there may be special pmds, e.g.
727 	 * in a direct-access (dax) mapping, so let's just replicate the
728 	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
729 	 */
730 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
731 		if (vma->vm_flags & VM_MIXEDMAP) {
732 			if (!pfn_valid(pfn))
733 				return NULL;
734 			goto out;
735 		} else {
736 			unsigned long off;
737 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
738 			if (pfn == vma->vm_pgoff + off)
739 				return NULL;
740 			if (!is_cow_mapping(vma->vm_flags))
741 				return NULL;
742 		}
743 	}
744 
745 	if (pmd_devmap(pmd))
746 		return NULL;
747 	if (is_huge_zero_pmd(pmd))
748 		return NULL;
749 	if (unlikely(pfn > highest_memmap_pfn))
750 		return NULL;
751 
752 	/*
753 	 * NOTE! We still have PageReserved() pages in the page tables.
754 	 * eg. VDSO mappings can cause them to exist.
755 	 */
756 out:
757 	return pfn_to_page(pfn);
758 }
759 #endif
760 
761 static void restore_exclusive_pte(struct vm_area_struct *vma,
762 				  struct page *page, unsigned long address,
763 				  pte_t *ptep)
764 {
765 	pte_t pte;
766 	swp_entry_t entry;
767 
768 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
769 	if (pte_swp_soft_dirty(*ptep))
770 		pte = pte_mksoft_dirty(pte);
771 
772 	entry = pte_to_swp_entry(*ptep);
773 	if (pte_swp_uffd_wp(*ptep))
774 		pte = pte_mkuffd_wp(pte);
775 	else if (is_writable_device_exclusive_entry(entry))
776 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
777 
778 	set_pte_at(vma->vm_mm, address, ptep, pte);
779 
780 	/*
781 	 * No need to take a page reference as one was already
782 	 * created when the swap entry was made.
783 	 */
784 	if (PageAnon(page))
785 		page_add_anon_rmap(page, vma, address, false);
786 	else
787 		/*
788 		 * Currently device exclusive access only supports anonymous
789 		 * memory so the entry shouldn't point to a filebacked page.
790 		 */
791 		WARN_ON_ONCE(!PageAnon(page));
792 
793 	if (vma->vm_flags & VM_LOCKED)
794 		mlock_vma_page(page);
795 
796 	/*
797 	 * No need to invalidate - it was non-present before. However
798 	 * secondary CPUs may have mappings that need invalidating.
799 	 */
800 	update_mmu_cache(vma, address, ptep);
801 }
802 
803 /*
804  * Tries to restore an exclusive pte if the page lock can be acquired without
805  * sleeping.
806  */
807 static int
808 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
809 			unsigned long addr)
810 {
811 	swp_entry_t entry = pte_to_swp_entry(*src_pte);
812 	struct page *page = pfn_swap_entry_to_page(entry);
813 
814 	if (trylock_page(page)) {
815 		restore_exclusive_pte(vma, page, addr, src_pte);
816 		unlock_page(page);
817 		return 0;
818 	}
819 
820 	return -EBUSY;
821 }
822 
823 /*
824  * Copy one vm_area from one task to the other. Assumes that the page tables
825  * already present in the new task have been cleared in the whole range
826  * covered by this vma.
827  */
828 
829 static unsigned long
830 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
831 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
832 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
833 {
834 	unsigned long vm_flags = dst_vma->vm_flags;
835 	pte_t pte = *src_pte;
836 	struct page *page;
837 	swp_entry_t entry = pte_to_swp_entry(pte);
838 
839 	if (likely(!non_swap_entry(entry))) {
840 		if (swap_duplicate(entry) < 0)
841 			return -EIO;
842 
843 		/* make sure dst_mm is on swapoff's mmlist. */
844 		if (unlikely(list_empty(&dst_mm->mmlist))) {
845 			spin_lock(&mmlist_lock);
846 			if (list_empty(&dst_mm->mmlist))
847 				list_add(&dst_mm->mmlist,
848 						&src_mm->mmlist);
849 			spin_unlock(&mmlist_lock);
850 		}
851 		rss[MM_SWAPENTS]++;
852 	} else if (is_migration_entry(entry)) {
853 		page = pfn_swap_entry_to_page(entry);
854 
855 		rss[mm_counter(page)]++;
856 
857 		if (is_writable_migration_entry(entry) &&
858 				is_cow_mapping(vm_flags)) {
859 			/*
860 			 * COW mappings require pages in both
861 			 * parent and child to be set to read.
862 			 */
863 			entry = make_readable_migration_entry(
864 							swp_offset(entry));
865 			pte = swp_entry_to_pte(entry);
866 			if (pte_swp_soft_dirty(*src_pte))
867 				pte = pte_swp_mksoft_dirty(pte);
868 			if (pte_swp_uffd_wp(*src_pte))
869 				pte = pte_swp_mkuffd_wp(pte);
870 			set_pte_at(src_mm, addr, src_pte, pte);
871 		}
872 	} else if (is_device_private_entry(entry)) {
873 		page = pfn_swap_entry_to_page(entry);
874 
875 		/*
876 		 * Update rss count even for unaddressable pages, as
877 		 * they should be treated just like normal pages in this
878 		 * respect.
879 		 *
880 		 * We will likely want to have some new rss counters
881 		 * for unaddressable pages, at some point. But for now
882 		 * keep things as they are.
883 		 */
884 		get_page(page);
885 		rss[mm_counter(page)]++;
886 		page_dup_rmap(page, false);
887 
888 		/*
889 		 * We do not preserve soft-dirty information, because so
890 		 * far, checkpoint/restore is the only feature that
891 		 * requires that. And checkpoint/restore does not work
892 		 * when a device driver is involved (you cannot easily
893 		 * save and restore device driver state).
894 		 */
895 		if (is_writable_device_private_entry(entry) &&
896 		    is_cow_mapping(vm_flags)) {
897 			entry = make_readable_device_private_entry(
898 							swp_offset(entry));
899 			pte = swp_entry_to_pte(entry);
900 			if (pte_swp_uffd_wp(*src_pte))
901 				pte = pte_swp_mkuffd_wp(pte);
902 			set_pte_at(src_mm, addr, src_pte, pte);
903 		}
904 	} else if (is_device_exclusive_entry(entry)) {
905 		/*
906 		 * Make device exclusive entries present by restoring the
907 		 * original entry then copying as for a present pte. Device
908 		 * exclusive entries currently only support private writable
909 		 * (ie. COW) mappings.
910 		 */
911 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
912 		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
913 			return -EBUSY;
914 		return -ENOENT;
915 	}
916 	if (!userfaultfd_wp(dst_vma))
917 		pte = pte_swp_clear_uffd_wp(pte);
918 	set_pte_at(dst_mm, addr, dst_pte, pte);
919 	return 0;
920 }
921 
922 /*
923  * Copy a present and normal page if necessary.
924  *
925  * NOTE! The usual case is that this doesn't need to do
926  * anything, and can just return a positive value. That
927  * will let the caller know that it can just increase
928  * the page refcount and re-use the pte the traditional
929  * way.
930  *
931  * But _if_ we need to copy it because it needs to be
932  * pinned in the parent (and the child should get its own
933  * copy rather than just a reference to the same page),
934  * we'll do that here and return zero to let the caller
935  * know we're done.
936  *
937  * And if we need a pre-allocated page but don't yet have
938  * one, return a negative error to let the preallocation
939  * code know so that it can do so outside the page table
940  * lock.
941  */
942 static inline int
943 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
944 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
945 		  struct page **prealloc, pte_t pte, struct page *page)
946 {
947 	struct page *new_page;
948 
949 	/*
950 	 * What we want to do is to check whether this page may
951 	 * have been pinned by the parent process.  If so,
952 	 * instead of wrprotect the pte on both sides, we copy
953 	 * the page immediately so that we'll always guarantee
954 	 * the pinned page won't be randomly replaced in the
955 	 * future.
956 	 *
957 	 * The page pinning checks are just "has this mm ever
958 	 * seen pinning", along with the (inexact) check of
959 	 * the page count. That might give false positives for
960 	 * pinning, but it will work correctly.
961 	 */
962 	if (likely(!page_needs_cow_for_dma(src_vma, page)))
963 		return 1;
964 
965 	new_page = *prealloc;
966 	if (!new_page)
967 		return -EAGAIN;
968 
969 	/*
970 	 * We have a prealloc page, all good!  Take it
971 	 * over and copy the page & arm it.
972 	 */
973 	*prealloc = NULL;
974 	copy_user_highpage(new_page, page, addr, src_vma);
975 	__SetPageUptodate(new_page);
976 	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
977 	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
978 	rss[mm_counter(new_page)]++;
979 
980 	/* All done, just insert the new page copy in the child */
981 	pte = mk_pte(new_page, dst_vma->vm_page_prot);
982 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
983 	if (userfaultfd_pte_wp(dst_vma, *src_pte))
984 		/* Uffd-wp needs to be delivered to dest pte as well */
985 		pte = pte_wrprotect(pte_mkuffd_wp(pte));
986 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
987 	return 0;
988 }
989 
990 /*
991  * Copy one pte.  Returns 0 on success, or -EAGAIN if a preallocated page
992  * is required to copy this pte.
993  */
994 static inline int
995 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
996 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
997 		 struct page **prealloc)
998 {
999 	struct mm_struct *src_mm = src_vma->vm_mm;
1000 	unsigned long vm_flags = src_vma->vm_flags;
1001 	pte_t pte = *src_pte;
1002 	struct page *page;
1003 
1004 	page = vm_normal_page(src_vma, addr, pte);
1005 	if (page) {
1006 		int retval;
1007 
1008 		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
1009 					   addr, rss, prealloc, pte, page);
1010 		if (retval <= 0)
1011 			return retval;
1012 
1013 		get_page(page);
1014 		page_dup_rmap(page, false);
1015 		rss[mm_counter(page)]++;
1016 	}
1017 
1018 	/*
1019 	 * If it's a COW mapping, write protect it both
1020 	 * in the parent and the child
1021 	 */
1022 	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
1023 		ptep_set_wrprotect(src_mm, addr, src_pte);
1024 		pte = pte_wrprotect(pte);
1025 	}
1026 
1027 	/*
1028 	 * If it's a shared mapping, mark it clean in
1029 	 * the child
1030 	 */
1031 	if (vm_flags & VM_SHARED)
1032 		pte = pte_mkclean(pte);
1033 	pte = pte_mkold(pte);
1034 
1035 	if (!userfaultfd_wp(dst_vma))
1036 		pte = pte_clear_uffd_wp(pte);
1037 
1038 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
1039 	return 0;
1040 }
1041 
1042 static inline struct page *
1043 page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
1044 		   unsigned long addr)
1045 {
1046 	struct page *new_page;
1047 
1048 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
1049 	if (!new_page)
1050 		return NULL;
1051 
1052 	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
1053 		put_page(new_page);
1054 		return NULL;
1055 	}
1056 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
1057 
1058 	return new_page;
1059 }
1060 
1061 static int
1062 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1063 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1064 	       unsigned long end)
1065 {
1066 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1067 	struct mm_struct *src_mm = src_vma->vm_mm;
1068 	pte_t *orig_src_pte, *orig_dst_pte;
1069 	pte_t *src_pte, *dst_pte;
1070 	spinlock_t *src_ptl, *dst_ptl;
1071 	int progress, ret = 0;
1072 	int rss[NR_MM_COUNTERS];
1073 	swp_entry_t entry = (swp_entry_t){0};
1074 	struct page *prealloc = NULL;
1075 
1076 again:
1077 	progress = 0;
1078 	init_rss_vec(rss);
1079 
1080 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1081 	if (!dst_pte) {
1082 		ret = -ENOMEM;
1083 		goto out;
1084 	}
1085 	src_pte = pte_offset_map(src_pmd, addr);
1086 	src_ptl = pte_lockptr(src_mm, src_pmd);
1087 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1088 	orig_src_pte = src_pte;
1089 	orig_dst_pte = dst_pte;
1090 	arch_enter_lazy_mmu_mode();
1091 
1092 	do {
1093 		/*
1094 		 * We are holding two locks at this point - either of them
1095 		 * could generate latencies in another task on another CPU.
1096 		 */
1097 		if (progress >= 32) {
1098 			progress = 0;
1099 			if (need_resched() ||
1100 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1101 				break;
1102 		}
1103 		if (pte_none(*src_pte)) {
1104 			progress++;
1105 			continue;
1106 		}
1107 		if (unlikely(!pte_present(*src_pte))) {
1108 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1109 						  dst_pte, src_pte,
1110 						  dst_vma, src_vma,
1111 						  addr, rss);
1112 			if (ret == -EIO) {
1113 				entry = pte_to_swp_entry(*src_pte);
1114 				break;
1115 			} else if (ret == -EBUSY) {
1116 				break;
1117 			} else if (!ret) {
1118 				progress += 8;
1119 				continue;
1120 			}
1121 
1122 			/*
1123 			 * Device exclusive entry restored, continue by copying
1124 			 * the now present pte.
1125 			 */
1126 			WARN_ON_ONCE(ret != -ENOENT);
1127 		}
1128 		/* copy_present_pte() will clear `*prealloc' if consumed */
1129 		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
1130 				       addr, rss, &prealloc);
1131 		/*
1132 		 * If we need a pre-allocated page for this pte, drop the
1133 		 * locks, allocate, and try again.
1134 		 */
1135 		if (unlikely(ret == -EAGAIN))
1136 			break;
1137 		if (unlikely(prealloc)) {
1138 			/*
1139 			 * The pre-allocated page cannot be reused next time, so
1140 			 * that mempolicy is strictly followed (e.g., alloc_page_vma()
1141 			 * allocates the page according to address).  This can
1142 			 * only happen if a pinned pte changed.
1143 			 */
1144 			put_page(prealloc);
1145 			prealloc = NULL;
1146 		}
1147 		progress += 8;
1148 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1149 
1150 	arch_leave_lazy_mmu_mode();
1151 	spin_unlock(src_ptl);
1152 	pte_unmap(orig_src_pte);
1153 	add_mm_rss_vec(dst_mm, rss);
1154 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1155 	cond_resched();
1156 
1157 	if (ret == -EIO) {
1158 		VM_WARN_ON_ONCE(!entry.val);
1159 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1160 			ret = -ENOMEM;
1161 			goto out;
1162 		}
1163 		entry.val = 0;
1164 	} else if (ret == -EBUSY) {
1165 		goto out;
1166 	} else if (ret ==  -EAGAIN) {
1167 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
1168 		if (!prealloc)
1169 			return -ENOMEM;
1170 	} else if (ret) {
1171 		VM_WARN_ON_ONCE(1);
1172 	}
1173 
1174 	/* We've captured and resolved the error. Reset, try again. */
1175 	ret = 0;
1176 
1177 	if (addr != end)
1178 		goto again;
1179 out:
1180 	if (unlikely(prealloc))
1181 		put_page(prealloc);
1182 	return ret;
1183 }
1184 
1185 static inline int
1186 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1187 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1188 	       unsigned long end)
1189 {
1190 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1191 	struct mm_struct *src_mm = src_vma->vm_mm;
1192 	pmd_t *src_pmd, *dst_pmd;
1193 	unsigned long next;
1194 
1195 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1196 	if (!dst_pmd)
1197 		return -ENOMEM;
1198 	src_pmd = pmd_offset(src_pud, addr);
1199 	do {
1200 		next = pmd_addr_end(addr, end);
1201 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1202 			|| pmd_devmap(*src_pmd)) {
1203 			int err;
1204 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1205 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1206 					    addr, dst_vma, src_vma);
1207 			if (err == -ENOMEM)
1208 				return -ENOMEM;
1209 			if (!err)
1210 				continue;
1211 			/* fall through */
1212 		}
1213 		if (pmd_none_or_clear_bad(src_pmd))
1214 			continue;
1215 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1216 				   addr, next))
1217 			return -ENOMEM;
1218 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1219 	return 0;
1220 }
1221 
1222 static inline int
1223 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1224 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1225 	       unsigned long end)
1226 {
1227 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1228 	struct mm_struct *src_mm = src_vma->vm_mm;
1229 	pud_t *src_pud, *dst_pud;
1230 	unsigned long next;
1231 
1232 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1233 	if (!dst_pud)
1234 		return -ENOMEM;
1235 	src_pud = pud_offset(src_p4d, addr);
1236 	do {
1237 		next = pud_addr_end(addr, end);
1238 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1239 			int err;
1240 
1241 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1242 			err = copy_huge_pud(dst_mm, src_mm,
1243 					    dst_pud, src_pud, addr, src_vma);
1244 			if (err == -ENOMEM)
1245 				return -ENOMEM;
1246 			if (!err)
1247 				continue;
1248 			/* fall through */
1249 		}
1250 		if (pud_none_or_clear_bad(src_pud))
1251 			continue;
1252 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1253 				   addr, next))
1254 			return -ENOMEM;
1255 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1256 	return 0;
1257 }
1258 
1259 static inline int
1260 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1261 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1262 	       unsigned long end)
1263 {
1264 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1265 	p4d_t *src_p4d, *dst_p4d;
1266 	unsigned long next;
1267 
1268 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1269 	if (!dst_p4d)
1270 		return -ENOMEM;
1271 	src_p4d = p4d_offset(src_pgd, addr);
1272 	do {
1273 		next = p4d_addr_end(addr, end);
1274 		if (p4d_none_or_clear_bad(src_p4d))
1275 			continue;
1276 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1277 				   addr, next))
1278 			return -ENOMEM;
1279 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1280 	return 0;
1281 }
1282 
1283 int
1284 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1285 {
1286 	pgd_t *src_pgd, *dst_pgd;
1287 	unsigned long next;
1288 	unsigned long addr = src_vma->vm_start;
1289 	unsigned long end = src_vma->vm_end;
1290 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1291 	struct mm_struct *src_mm = src_vma->vm_mm;
1292 	struct mmu_notifier_range range;
1293 	bool is_cow;
1294 	int ret;
1295 
1296 	/*
1297 	 * Don't copy ptes where a page fault will fill them correctly.
1298 	 * Fork becomes much lighter when there are big shared or private
1299 	 * readonly mappings. The tradeoff is that copy_page_range is more
1300 	 * efficient than faulting.
1301 	 */
1302 	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1303 	    !src_vma->anon_vma)
1304 		return 0;
1305 
1306 	if (is_vm_hugetlb_page(src_vma))
1307 		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
1308 
1309 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1310 		/*
1311 		 * We do not free on error cases below as remove_vma
1312 		 * gets called on error from higher level routine
1313 		 */
1314 		ret = track_pfn_copy(src_vma);
1315 		if (ret)
1316 			return ret;
1317 	}
1318 
1319 	/*
1320 	 * We need to invalidate the secondary MMU mappings only when
1321 	 * there could be a permission downgrade on the ptes of the
1322 	 * parent mm. And a permission downgrade will only happen if
1323 	 * is_cow_mapping() returns true.
1324 	 */
1325 	is_cow = is_cow_mapping(src_vma->vm_flags);
1326 
1327 	if (is_cow) {
1328 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1329 					0, src_vma, src_mm, addr, end);
1330 		mmu_notifier_invalidate_range_start(&range);
1331 		/*
1332 		 * Disabling preemption is not needed for the write side, as
1333 		 * the read side doesn't spin, but goes to the mmap_lock.
1334 		 *
1335 		 * Use the raw variant of the seqcount_t write API to avoid
1336 		 * lockdep complaining about preemptibility.
1337 		 */
1338 		mmap_assert_write_locked(src_mm);
1339 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1340 	}
1341 
1342 	ret = 0;
1343 	dst_pgd = pgd_offset(dst_mm, addr);
1344 	src_pgd = pgd_offset(src_mm, addr);
1345 	do {
1346 		next = pgd_addr_end(addr, end);
1347 		if (pgd_none_or_clear_bad(src_pgd))
1348 			continue;
1349 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1350 					    addr, next))) {
1351 			ret = -ENOMEM;
1352 			break;
1353 		}
1354 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1355 
1356 	if (is_cow) {
1357 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1358 		mmu_notifier_invalidate_range_end(&range);
1359 	}
1360 	return ret;
1361 }
1362 
1363 /* Whether we should zap all COWed (private) pages too */
1364 static inline bool should_zap_cows(struct zap_details *details)
1365 {
1366 	/* By default, zap all pages */
1367 	if (!details)
1368 		return true;
1369 
1370 	/* Or, we zap COWed pages only if the caller wants to */
1371 	return !details->check_mapping;
1372 }
1373 
1374 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1375 				struct vm_area_struct *vma, pmd_t *pmd,
1376 				unsigned long addr, unsigned long end,
1377 				struct zap_details *details)
1378 {
1379 	struct mm_struct *mm = tlb->mm;
1380 	int force_flush = 0;
1381 	int rss[NR_MM_COUNTERS];
1382 	spinlock_t *ptl;
1383 	pte_t *start_pte;
1384 	pte_t *pte;
1385 	swp_entry_t entry;
1386 
1387 	tlb_change_page_size(tlb, PAGE_SIZE);
1388 again:
1389 	init_rss_vec(rss);
1390 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1391 	pte = start_pte;
1392 	flush_tlb_batched_pending(mm);
1393 	arch_enter_lazy_mmu_mode();
1394 	do {
1395 		pte_t ptent = *pte;
1396 		if (pte_none(ptent))
1397 			continue;
1398 
1399 		if (need_resched())
1400 			break;
1401 
1402 		if (pte_present(ptent)) {
1403 			struct page *page;
1404 
1405 			page = vm_normal_page(vma, addr, ptent);
1406 			if (unlikely(details) && page) {
1407 				/*
1408 				 * unmap_shared_mapping_pages() wants to
1409 				 * invalidate cache without truncating:
1410 				 * unmap shared but keep private pages.
1411 				 */
1412 				if (details->check_mapping &&
1413 				    details->check_mapping != page_rmapping(page))
1414 					continue;
1415 			}
1416 			ptent = ptep_get_and_clear_full(mm, addr, pte,
1417 							tlb->fullmm);
1418 			tlb_remove_tlb_entry(tlb, pte, addr);
1419 			if (unlikely(!page))
1420 				continue;
1421 
1422 			if (!PageAnon(page)) {
1423 				if (pte_dirty(ptent)) {
1424 					force_flush = 1;
1425 					set_page_dirty(page);
1426 				}
1427 				if (pte_young(ptent) &&
1428 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
1429 					mark_page_accessed(page);
1430 			}
1431 			rss[mm_counter(page)]--;
1432 			page_remove_rmap(page, false);
1433 			if (unlikely(page_mapcount(page) < 0))
1434 				print_bad_pte(vma, addr, ptent, page);
1435 			if (unlikely(__tlb_remove_page(tlb, page))) {
1436 				force_flush = 1;
1437 				addr += PAGE_SIZE;
1438 				break;
1439 			}
1440 			continue;
1441 		}
1442 
1443 		entry = pte_to_swp_entry(ptent);
1444 		if (is_device_private_entry(entry) ||
1445 		    is_device_exclusive_entry(entry)) {
1446 			struct page *page = pfn_swap_entry_to_page(entry);
1447 
1448 			if (unlikely(details && details->check_mapping)) {
1449 				/*
1450 				 * unmap_shared_mapping_pages() wants to
1451 				 * invalidate cache without truncating:
1452 				 * unmap shared but keep private pages.
1453 				 */
1454 				if (details->check_mapping !=
1455 				    page_rmapping(page))
1456 					continue;
1457 			}
1458 
1459 			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1460 			rss[mm_counter(page)]--;
1461 
1462 			if (is_device_private_entry(entry))
1463 				page_remove_rmap(page, false);
1464 
1465 			put_page(page);
1466 			continue;
1467 		}
1468 
1469 		if (!non_swap_entry(entry)) {
1470 			/* Genuine swap entry, hence a private anon page */
1471 			if (!should_zap_cows(details))
1472 				continue;
1473 			rss[MM_SWAPENTS]--;
1474 		} else if (is_migration_entry(entry)) {
1475 			struct page *page;
1476 
1477 			page = pfn_swap_entry_to_page(entry);
1478 			if (details && details->check_mapping &&
1479 			    details->check_mapping != page_rmapping(page))
1480 				continue;
1481 			rss[mm_counter(page)]--;
1482 		}
1483 		if (unlikely(!free_swap_and_cache(entry)))
1484 			print_bad_pte(vma, addr, ptent, NULL);
1485 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1486 	} while (pte++, addr += PAGE_SIZE, addr != end);
1487 
1488 	add_mm_rss_vec(mm, rss);
1489 	arch_leave_lazy_mmu_mode();
1490 
1491 	/* Do the actual TLB flush before dropping ptl */
1492 	if (force_flush)
1493 		tlb_flush_mmu_tlbonly(tlb);
1494 	pte_unmap_unlock(start_pte, ptl);
1495 
1496 	/*
1497 	 * If we forced a TLB flush (either due to running out of
1498 	 * batch buffers or because we needed to flush dirty TLB
1499 	 * entries before releasing the ptl), free the batched
1500 	 * memory too. Restart if we didn't do everything.
1501 	 */
1502 	if (force_flush) {
1503 		force_flush = 0;
1504 		tlb_flush_mmu(tlb);
1505 	}
1506 
1507 	if (addr != end) {
1508 		cond_resched();
1509 		goto again;
1510 	}
1511 
1512 	return addr;
1513 }
1514 
1515 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1516 				struct vm_area_struct *vma, pud_t *pud,
1517 				unsigned long addr, unsigned long end,
1518 				struct zap_details *details)
1519 {
1520 	pmd_t *pmd;
1521 	unsigned long next;
1522 
1523 	pmd = pmd_offset(pud, addr);
1524 	do {
1525 		next = pmd_addr_end(addr, end);
1526 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1527 			if (next - addr != HPAGE_PMD_SIZE)
1528 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1529 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
1530 				goto next;
1531 			/* fall through */
1532 		} else if (details && details->single_page &&
1533 			   PageTransCompound(details->single_page) &&
1534 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1535 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1536 			/*
1537 			 * Take and drop THP pmd lock so that we cannot return
1538 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1539 			 * but not yet decremented compound_mapcount().
1540 			 */
1541 			spin_unlock(ptl);
1542 		}
1543 
1544 		/*
1545 		 * Here there can be other concurrent MADV_DONTNEED or
1546 		 * trans huge page faults running, and if the pmd is
1547 		 * none or trans huge it can change under us. This is
1548 		 * because MADV_DONTNEED holds the mmap_lock in read
1549 		 * mode.
1550 		 */
1551 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1552 			goto next;
1553 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1554 next:
1555 		cond_resched();
1556 	} while (pmd++, addr = next, addr != end);
1557 
1558 	return addr;
1559 }
1560 
1561 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1562 				struct vm_area_struct *vma, p4d_t *p4d,
1563 				unsigned long addr, unsigned long end,
1564 				struct zap_details *details)
1565 {
1566 	pud_t *pud;
1567 	unsigned long next;
1568 
1569 	pud = pud_offset(p4d, addr);
1570 	do {
1571 		next = pud_addr_end(addr, end);
1572 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1573 			if (next - addr != HPAGE_PUD_SIZE) {
1574 				mmap_assert_locked(tlb->mm);
1575 				split_huge_pud(vma, pud, addr);
1576 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1577 				goto next;
1578 			/* fall through */
1579 		}
1580 		if (pud_none_or_clear_bad(pud))
1581 			continue;
1582 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1583 next:
1584 		cond_resched();
1585 	} while (pud++, addr = next, addr != end);
1586 
1587 	return addr;
1588 }
1589 
1590 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1591 				struct vm_area_struct *vma, pgd_t *pgd,
1592 				unsigned long addr, unsigned long end,
1593 				struct zap_details *details)
1594 {
1595 	p4d_t *p4d;
1596 	unsigned long next;
1597 
1598 	p4d = p4d_offset(pgd, addr);
1599 	do {
1600 		next = p4d_addr_end(addr, end);
1601 		if (p4d_none_or_clear_bad(p4d))
1602 			continue;
1603 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1604 	} while (p4d++, addr = next, addr != end);
1605 
1606 	return addr;
1607 }
1608 
1609 void unmap_page_range(struct mmu_gather *tlb,
1610 			     struct vm_area_struct *vma,
1611 			     unsigned long addr, unsigned long end,
1612 			     struct zap_details *details)
1613 {
1614 	pgd_t *pgd;
1615 	unsigned long next;
1616 
1617 	BUG_ON(addr >= end);
1618 	tlb_start_vma(tlb, vma);
1619 	pgd = pgd_offset(vma->vm_mm, addr);
1620 	do {
1621 		next = pgd_addr_end(addr, end);
1622 		if (pgd_none_or_clear_bad(pgd))
1623 			continue;
1624 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1625 	} while (pgd++, addr = next, addr != end);
1626 	tlb_end_vma(tlb, vma);
1627 }
1628 
1629 
1630 static void unmap_single_vma(struct mmu_gather *tlb,
1631 		struct vm_area_struct *vma, unsigned long start_addr,
1632 		unsigned long end_addr,
1633 		struct zap_details *details)
1634 {
1635 	unsigned long start = max(vma->vm_start, start_addr);
1636 	unsigned long end;
1637 
1638 	if (start >= vma->vm_end)
1639 		return;
1640 	end = min(vma->vm_end, end_addr);
1641 	if (end <= vma->vm_start)
1642 		return;
1643 
1644 	if (vma->vm_file)
1645 		uprobe_munmap(vma, start, end);
1646 
1647 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1648 		untrack_pfn(vma, 0, 0);
1649 
1650 	if (start != end) {
1651 		if (unlikely(is_vm_hugetlb_page(vma))) {
1652 			/*
1653 			 * It is undesirable to test vma->vm_file as it
1654 			 * should be non-null for valid hugetlb area.
1655 			 * However, vm_file will be NULL in the error
1656 			 * cleanup path of mmap_region. When
1657 			 * hugetlbfs ->mmap method fails,
1658 			 * mmap_region() nullifies vma->vm_file
1659 			 * before calling this function to clean up.
1660 			 * Since no pte has actually been setup, it is
1661 			 * safe to do nothing in this case.
1662 			 */
1663 			if (vma->vm_file) {
1664 				i_mmap_lock_write(vma->vm_file->f_mapping);
1665 				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1666 				i_mmap_unlock_write(vma->vm_file->f_mapping);
1667 			}
1668 		} else
1669 			unmap_page_range(tlb, vma, start, end, details);
1670 	}
1671 }
1672 
1673 /**
1674  * unmap_vmas - unmap a range of memory covered by a list of vma's
1675  * @tlb: address of the caller's struct mmu_gather
1676  * @vma: the starting vma
1677  * @start_addr: virtual address at which to start unmapping
1678  * @end_addr: virtual address at which to end unmapping
1679  *
1680  * Unmap all pages in the vma list.
1681  *
1682  * Only addresses between @start_addr and @end_addr will be unmapped.
1683  *
1684  * The VMA list must be sorted in ascending virtual address order.
1685  *
1686  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1687  * range after unmap_vmas() returns.  So the only responsibility here is to
1688  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1689  * drops the lock and schedules.
1690  */
1691 void unmap_vmas(struct mmu_gather *tlb,
1692 		struct vm_area_struct *vma, unsigned long start_addr,
1693 		unsigned long end_addr)
1694 {
1695 	struct mmu_notifier_range range;
1696 
1697 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1698 				start_addr, end_addr);
1699 	mmu_notifier_invalidate_range_start(&range);
1700 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1701 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1702 	mmu_notifier_invalidate_range_end(&range);
1703 }
1704 
1705 /**
1706  * zap_page_range - remove user pages in a given range
1707  * @vma: vm_area_struct holding the applicable pages
1708  * @start: starting address of pages to zap
1709  * @size: number of bytes to zap
1710  *
1711  * Caller must protect the VMA list
1712  */
1713 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1714 		unsigned long size)
1715 {
1716 	struct mmu_notifier_range range;
1717 	struct mmu_gather tlb;
1718 
1719 	lru_add_drain();
1720 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1721 				start, start + size);
1722 	tlb_gather_mmu(&tlb, vma->vm_mm);
1723 	update_hiwater_rss(vma->vm_mm);
1724 	mmu_notifier_invalidate_range_start(&range);
1725 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1726 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
1727 	mmu_notifier_invalidate_range_end(&range);
1728 	tlb_finish_mmu(&tlb);
1729 }
1730 
1731 /**
1732  * zap_page_range_single - remove user pages in a given range
1733  * @vma: vm_area_struct holding the applicable pages
1734  * @address: starting address of pages to zap
1735  * @size: number of bytes to zap
1736  * @details: details of shared cache invalidation
1737  *
1738  * The range must fit into one VMA.
1739  */
1740 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1741 		unsigned long size, struct zap_details *details)
1742 {
1743 	struct mmu_notifier_range range;
1744 	struct mmu_gather tlb;
1745 
1746 	lru_add_drain();
1747 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1748 				address, address + size);
1749 	tlb_gather_mmu(&tlb, vma->vm_mm);
1750 	update_hiwater_rss(vma->vm_mm);
1751 	mmu_notifier_invalidate_range_start(&range);
1752 	unmap_single_vma(&tlb, vma, address, range.end, details);
1753 	mmu_notifier_invalidate_range_end(&range);
1754 	tlb_finish_mmu(&tlb);
1755 }
1756 
1757 /**
1758  * zap_vma_ptes - remove ptes mapping the vma
1759  * @vma: vm_area_struct holding ptes to be zapped
1760  * @address: starting address of pages to zap
1761  * @size: number of bytes to zap
1762  *
1763  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1764  *
1765  * The entire address range must be fully contained within the vma.
1766  *
1767  */
1768 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1769 		unsigned long size)
1770 {
1771 	if (address < vma->vm_start || address + size > vma->vm_end ||
1772 	    		!(vma->vm_flags & VM_PFNMAP))
1773 		return;
1774 
1775 	zap_page_range_single(vma, address, size, NULL);
1776 }
1777 EXPORT_SYMBOL_GPL(zap_vma_ptes);
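
/*
 * Usage sketch (illustrative, not part of memory.c): a driver revoking a
 * VM_PFNMAP mapping it previously set up, for example on device removal.
 * my_driver_revoke_mapping() is hypothetical; the vma is assumed to have
 * been populated earlier with remap_pfn_range() or vmf_insert_pfn(), and
 * subsequent user accesses will fault.
 */
static void my_driver_revoke_mapping(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}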
1778 
1779 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1780 {
1781 	pgd_t *pgd;
1782 	p4d_t *p4d;
1783 	pud_t *pud;
1784 	pmd_t *pmd;
1785 
1786 	pgd = pgd_offset(mm, addr);
1787 	p4d = p4d_alloc(mm, pgd, addr);
1788 	if (!p4d)
1789 		return NULL;
1790 	pud = pud_alloc(mm, p4d, addr);
1791 	if (!pud)
1792 		return NULL;
1793 	pmd = pmd_alloc(mm, pud, addr);
1794 	if (!pmd)
1795 		return NULL;
1796 
1797 	VM_BUG_ON(pmd_trans_huge(*pmd));
1798 	return pmd;
1799 }
1800 
1801 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1802 			spinlock_t **ptl)
1803 {
1804 	pmd_t *pmd = walk_to_pmd(mm, addr);
1805 
1806 	if (!pmd)
1807 		return NULL;
1808 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1809 }
1810 
1811 static int validate_page_before_insert(struct page *page)
1812 {
1813 	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1814 		return -EINVAL;
1815 	flush_dcache_page(page);
1816 	return 0;
1817 }
1818 
1819 static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1820 			unsigned long addr, struct page *page, pgprot_t prot)
1821 {
1822 	if (!pte_none(*pte))
1823 		return -EBUSY;
1824 	/* Ok, finally just insert the thing.. */
1825 	get_page(page);
1826 	inc_mm_counter_fast(mm, mm_counter_file(page));
1827 	page_add_file_rmap(page, false);
1828 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1829 	return 0;
1830 }
1831 
1832 /*
1833  * This is the old fallback for page remapping.
1834  *
1835  * For historical reasons, it only allows reserved pages. Only
1836  * old drivers should use this, and they needed to mark their
1837  * pages reserved for the old functions anyway.
1838  */
1839 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1840 			struct page *page, pgprot_t prot)
1841 {
1842 	struct mm_struct *mm = vma->vm_mm;
1843 	int retval;
1844 	pte_t *pte;
1845 	spinlock_t *ptl;
1846 
1847 	retval = validate_page_before_insert(page);
1848 	if (retval)
1849 		goto out;
1850 	retval = -ENOMEM;
1851 	pte = get_locked_pte(mm, addr, &ptl);
1852 	if (!pte)
1853 		goto out;
1854 	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
1855 	pte_unmap_unlock(pte, ptl);
1856 out:
1857 	return retval;
1858 }
1859 
1860 #ifdef pte_index
1861 static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
1862 			unsigned long addr, struct page *page, pgprot_t prot)
1863 {
1864 	int err;
1865 
1866 	if (!page_count(page))
1867 		return -EINVAL;
1868 	err = validate_page_before_insert(page);
1869 	if (err)
1870 		return err;
1871 	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
1872 }
1873 
1874 /* insert_pages() amortizes the cost of spinlock operations
1875  * when inserting pages in a loop. Arch *must* define pte_index.
1876  */
1877 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1878 			struct page **pages, unsigned long *num, pgprot_t prot)
1879 {
1880 	pmd_t *pmd = NULL;
1881 	pte_t *start_pte, *pte;
1882 	spinlock_t *pte_lock;
1883 	struct mm_struct *const mm = vma->vm_mm;
1884 	unsigned long curr_page_idx = 0;
1885 	unsigned long remaining_pages_total = *num;
1886 	unsigned long pages_to_write_in_pmd;
1887 	int ret;
1888 more:
1889 	ret = -EFAULT;
1890 	pmd = walk_to_pmd(mm, addr);
1891 	if (!pmd)
1892 		goto out;
1893 
1894 	pages_to_write_in_pmd = min_t(unsigned long,
1895 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1896 
1897 	/* Allocate the PTE if necessary; takes PMD lock once only. */
1898 	ret = -ENOMEM;
1899 	if (pte_alloc(mm, pmd))
1900 		goto out;
1901 
1902 	while (pages_to_write_in_pmd) {
1903 		int pte_idx = 0;
1904 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1905 
1906 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1907 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1908 			int err = insert_page_in_batch_locked(mm, pte,
1909 				addr, pages[curr_page_idx], prot);
1910 			if (unlikely(err)) {
1911 				pte_unmap_unlock(start_pte, pte_lock);
1912 				ret = err;
1913 				remaining_pages_total -= pte_idx;
1914 				goto out;
1915 			}
1916 			addr += PAGE_SIZE;
1917 			++curr_page_idx;
1918 		}
1919 		pte_unmap_unlock(start_pte, pte_lock);
1920 		pages_to_write_in_pmd -= batch_size;
1921 		remaining_pages_total -= batch_size;
1922 	}
1923 	if (remaining_pages_total)
1924 		goto more;
1925 	ret = 0;
1926 out:
1927 	*num = remaining_pages_total;
1928 	return ret;
1929 }
1930 #endif  /* ifdef pte_index */
1931 
1932 /**
1933  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1934  * @vma: user vma to map to
1935  * @addr: target start user address of these pages
1936  * @pages: source kernel pages
1937  * @num: in: number of pages to map. out: number of pages that were *not*
1938  * mapped. (0 means all pages were successfully mapped).
1939  *
1940  * Preferred over vm_insert_page() when inserting multiple pages.
1941  *
1942  * In case of error, we may have mapped a subset of the provided
1943  * pages. It is the caller's responsibility to account for this case.
1944  *
1945  * The same restrictions apply as in vm_insert_page().
1946  */
1947 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1948 			struct page **pages, unsigned long *num)
1949 {
1950 #ifdef pte_index
1951 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1952 
1953 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1954 		return -EFAULT;
1955 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1956 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1957 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1958 		vma->vm_flags |= VM_MIXEDMAP;
1959 	}
1960 	/* Defer page refcount checking till we're about to map that page. */
1961 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1962 #else
1963 	unsigned long idx = 0, pgcount = *num;
1964 	int err = -EINVAL;
1965 
1966 	for (; idx < pgcount; ++idx) {
1967 		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1968 		if (err)
1969 			break;
1970 	}
1971 	*num = pgcount - idx;
1972 	return err;
1973 #endif  /* ifdef pte_index */
1974 }
1975 EXPORT_SYMBOL(vm_insert_pages);
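
/*
 * Usage sketch (illustrative, not part of memory.c): batched insertion of a
 * driver-owned page array from an ->mmap handler. "struct my_buf" and
 * my_buf_mmap() are hypothetical; the pages are assumed to have been
 * allocated by the driver beforehand.
 */
struct my_buf {
	struct page **pages;
	unsigned long nr_pages;
};

static int my_buf_mmap(struct my_buf *buf, struct vm_area_struct *vma)
{
	unsigned long nr = buf->nr_pages;
	int err;

	/* On return, nr holds the number of pages that were *not* mapped. */
	err = vm_insert_pages(vma, vma->vm_start, buf->pages, &nr);
	if (err)
		pr_warn("mapping failed with %lu pages left unmapped: %d\n",
			nr, err);
	return err;
}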
1976 
1977 /**
1978  * vm_insert_page - insert single page into user vma
1979  * @vma: user vma to map to
1980  * @addr: target user address of this page
1981  * @page: source kernel page
1982  *
1983  * This allows drivers to insert individual pages they've allocated
1984  * into a user vma.
1985  *
1986  * The page has to be a nice clean _individual_ kernel allocation.
1987  * If you allocate a compound page, you need to have marked it as
1988  * such (__GFP_COMP), or manually just split the page up yourself
1989  * (see split_page()).
1990  *
1991  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1992  * took an arbitrary page protection parameter. This doesn't allow
1993  * that. Your vma protection will have to be set up correctly, which
1994  * means that if you want a shared writable mapping, you'd better
1995  * ask for a shared writable mapping!
1996  *
1997  * The page does not need to be reserved.
1998  *
1999  * Usually this function is called from f_op->mmap() handler
2000  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2001  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2002  * function from other places, for example from page-fault handler.
2003  *
2004  * Return: %0 on success, negative error code otherwise.
2005  */
2006 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2007 			struct page *page)
2008 {
2009 	if (addr < vma->vm_start || addr >= vma->vm_end)
2010 		return -EFAULT;
2011 	if (!page_count(page))
2012 		return -EINVAL;
2013 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2014 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2015 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2016 		vma->vm_flags |= VM_MIXEDMAP;
2017 	}
2018 	return insert_page(vma, addr, page, vma->vm_page_prot);
2019 }
2020 EXPORT_SYMBOL(vm_insert_page);
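
/*
 * Usage sketch (illustrative, not part of memory.c): an ->mmap handler
 * mapping a single driver-allocated page at the start of the vma. The page
 * is assumed to have been allocated with alloc_page() and stashed in
 * file->private_data by a hypothetical driver.
 */
static int my_single_page_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = file->private_data;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	return vm_insert_page(vma, vma->vm_start, page);
}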
2021 
2022 /*
2023  * __vm_map_pages - maps a range of kernel pages into a user vma
2024  * @vma: user vma to map to
2025  * @pages: pointer to array of source kernel pages
2026  * @num: number of pages in page array
2027  * @offset: user's requested vm_pgoff
2028  *
2029  * This allows drivers to map a range of kernel pages into a user vma.
2030  *
2031  * Return: 0 on success and error code otherwise.
2032  */
2033 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2034 				unsigned long num, unsigned long offset)
2035 {
2036 	unsigned long count = vma_pages(vma);
2037 	unsigned long uaddr = vma->vm_start;
2038 	int ret, i;
2039 
2040 	/* Fail if the user requested offset is beyond the end of the object */
2041 	if (offset >= num)
2042 		return -ENXIO;
2043 
2044 	/* Fail if the user requested size exceeds available object size */
2045 	if (count > num - offset)
2046 		return -ENXIO;
2047 
2048 	for (i = 0; i < count; i++) {
2049 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2050 		if (ret < 0)
2051 			return ret;
2052 		uaddr += PAGE_SIZE;
2053 	}
2054 
2055 	return 0;
2056 }
2057 
2058 /**
2059  * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2060  * @vma: user vma to map to
2061  * @pages: pointer to array of source kernel pages
2062  * @num: number of pages in page array
2063  *
2064  * Maps an object consisting of @num pages, catering for the user's
2065  * requested vm_pgoff
2066  *
2067  * If we fail to insert any page into the vma, the function will return
2068  * immediately leaving any previously inserted pages present.  Callers
2069  * from the mmap handler may immediately return the error as their caller
2070  * will destroy the vma, removing any successfully inserted pages. Other
2071  * callers should make their own arrangements for calling unmap_region().
2072  *
2073  * Context: Process context. Called by mmap handlers.
2074  * Return: 0 on success and error code otherwise.
2075  */
2076 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2077 				unsigned long num)
2078 {
2079 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2080 }
2081 EXPORT_SYMBOL(vm_map_pages);
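
/*
 * Usage sketch (illustrative, not part of memory.c): an ->mmap handler for
 * an object described by a page array, honouring the offset userspace
 * passed to mmap(). my_obj_mmap() is hypothetical; vm_map_pages() selects
 * the sub-range indicated by vma->vm_pgoff and checks that it fits.
 */
static int my_obj_mmap(struct vm_area_struct *vma, struct page **obj_pages,
		       unsigned long obj_nr_pages)
{
	return vm_map_pages(vma, obj_pages, obj_nr_pages);
}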
2082 
2083 /**
2084  * vm_map_pages_zero - map a range of kernel pages starting at offset zero
2085  * @vma: user vma to map to
2086  * @pages: pointer to array of source kernel pages
2087  * @num: number of pages in page array
2088  *
2089  * Similar to vm_map_pages(), except that it explicitly sets the offset
2090  * to 0. This function is intended for drivers that do not consider
2091  * vm_pgoff.
2092  *
2093  * Context: Process context. Called by mmap handlers.
2094  * Return: 0 on success and error code otherwise.
2095  */
2096 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2097 				unsigned long num)
2098 {
2099 	return __vm_map_pages(vma, pages, num, 0);
2100 }
2101 EXPORT_SYMBOL(vm_map_pages_zero);
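
/*
 * Usage sketch (illustrative, not part of memory.c): as above, but for a
 * hypothetical driver that deliberately ignores vm_pgoff and always maps
 * the object from its first page.
 */
static int my_obj_mmap_ignore_pgoff(struct vm_area_struct *vma,
				    struct page **obj_pages,
				    unsigned long obj_nr_pages)
{
	return vm_map_pages_zero(vma, obj_pages, obj_nr_pages);
}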
2102 
2103 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2104 			pfn_t pfn, pgprot_t prot, bool mkwrite)
2105 {
2106 	struct mm_struct *mm = vma->vm_mm;
2107 	pte_t *pte, entry;
2108 	spinlock_t *ptl;
2109 
2110 	pte = get_locked_pte(mm, addr, &ptl);
2111 	if (!pte)
2112 		return VM_FAULT_OOM;
2113 	if (!pte_none(*pte)) {
2114 		if (mkwrite) {
2115 			/*
2116 			 * For read faults on private mappings the PFN passed
2117 			 * in may not match the PFN we have mapped if the
2118 			 * mapped PFN is a writeable COW page.  In the mkwrite
2119 			 * case we are creating a writable PTE for a shared
2120 			 * mapping and we expect the PFNs to match. If they
2121 			 * don't match, we are likely racing with block
2122 			 * allocation and mapping invalidation so just skip the
2123 			 * update.
2124 			 */
2125 			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
2126 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
2127 				goto out_unlock;
2128 			}
2129 			entry = pte_mkyoung(*pte);
2130 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2131 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2132 				update_mmu_cache(vma, addr, pte);
2133 		}
2134 		goto out_unlock;
2135 	}
2136 
2137 	/* Ok, finally just insert the thing.. */
2138 	if (pfn_t_devmap(pfn))
2139 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2140 	else
2141 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2142 
2143 	if (mkwrite) {
2144 		entry = pte_mkyoung(entry);
2145 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2146 	}
2147 
2148 	set_pte_at(mm, addr, pte, entry);
2149 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2150 
2151 out_unlock:
2152 	pte_unmap_unlock(pte, ptl);
2153 	return VM_FAULT_NOPAGE;
2154 }
2155 
2156 /**
2157  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2158  * @vma: user vma to map to
2159  * @addr: target user address of this page
2160  * @pfn: source kernel pfn
2161  * @pgprot: pgprot flags for the inserted page
2162  *
2163  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2164  * to override pgprot on a per-page basis.
2165  *
2166  * This only makes sense for IO mappings, and it makes no sense for
2167  * COW mappings.  In general, using multiple vmas is preferable;
2168  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2169  * impractical.
2170  *
2171  * See vmf_insert_mixed_prot() for a discussion of the implication of using
2172  * a value of @pgprot different from that of @vma->vm_page_prot.
2173  *
2174  * Context: Process context.  May allocate using %GFP_KERNEL.
2175  * Return: vm_fault_t value.
2176  */
2177 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2178 			unsigned long pfn, pgprot_t pgprot)
2179 {
2180 	/*
2181 	 * Technically, architectures with pte_special can avoid all these
2182 	 * restrictions (same for remap_pfn_range).  However we would like
2183 	 * consistency in testing and feature parity among all, so we should
2184 	 * try to keep these invariants in place for everybody.
2185 	 */
2186 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2187 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2188 						(VM_PFNMAP|VM_MIXEDMAP));
2189 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2190 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2191 
2192 	if (addr < vma->vm_start || addr >= vma->vm_end)
2193 		return VM_FAULT_SIGBUS;
2194 
2195 	if (!pfn_modify_allowed(pfn, pgprot))
2196 		return VM_FAULT_SIGBUS;
2197 
2198 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2199 
2200 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2201 			false);
2202 }
2203 EXPORT_SYMBOL(vmf_insert_pfn_prot);
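
/*
 * Usage sketch (illustrative, not part of memory.c): a ->fault handler for a
 * VM_PFNMAP vma that inserts device memory with a write-combining pgprot,
 * overriding vma->vm_page_prot per page. "struct my_dev" and its mmio_pfn
 * field are hypothetical; the vma is assumed to have been marked VM_PFNMAP
 * in the driver's ->mmap handler.
 */
struct my_dev {
	unsigned long mmio_pfn;	/* first pfn of the device aperture */
};

static vm_fault_t my_wc_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;

	return vmf_insert_pfn_prot(vmf->vma, vmf->address,
				   dev->mmio_pfn + vmf->pgoff,
				   pgprot_writecombine(vmf->vma->vm_page_prot));
}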
2204 
2205 /**
2206  * vmf_insert_pfn - insert single pfn into user vma
2207  * @vma: user vma to map to
2208  * @addr: target user address of this page
2209  * @pfn: source kernel pfn
2210  *
2211  * Similar to vm_insert_page, this allows drivers to insert individual pages
2212  * they've allocated into a user vma. Same comments apply.
2213  *
2214  * This function should only be called from a vm_ops->fault handler, and
2215  * in that case the handler should return the result of this function.
2216  *
2217  * vma cannot be a COW mapping.
2218  *
2219  * As this is called only for pages that do not currently exist, we
2220  * do not need to flush old virtual caches or the TLB.
2221  *
2222  * Context: Process context.  May allocate using %GFP_KERNEL.
2223  * Return: vm_fault_t value.
2224  */
2225 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2226 			unsigned long pfn)
2227 {
2228 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2229 }
2230 EXPORT_SYMBOL(vmf_insert_pfn);
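
/*
 * Usage sketch (illustrative, not part of memory.c): the common pattern of
 * returning vmf_insert_pfn()'s result directly from a ->fault handler, using
 * the vma's default page protection. Reuses the hypothetical "struct my_dev"
 * from the previous sketch.
 */
static vm_fault_t my_pfn_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;

	return vmf_insert_pfn(vmf->vma, vmf->address, dev->mmio_pfn + vmf->pgoff);
}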
2231 
2232 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2233 {
2234 	/* these checks mirror the abort conditions in vm_normal_page */
2235 	if (vma->vm_flags & VM_MIXEDMAP)
2236 		return true;
2237 	if (pfn_t_devmap(pfn))
2238 		return true;
2239 	if (pfn_t_special(pfn))
2240 		return true;
2241 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2242 		return true;
2243 	return false;
2244 }
2245 
2246 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2247 		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2248 		bool mkwrite)
2249 {
2250 	int err;
2251 
2252 	BUG_ON(!vm_mixed_ok(vma, pfn));
2253 
2254 	if (addr < vma->vm_start || addr >= vma->vm_end)
2255 		return VM_FAULT_SIGBUS;
2256 
2257 	track_pfn_insert(vma, &pgprot, pfn);
2258 
2259 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2260 		return VM_FAULT_SIGBUS;
2261 
2262 	/*
2263 	 * If we don't have pte special, then we have to use the pfn_valid()
2264 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2265 	 * refcount the page if pfn_valid is true (hence insert_page rather
2266 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2267  * without pte special, it would then be refcounted as a normal page.
2268 	 */
2269 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2270 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2271 		struct page *page;
2272 
2273 		/*
2274 		 * At this point we are committed to insert_page()
2275 		 * regardless of whether the caller specified flags that
2276 		 * result in pfn_t_has_page() == false.
2277 		 */
2278 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2279 		err = insert_page(vma, addr, page, pgprot);
2280 	} else {
2281 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2282 	}
2283 
2284 	if (err == -ENOMEM)
2285 		return VM_FAULT_OOM;
2286 	if (err < 0 && err != -EBUSY)
2287 		return VM_FAULT_SIGBUS;
2288 
2289 	return VM_FAULT_NOPAGE;
2290 }
2291 
2292 /**
2293  * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2294  * @vma: user vma to map to
2295  * @addr: target user address of this page
2296  * @pfn: source kernel pfn
2297  * @pgprot: pgprot flags for the inserted page
2298  *
2299  * This is exactly like vmf_insert_mixed(), except that it allows drivers
2300  * to override pgprot on a per-page basis.
2301  *
2302  * Typically this function should be used by drivers to set caching- and
2303  * encryption bits different than those of @vma->vm_page_prot, because
2304  * the caching- or encryption mode may not be known at mmap() time.
2305  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2306  * to set caching and encryption bits for those vmas (except for COW pages).
2307  * This is ensured by core vm only modifying these page table entries using
2308  * functions that don't touch caching- or encryption bits, using pte_modify()
2309  * if needed. (See for example mprotect()).
2310  * Also when new page-table entries are created, this is only done using the
2311  * fault() callback, and never using the value of vma->vm_page_prot,
2312  * except for page-table entries that point to anonymous pages as the result
2313  * of COW.
2314  *
2315  * Context: Process context.  May allocate using %GFP_KERNEL.
2316  * Return: vm_fault_t value.
2317  */
2318 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2319 				 pfn_t pfn, pgprot_t pgprot)
2320 {
2321 	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2322 }
2323 EXPORT_SYMBOL(vmf_insert_mixed_prot);
2324 
2325 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2326 		pfn_t pfn)
2327 {
2328 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2329 }
2330 EXPORT_SYMBOL(vmf_insert_mixed);
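
/*
 * Usage sketch (illustrative, not part of memory.c): a VM_MIXEDMAP ->fault
 * handler inserting the pfn of an ordinary kernel page. my_lookup_page() is
 * a hypothetical driver helper; as the comment in __vm_insert_mixed() above
 * explains, architectures without pte_special will refcount such pages via
 * insert_page().
 */
struct page *my_lookup_page(void *priv, pgoff_t pgoff);

static vm_fault_t my_mixed_fault(struct vm_fault *vmf)
{
	struct page *page = my_lookup_page(vmf->vma->vm_private_data, vmf->pgoff);

	if (!page)
		return VM_FAULT_SIGBUS;
	return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
}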
2331 
2332 /*
2333  *  If the insertion of the PTE failed because someone else already added a
2334  *  different entry in the meantime, we treat that as success, as we assume
2335  *  the same entry was actually inserted.
2336  */
2337 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2338 		unsigned long addr, pfn_t pfn)
2339 {
2340 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2341 }
2342 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2343 
2344 /*
2345  * Maps a range of physical memory into the requested pages. The old
2346  * mappings are removed. Any references to nonexistent pages result
2347  * in null mappings (currently treated as "copy-on-access").
2348  */
2349 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2350 			unsigned long addr, unsigned long end,
2351 			unsigned long pfn, pgprot_t prot)
2352 {
2353 	pte_t *pte, *mapped_pte;
2354 	spinlock_t *ptl;
2355 	int err = 0;
2356 
2357 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2358 	if (!pte)
2359 		return -ENOMEM;
2360 	arch_enter_lazy_mmu_mode();
2361 	do {
2362 		BUG_ON(!pte_none(*pte));
2363 		if (!pfn_modify_allowed(pfn, prot)) {
2364 			err = -EACCES;
2365 			break;
2366 		}
2367 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2368 		pfn++;
2369 	} while (pte++, addr += PAGE_SIZE, addr != end);
2370 	arch_leave_lazy_mmu_mode();
2371 	pte_unmap_unlock(mapped_pte, ptl);
2372 	return err;
2373 }
2374 
2375 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2376 			unsigned long addr, unsigned long end,
2377 			unsigned long pfn, pgprot_t prot)
2378 {
2379 	pmd_t *pmd;
2380 	unsigned long next;
2381 	int err;
2382 
2383 	pfn -= addr >> PAGE_SHIFT;
2384 	pmd = pmd_alloc(mm, pud, addr);
2385 	if (!pmd)
2386 		return -ENOMEM;
2387 	VM_BUG_ON(pmd_trans_huge(*pmd));
2388 	do {
2389 		next = pmd_addr_end(addr, end);
2390 		err = remap_pte_range(mm, pmd, addr, next,
2391 				pfn + (addr >> PAGE_SHIFT), prot);
2392 		if (err)
2393 			return err;
2394 	} while (pmd++, addr = next, addr != end);
2395 	return 0;
2396 }
2397 
2398 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2399 			unsigned long addr, unsigned long end,
2400 			unsigned long pfn, pgprot_t prot)
2401 {
2402 	pud_t *pud;
2403 	unsigned long next;
2404 	int err;
2405 
2406 	pfn -= addr >> PAGE_SHIFT;
2407 	pud = pud_alloc(mm, p4d, addr);
2408 	if (!pud)
2409 		return -ENOMEM;
2410 	do {
2411 		next = pud_addr_end(addr, end);
2412 		err = remap_pmd_range(mm, pud, addr, next,
2413 				pfn + (addr >> PAGE_SHIFT), prot);
2414 		if (err)
2415 			return err;
2416 	} while (pud++, addr = next, addr != end);
2417 	return 0;
2418 }
2419 
2420 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2421 			unsigned long addr, unsigned long end,
2422 			unsigned long pfn, pgprot_t prot)
2423 {
2424 	p4d_t *p4d;
2425 	unsigned long next;
2426 	int err;
2427 
2428 	pfn -= addr >> PAGE_SHIFT;
2429 	p4d = p4d_alloc(mm, pgd, addr);
2430 	if (!p4d)
2431 		return -ENOMEM;
2432 	do {
2433 		next = p4d_addr_end(addr, end);
2434 		err = remap_pud_range(mm, p4d, addr, next,
2435 				pfn + (addr >> PAGE_SHIFT), prot);
2436 		if (err)
2437 			return err;
2438 	} while (p4d++, addr = next, addr != end);
2439 	return 0;
2440 }
2441 
2442 /*
2443  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2444  * must have pre-validated the caching bits of the pgprot_t.
2445  */
2446 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2447 		unsigned long pfn, unsigned long size, pgprot_t prot)
2448 {
2449 	pgd_t *pgd;
2450 	unsigned long next;
2451 	unsigned long end = addr + PAGE_ALIGN(size);
2452 	struct mm_struct *mm = vma->vm_mm;
2453 	int err;
2454 
2455 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2456 		return -EINVAL;
2457 
2458 	/*
2459 	 * Physically remapped pages are special. Tell the
2460 	 * rest of the world about it:
2461 	 *   VM_IO tells people not to look at these pages
2462 	 *	(accesses can have side effects).
2463 	 *   VM_PFNMAP tells the core MM that the base pages are just
2464 	 *	raw PFN mappings, and do not have a "struct page" associated
2465 	 *	with them.
2466 	 *   VM_DONTEXPAND
2467 	 *      Disable vma merging and expanding with mremap().
2468 	 *   VM_DONTDUMP
2469 	 *      Omit vma from core dump, even when VM_IO turned off.
2470 	 *
2471 	 * There's a horrible special case to handle copy-on-write
2472 	 * behaviour that some programs depend on. We mark the "original"
2473 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2474 	 * See vm_normal_page() for details.
2475 	 */
2476 	if (is_cow_mapping(vma->vm_flags)) {
2477 		if (addr != vma->vm_start || end != vma->vm_end)
2478 			return -EINVAL;
2479 		vma->vm_pgoff = pfn;
2480 	}
2481 
2482 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2483 
2484 	BUG_ON(addr >= end);
2485 	pfn -= addr >> PAGE_SHIFT;
2486 	pgd = pgd_offset(mm, addr);
2487 	flush_cache_range(vma, addr, end);
2488 	do {
2489 		next = pgd_addr_end(addr, end);
2490 		err = remap_p4d_range(mm, pgd, addr, next,
2491 				pfn + (addr >> PAGE_SHIFT), prot);
2492 		if (err)
2493 			return err;
2494 	} while (pgd++, addr = next, addr != end);
2495 
2496 	return 0;
2497 }
2498 
2499 /**
2500  * remap_pfn_range - remap kernel memory to userspace
2501  * @vma: user vma to map to
2502  * @addr: target page aligned user address to start at
2503  * @pfn: page frame number of kernel physical memory address
2504  * @size: size of mapping area
2505  * @prot: page protection flags for this mapping
2506  *
2507  * Note: this is only safe if the mm semaphore is held when called.
2508  *
2509  * Return: %0 on success, negative error code otherwise.
2510  */
2511 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2512 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2513 {
2514 	int err;
2515 
2516 	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2517 	if (err)
2518 		return -EINVAL;
2519 
2520 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2521 	if (err)
2522 		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
2523 	return err;
2524 }
2525 EXPORT_SYMBOL(remap_pfn_range);
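
/*
 * Usage sketch (illustrative, not part of memory.c): the classic ->mmap
 * handler remapping a physical region into userspace in one call, so that no
 * ->fault handler is needed afterwards. "struct my_mmio_dev" and its
 * mmio_start field are hypothetical.
 */
struct my_mmio_dev {
	phys_addr_t mmio_start;	/* physical base of the region */
};

static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_mmio_dev *dev = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       (dev->mmio_start >> PAGE_SHIFT) + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}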
2526 
2527 /**
2528  * vm_iomap_memory - remap memory to userspace
2529  * @vma: user vma to map to
2530  * @start: start of the physical memory to be mapped
2531  * @len: size of area
2532  *
2533  * This is a simplified io_remap_pfn_range() for common driver use. The
2534  * driver just needs to give us the physical memory range to be mapped,
2535  * we'll figure out the rest from the vma information.
2536  *
2537  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set up
2538  * write-combining or similar behaviour.
2539  *
2540  * Return: %0 on success, negative error code otherwise.
2541  */
2542 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2543 {
2544 	unsigned long vm_len, pfn, pages;
2545 
2546 	/* Check that the physical memory area passed in looks valid */
2547 	if (start + len < start)
2548 		return -EINVAL;
2549 	/*
2550 	 * You *really* shouldn't map things that aren't page-aligned,
2551 	 * but we've historically allowed it because IO memory might
2552 	 * just have smaller alignment.
2553 	 */
2554 	len += start & ~PAGE_MASK;
2555 	pfn = start >> PAGE_SHIFT;
2556 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2557 	if (pfn + pages < pfn)
2558 		return -EINVAL;
2559 
2560 	/* We start the mapping 'vm_pgoff' pages into the area */
2561 	if (vma->vm_pgoff > pages)
2562 		return -EINVAL;
2563 	pfn += vma->vm_pgoff;
2564 	pages -= vma->vm_pgoff;
2565 
2566 	/* Can we fit all of the mapping? */
2567 	vm_len = vma->vm_end - vma->vm_start;
2568 	if (vm_len >> PAGE_SHIFT > pages)
2569 		return -EINVAL;
2570 
2571 	/* Ok, let it rip */
2572 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2573 }
2574 EXPORT_SYMBOL(vm_iomap_memory);
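
/*
 * Usage sketch (illustrative, not part of memory.c): the simplified form of
 * the sketch above, letting vm_iomap_memory() derive the pfn, size and
 * offset checks from the vma itself. bar_start/bar_len stand for a
 * hypothetical device BAR.
 */
static int my_bar_mmap(struct vm_area_struct *vma, phys_addr_t bar_start,
		       unsigned long bar_len)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, bar_start, bar_len);
}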
2575 
2576 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2577 				     unsigned long addr, unsigned long end,
2578 				     pte_fn_t fn, void *data, bool create,
2579 				     pgtbl_mod_mask *mask)
2580 {
2581 	pte_t *pte, *mapped_pte;
2582 	int err = 0;
2583 	spinlock_t *ptl;
2584 
2585 	if (create) {
2586 		mapped_pte = pte = (mm == &init_mm) ?
2587 			pte_alloc_kernel_track(pmd, addr, mask) :
2588 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2589 		if (!pte)
2590 			return -ENOMEM;
2591 	} else {
2592 		mapped_pte = pte = (mm == &init_mm) ?
2593 			pte_offset_kernel(pmd, addr) :
2594 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2595 	}
2596 
2597 	BUG_ON(pmd_huge(*pmd));
2598 
2599 	arch_enter_lazy_mmu_mode();
2600 
2601 	if (fn) {
2602 		do {
2603 			if (create || !pte_none(*pte)) {
2604 				err = fn(pte++, addr, data);
2605 				if (err)
2606 					break;
2607 			}
2608 		} while (addr += PAGE_SIZE, addr != end);
2609 	}
2610 	*mask |= PGTBL_PTE_MODIFIED;
2611 
2612 	arch_leave_lazy_mmu_mode();
2613 
2614 	if (mm != &init_mm)
2615 		pte_unmap_unlock(mapped_pte, ptl);
2616 	return err;
2617 }
2618 
2619 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2620 				     unsigned long addr, unsigned long end,
2621 				     pte_fn_t fn, void *data, bool create,
2622 				     pgtbl_mod_mask *mask)
2623 {
2624 	pmd_t *pmd;
2625 	unsigned long next;
2626 	int err = 0;
2627 
2628 	BUG_ON(pud_huge(*pud));
2629 
2630 	if (create) {
2631 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2632 		if (!pmd)
2633 			return -ENOMEM;
2634 	} else {
2635 		pmd = pmd_offset(pud, addr);
2636 	}
2637 	do {
2638 		next = pmd_addr_end(addr, end);
2639 		if (pmd_none(*pmd) && !create)
2640 			continue;
2641 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2642 			return -EINVAL;
2643 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2644 			if (!create)
2645 				continue;
2646 			pmd_clear_bad(pmd);
2647 		}
2648 		err = apply_to_pte_range(mm, pmd, addr, next,
2649 					 fn, data, create, mask);
2650 		if (err)
2651 			break;
2652 	} while (pmd++, addr = next, addr != end);
2653 
2654 	return err;
2655 }
2656 
2657 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2658 				     unsigned long addr, unsigned long end,
2659 				     pte_fn_t fn, void *data, bool create,
2660 				     pgtbl_mod_mask *mask)
2661 {
2662 	pud_t *pud;
2663 	unsigned long next;
2664 	int err = 0;
2665 
2666 	if (create) {
2667 		pud = pud_alloc_track(mm, p4d, addr, mask);
2668 		if (!pud)
2669 			return -ENOMEM;
2670 	} else {
2671 		pud = pud_offset(p4d, addr);
2672 	}
2673 	do {
2674 		next = pud_addr_end(addr, end);
2675 		if (pud_none(*pud) && !create)
2676 			continue;
2677 		if (WARN_ON_ONCE(pud_leaf(*pud)))
2678 			return -EINVAL;
2679 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2680 			if (!create)
2681 				continue;
2682 			pud_clear_bad(pud);
2683 		}
2684 		err = apply_to_pmd_range(mm, pud, addr, next,
2685 					 fn, data, create, mask);
2686 		if (err)
2687 			break;
2688 	} while (pud++, addr = next, addr != end);
2689 
2690 	return err;
2691 }
2692 
2693 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2694 				     unsigned long addr, unsigned long end,
2695 				     pte_fn_t fn, void *data, bool create,
2696 				     pgtbl_mod_mask *mask)
2697 {
2698 	p4d_t *p4d;
2699 	unsigned long next;
2700 	int err = 0;
2701 
2702 	if (create) {
2703 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2704 		if (!p4d)
2705 			return -ENOMEM;
2706 	} else {
2707 		p4d = p4d_offset(pgd, addr);
2708 	}
2709 	do {
2710 		next = p4d_addr_end(addr, end);
2711 		if (p4d_none(*p4d) && !create)
2712 			continue;
2713 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2714 			return -EINVAL;
2715 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2716 			if (!create)
2717 				continue;
2718 			p4d_clear_bad(p4d);
2719 		}
2720 		err = apply_to_pud_range(mm, p4d, addr, next,
2721 					 fn, data, create, mask);
2722 		if (err)
2723 			break;
2724 	} while (p4d++, addr = next, addr != end);
2725 
2726 	return err;
2727 }
2728 
2729 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2730 				 unsigned long size, pte_fn_t fn,
2731 				 void *data, bool create)
2732 {
2733 	pgd_t *pgd;
2734 	unsigned long start = addr, next;
2735 	unsigned long end = addr + size;
2736 	pgtbl_mod_mask mask = 0;
2737 	int err = 0;
2738 
2739 	if (WARN_ON(addr >= end))
2740 		return -EINVAL;
2741 
2742 	pgd = pgd_offset(mm, addr);
2743 	do {
2744 		next = pgd_addr_end(addr, end);
2745 		if (pgd_none(*pgd) && !create)
2746 			continue;
2747 		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2748 			return -EINVAL;
2749 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2750 			if (!create)
2751 				continue;
2752 			pgd_clear_bad(pgd);
2753 		}
2754 		err = apply_to_p4d_range(mm, pgd, addr, next,
2755 					 fn, data, create, &mask);
2756 		if (err)
2757 			break;
2758 	} while (pgd++, addr = next, addr != end);
2759 
2760 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2761 		arch_sync_kernel_mappings(start, start + size);
2762 
2763 	return err;
2764 }
2765 
2766 /*
2767  * Scan a region of virtual memory, filling in page tables as necessary
2768  * and calling a provided function on each leaf page table.
2769  */
2770 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2771 			unsigned long size, pte_fn_t fn, void *data)
2772 {
2773 	return __apply_to_page_range(mm, addr, size, fn, data, true);
2774 }
2775 EXPORT_SYMBOL_GPL(apply_to_page_range);
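
/*
 * Usage sketch (illustrative, not part of memory.c): walking the leaf ptes
 * backing a kernel virtual range (e.g. a fully populated vmalloc buffer) and
 * collecting their pfns. "struct my_pfn_array" and both helpers are
 * hypothetical; for ranges that may contain holes, the _existing_ variant
 * below skips absent page tables instead of allocating them.
 */
struct my_pfn_array {
	unsigned long *pfns;
	unsigned long next;
};

static int my_collect_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct my_pfn_array *a = data;

	a->pfns[a->next++] = pte_pfn(*pte);
	return 0;
}

static int my_collect_vmalloc_pfns(void *vaddr, unsigned long size,
				   struct my_pfn_array *a)
{
	return apply_to_page_range(&init_mm, (unsigned long)vaddr, size,
				   my_collect_pfn, a);
}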
2776 
2777 /*
2778  * Scan a region of virtual memory, calling a provided function on
2779  * each leaf page table where it exists.
2780  *
2781  * Unlike apply_to_page_range, this does _not_ fill in page tables
2782  * where they are absent.
2783  */
2784 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2785 				 unsigned long size, pte_fn_t fn, void *data)
2786 {
2787 	return __apply_to_page_range(mm, addr, size, fn, data, false);
2788 }
2789 EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
2790 
2791 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
2792 
2793 /*
2794  * speculative_page_walk_begin() ... speculative_page_walk_end() protects
2795  * against races with page table reclamation.
2796  *
2797  * This is similar to what fast GUP does, but fast GUP also needs to
2798  * protect against races with THP page splitting, so it always needs
2799  * to disable interrupts.
2800  * Speculative page faults need to protect against page table reclamation;
2801  * even in the MMU_GATHER_RCU_TABLE_FREE case the page table removal slow-path
2802  * is not RCU-safe (see the comment inside tlb_remove_table_sync_one), therefore
2803  * we still have to disable IRQs.
2804  */
2805 #define speculative_page_walk_begin() local_irq_disable()
2806 #define speculative_page_walk_end()   local_irq_enable()
2807 
2808 bool __pte_map_lock(struct vm_fault *vmf)
2809 {
2810 	pmd_t pmdval;
2811 	pte_t *pte = vmf->pte;
2812 	spinlock_t *ptl;
2813 
2814 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
2815 		vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2816 		if (!pte)
2817 			vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
2818 		spin_lock(vmf->ptl);
2819 		return true;
2820 	}
2821 
2822 	speculative_page_walk_begin();
2823 	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
2824 				 SPF_ABORT_PTE_MAP_LOCK_SEQ1))
2825 		goto fail;
2826 	/*
2827 	 * The mmap sequence count check guarantees that the page
2828 	 * tables are still valid at that point, and
2829 	 * speculative_page_walk_begin() ensures that they stay around.
2830 	 */
2831 	/*
2832 	 * We check if the pmd value is still the same to ensure that there
2833 	 * is not a huge collapse operation in progress behind our back.
2834 	 * It also ensures that the pmd was not cleared by pmd_clear in
2835 	 * free_pte_range and that the ptl is still valid.
2836 	 */
2837 	pmdval = READ_ONCE(*vmf->pmd);
2838 	if (!pmd_same(pmdval, vmf->orig_pmd)) {
2839 		count_vm_spf_event(SPF_ABORT_PTE_MAP_LOCK_PMD);
2840 		goto fail;
2841 	}
2842 	ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
2843 	if (!pte)
2844 		pte = pte_offset_map(&pmdval, vmf->address);
2845 	/*
2846 	 * Try locking the page table.
2847 	 *
2848 	 * Note that we might race against zap_pte_range() which
2849 	 * invalidates TLBs while holding the page table lock.
2850 	 * We are still under the speculative_page_walk_begin() section,
2851 	 * and zap_pte_range() could thus deadlock with us if we tried
2852 	 * using spin_lock() here.
2853 	 *
2854 	 * We also don't want to retry until spin_trylock() succeeds,
2855 	 * because of the starvation potential against a stream of lockers.
2856 	 */
2857 	if (unlikely(!spin_trylock(ptl))) {
2858 		count_vm_spf_event(SPF_ABORT_PTE_MAP_LOCK_PTL);
2859 		goto fail;
2860 	}
2861 	/*
2862 	 * The check below will fail if __pte_map_lock passed its ptl barrier
2863 	 * before we took the ptl lock.
2864 	 */
2865 	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
2866 				 SPF_ABORT_PTE_MAP_LOCK_SEQ2))
2867 		goto unlock_fail;
2868 	speculative_page_walk_end();
2869 	vmf->pte = pte;
2870 	vmf->ptl = ptl;
2871 	return true;
2872 
2873 unlock_fail:
2874 	spin_unlock(ptl);
2875 fail:
2876 	if (pte)
2877 		pte_unmap(pte);
2878 	speculative_page_walk_end();
2879 	return false;
2880 }
2881 
2882 #endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */
2883 
2884 /*
2885  * handle_pte_fault chooses page fault handler according to an entry which was
2886  * read non-atomically.  Before making any commitment, on those architectures
2887  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2888  * parts, do_swap_page must check under lock before unmapping the pte and
2889  * proceeding (but do_wp_page is only called after already making such a check;
2890  * and do_anonymous_page can safely check later on).
2891  */
2892 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2893 				pte_t *page_table, pte_t orig_pte)
2894 {
2895 	int same = 1;
2896 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2897 	if (sizeof(pte_t) > sizeof(unsigned long)) {
2898 		spinlock_t *ptl = pte_lockptr(mm, pmd);
2899 		spin_lock(ptl);
2900 		same = pte_same(*page_table, orig_pte);
2901 		spin_unlock(ptl);
2902 	}
2903 #endif
2904 	pte_unmap(page_table);
2905 	return same;
2906 }
2907 
2908 /*
2909  * Return:
2910  *	0:		copy succeeded
2911  *	-EHWPOISON:	copy failed due to hwpoison in source page
2912  *	-EAGAIN:	copy failed (some other reason)
2913  */
2914 static inline int cow_user_page(struct page *dst, struct page *src,
2915 				      struct vm_fault *vmf)
2916 {
2917 	int ret;
2918 	void *kaddr;
2919 	void __user *uaddr;
2920 	bool locked = false;
2921 	struct vm_area_struct *vma = vmf->vma;
2922 	struct mm_struct *mm = vma->vm_mm;
2923 	unsigned long addr = vmf->address;
2924 
2925 	if (likely(src)) {
2926 		if (copy_mc_user_highpage(dst, src, addr, vma)) {
2927 			memory_failure_queue(page_to_pfn(src), 0);
2928 			return -EHWPOISON;
2929 		}
2930 		return 0;
2931 	}
2932 
2933 	/*
2934 	 * If the source page was a PFN mapping, we don't have
2935 	 * a "struct page" for it. We do a best-effort copy by
2936 	 * just copying from the original user address. If that
2937 	 * fails, we just zero-fill it. Live with it.
2938 	 */
2939 	kaddr = kmap_atomic(dst);
2940 	uaddr = (void __user *)(addr & PAGE_MASK);
2941 
2942 	/*
2943 	 * On architectures with software "accessed" bits, we would
2944 	 * take a double page fault, so mark it accessed here.
2945 	 */
2946 	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
2947 		pte_t entry;
2948 
2949 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2950 		locked = true;
2951 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2952 			/*
2953 			 * Another thread has already handled the fault;
2954 			 * just update the local TLB.
2955 			 */
2956 			update_mmu_tlb(vma, addr, vmf->pte);
2957 			ret = -EAGAIN;
2958 			goto pte_unlock;
2959 		}
2960 
2961 		entry = pte_mkyoung(vmf->orig_pte);
2962 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2963 			update_mmu_cache(vma, addr, vmf->pte);
2964 	}
2965 
2966 	/*
2967 	 * This really shouldn't fail, because the page is there
2968 	 * in the page tables. But it might just be unreadable,
2969 	 * in which case we just give up and fill the result with
2970 	 * zeroes.
2971 	 */
2972 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2973 		if (locked)
2974 			goto warn;
2975 
2976 		/* Re-validate under PTL if the page is still mapped */
2977 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2978 		locked = true;
2979 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2980 			/* The PTE changed under us, update local tlb */
2981 			update_mmu_tlb(vma, addr, vmf->pte);
2982 			ret = -EAGAIN;
2983 			goto pte_unlock;
2984 		}
2985 
2986 		/*
2987 		 * The same page may have been mapped back since the last copy
2988 		 * attempt. Try to copy again under the PTL.
2989 		 */
2990 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2991 			/*
2992 			 * Warn in case there is some obscure
2993 			 * use-case.
2994 			 */
2995 warn:
2996 			WARN_ON_ONCE(1);
2997 			clear_page(kaddr);
2998 		}
2999 	}
3000 
3001 	ret = 0;
3002 
3003 pte_unlock:
3004 	if (locked)
3005 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3006 	kunmap_atomic(kaddr);
3007 	flush_dcache_page(dst);
3008 
3009 	return ret;
3010 }
3011 
3012 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3013 {
3014 	struct file *vm_file = vma->vm_file;
3015 
3016 	if (vm_file)
3017 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3018 
3019 	/*
3020 	 * Special mappings (e.g. VDSO) do not have any file so fake
3021 	 * a default GFP_KERNEL for them.
3022 	 */
3023 	return GFP_KERNEL;
3024 }
3025 
3026 /*
3027  * Notify the address space that the page is about to become writable so that
3028  * it can prohibit this or wait for the page to get into an appropriate state.
3029  *
3030  * We do this without the lock held, so that it can sleep if it needs to.
3031  */
3032 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
3033 {
3034 	vm_fault_t ret;
3035 	struct page *page = vmf->page;
3036 	unsigned int old_flags = vmf->flags;
3037 
3038 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3039 
3040 	if (vmf->vma->vm_file &&
3041 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3042 		return VM_FAULT_SIGBUS;
3043 
3044 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3045 	/* Restore original flags so that caller is not surprised */
3046 	vmf->flags = old_flags;
3047 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3048 		return ret;
3049 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3050 		lock_page(page);
3051 		if (!page->mapping) {
3052 			unlock_page(page);
3053 			return 0; /* retry */
3054 		}
3055 		ret |= VM_FAULT_LOCKED;
3056 	} else
3057 		VM_BUG_ON_PAGE(!PageLocked(page), page);
3058 	return ret;
3059 }
3060 
3061 /*
3062  * Handle dirtying of a page in shared file mapping on a write fault.
3063  *
3064  * The function expects the page to be locked and unlocks it.
3065  */
3066 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3067 {
3068 	struct vm_area_struct *vma = vmf->vma;
3069 	struct address_space *mapping;
3070 	struct page *page = vmf->page;
3071 	bool dirtied;
3072 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3073 
3074 	dirtied = set_page_dirty(page);
3075 	VM_BUG_ON_PAGE(PageAnon(page), page);
3076 	/*
3077 	 * Take a local copy of the address_space - page.mapping may be zeroed
3078 	 * by truncate after unlock_page().   The address_space itself remains
3079 	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
3080 	 * release semantics to prevent the compiler from undoing this copying.
3081 	 */
3082 	mapping = page_rmapping(page);
3083 	unlock_page(page);
3084 
3085 	if (!page_mkwrite)
3086 		file_update_time(vma->vm_file);
3087 
3088 	/*
3089 	 * Throttle page dirtying rate down to writeback speed.
3090 	 *
3091 	 * mapping may be NULL here because some device drivers do not
3092 	 * set page.mapping but still dirty their pages
3093 	 *
3094 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3095 	 * is pinning the mapping, as per above.
3096 	 */
3097 	if ((dirtied || page_mkwrite) && mapping) {
3098 		struct file *fpin;
3099 
3100 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3101 		balance_dirty_pages_ratelimited(mapping);
3102 		if (fpin) {
3103 			fput(fpin);
3104 			return VM_FAULT_RETRY;
3105 		}
3106 	}
3107 
3108 	return 0;
3109 }
3110 
3111 /*
3112  * Handle write page faults for pages that can be reused in the current vma
3113  *
3114  * This can happen either because the mapping was created with the VM_SHARED
3115  * flag, or because we hold the last remaining reference to the page. In either
3116  * case, all we need to do here is to mark the page as writable and update
3117  * any related book-keeping.
3118  */
3119 static inline void wp_page_reuse(struct vm_fault *vmf)
3120 	__releases(vmf->ptl)
3121 {
3122 	struct vm_area_struct *vma = vmf->vma;
3123 	struct page *page = vmf->page;
3124 	pte_t entry;
3125 	/*
3126 	 * Clear the page's cpupid information as the existing
3127 	 * information potentially belongs to a now completely
3128 	 * unrelated process.
3129 	 */
3130 	if (page)
3131 		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
3132 
3133 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3134 	entry = pte_mkyoung(vmf->orig_pte);
3135 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3136 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3137 		update_mmu_cache(vma, vmf->address, vmf->pte);
3138 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3139 	count_vm_event(PGREUSE);
3140 }
3141 
3142 /*
3143  * Handle the case of a page which we actually need to copy to a new page.
3144  *
3145  * Called with mmap_lock locked and the old page referenced, but
3146  * without the ptl held.
3147  *
3148  * High level logic flow:
3149  *
3150  * - Allocate a page, copy the content of the old page to the new one.
3151  * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
3152  * - Take the PTL. If the pte changed, bail out and release the allocated page
3153  * - If the pte is still the way we remember it, update the page table and all
3154  *   relevant references. This includes dropping the reference the page-table
3155  *   held to the old page, as well as updating the rmap.
3156  * - In any case, unlock the PTL and drop the reference we took to the old page.
3157  */
3158 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3159 {
3160 	struct vm_area_struct *vma = vmf->vma;
3161 	struct mm_struct *mm = vma->vm_mm;
3162 	struct page *old_page = vmf->page;
3163 	struct page *new_page = NULL;
3164 	pte_t entry;
3165 	int page_copied = 0;
3166 	struct mmu_notifier_range range;
3167 	vm_fault_t ret = VM_FAULT_OOM;
3168 
3169 	if (unlikely(!vma->anon_vma)) {
3170 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3171 			count_vm_spf_event(SPF_ABORT_ANON_VMA);
3172 			ret = VM_FAULT_RETRY;
3173 			goto out;
3174 		}
3175 		if (__anon_vma_prepare(vma))
3176 			goto out;
3177 	}
3178 
3179 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3180 		new_page = alloc_zeroed_user_highpage_movable(vma,
3181 							      vmf->address);
3182 		if (!new_page)
3183 			goto out;
3184 	} else {
3185 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3186 				vmf->address);
3187 		if (!new_page)
3188 			goto out;
3189 
3190 		ret = cow_user_page(new_page, old_page, vmf);
3191 		if (ret) {
3192 			/*
3193 			 * COW failed; if the fault was resolved by another
3194 			 * thread, that's fine. If not, userspace will re-fault
3195 			 * on the same address and we will handle the fault
3196 			 * from the second attempt.
3197 			 * The -EHWPOISON case will not be retried.
3198 			 */
3199 			put_page(new_page);
3200 			if (old_page)
3201 				put_page(old_page);
3202 
3203 			return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3204 		}
3205 	}
3206 
3207 	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
3208 		goto out_free_new;
3209 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
3210 
3211 	__SetPageUptodate(new_page);
3212 
3213 	if ((vmf->flags & FAULT_FLAG_SPECULATIVE) &&
3214 	    !mmu_notifier_trylock(mm)) {
3215 		ret = VM_FAULT_RETRY;
3216 		goto out_free_new;
3217 	}
3218 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
3219 				vmf->address & PAGE_MASK,
3220 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3221 	mmu_notifier_invalidate_range_start(&range);
3222 
3223 	/*
3224 	 * Re-check the pte - we dropped the lock
3225 	 */
3226 	if (!pte_map_lock(vmf)) {
3227 		ret = VM_FAULT_RETRY;
3228 		/* put_page() will uncharge the page */
3229 		goto out_notify;
3230 	}
3231 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
3232 		if (old_page) {
3233 			if (!PageAnon(old_page)) {
3234 				dec_mm_counter_fast(mm,
3235 						mm_counter_file(old_page));
3236 				inc_mm_counter_fast(mm, MM_ANONPAGES);
3237 			}
3238 		} else {
3239 			inc_mm_counter_fast(mm, MM_ANONPAGES);
3240 		}
3241 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3242 		entry = mk_pte(new_page, vma->vm_page_prot);
3243 		entry = pte_sw_mkyoung(entry);
3244 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3245 
3246 		/*
3247 		 * Clear the pte entry and flush it first, before updating the
3248 		 * pte with the new entry, to keep TLBs on different CPUs in
3249 		 * sync. This code used to set the new PTE then flush TLBs, but
3250 		 * that left a window where the new PTE could be loaded into
3251 		 * some TLBs while the old PTE remains in others.
3252 		 */
3253 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
3254 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
3255 		lru_cache_add_inactive_or_unevictable(new_page, vma);
3256 		/*
3257 		 * We call the notify macro here because, when using secondary
3258 		 * mmu page tables (such as kvm shadow page tables), we want the
3259 		 * new page to be mapped directly into the secondary page table.
3260 		 */
3261 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3262 		update_mmu_cache(vma, vmf->address, vmf->pte);
3263 		if (old_page) {
3264 			/*
3265 			 * Only after switching the pte to the new page may
3266 			 * we remove the mapcount here. Otherwise another
3267 			 * process may come and find the rmap count decremented
3268 			 * before the pte is switched to the new page, and
3269 			 * "reuse" the old page writing into it while our pte
3270 			 * here still points into it and can be read by other
3271 			 * threads.
3272 			 *
3273 			 * The critical issue is to order this
3274 			 * page_remove_rmap with the ptep_clear_flush above.
3275 			 * Those stores are ordered by (if nothing else,)
3276 			 * the barrier present in the atomic_add_negative
3277 			 * in page_remove_rmap.
3278 			 *
3279 			 * Then the TLB flush in ptep_clear_flush ensures that
3280 			 * no process can access the old page before the
3281 			 * decremented mapcount is visible. And the old page
3282 			 * cannot be reused until after the decremented
3283 			 * mapcount is visible. So transitively, TLBs to
3284 			 * old page will be flushed before it can be reused.
3285 			 */
3286 			page_remove_rmap(old_page, false);
3287 		}
3288 
3289 		/* Free the old page.. */
3290 		new_page = old_page;
3291 		page_copied = 1;
3292 	} else {
3293 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3294 	}
3295 
3296 	if (new_page)
3297 		put_page(new_page);
3298 
3299 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3300 	/*
3301 	 * No need to double call mmu_notifier->invalidate_range() callback as
3302 	 * the above ptep_clear_flush_notify() did already call it.
3303 	 */
3304 	mmu_notifier_invalidate_range_only_end(&range);
3305 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3306 		mmu_notifier_unlock(mm);
3307 	if (old_page) {
3308 		/*
3309 		 * Don't let another task, with possibly unlocked vma,
3310 		 * keep the mlocked page.
3311 		 */
3312 		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
3313 			lock_page(old_page);	/* LRU manipulation */
3314 			if (PageMlocked(old_page))
3315 				munlock_vma_page(old_page);
3316 			unlock_page(old_page);
3317 		}
3318 		if (page_copied)
3319 			free_swap_cache(old_page);
3320 		put_page(old_page);
3321 	}
3322 	return page_copied ? VM_FAULT_WRITE : 0;
3323 out_notify:
3324 	mmu_notifier_invalidate_range_only_end(&range);
3325 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3326 		mmu_notifier_unlock(mm);
3327 out_free_new:
3328 	put_page(new_page);
3329 out:
3330 	if (old_page)
3331 		put_page(old_page);
3332 	return ret;
3333 }
3334 
3335 /**
3336  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3337  *			  writeable once the page is prepared
3338  *
3339  * @vmf: structure describing the fault
3340  *
3341  * This function handles all that is needed to finish a write page fault in a
3342  * shared mapping due to PTE being read-only once the mapped page is prepared.
3343  * It handles locking of PTE and modifying it.
3344  *
3345  * The function expects the page to be locked or otherwise protected against
3346  * concurrent faults / writeback (such as DAX radix tree locks).
3347  *
3348  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3349  * we acquired PTE lock.
3350  */
3351 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3352 {
3353 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3354 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3355 				       &vmf->ptl);
3356 	/*
3357 	 * We might have raced with another page fault while we released the
3358 	 * pte_offset_map_lock.
3359 	 */
3360 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3361 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3362 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3363 		return VM_FAULT_NOPAGE;
3364 	}
3365 	wp_page_reuse(vmf);
3366 	return 0;
3367 }
3368 
3369 /*
3370  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3371  * mapping
3372  */
3373 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3374 {
3375 	struct vm_area_struct *vma = vmf->vma;
3376 
3377 	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
3378 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3379 		vm_fault_t ret;
3380 
3381 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3382 		vmf->flags |= FAULT_FLAG_MKWRITE;
3383 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3384 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3385 			return ret;
3386 		return finish_mkwrite_fault(vmf);
3387 	}
3388 	wp_page_reuse(vmf);
3389 	return VM_FAULT_WRITE;
3390 }
3391 
3392 static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3393 	__releases(vmf->ptl)
3394 {
3395 	struct vm_area_struct *vma = vmf->vma;
3396 	vm_fault_t ret = VM_FAULT_WRITE;
3397 
3398 	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
3399 
3400 	get_page(vmf->page);
3401 
3402 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3403 		vm_fault_t tmp;
3404 
3405 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3406 		tmp = do_page_mkwrite(vmf);
3407 		if (unlikely(!tmp || (tmp &
3408 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3409 			put_page(vmf->page);
3410 			return tmp;
3411 		}
3412 		tmp = finish_mkwrite_fault(vmf);
3413 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3414 			unlock_page(vmf->page);
3415 			put_page(vmf->page);
3416 			return tmp;
3417 		}
3418 	} else {
3419 		wp_page_reuse(vmf);
3420 		lock_page(vmf->page);
3421 	}
3422 	ret |= fault_dirty_shared_page(vmf);
3423 	put_page(vmf->page);
3424 
3425 	return ret;
3426 }
3427 
3428 /*
3429  * This routine handles present pages, when users try to write
3430  * to a shared page. It is done by copying the page to a new address
3431  * and decrementing the shared-page counter for the old page.
3432  *
3433  * Note that this routine assumes that the protection checks have been
3434  * done by the caller (the low-level page fault routine in most cases).
3435  * Thus we can safely just mark it writable once we've done any necessary
3436  * COW.
3437  *
3438  * We also mark the page dirty at this point even though the page will
3439  * change only once the write actually happens. This avoids a few races,
3440  * and potentially makes it more efficient.
3441  *
3442  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3443  * but allow concurrent faults), with pte both mapped and locked.
3444  * We return with mmap_lock still held, but pte unmapped and unlocked.
3445  */
3446 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3447 	__releases(vmf->ptl)
3448 {
3449 	struct vm_area_struct *vma = vmf->vma;
3450 
3451 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3452 		count_vm_spf_event(SPF_ATTEMPT_WP);
3453 
3454 	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3455 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3456 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3457 			count_vm_spf_event(SPF_ABORT_USERFAULTFD);
3458 			return VM_FAULT_RETRY;
3459 		}
3460 		return handle_userfault(vmf, VM_UFFD_WP);
3461 	}
3462 
3463 	/*
3464 	 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3465 	 * is flushed in this case before copying.
3466 	 */
3467 	if (unlikely(userfaultfd_wp(vmf->vma) &&
3468 		     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3469 		flush_tlb_page(vmf->vma, vmf->address);
3470 
3471 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3472 	if (!vmf->page) {
3473 		/*
3474 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3475 		 * VM_PFNMAP VMA.
3476 		 *
3477 		 * We should not cow pages in a shared writeable mapping.
3478 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3479 		 */
3480 		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3481 				     (VM_WRITE|VM_SHARED))
3482 			return wp_pfn_shared(vmf);
3483 
3484 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3485 		vmf->pte = NULL;
3486 		return wp_page_copy(vmf);
3487 	}
3488 
3489 	/*
3490 	 * Take out anonymous pages first; anonymous shared vmas are
3491 	 * not dirty accountable.
3492 	 */
3493 	if (PageAnon(vmf->page)) {
3494 		struct page *page = vmf->page;
3495 
3496 		/* PageKsm() doesn't necessarily raise the page refcount */
3497 		if (PageKsm(page) || page_count(page) != 1)
3498 			goto copy;
3499 		if (!trylock_page(page))
3500 			goto copy;
3501 		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3502 			unlock_page(page);
3503 			goto copy;
3504 		}
3505 		/*
3506 		 * Ok, we've got the only map reference, and the only
3507 		 * page count reference, and the page is locked,
3508 		 * it's dark out, and we're wearing sunglasses. Hit it.
3509 		 */
3510 		unlock_page(page);
3511 		wp_page_reuse(vmf);
3512 		return VM_FAULT_WRITE;
3513 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3514 					(VM_WRITE|VM_SHARED))) {
3515 		return wp_page_shared(vmf);
3516 	}
3517 copy:
3518 	/*
3519 	 * Ok, we need to copy. Oh, well..
3520 	 */
3521 	get_page(vmf->page);
3522 
3523 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3524 	vmf->pte = NULL;
3525 	return wp_page_copy(vmf);
3526 }
3527 
3528 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3529 		unsigned long start_addr, unsigned long end_addr,
3530 		struct zap_details *details)
3531 {
3532 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3533 }
3534 
3535 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3536 					    struct zap_details *details)
3537 {
3538 	struct vm_area_struct *vma;
3539 	pgoff_t vba, vea, zba, zea;
3540 
3541 	vma_interval_tree_foreach(vma, root,
3542 			details->first_index, details->last_index) {
3543 
3544 		vba = vma->vm_pgoff;
3545 		vea = vba + vma_pages(vma) - 1;
3546 		zba = details->first_index;
3547 		if (zba < vba)
3548 			zba = vba;
3549 		zea = details->last_index;
3550 		if (zea > vea)
3551 			zea = vea;
3552 
3553 		unmap_mapping_range_vma(vma,
3554 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3555 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3556 				details);
3557 	}
3558 }
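
/*
 * Worked example of the clamping above (editor's illustration, not part of
 * the original source): if a VMA maps file pages 100-199 (vba = 100,
 * vea = 199) and the zap request covers pages 150-300, then zba is clamped
 * to 150 and zea to 199, so unmap_mapping_range_vma() is asked to zap the
 * virtual range [vm_start + 50 pages, vm_start + 100 pages) - only the part
 * of the request that this VMA actually maps.
 */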
3559 
3560 /**
3561  * unmap_mapping_page() - Unmap single page from processes.
3562  * @page: The locked page to be unmapped.
3563  *
3564  * Unmap this page from any userspace process which still has it mmaped.
3565  * Typically, for efficiency, the range of nearby pages has already been
3566  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3567  * truncation or invalidation holds the lock on a page, it may find that
3568  * the page has been remapped again: unmap_mapping_page() is then used
3569  * to unmap it for good.
3570  */
3571 void unmap_mapping_page(struct page *page)
3572 {
3573 	struct address_space *mapping = page->mapping;
3574 	struct zap_details details = { };
3575 
3576 	VM_BUG_ON(!PageLocked(page));
3577 	VM_BUG_ON(PageTail(page));
3578 
3579 	details.check_mapping = mapping;
3580 	details.first_index = page->index;
3581 	details.last_index = page->index + thp_nr_pages(page) - 1;
3582 	details.single_page = page;
3583 
3584 	i_mmap_lock_write(mapping);
3585 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3586 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3587 	i_mmap_unlock_write(mapping);
3588 }
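
/*
 * Caller pattern sketch (editor's illustration, modelled on the truncation
 * case described above; details of real callers may differ):
 *
 *	lock_page(page);
 *	...
 *	if (page_mapped(page))
 *		unmap_mapping_page(page);
 *	... now truncate or invalidate the locked page ...
 */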
3589 
3590 /**
3591  * unmap_mapping_pages() - Unmap pages from processes.
3592  * @mapping: The address space containing pages to be unmapped.
3593  * @start: Index of first page to be unmapped.
3594  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3595  * @even_cows: Whether to unmap even private COWed pages.
3596  *
3597  * Unmap the pages in this address space from any userspace process which
3598  * has them mmaped.  Generally, you want to remove COWed pages as well when
3599  * a file is being truncated, but not when invalidating pages from the page
3600  * cache.
3601  */
3602 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3603 		pgoff_t nr, bool even_cows)
3604 {
3605 	struct zap_details details = { };
3606 
3607 	details.check_mapping = even_cows ? NULL : mapping;
3608 	details.first_index = start;
3609 	details.last_index = start + nr - 1;
3610 	if (details.last_index < details.first_index)
3611 		details.last_index = ULONG_MAX;
3612 
3613 	i_mmap_lock_write(mapping);
3614 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3615 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3616 	i_mmap_unlock_write(mapping);
3617 }
3618 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
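
/*
 * Usage sketch (editor's illustration with a hypothetical caller): a
 * filesystem invalidating pages [start, start + nr) of its page cache,
 * while leaving private COWed copies in user address spaces alone, could
 * use:
 *
 *	unmap_mapping_pages(inode->i_mapping, start, nr, false);
 *
 * Truncation-style callers pass even_cows == true instead, since private
 * copies of truncated data must not survive either.
 */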
3619 
3620 /**
3621  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3622  * address_space corresponding to the specified byte range in the underlying
3623  * file.
3624  *
3625  * @mapping: the address space containing mmaps to be unmapped.
3626  * @holebegin: byte in first page to unmap, relative to the start of
3627  * the underlying file.  This will be rounded down to a PAGE_SIZE
3628  * boundary.  Note that this is different from truncate_pagecache(), which
3629  * must keep the partial page.  In contrast, we must get rid of
3630  * partial pages.
3631  * @holelen: size of prospective hole in bytes.  This will be rounded
3632  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3633  * end of the file.
3634  * @even_cows: 1 when truncating a file, to unmap even private COWed pages;
3635  * 0 when invalidating pagecache, so that private data is not thrown away.
3636  */
3637 void unmap_mapping_range(struct address_space *mapping,
3638 		loff_t const holebegin, loff_t const holelen, int even_cows)
3639 {
3640 	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
3641 	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
3642 
3643 	/* Check for overflow. */
3644 	if (sizeof(holelen) > sizeof(hlen)) {
3645 		long long holeend =
3646 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3647 		if (holeend & ~(long long)ULONG_MAX)
3648 			hlen = ULONG_MAX - hba + 1;
3649 	}
3650 
3651 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3652 }
3653 EXPORT_SYMBOL(unmap_mapping_range);
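
/*
 * Usage sketch (editor's illustration, modelled on truncate-style callers):
 * when a file shrinks to 'newsize', every mapping of the discarded tail has
 * to go, including private COWed pages:
 *
 *	unmap_mapping_range(inode->i_mapping,
 *			    round_up(newsize, PAGE_SIZE), 0, 1);
 *
 * holelen == 0 means "to the end of the file" and even_cows == 1 discards
 * private copies as well; invalidation-style callers pass even_cows == 0 so
 * that anonymous COWed data is preserved.
 */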
3654 
3655 /*
3656  * Restore a potential device exclusive pte to a working pte entry
3657  */
3658 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3659 {
3660 	struct page *page = vmf->page;
3661 	struct vm_area_struct *vma = vmf->vma;
3662 	struct mmu_notifier_range range;
3663 
3664 	/*
3665 	 * We need a reference to lock the page because we don't hold
3666 	 * the PTL so a racing thread can remove the device-exclusive
3667 	 * entry and unmap it. If the page is free the entry must
3668 	 * have been removed already. If it happens to have already
3669 	 * been re-allocated after being freed all we do is lock and
3670 	 * unlock it.
3671 	 */
3672 	if (!get_page_unless_zero(page))
3673 		return 0;
3674 
3675 	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
3676 		put_page(page);
3677 		return VM_FAULT_RETRY;
3678 	}
3679 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
3680 				vma->vm_mm, vmf->address & PAGE_MASK,
3681 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3682 	mmu_notifier_invalidate_range_start(&range);
3683 
3684 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3685 				&vmf->ptl);
3686 	if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3687 		restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
3688 
3689 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3690 	unlock_page(page);
3691 	put_page(page);
3692 
3693 	mmu_notifier_invalidate_range_end(&range);
3694 	return 0;
3695 }
3696 
3697 /*
3698  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3699  * but allow concurrent faults), and pte mapped but not yet locked.
3700  * We return with pte unmapped and unlocked.
3701  *
3702  * We return with the mmap_lock locked or unlocked in the same cases
3703  * as does filemap_fault().
3704  */
3705 vm_fault_t do_swap_page(struct vm_fault *vmf)
3706 {
3707 	struct vm_area_struct *vma = vmf->vma;
3708 	struct page *page = NULL, *swapcache;
3709 	struct swap_info_struct *si = NULL;
3710 	swp_entry_t entry;
3711 	pte_t pte;
3712 	int locked;
3713 	int exclusive = 0;
3714 	vm_fault_t ret = 0;
3715 	void *shadow = NULL;
3716 
3717 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3718 		bool allow_swap_spf = false;
3719 
3720 		/* ksm_might_need_to_copy() needs a stable VMA, spf can't be used */
3721 #ifndef CONFIG_KSM
3722 		trace_android_vh_do_swap_page_spf(&allow_swap_spf);
3723 #endif
3724 		if (!allow_swap_spf) {
3725 			pte_unmap(vmf->pte);
3726 			count_vm_spf_event(SPF_ABORT_SWAP);
3727 			return VM_FAULT_RETRY;
3728 		}
3729 	}
3730 
3731 	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
3732 		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3733 			ret = VM_FAULT_RETRY;
3734 		goto out;
3735 	}
3736 
3737 	entry = pte_to_swp_entry(vmf->orig_pte);
3738 	if (unlikely(non_swap_entry(entry))) {
3739 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3740 			ret = VM_FAULT_RETRY;
3741 			goto out;
3742 		}
3743 		if (is_migration_entry(entry)) {
3744 			migration_entry_wait(vma->vm_mm, vmf->pmd,
3745 					     vmf->address);
3746 		} else if (is_device_exclusive_entry(entry)) {
3747 			vmf->page = pfn_swap_entry_to_page(entry);
3748 			ret = remove_device_exclusive_entry(vmf);
3749 		} else if (is_device_private_entry(entry)) {
3750 			vmf->page = pfn_swap_entry_to_page(entry);
3751 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3752 		} else if (is_hwpoison_entry(entry)) {
3753 			ret = VM_FAULT_HWPOISON;
3754 		} else {
3755 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3756 			ret = VM_FAULT_SIGBUS;
3757 		}
3758 		goto out;
3759 	}
3760 
3761 	/* Prevent swapoff from happening to us. */
3762 	si = get_swap_device(entry);
3763 	if (unlikely(!si))
3764 		goto out;
3765 
3766 	delayacct_set_flag(current, DELAYACCT_PF_SWAPIN);
3767 	page = lookup_swap_cache(entry, vma, vmf->address);
3768 	swapcache = page;
3769 
3770 	if (!page) {
3771 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3772 		    __swap_count(entry) == 1) {
3773 			/* skip swapcache */
3774 			gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_CMA;
3775 
3776 			trace_android_rvh_set_skip_swapcache_flags(&flags);
3777 			page = alloc_page_vma(flags, vma, vmf->address);
3778 			if (page) {
3779 				__SetPageLocked(page);
3780 				__SetPageSwapBacked(page);
3781 
3782 				if (mem_cgroup_swapin_charge_page(page,
3783 					vma->vm_mm, GFP_KERNEL, entry)) {
3784 					ret = VM_FAULT_OOM;
3785 					goto out_page;
3786 				}
3787 				mem_cgroup_swapin_uncharge_swap(entry);
3788 
3789 				shadow = get_shadow_from_swap_cache(entry);
3790 				if (shadow)
3791 					workingset_refault(page, shadow);
3792 
3793 				lru_cache_add(page);
3794 
3795 				/* To provide entry to swap_readpage() */
3796 				set_page_private(page, entry.val);
3797 				swap_readpage(page, true);
3798 				set_page_private(page, 0);
3799 			}
3800 		} else if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3801 			/*
3802 			 * Don't try readahead during a speculative page fault
3803 			 * as the VMA's boundaries may change behind our back.
3804 			 * If the page is not in the swap cache and synchronous
3805 			 * read is disabled, fall back to the regular page fault
3806 			 * mechanism.
3807 			 */
3808 			delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3809 			ret = VM_FAULT_RETRY;
3810 			goto out;
3811 		} else {
3812 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE | __GFP_CMA,
3813 						vmf);
3814 			swapcache = page;
3815 		}
3816 
3817 		if (!page) {
3818 			/*
3819 			 * Back out if somebody else faulted in this pte
3820 			 * while we released the pte lock.
3821 			 */
3822 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3823 					vmf->address, &vmf->ptl);
3824 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3825 				ret = VM_FAULT_OOM;
3826 			delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3827 			goto unlock;
3828 		}
3829 
3830 		/* Had to read the page from swap area: Major fault */
3831 		ret = VM_FAULT_MAJOR;
3832 		count_vm_event(PGMAJFAULT);
3833 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3834 	} else if (PageHWPoison(page)) {
3835 		/*
3836 		 * hwpoisoned dirty swapcache pages are kept for killing
3837 		 * owner processes (which may be unknown at hwpoison time)
3838 		 */
3839 		ret = VM_FAULT_HWPOISON;
3840 		delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3841 		goto out_release;
3842 	}
3843 
3844 	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3845 
3846 	delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3847 	if (!locked) {
3848 		ret |= VM_FAULT_RETRY;
3849 		goto out_release;
3850 	}
3851 
3852 	/*
3853 	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3854 	 * release the swapcache from under us.  The page pin, and pte_same
3855 	 * test below, are not enough to exclude that.  Even if it is still
3856 	 * swapcache, we need to check that the page's swap has not changed.
3857 	 */
3858 	if (unlikely((!PageSwapCache(page) ||
3859 			page_private(page) != entry.val)) && swapcache)
3860 		goto out_page;
3861 
3862 	page = ksm_might_need_to_copy(page, vma, vmf->address);
3863 	if (unlikely(!page)) {
3864 		ret = VM_FAULT_OOM;
3865 		page = swapcache;
3866 		goto out_page;
3867 	}
3868 
3869 	cgroup_throttle_swaprate(page, GFP_KERNEL);
3870 
3871 	/*
3872 	 * Back out if somebody else already faulted in this pte.
3873 	 */
3874 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3875 			&vmf->ptl);
3876 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3877 		goto out_nomap;
3878 
3879 	if (unlikely(!PageUptodate(page))) {
3880 		ret = VM_FAULT_SIGBUS;
3881 		goto out_nomap;
3882 	}
3883 
3884 	/*
3885 	 * The page isn't present yet, go ahead with the fault.
3886 	 *
3887 	 * Be careful about the sequence of operations here.
3888 	 * To get its accounting right, reuse_swap_page() must be called
3889 	 * while the page is counted on swap but not yet in mapcount i.e.
3890 	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3891 	 * must be called after the swap_free(), or it will never succeed.
3892 	 */
3893 
3894 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3895 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3896 	pte = mk_pte(page, vma->vm_page_prot);
3897 	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3898 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3899 		vmf->flags &= ~FAULT_FLAG_WRITE;
3900 		ret |= VM_FAULT_WRITE;
3901 		exclusive = RMAP_EXCLUSIVE;
3902 	}
3903 	flush_icache_page(vma, page);
3904 	if (pte_swp_soft_dirty(vmf->orig_pte))
3905 		pte = pte_mksoft_dirty(pte);
3906 	if (pte_swp_uffd_wp(vmf->orig_pte)) {
3907 		pte = pte_mkuffd_wp(pte);
3908 		pte = pte_wrprotect(pte);
3909 	}
3910 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3911 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3912 	vmf->orig_pte = pte;
3913 
3914 	/* ksm created a completely new copy */
3915 	if (unlikely(page != swapcache && swapcache)) {
3916 		page_add_new_anon_rmap(page, vma, vmf->address, false);
3917 		lru_cache_add_inactive_or_unevictable(page, vma);
3918 	} else {
3919 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3920 	}
3921 
3922 	swap_free(entry);
3923 	if (mem_cgroup_swap_full(page) ||
3924 	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3925 		try_to_free_swap(page);
3926 	unlock_page(page);
3927 	if (page != swapcache && swapcache) {
3928 		/*
3929 		 * Hold the lock to prevent the swap entry from being
3930 		 * reused until we take the PT lock for the pte_same() check
3931 		 * (to avoid false positives from pte_same). For further
3932 		 * safety, release the lock after the swap_free
3933 		 * so that the swap count won't change under a
3934 		 * parallel locked swapcache.
3935 		 */
3936 		unlock_page(swapcache);
3937 		put_page(swapcache);
3938 	}
3939 
3940 	if (vmf->flags & FAULT_FLAG_WRITE) {
3941 		ret |= do_wp_page(vmf);
3942 		if (ret & VM_FAULT_ERROR)
3943 			ret &= VM_FAULT_ERROR;
3944 		goto out;
3945 	}
3946 
3947 	/* No need to invalidate - it was non-present before */
3948 	update_mmu_cache(vma, vmf->address, vmf->pte);
3949 unlock:
3950 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3951 out:
3952 	if (si)
3953 		put_swap_device(si);
3954 	return ret;
3955 out_nomap:
3956 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3957 out_page:
3958 	unlock_page(page);
3959 out_release:
3960 	put_page(page);
3961 	if (page != swapcache && swapcache) {
3962 		unlock_page(swapcache);
3963 		put_page(swapcache);
3964 	}
3965 	if (si)
3966 		put_swap_device(si);
3967 	return ret;
3968 }
3969 
3970 /*
3971  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3972  * but allow concurrent faults), and pte mapped but not yet locked.
3973  * We return with mmap_lock still held, but pte unmapped and unlocked.
3974  */
3975 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3976 {
3977 	struct vm_area_struct *vma = vmf->vma;
3978 	struct page *page = NULL;
3979 	vm_fault_t ret = 0;
3980 	pte_t entry;
3981 
3982 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3983 		count_vm_spf_event(SPF_ATTEMPT_ANON);
3984 
3985 	/* File mapping without ->vm_ops ? */
3986 	if (vma->vm_flags & VM_SHARED)
3987 		return VM_FAULT_SIGBUS;
3988 
3989 	/* Do not check unstable pmd, if it's changed will retry later */
3990 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3991 		goto skip_pmd_checks;
3992 
3993 	/*
3994 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
3995 	 * pte_offset_map() on pmds where a huge pmd might be created
3996 	 * from a different thread.
3997 	 *
3998 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
3999 	 * parallel threads are excluded by other means.
4000 	 *
4001 	 * Here we only have mmap_read_lock(mm).
4002 	 */
4003 	if (pte_alloc(vma->vm_mm, vmf->pmd))
4004 		return VM_FAULT_OOM;
4005 
4006 	/* See comment in __handle_mm_fault() */
4007 	if (unlikely(pmd_trans_unstable(vmf->pmd)))
4008 		return 0;
4009 
4010 skip_pmd_checks:
4011 	/* Use the zero-page for reads */
4012 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4013 			!mm_forbids_zeropage(vma->vm_mm)) {
4014 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4015 						vma->vm_page_prot));
4016 	} else {
4017 		/* Allocate our own private page. */
4018 		if (unlikely(!vma->anon_vma)) {
4019 			if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4020 				count_vm_spf_event(SPF_ABORT_ANON_VMA);
4021 				return VM_FAULT_RETRY;
4022 			}
4023 			if (__anon_vma_prepare(vma))
4024 				goto oom;
4025 		}
4026 		page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
4027 		if (!page)
4028 			goto oom;
4029 
4030 		if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
4031 			goto oom_free_page;
4032 		cgroup_throttle_swaprate(page, GFP_KERNEL);
4033 
4034 		/*
4035 		 * The memory barrier inside __SetPageUptodate makes sure that
4036 		 * preceding stores to the page contents become visible before
4037 		 * the set_pte_at() write.
4038 		 */
4039 		__SetPageUptodate(page);
4040 
4041 		entry = mk_pte(page, vma->vm_page_prot);
4042 		entry = pte_sw_mkyoung(entry);
4043 		if (vma->vm_flags & VM_WRITE)
4044 			entry = pte_mkwrite(pte_mkdirty(entry));
4045 	}
4046 
4047 	if (!pte_map_lock(vmf)) {
4048 		ret = VM_FAULT_RETRY;
4049 		goto release;
4050 	}
4051 	if (!pte_none(*vmf->pte)) {
4052 		update_mmu_tlb(vma, vmf->address, vmf->pte);
4053 		goto unlock;
4054 	}
4055 
4056 	ret = check_stable_address_space(vma->vm_mm);
4057 	if (ret)
4058 		goto unlock;
4059 
4060 	/* Deliver the page fault to userland, check inside PT lock */
4061 	if (userfaultfd_missing(vma)) {
4062 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4063 		if (page)
4064 			put_page(page);
4065 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4066 			count_vm_spf_event(SPF_ABORT_USERFAULTFD);
4067 			return VM_FAULT_RETRY;
4068 		}
4069 		return handle_userfault(vmf, VM_UFFD_MISSING);
4070 	}
4071 
4072 	if (page) {
4073 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4074 		page_add_new_anon_rmap(page, vma, vmf->address, false);
4075 		lru_cache_add_inactive_or_unevictable(page, vma);
4076 	}
4077 
4078 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4079 
4080 	/* No need to invalidate - it was non-present before */
4081 	update_mmu_cache(vma, vmf->address, vmf->pte);
4082 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4083 	return 0;
4084 unlock:
4085 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4086 release:
4087 	if (page)
4088 		put_page(page);
4089 	return ret;
4090 oom_free_page:
4091 	put_page(page);
4092 oom:
4093 	return VM_FAULT_OOM;
4094 }
4095 
4096 /*
4097  * The mmap_lock must have been held on entry, and may have been
4098  * released depending on flags and vma->vm_ops->fault() return value.
4099  * See filemap_fault() and __lock_page_or_retry().
4100  */
4101 static vm_fault_t __do_fault(struct vm_fault *vmf)
4102 {
4103 	struct vm_area_struct *vma = vmf->vma;
4104 	vm_fault_t ret;
4105 
4106 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
4107 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4108 		rcu_read_lock();
4109 		if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
4110 					 SPF_ABORT_FAULT)) {
4111 			ret = VM_FAULT_RETRY;
4112 		} else {
4113 			/*
4114 			 * The mmap sequence count check guarantees that the
4115 			 * vma we fetched at the start of the fault was still
4116 			 * current at that point in time. The rcu read lock
4117 			 * ensures vmf->vma->vm_file stays valid.
4118 			 */
4119 			ret = vma->vm_ops->fault(vmf);
4120 		}
4121 		rcu_read_unlock();
4122 	} else
4123 #endif
4124 	{
4125 		/*
4126 		 * Preallocate pte before we take page_lock because
4127 		 * this might lead to deadlocks for memcg reclaim
4128 		 * which waits for pages under writeback:
4129 		 *				lock_page(A)
4130 		 *				SetPageWriteback(A)
4131 		 *				unlock_page(A)
4132 		 * lock_page(B)
4133 		 *				lock_page(B)
4134 		 * pte_alloc_one
4135 		 *   shrink_page_list
4136 		 *     wait_on_page_writeback(A)
4137 		 *				SetPageWriteback(B)
4138 		 *				unlock_page(B)
4139 		 *				# flush A, B to clear writeback
4140 		 */
4141 		if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4142 			vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4143 			if (!vmf->prealloc_pte)
4144 				return VM_FAULT_OOM;
4145 			smp_wmb(); /* See comment in __pte_alloc() */
4146 		}
4147 
4148 		ret = vma->vm_ops->fault(vmf);
4149 	}
4150 
4151 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4152 			    VM_FAULT_DONE_COW)))
4153 		return ret;
4154 
4155 	if (unlikely(PageHWPoison(vmf->page))) {
4156 		struct page *page = vmf->page;
4157 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
4158 		if (ret & VM_FAULT_LOCKED) {
4159 			if (page_mapped(page))
4160 				unmap_mapping_pages(page_mapping(page),
4161 						    page->index, 1, false);
4162 			/* Retry if a clean page was removed from the cache. */
4163 			if (invalidate_inode_page(page))
4164 				poisonret = VM_FAULT_NOPAGE;
4165 			unlock_page(page);
4166 		}
4167 		put_page(page);
4168 		vmf->page = NULL;
4169 		return poisonret;
4170 	}
4171 
4172 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
4173 		lock_page(vmf->page);
4174 	else
4175 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
4176 
4177 	return ret;
4178 }
4179 
4180 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4181 static void deposit_prealloc_pte(struct vm_fault *vmf)
4182 {
4183 	struct vm_area_struct *vma = vmf->vma;
4184 
4185 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4186 	/*
4187 	 * We are going to consume the prealloc table,
4188 	 * count that as nr_ptes.
4189 	 */
4190 	mm_inc_nr_ptes(vma->vm_mm);
4191 	vmf->prealloc_pte = NULL;
4192 }
4193 
4194 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4195 {
4196 	struct vm_area_struct *vma = vmf->vma;
4197 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4198 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4199 	pmd_t entry;
4200 	int i;
4201 	vm_fault_t ret = VM_FAULT_FALLBACK;
4202 
4203 	if (!transhuge_vma_suitable(vma, haddr))
4204 		return ret;
4205 
4206 	page = compound_head(page);
4207 	if (compound_order(page) != HPAGE_PMD_ORDER)
4208 		return ret;
4209 
4210 	/*
4211 	 * Just back off if any subpage of a THP is corrupted; otherwise
4212 	 * the corrupted page may be mapped by the PMD silently and escape the
4213 	 * check.  This kind of THP can only be PTE mapped.  Access to
4214 	 * the corrupted subpage should trigger SIGBUS as expected.
4215 	 */
4216 	if (unlikely(PageHasHWPoisoned(page)))
4217 		return ret;
4218 
4219 	/*
4220 	 * Archs like ppc64 need additional space to store information
4221 	 * related to pte entry. Use the preallocated table for that.
4222 	 */
4223 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4224 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4225 		if (!vmf->prealloc_pte)
4226 			return VM_FAULT_OOM;
4227 		smp_wmb(); /* See comment in __pte_alloc() */
4228 	}
4229 
4230 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4231 	if (unlikely(!pmd_none(*vmf->pmd)))
4232 		goto out;
4233 
4234 	for (i = 0; i < HPAGE_PMD_NR; i++)
4235 		flush_icache_page(vma, page + i);
4236 
4237 	entry = mk_huge_pmd(page, vma->vm_page_prot);
4238 	if (write)
4239 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4240 
4241 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
4242 	page_add_file_rmap(page, true);
4243 	/*
4244 	 * deposit and withdraw with pmd lock held
4245 	 */
4246 	if (arch_needs_pgtable_deposit())
4247 		deposit_prealloc_pte(vmf);
4248 
4249 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4250 
4251 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4252 
4253 	/* fault is handled */
4254 	ret = 0;
4255 	count_vm_event(THP_FILE_MAPPED);
4256 out:
4257 	spin_unlock(vmf->ptl);
4258 	return ret;
4259 }
4260 #else
4261 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4262 {
4263 	return VM_FAULT_FALLBACK;
4264 }
4265 #endif
4266 
4267 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
4268 {
4269 	struct vm_area_struct *vma = vmf->vma;
4270 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4271 	bool prefault = vmf->address != addr;
4272 	pte_t entry;
4273 
4274 	flush_icache_page(vma, page);
4275 	entry = mk_pte(page, vma->vm_page_prot);
4276 
4277 	if (prefault && arch_wants_old_prefaulted_pte())
4278 		entry = pte_mkold(entry);
4279 	else
4280 		entry = pte_sw_mkyoung(entry);
4281 
4282 	if (write)
4283 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4284 	/* copy-on-write page */
4285 	if (write && !(vma->vm_flags & VM_SHARED)) {
4286 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4287 		page_add_new_anon_rmap(page, vma, addr, false);
4288 		lru_cache_add_inactive_or_unevictable(page, vma);
4289 	} else {
4290 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
4291 		page_add_file_rmap(page, false);
4292 	}
4293 	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
4294 }
4295 
4296 /**
4297  * finish_fault - finish page fault once we have prepared the page to fault
4298  *
4299  * @vmf: structure describing the fault
4300  *
4301  * This function handles all that is needed to finish a page fault once the
4302  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
4303  * given page, adds reverse page mapping, handles memcg charges and LRU
4304  * addition.
4305  *
4306  * The function expects the page to be locked and on success it consumes a
4307  * reference of a page being mapped (for the PTE which maps it).
4308  *
4309  * Return: %0 on success, %VM_FAULT_ code in case of error.
4310  */
4311 vm_fault_t finish_fault(struct vm_fault *vmf)
4312 {
4313 	struct vm_area_struct *vma = vmf->vma;
4314 	struct page *page;
4315 	vm_fault_t ret;
4316 
4317 	/* Did we COW the page? */
4318 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
4319 		page = vmf->cow_page;
4320 	else
4321 		page = vmf->page;
4322 
4323 	/*
4324 	 * check even for read faults because we might have lost our CoWed
4325 	 * page
4326 	 */
4327 	if (!(vma->vm_flags & VM_SHARED)) {
4328 		ret = check_stable_address_space(vma->vm_mm);
4329 		if (ret)
4330 			return ret;
4331 	}
4332 
4333 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
4334 		if (pmd_none(*vmf->pmd)) {
4335 			if (PageTransCompound(page)) {
4336 				ret = do_set_pmd(vmf, page);
4337 				if (ret != VM_FAULT_FALLBACK)
4338 					return ret;
4339 			}
4340 
4341 			if (vmf->prealloc_pte) {
4342 				vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4343 				if (likely(pmd_none(*vmf->pmd))) {
4344 					mm_inc_nr_ptes(vma->vm_mm);
4345 					pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4346 					vmf->prealloc_pte = NULL;
4347 				}
4348 				spin_unlock(vmf->ptl);
4349 			} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
4350 				return VM_FAULT_OOM;
4351 			}
4352 		}
4353 
4354 		/*
4355 		 * See comment in handle_pte_fault() for how this scenario happens; we
4356 		 * need to return NOPAGE so that we drop this page.
4357 		 */
4358 		if (pmd_devmap_trans_unstable(vmf->pmd))
4359 			return VM_FAULT_NOPAGE;
4360 	}
4361 
4362 	if (!pte_map_lock(vmf))
4363 		return VM_FAULT_RETRY;
4364 	ret = 0;
4365 	/* Re-check under ptl */
4366 	if (likely(pte_none(*vmf->pte)))
4367 		do_set_pte(vmf, page, vmf->address);
4368 	else
4369 		ret = VM_FAULT_NOPAGE;
4370 
4371 	update_mmu_tlb(vma, vmf->address, vmf->pte);
4372 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4373 	return ret;
4374 }
4375 
4376 static unsigned long fault_around_bytes __read_mostly =
4377 	rounddown_pow_of_two(65536);
4378 
4379 #ifdef CONFIG_DEBUG_FS
4380 static int fault_around_bytes_get(void *data, u64 *val)
4381 {
4382 	*val = fault_around_bytes;
4383 	return 0;
4384 }
4385 
4386 /*
4387  * fault_around_bytes must be rounded down to the nearest page order as it's
4388  * what do_fault_around() expects to see.
4389  */
4390 static int fault_around_bytes_set(void *data, u64 val)
4391 {
4392 	if (val / PAGE_SIZE > PTRS_PER_PTE)
4393 		return -EINVAL;
4394 	if (val > PAGE_SIZE)
4395 		fault_around_bytes = rounddown_pow_of_two(val);
4396 	else
4397 		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
4398 	return 0;
4399 }
4400 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
4401 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
4402 
4403 static int __init fault_around_debugfs(void)
4404 {
4405 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4406 				   &fault_around_bytes_fops);
4407 	return 0;
4408 }
4409 late_initcall(fault_around_debugfs);
4410 #endif
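
/*
 * Tuning sketch (editor's note, assuming debugfs is mounted at the usual
 * /sys/kernel/debug): fault-around can be inspected and tuned at run time
 * through the file created above, e.g.
 *
 *	# cat /sys/kernel/debug/fault_around_bytes
 *	65536
 *	# echo 16384 > /sys/kernel/debug/fault_around_bytes
 *
 * fault_around_bytes_set() rounds the value down to a power of two; anything
 * at or below PAGE_SIZE effectively disables fault-around (do_read_fault()
 * requires more than one page), and values above PTRS_PER_PTE pages are
 * rejected with -EINVAL.
 */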
4411 
4412 /*
4413  * do_fault_around() tries to map a few pages around the fault address. The hope
4414  * is that the pages will be needed soon and this will lower the number of
4415  * faults to handle.
4416  *
4417  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4418  * not ready to be mapped: not up-to-date, locked, etc.
4419  *
4420  * This function is called with the page table lock taken. In the split ptlock
4421  * case the page table lock protects only those entries which belong to
4422  * the page table corresponding to the fault address.
4423  *
4424  * This function doesn't cross the VMA boundaries, in order to call map_pages()
4425  * only once.
4426  *
4427  * fault_around_bytes defines how many bytes we'll try to map.
4428  * do_fault_around() expects it to be set to a power of two less than or equal
4429  * to PTRS_PER_PTE.
4430  *
4431  * The virtual address of the area that we map is naturally aligned to
4432  * fault_around_bytes rounded down to the machine page size
4433  * (and therefore to page order).  This way it's easier to guarantee
4434  * that we don't cross page table boundaries.
4435  */
4436 static vm_fault_t do_fault_around(struct vm_fault *vmf)
4437 {
4438 	unsigned long address = vmf->address, nr_pages, mask;
4439 	pgoff_t start_pgoff = vmf->pgoff;
4440 	pgoff_t end_pgoff;
4441 	int off;
4442 	vm_fault_t ret;
4443 
4444 	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
4445 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
4446 
4447 	address = max(address & mask, vmf->vma->vm_start);
4448 	off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
4449 	start_pgoff -= off;
4450 
4451 	/*
4452 	 *  end_pgoff is either the end of the page table, the end of
4453 	 *  the vma or nr_pages from start_pgoff, depending on which is nearest.
4454 	 */
4455 	end_pgoff = start_pgoff -
4456 		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
4457 		PTRS_PER_PTE - 1;
4458 	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
4459 			start_pgoff + nr_pages - 1);
4460 
4461 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
4462 	    pmd_none(*vmf->pmd)) {
4463 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4464 		if (!vmf->prealloc_pte)
4465 			return VM_FAULT_OOM;
4466 		smp_wmb(); /* See comment in __pte_alloc() */
4467 	}
4468 
4469 	rcu_read_lock();
4470 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
4471 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4472 		if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
4473 					 SPF_ABORT_FAULT)) {
4474 			rcu_read_unlock();
4475 			return VM_FAULT_RETRY;
4476 		}
4477 		/*
4478 		 * the mmap sequence check verified that vmf->vma was still
4479 		 * current at that point in time.
4480 		 * The rcu read lock ensures vmf->vma->vm_file stays valid.
4481 		 */
4482 	}
4483 #endif
4484 	ret = vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
4485 	rcu_read_unlock();
4486 	return ret;
4487 }
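
/*
 * Numeric example of the window computed above (editor's illustration with
 * assumed values): with 4 KiB pages and the default fault_around_bytes of
 * 65536, nr_pages is 16 and mask clears the low 16 bits of the address.  A
 * fault at ...23000 therefore starts the window at the 64 KiB-aligned
 * address ...20000, off is 3, and start_pgoff is moved back by 3 pages, so
 * up to 16 page-cache pages around the fault are offered to ->map_pages(),
 * always clamped to the VMA and to the page table containing the fault.
 */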
4488 
4489 static vm_fault_t do_read_fault(struct vm_fault *vmf)
4490 {
4491 	struct vm_area_struct *vma = vmf->vma;
4492 	vm_fault_t ret = 0;
4493 
4494 	/*
4495 	 * Let's call ->map_pages() first and use ->fault() as fallback
4496 	 * if the page at the offset is not ready to be mapped (cold cache or
4497 	 * something).
4498 	 */
4499 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
4500 		if (likely(!userfaultfd_minor(vmf->vma))) {
4501 			ret = do_fault_around(vmf);
4502 			if (ret)
4503 				return ret;
4504 		}
4505 	}
4506 
4507 	ret = __do_fault(vmf);
4508 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4509 		return ret;
4510 
4511 	ret |= finish_fault(vmf);
4512 	unlock_page(vmf->page);
4513 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4514 		put_page(vmf->page);
4515 	return ret;
4516 }
4517 
4518 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4519 {
4520 	struct vm_area_struct *vma = vmf->vma;
4521 	vm_fault_t ret;
4522 
4523 	if (unlikely(!vma->anon_vma)) {
4524 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4525 			count_vm_spf_event(SPF_ABORT_ANON_VMA);
4526 			return VM_FAULT_RETRY;
4527 		}
4528 		if (__anon_vma_prepare(vma))
4529 			return VM_FAULT_OOM;
4530 	}
4531 
4532 	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4533 	if (!vmf->cow_page)
4534 		return VM_FAULT_OOM;
4535 
4536 	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4537 		put_page(vmf->cow_page);
4538 		return VM_FAULT_OOM;
4539 	}
4540 	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4541 
4542 	ret = __do_fault(vmf);
4543 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4544 		goto uncharge_out;
4545 	if (ret & VM_FAULT_DONE_COW)
4546 		return ret;
4547 
4548 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4549 	__SetPageUptodate(vmf->cow_page);
4550 
4551 	ret |= finish_fault(vmf);
4552 	unlock_page(vmf->page);
4553 	put_page(vmf->page);
4554 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4555 		goto uncharge_out;
4556 	return ret;
4557 uncharge_out:
4558 	put_page(vmf->cow_page);
4559 	return ret;
4560 }
4561 
4562 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4563 {
4564 	struct vm_area_struct *vma = vmf->vma;
4565 	vm_fault_t ret, tmp;
4566 
4567 	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
4568 
4569 	ret = __do_fault(vmf);
4570 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4571 		return ret;
4572 
4573 	/*
4574 	 * Check if the backing address space wants to know that the page is
4575 	 * about to become writable
4576 	 */
4577 	if (vma->vm_ops->page_mkwrite) {
4578 		unlock_page(vmf->page);
4579 		tmp = do_page_mkwrite(vmf);
4580 		if (unlikely(!tmp ||
4581 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4582 			put_page(vmf->page);
4583 			return tmp;
4584 		}
4585 	}
4586 
4587 	ret |= finish_fault(vmf);
4588 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4589 					VM_FAULT_RETRY))) {
4590 		unlock_page(vmf->page);
4591 		put_page(vmf->page);
4592 		return ret;
4593 	}
4594 
4595 	ret |= fault_dirty_shared_page(vmf);
4596 	return ret;
4597 }
4598 
4599 /*
4600  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4601  * but allow concurrent faults).
4602  * The mmap_lock may have been released depending on flags and our
4603  * return value.  See filemap_fault() and __lock_page_or_retry().
4604  * If mmap_lock is released, vma may become invalid (for example
4605  * by other thread calling munmap()).
4606  */
4607 static vm_fault_t do_fault(struct vm_fault *vmf)
4608 {
4609 	struct vm_area_struct *vma = vmf->vma;
4610 	struct mm_struct *vm_mm = vma->vm_mm;
4611 	vm_fault_t ret;
4612 
4613 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4614 		count_vm_spf_event(SPF_ATTEMPT_FILE);
4615 
4616 	/*
4617 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4618 	 */
4619 	if (!vma->vm_ops->fault) {
4620 		VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
4621 
4622 		/*
4623 		 * If we find a migration pmd entry or a none pmd entry, which
4624 		 * should never happen, return SIGBUS
4625 		 */
4626 		if (unlikely(!pmd_present(*vmf->pmd)))
4627 			ret = VM_FAULT_SIGBUS;
4628 		else {
4629 			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4630 						       vmf->pmd,
4631 						       vmf->address,
4632 						       &vmf->ptl);
4633 			/*
4634 			 * Make sure this is not a temporary clearing of pte
4635 			 * by holding ptl and checking again. A R/M/W update
4636 			 * of the pte involves taking the ptl, clearing the pte so
4637 			 * that we don't have concurrent modification by hardware,
4638 			 * and then updating it.
4639 			 */
4640 			if (unlikely(pte_none(*vmf->pte)))
4641 				ret = VM_FAULT_SIGBUS;
4642 			else
4643 				ret = VM_FAULT_NOPAGE;
4644 
4645 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4646 		}
4647 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4648 		ret = do_read_fault(vmf);
4649 	else if (!(vma->vm_flags & VM_SHARED))
4650 		ret = do_cow_fault(vmf);
4651 	else
4652 		ret = do_shared_fault(vmf);
4653 
4654 	/* preallocated pagetable is unused: free it */
4655 	if (vmf->prealloc_pte) {
4656 		pte_free(vm_mm, vmf->prealloc_pte);
4657 		vmf->prealloc_pte = NULL;
4658 	}
4659 	return ret;
4660 }
4661 
4662 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4663 		      unsigned long addr, int page_nid, int *flags)
4664 {
4665 	get_page(page);
4666 
4667 	count_vm_numa_event(NUMA_HINT_FAULTS);
4668 	if (page_nid == numa_node_id()) {
4669 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4670 		*flags |= TNF_FAULT_LOCAL;
4671 	}
4672 
4673 	return mpol_misplaced(page, vma, addr);
4674 }
4675 
4676 static vm_fault_t do_numa_page(struct vm_fault *vmf)
4677 {
4678 	struct vm_area_struct *vma = vmf->vma;
4679 	struct page *page = NULL;
4680 	int page_nid = NUMA_NO_NODE;
4681 	int last_cpupid;
4682 	int target_nid;
4683 	pte_t pte, old_pte;
4684 	bool was_writable = pte_savedwrite(vmf->orig_pte);
4685 	int flags = 0;
4686 
4687 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4688 		count_vm_spf_event(SPF_ATTEMPT_NUMA);
4689 
4690 	/*
4691 	 * The "pte" at this point cannot be used safely without
4692 	 * validation through pte_unmap_same(). It's of NUMA type but
4693 	 * the pfn may be garbage if the read was not atomic.
4694 	 */
4695 	if (!pte_spinlock(vmf))
4696 		return VM_FAULT_RETRY;
4697 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4698 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4699 		goto out;
4700 	}
4701 
4702 	/* Get the normal PTE  */
4703 	old_pte = ptep_get(vmf->pte);
4704 	pte = pte_modify(old_pte, vma->vm_page_prot);
4705 
4706 	page = vm_normal_page(vma, vmf->address, pte);
4707 	if (!page)
4708 		goto out_map;
4709 
4710 	/* TODO: handle PTE-mapped THP */
4711 	if (PageCompound(page))
4712 		goto out_map;
4713 
4714 	/*
4715 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4716 	 * much anyway since they can be in shared cache state. This misses
4717 	 * the case where a mapping is writable but the process never writes
4718 	 * to it but pte_write gets cleared during protection updates and
4719 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4720 	 * background writeback, dirty balancing and application behaviour.
4721 	 */
4722 	if (!was_writable)
4723 		flags |= TNF_NO_GROUP;
4724 
4725 	/*
4726 	 * Flag if the page is shared between multiple address spaces. This
4727 	 * is later used when determining whether to group tasks together
4728 	 */
4729 	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4730 		flags |= TNF_SHARED;
4731 
4732 	last_cpupid = page_cpupid_last(page);
4733 	page_nid = page_to_nid(page);
4734 	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4735 			&flags);
4736 	if (target_nid == NUMA_NO_NODE) {
4737 		put_page(page);
4738 		goto out_map;
4739 	}
4740 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4741 
4742 	/* Migrate to the requested node */
4743 	if (migrate_misplaced_page(page, vma, target_nid)) {
4744 		page_nid = target_nid;
4745 		flags |= TNF_MIGRATED;
4746 	} else {
4747 		flags |= TNF_MIGRATE_FAIL;
4748 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4749 		spin_lock(vmf->ptl);
4750 		if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4751 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4752 			goto out;
4753 		}
4754 		goto out_map;
4755 	}
4756 
4757 out:
4758 	if (page_nid != NUMA_NO_NODE)
4759 		task_numa_fault(last_cpupid, page_nid, 1, flags);
4760 	return 0;
4761 out_map:
4762 	/*
4763 	 * Make it present again, depending on how arch implements
4764 	 * non-accessible ptes, some can allow access by kernel mode.
4765 	 */
4766 	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4767 	pte = pte_modify(old_pte, vma->vm_page_prot);
4768 	pte = pte_mkyoung(pte);
4769 	if (was_writable)
4770 		pte = pte_mkwrite(pte);
4771 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4772 	update_mmu_cache(vma, vmf->address, vmf->pte);
4773 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4774 	goto out;
4775 }
4776 
4777 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4778 {
4779 	if (vma_is_anonymous(vmf->vma))
4780 		return do_huge_pmd_anonymous_page(vmf);
4781 	if (vmf->vma->vm_ops->huge_fault)
4782 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4783 	return VM_FAULT_FALLBACK;
4784 }
4785 
4786 /* `inline' is required to avoid gcc 4.1.2 build error */
4787 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
4788 {
4789 	if (vma_is_anonymous(vmf->vma)) {
4790 		if (userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
4791 			return handle_userfault(vmf, VM_UFFD_WP);
4792 		return do_huge_pmd_wp_page(vmf);
4793 	}
4794 	if (vmf->vma->vm_ops->huge_fault) {
4795 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4796 
4797 		if (!(ret & VM_FAULT_FALLBACK))
4798 			return ret;
4799 	}
4800 
4801 	/* COW or write-notify handled on pte level: split pmd. */
4802 	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4803 
4804 	return VM_FAULT_FALLBACK;
4805 }
4806 
4807 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4808 {
4809 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4810 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4811 	/* No support for anonymous transparent PUD pages yet */
4812 	if (vma_is_anonymous(vmf->vma))
4813 		return VM_FAULT_FALLBACK;
4814 	if (vmf->vma->vm_ops->huge_fault)
4815 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4816 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4817 	return VM_FAULT_FALLBACK;
4818 }
4819 
4820 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4821 {
4822 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4823 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4824 	/* No support for anonymous transparent PUD pages yet */
4825 	if (vma_is_anonymous(vmf->vma))
4826 		goto split;
4827 	if (vmf->vma->vm_ops->huge_fault) {
4828 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4829 
4830 		if (!(ret & VM_FAULT_FALLBACK))
4831 			return ret;
4832 	}
4833 split:
4834 	/* COW or write-notify not handled on PUD level: split pud.*/
4835 	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4836 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4837 	return VM_FAULT_FALLBACK;
4838 }
4839 
4840 /*
4841  * These routines also need to handle stuff like marking pages dirty
4842  * and/or accessed for architectures that don't do it in hardware (most
4843  * RISC architectures).  The early dirtying is also good on the i386.
4844  *
4845  * There is also a hook called "update_mmu_cache()" that architectures
4846  * with external mmu caches can use to update those (ie the Sparc or
4847  * PowerPC hashed page tables that act as extended TLBs).
4848  *
4849  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4850  * concurrent faults).
4851  *
4852  * The mmap_lock may have been released depending on flags and our return value.
4853  * See filemap_fault() and __lock_page_or_retry().
4854  */
4855 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4856 {
4857 	pte_t entry;
4858 
4859 	if (!vmf->pte) {
4860 		if (vma_is_anonymous(vmf->vma))
4861 			return do_anonymous_page(vmf);
4862 		else
4863 			return do_fault(vmf);
4864 	}
4865 
4866 	if (!pte_present(vmf->orig_pte))
4867 		return do_swap_page(vmf);
4868 
4869 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4870 		return do_numa_page(vmf);
4871 
4872 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4873 		count_vm_spf_event(SPF_ATTEMPT_PTE);
4874 
4875 	if (!pte_spinlock(vmf))
4876 		return VM_FAULT_RETRY;
4877 	entry = vmf->orig_pte;
4878 	if (unlikely(!pte_same(*vmf->pte, entry))) {
4879 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4880 		goto unlock;
4881 	}
4882 	if (vmf->flags & FAULT_FLAG_WRITE) {
4883 		if (!pte_write(entry))
4884 			return do_wp_page(vmf);
4885 		entry = pte_mkdirty(entry);
4886 	}
4887 	entry = pte_mkyoung(entry);
4888 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4889 				vmf->flags & FAULT_FLAG_WRITE)) {
4890 		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4891 	} else {
4892 		/* Skip spurious TLB flush for retried page fault */
4893 		if (vmf->flags & FAULT_FLAG_TRIED)
4894 			goto unlock;
4895 		/*
4896 		 * This is needed only for protection faults but the arch code
4897 		 * is not yet telling us if this is a protection fault or not.
4898 		 * This still avoids useless tlb flushes for .text page faults
4899 		 * with threads.
4900 		 */
4901 		if (vmf->flags & FAULT_FLAG_WRITE)
4902 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4903 	}
4904 unlock:
4905 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4906 	return 0;
4907 }
4908 
4909 /*
4910  * By the time we get here, we already hold the mm semaphore
4911  *
4912  * The mmap_lock may have been released depending on flags and our
4913  * return value.  See filemap_fault() and __lock_page_or_retry().
4914  */
4915 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4916 		unsigned long address, unsigned int flags, unsigned long seq)
4917 {
4918 	struct vm_fault vmf = {
4919 		.vma = vma,
4920 		.address = address & PAGE_MASK,
4921 		.flags = flags,
4922 		.pgoff = linear_page_index(vma, address),
4923 		.gfp_mask = __get_fault_gfp_mask(vma),
4924 	};
4925 	unsigned int dirty = flags & FAULT_FLAG_WRITE;
4926 	struct mm_struct *mm = vma->vm_mm;
4927 	pgd_t *pgd;
4928 	p4d_t *p4d;
4929 	vm_fault_t ret;
4930 
4931 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
4932 	if (flags & FAULT_FLAG_SPECULATIVE) {
4933 		pgd_t pgdval;
4934 		p4d_t p4dval;
4935 		pud_t pudval;
4936 		bool uffd_missing_sigbus = false;
4937 
4938 #ifdef CONFIG_USERFAULTFD
4939 		/*
4940 		 * Only support SPF for SIGBUS+MISSING userfaults in private
4941 		 * anonymous VMAs.
4942 		 */
4943 		uffd_missing_sigbus = vma_is_anonymous(vma) &&
4944 					(vma->vm_flags & VM_UFFD_MISSING) &&
4945 					userfaultfd_using_sigbus(vma);
4946 #endif
4947 
4948 		vmf.seq = seq;
4949 
4950 		speculative_page_walk_begin();
4951 		pgd = pgd_offset(mm, address);
4952 		pgdval = READ_ONCE(*pgd);
4953 		if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval))) {
4954 			count_vm_spf_event(SPF_ABORT_PUD);
4955 			goto spf_fail;
4956 		}
4957 
4958 		p4d = p4d_offset(pgd, address);
4959 		if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval))
4960 			goto spf_fail;
4961 		p4dval = READ_ONCE(*p4d);
4962 		if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval))) {
4963 			count_vm_spf_event(SPF_ABORT_PUD);
4964 			goto spf_fail;
4965 		}
4966 
4967 		vmf.pud = pud_offset(p4d, address);
4968 		if (p4d_val(READ_ONCE(*p4d)) != p4d_val(p4dval))
4969 			goto spf_fail;
4970 		pudval = READ_ONCE(*vmf.pud);
4971 		if (pud_none(pudval) || unlikely(pud_bad(pudval)) ||
4972 		    unlikely(pud_trans_huge(pudval)) ||
4973 		    unlikely(pud_devmap(pudval))) {
4974 			count_vm_spf_event(SPF_ABORT_PUD);
4975 			goto spf_fail;
4976 		}
4977 
4978 		vmf.pmd = pmd_offset(vmf.pud, address);
4979 		if (pud_val(READ_ONCE(*vmf.pud)) != pud_val(pudval))
4980 			goto spf_fail;
4981 		vmf.orig_pmd = READ_ONCE(*vmf.pmd);
4982 
4983 		/*
4984 		 * pmd_none could mean that a hugepage collapse is in
4985 		 * progress behind our back, as collapse_huge_page()
4986 		 * marks it before invalidating the pte (which is done
4987 		 * once the IPI has been caught by all CPUs and we have
4988 		 * interrupts disabled).  For this reason we cannot
4989 		 * handle THP speculatively, since we can't safely
4990 		 * identify an in-progress collapse operation done
4991 		 * behind our back on that PMD.
4992 		 */
4993 		if (unlikely(pmd_none(vmf.orig_pmd) ||
4994 			     is_swap_pmd(vmf.orig_pmd) ||
4995 			     pmd_trans_huge(vmf.orig_pmd) ||
4996 			     pmd_devmap(vmf.orig_pmd))) {
4997 			count_vm_spf_event(SPF_ABORT_PMD);
4998 			goto spf_fail;
4999 		}
5000 
5001 		/*
5002 		 * The above does not allocate/instantiate page-tables because
5003 		 * doing so would lead to the possibility of instantiating
5004 		 * page-tables after free_pgtables() -- and consequently
5005 		 * leaking them.
5006 		 *
5007 		 * The result is that we take at least one non-speculative
5008 		 * fault per PMD in order to instantiate it.
5009 		 */
5010 
5011 		vmf.pte = pte_offset_map(vmf.pmd, address);
5012 		if (pmd_val(READ_ONCE(*vmf.pmd)) != pmd_val(vmf.orig_pmd)) {
5013 			pte_unmap(vmf.pte);
5014 			vmf.pte = NULL;
5015 			goto spf_fail;
5016 		}
5017 		vmf.orig_pte = READ_ONCE(*vmf.pte);
5018 		barrier();
5019 		if (pte_none(vmf.orig_pte)) {
5020 			pte_unmap(vmf.pte);
5021 			vmf.pte = NULL;
5022 		}
5023 
5024 		speculative_page_walk_end();
5025 
5026 		if (!vmf.pte && uffd_missing_sigbus)
5027 			return VM_FAULT_SIGBUS;
5028 
5029 		return handle_pte_fault(&vmf);
5030 
5031 	spf_fail:
5032 		speculative_page_walk_end();
5033 		/*
5034 		 * A failing page-table walk is similar to a missing page, so give
5035 		 * the SIGBUS+MISSING userfault an opportunity to handle it before
5036 		 * retrying with the mmap_lock held.
5037 		 */
5038 		return uffd_missing_sigbus ? VM_FAULT_SIGBUS : VM_FAULT_RETRY;
5039 	}
5040 #endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */
5041 
5042 	pgd = pgd_offset(mm, address);
5043 	p4d = p4d_alloc(mm, pgd, address);
5044 	if (!p4d)
5045 		return VM_FAULT_OOM;
5046 
5047 	vmf.pud = pud_alloc(mm, p4d, address);
5048 	if (!vmf.pud)
5049 		return VM_FAULT_OOM;
5050 retry_pud:
5051 	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
5052 		ret = create_huge_pud(&vmf);
5053 		if (!(ret & VM_FAULT_FALLBACK))
5054 			return ret;
5055 	} else {
5056 		pud_t orig_pud = *vmf.pud;
5057 
5058 		barrier();
5059 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5060 
5061 			/* NUMA case for anonymous PUDs would go here */
5062 
5063 			if (dirty && !pud_write(orig_pud)) {
5064 				ret = wp_huge_pud(&vmf, orig_pud);
5065 				if (!(ret & VM_FAULT_FALLBACK))
5066 					return ret;
5067 			} else {
5068 				huge_pud_set_accessed(&vmf, orig_pud);
5069 				return 0;
5070 			}
5071 		}
5072 	}
5073 
5074 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5075 	if (!vmf.pmd)
5076 		return VM_FAULT_OOM;
5077 
5078 	/* Huge pud page fault raced with pmd_alloc? */
5079 	if (pud_trans_unstable(vmf.pud))
5080 		goto retry_pud;
5081 
5082 	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
5083 		ret = create_huge_pmd(&vmf);
5084 		if (!(ret & VM_FAULT_FALLBACK))
5085 			return ret;
5086 	} else {
5087 		vmf.orig_pmd = *vmf.pmd;
5088 
5089 		barrier();
5090 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
5091 			VM_BUG_ON(thp_migration_supported() &&
5092 					  !is_pmd_migration_entry(vmf.orig_pmd));
5093 			if (is_pmd_migration_entry(vmf.orig_pmd))
5094 				pmd_migration_entry_wait(mm, vmf.pmd);
5095 			return 0;
5096 		}
5097 		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
5098 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5099 				return do_huge_pmd_numa_page(&vmf);
5100 
5101 			if (dirty && !pmd_write(vmf.orig_pmd)) {
5102 				ret = wp_huge_pmd(&vmf);
5103 				if (!(ret & VM_FAULT_FALLBACK))
5104 					return ret;
5105 			} else {
5106 				huge_pmd_set_accessed(&vmf);
5107 				return 0;
5108 			}
5109 		}
5110 	}
5111 
5112 	if (unlikely(pmd_none(*vmf.pmd))) {
5113 		/*
5114 		 * Leave __pte_alloc() until later: because vm_ops->fault may
5115 		 * want to allocate huge page, and if we expose page table
5116 		 * for an instant, it will be difficult to retract from
5117 		 * concurrent faults and from rmap lookups.
5118 		 */
5119 		vmf.pte = NULL;
5120 	} else {
5121 		/*
5122 		 * If a huge pmd materialized under us, just retry later.  Use
5123 		 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead
5124 		 * of pmd_trans_huge() to ensure the pmd didn't become
5125 		 * pmd_trans_huge under us and then back to pmd_none, as a
5126 		 * result of MADV_DONTNEED running immediately after a huge pmd
5127 		 * fault in a different thread of this mm, in turn leading to a
5128 		 * misleading pmd_trans_huge() retval. All we have to ensure is
5129 		 * that it is a regular pmd that we can walk with
5130 		 * pte_offset_map() and we can do that through an atomic read
5131 		 * in C, which is what pmd_trans_unstable() provides.
5132 		 */
5133 		if (pmd_devmap_trans_unstable(vmf.pmd))
5134 			return 0;
5135 		/*
5136 		 * A regular pmd is established and it can't morph into a huge
5137 		 * pmd from under us anymore at this point because we hold the
5138 		 * mmap_lock read mode and khugepaged takes it in write mode.
5139 		 * So now it's safe to run pte_offset_map().
5140 		 */
5141 		vmf.pte = pte_offset_map(vmf.pmd, vmf.address);
5142 		vmf.orig_pte = *vmf.pte;
5143 
5144 		/*
5145 		 * Some architectures can have larger ptes than wordsize,
5146 		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
5147 		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
5148 		 * accesses.  The code below just needs a consistent view
5149 		 * for the ifs and we later double check anyway with the
5150 		 * ptl lock held. So here a barrier will do.
5151 		 */
5152 		barrier();
5153 		if (pte_none(vmf.orig_pte)) {
5154 			pte_unmap(vmf.pte);
5155 			vmf.pte = NULL;
5156 		}
5157 	}
5158 
5159 	return handle_pte_fault(&vmf);
5160 }
5161 
5162 /**
5163  * mm_account_fault - Do page fault accounting
5164  *
5165  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
5166  *        of perf event counters, but we'll still do the per-task accounting to
5167  *        the task that triggered this page fault.
5168  * @address: the faulted address.
5169  * @flags: the fault flags.
5170  * @ret: the fault retcode.
5171  *
5172  * This will take care of most of the page fault accounting.  Meanwhile, it
5173  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
5174  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
5175  * still be in per-arch page fault handlers at the entry of page fault.
5176  */
5177 static inline void mm_account_fault(struct pt_regs *regs,
5178 				    unsigned long address, unsigned int flags,
5179 				    vm_fault_t ret)
5180 {
5181 	bool major;
5182 
5183 	/*
5184 	 * We don't do accounting for some specific faults:
5185 	 *
5186 	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
5187 	 *   includes arch_vma_access_permitted() failing before reaching here.
5188 	 *   So this is not a "this many hardware page faults" counter.  We
5189 	 *   should use the hw profiling for that.
5190 	 *
5191 	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
5192 	 *   once they're completed.
5193 	 */
5194 	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
5195 		return;
5196 
5197 	/*
5198 	 * We define the fault as a major fault when the final successful fault
5199 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
5200 	 * handle it immediately previously).
5201 	 */
5202 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
5203 
5204 	if (major)
5205 		current->maj_flt++;
5206 	else
5207 		current->min_flt++;
5208 
5209 	/*
5210 	 * If the fault is done for GUP, regs will be NULL.  We only do the
5211 	 * accounting for the per-thread fault counters of the task that
5212 	 * triggered the fault, and we skip the perf event updates.
5213 	 */
5214 	if (!regs)
5215 		return;
5216 
5217 	if (major)
5218 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
5219 	else
5220 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
5221 }
5222 
5223 #ifdef CONFIG_LRU_GEN
5224 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5225 {
5226 	/* the LRU algorithm doesn't apply to sequential or random reads */
5227 	current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
5228 }
5229 
5230 static void lru_gen_exit_fault(void)
5231 {
5232 	current->in_lru_fault = false;
5233 }
5234 #else
5235 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5236 {
5237 }
5238 
5239 static void lru_gen_exit_fault(void)
5240 {
5241 }
5242 #endif /* CONFIG_LRU_GEN */
5243 
5244 /*
5245  * By the time we get here, we already hold the mm semaphore
5246  *
5247  * The mmap_lock may have been released depending on flags and our
5248  * return value.  See filemap_fault() and __lock_page_or_retry().
5249  */
5250 vm_fault_t do_handle_mm_fault(struct vm_area_struct *vma,
5251 		unsigned long address, unsigned int flags,
5252 		unsigned long seq, struct pt_regs *regs)
5253 {
5254 	vm_fault_t ret;
5255 
5256 	VM_BUG_ON((flags & FAULT_FLAG_SPECULATIVE) &&
5257 		  !vma_can_speculate(vma, flags));
5258 
5259 	__set_current_state(TASK_RUNNING);
5260 
5261 	count_vm_event(PGFAULT);
5262 	count_memcg_event_mm(vma->vm_mm, PGFAULT);
5263 
5264 	/* do counter updates before entering the really critical section. */
5265 	check_sync_rss_stat(current);
5266 
5267 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5268 					    flags & FAULT_FLAG_INSTRUCTION,
5269 					    flags & FAULT_FLAG_REMOTE))
5270 		return VM_FAULT_SIGSEGV;
5271 
5272 	/*
5273 	 * Enable the memcg OOM handling for faults triggered in user
5274 	 * space.  Kernel faults are handled more gracefully.
5275 	 */
5276 	if (flags & FAULT_FLAG_USER)
5277 		mem_cgroup_enter_user_fault();
5278 
5279 	lru_gen_enter_fault(vma);
5280 
5281 	if (unlikely(is_vm_hugetlb_page(vma))) {
5282 		VM_BUG_ON(flags & FAULT_FLAG_SPECULATIVE);
5283 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5284 	} else {
5285 		ret = __handle_mm_fault(vma, address, flags, seq);
5286 	}
5287 
5288 	lru_gen_exit_fault();
5289 
5290 	if (flags & FAULT_FLAG_USER) {
5291 		mem_cgroup_exit_user_fault();
5292 		/*
5293 		 * The task may have entered a memcg OOM situation but
5294 		 * if the allocation error was handled gracefully (no
5295 		 * VM_FAULT_OOM), there is no need to kill anything.
5296 		 * Just clean up the OOM state peacefully.
5297 		 */
5298 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5299 			mem_cgroup_oom_synchronize(false);
5300 	}
5301 
5302 	mm_account_fault(regs, address, flags, ret);
5303 
5304 	return ret;
5305 }
5306 EXPORT_SYMBOL_GPL(do_handle_mm_fault);
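
/*
 * Editor's illustrative sketch, not part of the original file: roughly how an
 * architecture fault handler might drive do_handle_mm_fault() on the
 * non-speculative path.  The function name and the exact flag set here are
 * hypothetical, and real handlers also deal with access checks, signals and
 * the retry protocol in far more detail.  The seq argument is assumed to be
 * ignored unless FAULT_FLAG_SPECULATIVE is set, so 0 is passed.
 */
static vm_fault_t example_fault_path(struct pt_regs *regs, unsigned long addr,
				     bool write)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_USER | FAULT_FLAG_ALLOW_RETRY |
			     FAULT_FLAG_KILLABLE;
	vm_fault_t ret;

	if (write)
		flags |= FAULT_FLAG_WRITE;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr) {
		mmap_read_unlock(mm);
		return VM_FAULT_SIGSEGV;
	}

	ret = do_handle_mm_fault(vma, addr, flags, 0, regs);
	/* On VM_FAULT_RETRY the mmap_lock has already been dropped for us. */
	if (!(ret & VM_FAULT_RETRY))
		mmap_read_unlock(mm);
	return ret;
}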
5307 
5308 #ifndef __PAGETABLE_P4D_FOLDED
5309 /*
5310  * Allocate p4d page table.
5311  * We've already handled the fast-path in-line.
5312  */
5313 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
5314 {
5315 	p4d_t *new = p4d_alloc_one(mm, address);
5316 	if (!new)
5317 		return -ENOMEM;
5318 
5319 	smp_wmb(); /* See comment in __pte_alloc */
5320 
5321 	spin_lock(&mm->page_table_lock);
5322 	if (pgd_present(*pgd))		/* Another has populated it */
5323 		p4d_free(mm, new);
5324 	else
5325 		pgd_populate(mm, pgd, new);
5326 	spin_unlock(&mm->page_table_lock);
5327 	return 0;
5328 }
5329 #endif /* __PAGETABLE_P4D_FOLDED */
5330 
5331 #ifndef __PAGETABLE_PUD_FOLDED
5332 /*
5333  * Allocate page upper directory.
5334  * We've already handled the fast-path in-line.
5335  */
5336 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
5337 {
5338 	pud_t *new = pud_alloc_one(mm, address);
5339 	if (!new)
5340 		return -ENOMEM;
5341 
5342 	smp_wmb(); /* See comment in __pte_alloc */
5343 
5344 	spin_lock(&mm->page_table_lock);
5345 	if (!p4d_present(*p4d)) {
5346 		mm_inc_nr_puds(mm);
5347 		p4d_populate(mm, p4d, new);
5348 	} else	/* Another has populated it */
5349 		pud_free(mm, new);
5350 	spin_unlock(&mm->page_table_lock);
5351 	return 0;
5352 }
5353 #endif /* __PAGETABLE_PUD_FOLDED */
5354 
5355 #ifndef __PAGETABLE_PMD_FOLDED
5356 /*
5357  * Allocate page middle directory.
5358  * We've already handled the fast-path in-line.
5359  */
5360 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
5361 {
5362 	spinlock_t *ptl;
5363 	pmd_t *new = pmd_alloc_one(mm, address);
5364 	if (!new)
5365 		return -ENOMEM;
5366 
5367 	smp_wmb(); /* See comment in __pte_alloc */
5368 
5369 	ptl = pud_lock(mm, pud);
5370 	if (!pud_present(*pud)) {
5371 		mm_inc_nr_pmds(mm);
5372 		pud_populate(mm, pud, new);
5373 	} else	/* Another has populated it */
5374 		pmd_free(mm, new);
5375 	spin_unlock(ptl);
5376 	return 0;
5377 }
5378 #endif /* __PAGETABLE_PMD_FOLDED */
5379 
5380 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
5381 			  struct mmu_notifier_range *range, pte_t **ptepp,
5382 			  pmd_t **pmdpp, spinlock_t **ptlp)
5383 {
5384 	pgd_t *pgd;
5385 	p4d_t *p4d;
5386 	pud_t *pud;
5387 	pmd_t *pmd;
5388 	pte_t *ptep;
5389 
5390 	pgd = pgd_offset(mm, address);
5391 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
5392 		goto out;
5393 
5394 	p4d = p4d_offset(pgd, address);
5395 	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
5396 		goto out;
5397 
5398 	pud = pud_offset(p4d, address);
5399 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
5400 		goto out;
5401 
5402 	pmd = pmd_offset(pud, address);
5403 	VM_BUG_ON(pmd_trans_huge(*pmd));
5404 
5405 	if (pmd_huge(*pmd)) {
5406 		if (!pmdpp)
5407 			goto out;
5408 
5409 		if (range) {
5410 			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
5411 						NULL, mm, address & PMD_MASK,
5412 						(address & PMD_MASK) + PMD_SIZE);
5413 			mmu_notifier_invalidate_range_start(range);
5414 		}
5415 		*ptlp = pmd_lock(mm, pmd);
5416 		if (pmd_huge(*pmd)) {
5417 			*pmdpp = pmd;
5418 			return 0;
5419 		}
5420 		spin_unlock(*ptlp);
5421 		if (range)
5422 			mmu_notifier_invalidate_range_end(range);
5423 	}
5424 
5425 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
5426 		goto out;
5427 
5428 	if (range) {
5429 		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
5430 					address & PAGE_MASK,
5431 					(address & PAGE_MASK) + PAGE_SIZE);
5432 		mmu_notifier_invalidate_range_start(range);
5433 	}
5434 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
5435 	if (!pte_present(*ptep))
5436 		goto unlock;
5437 	*ptepp = ptep;
5438 	return 0;
5439 unlock:
5440 	pte_unmap_unlock(ptep, *ptlp);
5441 	if (range)
5442 		mmu_notifier_invalidate_range_end(range);
5443 out:
5444 	return -EINVAL;
5445 }
5446 
5447 /**
5448  * follow_pte - look up PTE at a user virtual address
5449  * @mm: the mm_struct of the target address space
5450  * @address: user virtual address
5451  * @ptepp: location to store found PTE
5452  * @ptlp: location to store the lock for the PTE
5453  *
5454  * On a successful return, the pointer to the PTE is stored in @ptepp;
5455  * the corresponding lock is taken and its location is stored in @ptlp.
5456  * The contents of the PTE are only stable until @ptlp is released;
5457  * any further use, if any, must be protected against invalidation
5458  * with MMU notifiers.
5459  *
5460  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
5461  * should be taken for read.
5462  *
5463  * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
5464  * it is not a good general-purpose API.
5465  *
5466  * Return: zero on success, -ve otherwise.
5467  */
5468 int follow_pte(struct mm_struct *mm, unsigned long address,
5469 	       pte_t **ptepp, spinlock_t **ptlp)
5470 {
5471 	return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
5472 }
5473 EXPORT_SYMBOL_GPL(follow_pte);
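
/*
 * Editor's illustrative sketch, not part of the original file: how a caller
 * such as KVM might use follow_pte() to peek at the PTE backing an address in
 * a VM_IO/VM_PFNMAP mapping.  The helper name is hypothetical; the mmap_lock
 * is held for read around the call and the PTE contents are only trusted
 * while the returned page-table lock is held.
 */
static int example_peek_pfn(struct mm_struct *mm, unsigned long addr,
			    unsigned long *pfn, bool *writable)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	mmap_read_lock(mm);
	ret = follow_pte(mm, addr, &ptep, &ptl);
	if (!ret) {
		*pfn = pte_pfn(*ptep);
		*writable = pte_write(*ptep);
		pte_unmap_unlock(ptep, ptl);
	}
	mmap_read_unlock(mm);
	return ret;
}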
5474 
5475 /**
5476  * follow_pfn - look up PFN at a user virtual address
5477  * @vma: memory mapping
5478  * @address: user virtual address
5479  * @pfn: location to store found PFN
5480  *
5481  * Only IO mappings and raw PFN mappings are allowed.
5482  *
5483  * This function does not allow the caller to read the permissions
5484  * of the PTE.  Do not use it.
5485  *
5486  * Return: zero and the pfn at @pfn on success, -ve otherwise.
5487  */
5488 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5489 	unsigned long *pfn)
5490 {
5491 	int ret = -EINVAL;
5492 	spinlock_t *ptl;
5493 	pte_t *ptep;
5494 
5495 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5496 		return ret;
5497 
5498 	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5499 	if (ret)
5500 		return ret;
5501 	*pfn = pte_pfn(*ptep);
5502 	pte_unmap_unlock(ptep, ptl);
5503 	return 0;
5504 }
5505 EXPORT_SYMBOL(follow_pfn);
5506 
5507 #ifdef CONFIG_HAVE_IOREMAP_PROT
5508 int follow_phys(struct vm_area_struct *vma,
5509 		unsigned long address, unsigned int flags,
5510 		unsigned long *prot, resource_size_t *phys)
5511 {
5512 	int ret = -EINVAL;
5513 	pte_t *ptep, pte;
5514 	spinlock_t *ptl;
5515 
5516 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5517 		goto out;
5518 
5519 	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
5520 		goto out;
5521 	pte = *ptep;
5522 
5523 	if ((flags & FOLL_WRITE) && !pte_write(pte))
5524 		goto unlock;
5525 
5526 	*prot = pgprot_val(pte_pgprot(pte));
5527 	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5528 
5529 	ret = 0;
5530 unlock:
5531 	pte_unmap_unlock(ptep, ptl);
5532 out:
5533 	return ret;
5534 }
5535 
5536 /**
5537  * generic_access_phys - generic implementation for iomem mmap access
5538  * @vma: the vma to access
5539  * @addr: userspace address, not relative offset within @vma
5540  * @buf: buffer to read/write
5541  * @len: length of transfer
5542  * @write: set to FOLL_WRITE when writing, otherwise reading
5543  *
5544  * This is a generic implementation for &vm_operations_struct.access for an
5545  * iomem mapping. This callback is used by access_process_vm() when the @vma is
5546  * not page based.
5547  */
5548 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5549 			void *buf, int len, int write)
5550 {
5551 	resource_size_t phys_addr;
5552 	unsigned long prot = 0;
5553 	void __iomem *maddr;
5554 	pte_t *ptep, pte;
5555 	spinlock_t *ptl;
5556 	int offset = offset_in_page(addr);
5557 	int ret = -EINVAL;
5558 
5559 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5560 		return -EINVAL;
5561 
5562 retry:
5563 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5564 		return -EINVAL;
5565 	pte = *ptep;
5566 	pte_unmap_unlock(ptep, ptl);
5567 
5568 	prot = pgprot_val(pte_pgprot(pte));
5569 	phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5570 
5571 	if ((write & FOLL_WRITE) && !pte_write(pte))
5572 		return -EINVAL;
5573 
5574 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5575 	if (!maddr)
5576 		return -ENOMEM;
5577 
5578 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5579 		goto out_unmap;
5580 
5581 	if (!pte_same(pte, *ptep)) {
5582 		pte_unmap_unlock(ptep, ptl);
5583 		iounmap(maddr);
5584 
5585 		goto retry;
5586 	}
5587 
5588 	if (write)
5589 		memcpy_toio(maddr + offset, buf, len);
5590 	else
5591 		memcpy_fromio(buf, maddr + offset, len);
5592 	ret = len;
5593 	pte_unmap_unlock(ptep, ptl);
5594 out_unmap:
5595 	iounmap(maddr);
5596 
5597 	return ret;
5598 }
5599 EXPORT_SYMBOL_GPL(generic_access_phys);
5600 #endif
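
/*
 * Editor's illustrative sketch, not part of the original file: a driver that
 * maps device memory with VM_IO/VM_PFNMAP can plug generic_access_phys() into
 * its vm_operations_struct so that access_process_vm() (and therefore ptrace
 * or /proc/pid/mem style accesses) still works without struct pages to pin.
 * The ops variable name is hypothetical.
 */
#ifdef CONFIG_HAVE_IOREMAP_PROT
static const struct vm_operations_struct example_iomem_vm_ops __maybe_unused = {
	.access = generic_access_phys,
};
#endif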
5601 
5602 /*
5603  * Access another process' address space as given in mm.
5604  */
5605 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
5606 		       int len, unsigned int gup_flags)
5607 {
5608 	struct vm_area_struct *vma;
5609 	void *old_buf = buf;
5610 	int write = gup_flags & FOLL_WRITE;
5611 
5612 	if (mmap_read_lock_killable(mm))
5613 		return 0;
5614 
5615 	/* ignore errors, just check how much was successfully transferred */
5616 	while (len) {
5617 		int bytes, ret, offset;
5618 		void *maddr;
5619 		struct page *page = NULL;
5620 
5621 		ret = get_user_pages_remote(mm, addr, 1,
5622 				gup_flags, &page, &vma, NULL);
5623 		if (ret <= 0) {
5624 #ifndef CONFIG_HAVE_IOREMAP_PROT
5625 			break;
5626 #else
5627 			/*
5628 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5629 			 * we can access using slightly different code.
5630 			 */
5631 			vma = vma_lookup(mm, addr);
5632 			if (!vma)
5633 				break;
5634 			if (vma->vm_ops && vma->vm_ops->access)
5635 				ret = vma->vm_ops->access(vma, addr, buf,
5636 							  len, write);
5637 			if (ret <= 0)
5638 				break;
5639 			bytes = ret;
5640 #endif
5641 		} else {
5642 			bytes = len;
5643 			offset = addr & (PAGE_SIZE-1);
5644 			if (bytes > PAGE_SIZE-offset)
5645 				bytes = PAGE_SIZE-offset;
5646 
5647 			maddr = kmap(page);
5648 			if (write) {
5649 				copy_to_user_page(vma, page, addr,
5650 						  maddr + offset, buf, bytes);
5651 				set_page_dirty_lock(page);
5652 			} else {
5653 				copy_from_user_page(vma, page, addr,
5654 						    buf, maddr + offset, bytes);
5655 			}
5656 			kunmap(page);
5657 			put_page(page);
5658 		}
5659 		len -= bytes;
5660 		buf += bytes;
5661 		addr += bytes;
5662 	}
5663 	mmap_read_unlock(mm);
5664 
5665 	return buf - old_buf;
5666 }
5667 
5668 /**
5669  * access_remote_vm - access another process' address space
5670  * @mm:		the mm_struct of the target address space
5671  * @addr:	start address to access
5672  * @buf:	source or destination buffer
5673  * @len:	number of bytes to transfer
5674  * @gup_flags:	flags modifying lookup behaviour
5675  *
5676  * The caller must hold a reference on @mm.
5677  *
5678  * Return: number of bytes copied from source to destination.
5679  */
5680 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
5681 		void *buf, int len, unsigned int gup_flags)
5682 {
5683 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
5684 }
5685 
5686 /*
5687  * Access another process' address space.
5689  * The source/target buffer must be in kernel space.
5690  * Do not walk the page tables directly; use get_user_pages().
5690  */
5691 int access_process_vm(struct task_struct *tsk, unsigned long addr,
5692 		void *buf, int len, unsigned int gup_flags)
5693 {
5694 	struct mm_struct *mm;
5695 	int ret;
5696 
5697 	mm = get_task_mm(tsk);
5698 	if (!mm)
5699 		return 0;
5700 
5701 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
5702 
5703 	mmput(mm);
5704 
5705 	return ret;
5706 }
5707 EXPORT_SYMBOL_GPL(access_process_vm);
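
/*
 * Editor's illustrative sketch, not part of the original file: reading a few
 * bytes out of another task's address space, as a debugger-style interface
 * might.  The wrapper name is hypothetical; a return value smaller than len
 * simply means that not everything could be copied.
 */
static int example_peek_remote(struct task_struct *tsk, unsigned long addr,
			       void *buf, int len)
{
	/* gup_flags of 0 asks for a plain read; pass FOLL_WRITE to poke. */
	return access_process_vm(tsk, addr, buf, len, 0);
}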
5708 
5709 /*
5710  * Print the name of a VMA.
5711  */
5712 void print_vma_addr(char *prefix, unsigned long ip)
5713 {
5714 	struct mm_struct *mm = current->mm;
5715 	struct vm_area_struct *vma;
5716 
5717 	/*
5718 	 * we might be running from an atomic context so we cannot sleep
5719 	 */
5720 	if (!mmap_read_trylock(mm))
5721 		return;
5722 
5723 	vma = find_vma(mm, ip);
5724 	if (vma && vma->vm_file) {
5725 		struct file *f = vma->vm_file;
5726 		char *buf = (char *)__get_free_page(GFP_NOWAIT);
5727 		if (buf) {
5728 			char *p;
5729 
5730 			p = file_path(f, buf, PAGE_SIZE);
5731 			if (IS_ERR(p))
5732 				p = "?";
5733 			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5734 					vma->vm_start,
5735 					vma->vm_end - vma->vm_start);
5736 			free_page((unsigned long)buf);
5737 		}
5738 	}
5739 	mmap_read_unlock(mm);
5740 }
5741 
5742 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5743 void __might_fault(const char *file, int line)
5744 {
5745 	/*
5746 	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
5747 	 * holding the mmap_lock, this is safe because kernel memory doesn't
5748 	 * get paged out, therefore we'll never actually fault, and the
5749 	 * below annotations will generate false positives.
5750 	 */
5751 	if (uaccess_kernel())
5752 		return;
5753 	if (pagefault_disabled())
5754 		return;
5755 	__might_sleep(file, line, 0);
5756 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5757 	if (current->mm)
5758 		might_lock_read(&current->mm->mmap_lock);
5759 #endif
5760 }
5761 EXPORT_SYMBOL(__might_fault);
5762 #endif
5763 
5764 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5765 /*
5766  * Process all subpages of the specified huge page with the specified
5767  * operation.  The target subpage will be processed last to keep its
5768  * cache lines hot.
5769  */
5770 static inline void process_huge_page(
5771 	unsigned long addr_hint, unsigned int pages_per_huge_page,
5772 	void (*process_subpage)(unsigned long addr, int idx, void *arg),
5773 	void *arg)
5774 {
5775 	int i, n, base, l;
5776 	unsigned long addr = addr_hint &
5777 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5778 
5779 	/* Process target subpage last to keep its cache lines hot */
5780 	might_sleep();
5781 	n = (addr_hint - addr) / PAGE_SIZE;
5782 	if (2 * n <= pages_per_huge_page) {
5783 		/* If target subpage in first half of huge page */
5784 		base = 0;
5785 		l = n;
5786 		/* Process subpages at the end of the huge page */
5787 		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5788 			cond_resched();
5789 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5790 		}
5791 	} else {
5792 		/* If target subpage in second half of huge page */
5793 		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5794 		l = pages_per_huge_page - n;
5795 		/* Process subpages at the beginning of the huge page */
5796 		for (i = 0; i < base; i++) {
5797 			cond_resched();
5798 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5799 		}
5800 	}
5801 	/*
5802 	 * Process remaining subpages in left-right-left-right pattern
5803 	 * towards the target subpage
5804 	 */
5805 	for (i = 0; i < l; i++) {
5806 		int left_idx = base + i;
5807 		int right_idx = base + 2 * l - 1 - i;
5808 
5809 		cond_resched();
5810 		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5811 		cond_resched();
5812 		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5813 	}
5814 }
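
/*
 * Editor's note, a worked example of the ordering above: for an 8-subpage
 * huge page whose faulting address lies in subpage 2, process_huge_page()
 * visits subpages 7, 6, 5, 4, then zig-zags through 0, 3, 1 and finally 2,
 * so the target subpage is touched last and its cache lines stay hot for
 * the faulting thread.
 */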
5815 
5816 static void clear_gigantic_page(struct page *page,
5817 				unsigned long addr,
5818 				unsigned int pages_per_huge_page)
5819 {
5820 	int i;
5821 	struct page *p = page;
5822 
5823 	might_sleep();
5824 	for (i = 0; i < pages_per_huge_page;
5825 	     i++, p = mem_map_next(p, page, i)) {
5826 		cond_resched();
5827 		clear_user_highpage(p, addr + i * PAGE_SIZE);
5828 	}
5829 }
5830 
5831 static void clear_subpage(unsigned long addr, int idx, void *arg)
5832 {
5833 	struct page *page = arg;
5834 
5835 	clear_user_highpage(page + idx, addr);
5836 }
5837 
5838 void clear_huge_page(struct page *page,
5839 		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5840 {
5841 	unsigned long addr = addr_hint &
5842 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5843 
5844 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5845 		clear_gigantic_page(page, addr, pages_per_huge_page);
5846 		return;
5847 	}
5848 
5849 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5850 }
5851 
5852 static void copy_user_gigantic_page(struct page *dst, struct page *src,
5853 				    unsigned long addr,
5854 				    struct vm_area_struct *vma,
5855 				    unsigned int pages_per_huge_page)
5856 {
5857 	int i;
5858 	struct page *dst_base = dst;
5859 	struct page *src_base = src;
5860 
5861 	for (i = 0; i < pages_per_huge_page; ) {
5862 		cond_resched();
5863 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5864 
5865 		i++;
5866 		dst = mem_map_next(dst, dst_base, i);
5867 		src = mem_map_next(src, src_base, i);
5868 	}
5869 }
5870 
5871 struct copy_subpage_arg {
5872 	struct page *dst;
5873 	struct page *src;
5874 	struct vm_area_struct *vma;
5875 };
5876 
5877 static void copy_subpage(unsigned long addr, int idx, void *arg)
5878 {
5879 	struct copy_subpage_arg *copy_arg = arg;
5880 
5881 	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5882 			   addr, copy_arg->vma);
5883 }
5884 
5885 void copy_user_huge_page(struct page *dst, struct page *src,
5886 			 unsigned long addr_hint, struct vm_area_struct *vma,
5887 			 unsigned int pages_per_huge_page)
5888 {
5889 	unsigned long addr = addr_hint &
5890 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5891 	struct copy_subpage_arg arg = {
5892 		.dst = dst,
5893 		.src = src,
5894 		.vma = vma,
5895 	};
5896 
5897 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5898 		copy_user_gigantic_page(dst, src, addr, vma,
5899 					pages_per_huge_page);
5900 		return;
5901 	}
5902 
5903 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5904 }
5905 
5906 long copy_huge_page_from_user(struct page *dst_page,
5907 				const void __user *usr_src,
5908 				unsigned int pages_per_huge_page,
5909 				bool allow_pagefault)
5910 {
5911 	void *src = (void *)usr_src;
5912 	void *page_kaddr;
5913 	unsigned long i, rc = 0;
5914 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5915 	struct page *subpage = dst_page;
5916 
5917 	for (i = 0; i < pages_per_huge_page;
5918 	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
5919 		if (allow_pagefault)
5920 			page_kaddr = kmap(subpage);
5921 		else
5922 			page_kaddr = kmap_atomic(subpage);
5923 		rc = copy_from_user(page_kaddr,
5924 				(const void __user *)(src + i * PAGE_SIZE),
5925 				PAGE_SIZE);
5926 		if (allow_pagefault)
5927 			kunmap(subpage);
5928 		else
5929 			kunmap_atomic(page_kaddr);
5930 
5931 		ret_val -= (PAGE_SIZE - rc);
5932 		if (rc)
5933 			break;
5934 
5935 		flush_dcache_page(subpage);
5936 
5937 		cond_resched();
5938 	}
5939 	return ret_val;
5940 }
5941 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5942 
5943 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5944 
5945 static struct kmem_cache *page_ptl_cachep;
5946 
5947 void __init ptlock_cache_init(void)
5948 {
5949 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5950 			SLAB_PANIC, NULL);
5951 }
5952 
5953 bool ptlock_alloc(struct page *page)
5954 {
5955 	spinlock_t *ptl;
5956 
5957 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5958 	if (!ptl)
5959 		return false;
5960 	page->ptl = ptl;
5961 	return true;
5962 }
5963 
5964 void ptlock_free(struct page *page)
5965 {
5966 	kmem_cache_free(page_ptl_cachep, page->ptl);
5967 }
5968 #endif
5969