1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/mm_inline.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/coredump.h>
47 #include <linux/sched/numa_balancing.h>
48 #include <linux/sched/task.h>
49 #include <linux/hugetlb.h>
50 #include <linux/mman.h>
51 #include <linux/swap.h>
52 #include <linux/highmem.h>
53 #include <linux/pagemap.h>
54 #include <linux/memremap.h>
55 #include <linux/ksm.h>
56 #include <linux/rmap.h>
57 #include <linux/export.h>
58 #include <linux/delayacct.h>
59 #include <linux/init.h>
60 #include <linux/pfn_t.h>
61 #include <linux/pgsize_migration.h>
62 #include <linux/writeback.h>
63 #include <linux/memcontrol.h>
64 #include <linux/mmu_notifier.h>
65 #include <linux/swapops.h>
66 #include <linux/elf.h>
67 #include <linux/gfp.h>
68 #include <linux/migrate.h>
69 #include <linux/string.h>
70 #include <linux/debugfs.h>
71 #include <linux/userfaultfd_k.h>
72 #include <linux/dax.h>
73 #include <linux/oom.h>
74 #include <linux/numa.h>
75 #include <linux/perf_event.h>
76 #include <linux/ptrace.h>
77 #include <linux/vmalloc.h>
78 #include <trace/hooks/mm.h>
79 
80 #include <trace/events/kmem.h>
81 
82 #include <asm/io.h>
83 #include <asm/mmu_context.h>
84 #include <asm/pgalloc.h>
85 #include <linux/uaccess.h>
86 #include <asm/tlb.h>
87 #include <asm/tlbflush.h>
88 
89 #include "pgalloc-track.h"
90 #include "internal.h"
91 
92 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
93 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
94 #endif
95 
96 #ifndef CONFIG_NUMA
97 unsigned long max_mapnr;
98 EXPORT_SYMBOL(max_mapnr);
99 
100 struct page *mem_map;
101 EXPORT_SYMBOL(mem_map);
102 #endif
103 
104 /*
105  * A number of key systems in x86 including ioremap() rely on the assumption
106  * that high_memory defines the upper bound on direct map memory, the end
107  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
108  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
109  * and ZONE_HIGHMEM.
110  */
111 void *high_memory;
112 EXPORT_SYMBOL(high_memory);
113 
114 /*
115  * Randomize the address space (stacks, mmaps, brk, etc.).
116  *
117  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
118  *   as ancient (libc5 based) binaries can segfault. )
119  */
120 int randomize_va_space __read_mostly =
121 #ifdef CONFIG_COMPAT_BRK
122 					1;
123 #else
124 					2;
125 #endif
126 
127 #ifndef arch_wants_old_prefaulted_pte
128 static inline bool arch_wants_old_prefaulted_pte(void)
129 {
130 	/*
131 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
132 	 * some architectures, even if it's performed in hardware. By
133 	 * default, "false" means prefaulted entries will be 'young'.
134 	 */
135 	return false;
136 }
137 #endif
138 
139 static int __init disable_randmaps(char *s)
140 {
141 	randomize_va_space = 0;
142 	return 1;
143 }
144 __setup("norandmaps", disable_randmaps);
145 
146 unsigned long zero_pfn __read_mostly;
147 EXPORT_SYMBOL(zero_pfn);
148 
149 unsigned long highest_memmap_pfn __read_mostly;
150 
151 /*
152  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
153  */
154 static int __init init_zero_pfn(void)
155 {
156 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
157 	return 0;
158 }
159 early_initcall(init_zero_pfn);
160 
161 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
162 {
163 	trace_rss_stat(mm, member, count);
164 }
165 EXPORT_SYMBOL_GPL(mm_trace_rss_stat);
166 
167 #if defined(SPLIT_RSS_COUNTING)
168 
169 void sync_mm_rss(struct mm_struct *mm)
170 {
171 	int i;
172 
173 	for (i = 0; i < NR_MM_COUNTERS; i++) {
174 		if (current->rss_stat.count[i]) {
175 			add_mm_counter(mm, i, current->rss_stat.count[i]);
176 			current->rss_stat.count[i] = 0;
177 		}
178 	}
179 	current->rss_stat.events = 0;
180 }
181 
182 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
183 {
184 	struct task_struct *task = current;
185 
186 	if (likely(task->mm == mm))
187 		task->rss_stat.count[member] += val;
188 	else
189 		add_mm_counter(mm, member, val);
190 }
191 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
192 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
193 
194 /* sync counter once per 64 page faults */
195 #define TASK_RSS_EVENTS_THRESH	(64)
196 static void check_sync_rss_stat(struct task_struct *task)
197 {
198 	if (unlikely(task != current))
199 		return;
200 	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
201 		sync_mm_rss(task->mm);
202 }
203 #else /* SPLIT_RSS_COUNTING */
204 
205 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
206 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
207 
208 static void check_sync_rss_stat(struct task_struct *task)
209 {
210 }
211 
212 #endif /* SPLIT_RSS_COUNTING */
213 
214 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
215 
216 struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)
217 {
218 	struct vm_area_struct *vma;
219 
220 	rcu_read_lock();
221 	vma = find_vma_from_tree(mm, addr);
222 
223 	/*
224 	 * atomic_inc_unless_negative() also protects from races with
225 	 * fast mremap.
226 	 *
227 	 * If there is a concurrent fast mremap, bail out since the entire
228 	 * PMD/PUD subtree may have been remapped.
229 	 *
230 	 * This is usually safe for conventional mremap since it takes the
231 	 * PTE locks as does SPF. However fast mremap only takes the lock
232 	 * at the PMD/PUD level which is ok as it is done with the mmap
233 	 * write lock held. But SPF, as the term implies, forgoes taking
234 	 * the mmap read lock and also cannot take the PTL at the larger
235 	 * PMD/PUD granularity, since that would introduce huge
236 	 * contention in the page fault path; so fall back to regular fault
237 	 * handling.
238 	 */
239 	if (vma) {
240 		if (vma->vm_start > addr ||
241 		    !atomic_inc_unless_negative(&vma->file_ref_count))
242 			vma = NULL;
243 	}
244 	rcu_read_unlock();
245 
246 	return vma;
247 }
248 
249 void put_vma(struct vm_area_struct *vma)
250 {
251 	int new_ref_count;
252 
253 	new_ref_count = atomic_dec_return(&vma->file_ref_count);
254 	if (new_ref_count < 0)
255 		vm_area_free_no_check(vma);
256 }
257 
258 #if ALLOC_SPLIT_PTLOCKS
259 static void wait_for_smp_sync(void *arg)
260 {
261 }
262 #endif
263 #endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */
264 
265 /*
266  * Note: this doesn't free the actual pages themselves. That
267  * has been handled earlier when unmapping all the memory regions.
268  */
269 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
270 			   unsigned long addr)
271 {
272 	pgtable_t token = pmd_pgtable(*pmd);
273 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
274 	/*
275 	 * Ensure page table destruction is blocked if __pte_map_lock managed
276 	 * to take this lock. Without this barrier tlb_remove_table_rcu can
277 	 * destroy ptl after __pte_map_lock locked it and during unlock would
278 	 * cause a use-after-free.
279 	 */
280 	spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
281 	spin_unlock(ptl);
282 #if ALLOC_SPLIT_PTLOCKS
283 	/*
284 	 * The __pte_map_lock can still be working on the ->ptl in the read side
285 	 * critical section while ->ptl is freed, which results in a use-after-
286 	 * free. Synchronize against it using smp_call_function().
287 	 */
288 	smp_call_function(wait_for_smp_sync, NULL, 1);
289 #endif
290 #endif
291 	pmd_clear(pmd);
292 	pte_free_tlb(tlb, token, addr);
293 	mm_dec_nr_ptes(tlb->mm);
294 }
295 
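/*
 * Free the pte-level page tables under one pud, then release the pmd page
 * itself once the [floor, ceiling) bounds show no neighbouring vma still
 * needs it.
 */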
296 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
297 				unsigned long addr, unsigned long end,
298 				unsigned long floor, unsigned long ceiling)
299 {
300 	pmd_t *pmd;
301 	unsigned long next;
302 	unsigned long start;
303 
304 	start = addr;
305 	pmd = pmd_offset(pud, addr);
306 	do {
307 		next = pmd_addr_end(addr, end);
308 		if (pmd_none_or_clear_bad(pmd))
309 			continue;
310 		free_pte_range(tlb, pmd, addr);
311 	} while (pmd++, addr = next, addr != end);
312 
313 	start &= PUD_MASK;
314 	if (start < floor)
315 		return;
316 	if (ceiling) {
317 		ceiling &= PUD_MASK;
318 		if (!ceiling)
319 			return;
320 	}
321 	if (end - 1 > ceiling - 1)
322 		return;
323 
324 	pmd = pmd_offset(pud, start);
325 	pud_clear(pud);
326 	pmd_free_tlb(tlb, pmd, start);
327 	mm_dec_nr_pmds(tlb->mm);
328 }
329 
330 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
331 				unsigned long addr, unsigned long end,
332 				unsigned long floor, unsigned long ceiling)
333 {
334 	pud_t *pud;
335 	unsigned long next;
336 	unsigned long start;
337 
338 	start = addr;
339 	pud = pud_offset(p4d, addr);
340 	do {
341 		next = pud_addr_end(addr, end);
342 		if (pud_none_or_clear_bad(pud))
343 			continue;
344 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
345 	} while (pud++, addr = next, addr != end);
346 
347 	start &= P4D_MASK;
348 	if (start < floor)
349 		return;
350 	if (ceiling) {
351 		ceiling &= P4D_MASK;
352 		if (!ceiling)
353 			return;
354 	}
355 	if (end - 1 > ceiling - 1)
356 		return;
357 
358 	pud = pud_offset(p4d, start);
359 	p4d_clear(p4d);
360 	pud_free_tlb(tlb, pud, start);
361 	mm_dec_nr_puds(tlb->mm);
362 }
363 
364 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
365 				unsigned long addr, unsigned long end,
366 				unsigned long floor, unsigned long ceiling)
367 {
368 	p4d_t *p4d;
369 	unsigned long next;
370 	unsigned long start;
371 
372 	start = addr;
373 	p4d = p4d_offset(pgd, addr);
374 	do {
375 		next = p4d_addr_end(addr, end);
376 		if (p4d_none_or_clear_bad(p4d))
377 			continue;
378 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
379 	} while (p4d++, addr = next, addr != end);
380 
381 	start &= PGDIR_MASK;
382 	if (start < floor)
383 		return;
384 	if (ceiling) {
385 		ceiling &= PGDIR_MASK;
386 		if (!ceiling)
387 			return;
388 	}
389 	if (end - 1 > ceiling - 1)
390 		return;
391 
392 	p4d = p4d_offset(pgd, start);
393 	pgd_clear(pgd);
394 	p4d_free_tlb(tlb, p4d, start);
395 }
396 
397 /*
398  * This function frees user-level page tables of a process.
399  */
400 void free_pgd_range(struct mmu_gather *tlb,
401 			unsigned long addr, unsigned long end,
402 			unsigned long floor, unsigned long ceiling)
403 {
404 	pgd_t *pgd;
405 	unsigned long next;
406 
407 	/*
408 	 * The next few lines have given us lots of grief...
409 	 *
410 	 * Why are we testing PMD* at this top level?  Because often
411 	 * there will be no work to do at all, and we'd prefer not to
412 	 * go all the way down to the bottom just to discover that.
413 	 *
414 	 * Why all these "- 1"s?  Because 0 represents both the bottom
415 	 * of the address space and the top of it (using -1 for the
416 	 * top wouldn't help much: the masks would do the wrong thing).
417 	 * The rule is that addr 0 and floor 0 refer to the bottom of
418 	 * the address space, but end 0 and ceiling 0 refer to the top.
419 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
420 	 * that end 0 case should be mythical).
421 	 *
422 	 * Wherever addr is brought up or ceiling brought down, we must
423 	 * be careful to reject "the opposite 0" before it confuses the
424 	 * subsequent tests.  But what about where end is brought down
425 	 * by PMD_SIZE below? no, end can't go down to 0 there.
426 	 *
427 	 * Whereas we round start (addr) and ceiling down, by different
428 	 * masks at different levels, in order to test whether a table
429 	 * now has no other vmas using it, so can be freed, we don't
430 	 * bother to round floor or end up - the tests don't need that.
431 	 */
432 
433 	addr &= PMD_MASK;
434 	if (addr < floor) {
435 		addr += PMD_SIZE;
436 		if (!addr)
437 			return;
438 	}
439 	if (ceiling) {
440 		ceiling &= PMD_MASK;
441 		if (!ceiling)
442 			return;
443 	}
444 	if (end - 1 > ceiling - 1)
445 		end -= PMD_SIZE;
446 	if (addr > end - 1)
447 		return;
448 	/*
449 	 * We add page table cache pages with PAGE_SIZE
450 	 * (see pte_free_tlb()), so flush the tlb if we need to.
451 	 */
452 	tlb_change_page_size(tlb, PAGE_SIZE);
453 	pgd = pgd_offset(tlb->mm, addr);
454 	do {
455 		next = pgd_addr_end(addr, end);
456 		if (pgd_none_or_clear_bad(pgd))
457 			continue;
458 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
459 	} while (pgd++, addr = next, addr != end);
460 }
461 
462 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
463 		unsigned long floor, unsigned long ceiling)
464 {
465 	while (vma) {
466 		struct vm_area_struct *next = vma->vm_next;
467 		unsigned long addr = vma->vm_start;
468 
469 		/*
470 		 * Hide vma from rmap and truncate_pagecache before freeing
471 		 * pgtables
472 		 */
473 		unlink_anon_vmas(vma);
474 		unlink_file_vma(vma);
475 
476 		if (is_vm_hugetlb_page(vma)) {
477 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
478 				floor, next ? next->vm_start : ceiling);
479 		} else {
480 			/*
481 			 * Optimization: gather nearby vmas into one call down
482 			 */
483 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
484 			       && !is_vm_hugetlb_page(next)) {
485 				vma = next;
486 				next = vma->vm_next;
487 				unlink_anon_vmas(vma);
488 				unlink_file_vma(vma);
489 			}
490 			free_pgd_range(tlb, addr, vma->vm_end,
491 				floor, next ? next->vm_start : ceiling);
492 		}
493 		vma = next;
494 	}
495 }
496 
497 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
498 {
499 	spinlock_t *ptl;
500 	pgtable_t new = pte_alloc_one(mm);
501 	if (!new)
502 		return -ENOMEM;
503 
504 	/*
505 	 * Ensure all pte setup (eg. pte page lock and page clearing) are
506 	 * visible before the pte is made visible to other CPUs by being
507 	 * put into page tables.
508 	 *
509 	 * The other side of the story is the pointer chasing in the page
510 	 * table walking code (when walking the page table without locking;
511 	 * ie. most of the time). Fortunately, these data accesses consist
512 	 * of a chain of data-dependent loads, meaning most CPUs (alpha
513 	 * being the notable exception) will already guarantee loads are
514 	 * seen in-order. See the alpha page table accessors for the
515 	 * smp_rmb() barriers in page table walking code.
516 	 */
517 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
518 
519 	ptl = pmd_lock(mm, pmd);
520 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
521 		mm_inc_nr_ptes(mm);
522 		pmd_populate(mm, pmd, new);
523 		new = NULL;
524 	}
525 	spin_unlock(ptl);
526 	if (new)
527 		pte_free(mm, new);
528 	return 0;
529 }
530 
531 int __pte_alloc_kernel(pmd_t *pmd)
532 {
533 	pte_t *new = pte_alloc_one_kernel(&init_mm);
534 	if (!new)
535 		return -ENOMEM;
536 
537 	smp_wmb(); /* See comment in __pte_alloc */
538 
539 	spin_lock(&init_mm.page_table_lock);
540 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
541 		pmd_populate_kernel(&init_mm, pmd, new);
542 		new = NULL;
543 	}
544 	spin_unlock(&init_mm.page_table_lock);
545 	if (new)
546 		pte_free_kernel(&init_mm, new);
547 	return 0;
548 }
549 
550 static inline void init_rss_vec(int *rss)
551 {
552 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
553 }
554 
555 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
556 {
557 	int i;
558 
559 	if (current->mm == mm)
560 		sync_mm_rss(mm);
561 	for (i = 0; i < NR_MM_COUNTERS; i++)
562 		if (rss[i])
563 			add_mm_counter(mm, i, rss[i]);
564 }
565 
566 /*
567  * This function is called to print an error when a bad pte
568  * is found. For example, we might have a PFN-mapped pte in
569  * a region that doesn't allow it.
570  *
571  * The calling function must still handle the error.
572  */
573 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
574 			  pte_t pte, struct page *page)
575 {
576 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
577 	p4d_t *p4d = p4d_offset(pgd, addr);
578 	pud_t *pud = pud_offset(p4d, addr);
579 	pmd_t *pmd = pmd_offset(pud, addr);
580 	struct address_space *mapping;
581 	pgoff_t index;
582 	static unsigned long resume;
583 	static unsigned long nr_shown;
584 	static unsigned long nr_unshown;
585 
586 	/*
587 	 * Allow a burst of 60 reports, then keep quiet for that minute;
588 	 * or allow a steady drip of one report per second.
589 	 */
590 	if (nr_shown == 60) {
591 		if (time_before(jiffies, resume)) {
592 			nr_unshown++;
593 			return;
594 		}
595 		if (nr_unshown) {
596 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
597 				 nr_unshown);
598 			nr_unshown = 0;
599 		}
600 		nr_shown = 0;
601 	}
602 	if (nr_shown++ == 0)
603 		resume = jiffies + 60 * HZ;
604 
605 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
606 	index = linear_page_index(vma, addr);
607 
608 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
609 		 current->comm,
610 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
611 	if (page)
612 		dump_page(page, "bad pte");
613 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
614 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
615 	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
616 		 vma->vm_file,
617 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
618 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
619 		 mapping ? mapping->a_ops->readpage : NULL);
620 	dump_stack();
621 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
622 }
623 
624 /*
625  * vm_normal_page -- This function gets the "struct page" associated with a pte.
626  *
627  * "Special" mappings do not wish to be associated with a "struct page" (either
628  * it doesn't exist, or it exists but they don't want to touch it). In this
629  * case, NULL is returned here. "Normal" mappings do have a struct page.
630  *
631  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
632  * pte bit, in which case this function is trivial. Secondly, an architecture
633  * may not have a spare pte bit, which requires a more complicated scheme,
634  * described below.
635  *
636  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
637  * special mapping (even if there are underlying and valid "struct pages").
638  * COWed pages of a VM_PFNMAP are always normal.
639  *
640  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
641  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
642  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
643  * mapping will always honor the rule
644  *
645  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
646  *
647  * And for normal mappings this is false.
648  *
649  * This restricts such mappings to be a linear translation from virtual address
650  * to pfn. To get around this restriction, we allow arbitrary mappings so long
651  * as the vma is not a COW mapping; in that case, we know that all ptes are
652  * special (because none can have been COWed).
653  *
654  *
655  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
656  *
657  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
658  * page" backing, however the difference is that _all_ pages with a struct
659  * page (that is, those where pfn_valid is true) are refcounted and considered
660  * normal pages by the VM. The disadvantage is that pages are refcounted
661  * (which can be slower and simply not an option for some PFNMAP users). The
662  * advantage is that we don't have to follow the strict linearity rule of
663  * PFNMAP mappings in order to support COWable mappings.
664  *
665  */
666 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
667 			    pte_t pte)
668 {
669 	unsigned long pfn = pte_pfn(pte);
670 
671 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
672 		if (likely(!pte_special(pte)))
673 			goto check_pfn;
674 		if (vma->vm_ops && vma->vm_ops->find_special_page)
675 			return vma->vm_ops->find_special_page(vma, addr);
676 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
677 			return NULL;
678 		if (is_zero_pfn(pfn))
679 			return NULL;
680 		if (pte_devmap(pte))
681 			return NULL;
682 
683 		print_bad_pte(vma, addr, pte, NULL);
684 		return NULL;
685 	}
686 
687 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
688 
689 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
690 		if (vma->vm_flags & VM_MIXEDMAP) {
691 			if (!pfn_valid(pfn))
692 				return NULL;
693 			goto out;
694 		} else {
695 			unsigned long off;
696 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
697 			if (pfn == vma->vm_pgoff + off)
698 				return NULL;
699 			if (!is_cow_mapping(vma->vm_flags))
700 				return NULL;
701 		}
702 	}
703 
704 	if (is_zero_pfn(pfn))
705 		return NULL;
706 
707 check_pfn:
708 	if (unlikely(pfn > highest_memmap_pfn)) {
709 		print_bad_pte(vma, addr, pte, NULL);
710 		return NULL;
711 	}
712 
713 	/*
714 	 * NOTE! We still have PageReserved() pages in the page tables.
715 	 * eg. VDSO mappings can cause them to exist.
716 	 */
717 out:
718 	return pfn_to_page(pfn);
719 }
720 
721 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
722 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
723 				pmd_t pmd)
724 {
725 	unsigned long pfn = pmd_pfn(pmd);
726 
727 	/*
728 	 * There is no pmd_special() but there may be special pmds, e.g.
729 	 * in a direct-access (dax) mapping, so let's just replicate the
730 	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
731 	 */
732 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
733 		if (vma->vm_flags & VM_MIXEDMAP) {
734 			if (!pfn_valid(pfn))
735 				return NULL;
736 			goto out;
737 		} else {
738 			unsigned long off;
739 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
740 			if (pfn == vma->vm_pgoff + off)
741 				return NULL;
742 			if (!is_cow_mapping(vma->vm_flags))
743 				return NULL;
744 		}
745 	}
746 
747 	if (pmd_devmap(pmd))
748 		return NULL;
749 	if (is_huge_zero_pmd(pmd))
750 		return NULL;
751 	if (unlikely(pfn > highest_memmap_pfn))
752 		return NULL;
753 
754 	/*
755 	 * NOTE! We still have PageReserved() pages in the page tables.
756 	 * eg. VDSO mappings can cause them to exist.
757 	 */
758 out:
759 	return pfn_to_page(pfn);
760 }
761 #endif
762 
763 static void restore_exclusive_pte(struct vm_area_struct *vma,
764 				  struct page *page, unsigned long address,
765 				  pte_t *ptep)
766 {
767 	pte_t pte;
768 	swp_entry_t entry;
769 
770 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
771 	if (pte_swp_soft_dirty(*ptep))
772 		pte = pte_mksoft_dirty(pte);
773 
774 	entry = pte_to_swp_entry(*ptep);
775 	if (pte_swp_uffd_wp(*ptep))
776 		pte = pte_mkuffd_wp(pte);
777 	else if (is_writable_device_exclusive_entry(entry))
778 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
779 
780 	set_pte_at(vma->vm_mm, address, ptep, pte);
781 
782 	/*
783 	 * No need to take a page reference as one was already
784 	 * created when the swap entry was made.
785 	 */
786 	if (PageAnon(page))
787 		page_add_anon_rmap(page, vma, address, false);
788 	else
789 		/*
790 		 * Currently device exclusive access only supports anonymous
791 		 * memory so the entry shouldn't point to a filebacked page.
792 		 */
793 		WARN_ON_ONCE(!PageAnon(page));
794 
795 	if (vma->vm_flags & VM_LOCKED)
796 		mlock_vma_page(page);
797 
798 	/*
799 	 * No need to invalidate - it was non-present before. However
800 	 * secondary CPUs may have mappings that need invalidating.
801 	 */
802 	update_mmu_cache(vma, address, ptep);
803 }
804 
805 /*
806  * Tries to restore an exclusive pte if the page lock can be acquired without
807  * sleeping.
808  */
809 static int
810 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
811 			unsigned long addr)
812 {
813 	swp_entry_t entry = pte_to_swp_entry(*src_pte);
814 	struct page *page = pfn_swap_entry_to_page(entry);
815 
816 	if (trylock_page(page)) {
817 		restore_exclusive_pte(vma, page, addr, src_pte);
818 		unlock_page(page);
819 		return 0;
820 	}
821 
822 	return -EBUSY;
823 }
824 
825 /*
826  * copy one vm_area from one task to the other. Assumes the page tables
827  * already present in the new task to be cleared in the whole range
828  * covered by this vma.
829  */
830 
831 static unsigned long
832 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
833 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
834 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
835 {
836 	unsigned long vm_flags = dst_vma->vm_flags;
837 	pte_t pte = *src_pte;
838 	struct page *page;
839 	swp_entry_t entry = pte_to_swp_entry(pte);
840 
841 	if (likely(!non_swap_entry(entry))) {
842 		if (swap_duplicate(entry) < 0)
843 			return -EIO;
844 
845 		/* make sure dst_mm is on swapoff's mmlist. */
846 		if (unlikely(list_empty(&dst_mm->mmlist))) {
847 			spin_lock(&mmlist_lock);
848 			if (list_empty(&dst_mm->mmlist))
849 				list_add(&dst_mm->mmlist,
850 						&src_mm->mmlist);
851 			spin_unlock(&mmlist_lock);
852 		}
853 		rss[MM_SWAPENTS]++;
854 	} else if (is_migration_entry(entry)) {
855 		page = pfn_swap_entry_to_page(entry);
856 
857 		rss[mm_counter(page)]++;
858 
859 		if (is_writable_migration_entry(entry) &&
860 				is_cow_mapping(vm_flags)) {
861 			/*
862 			 * COW mappings require pages in both
863 			 * parent and child to be set to read.
864 			 */
865 			entry = make_readable_migration_entry(
866 							swp_offset(entry));
867 			pte = swp_entry_to_pte(entry);
868 			if (pte_swp_soft_dirty(*src_pte))
869 				pte = pte_swp_mksoft_dirty(pte);
870 			if (pte_swp_uffd_wp(*src_pte))
871 				pte = pte_swp_mkuffd_wp(pte);
872 			set_pte_at(src_mm, addr, src_pte, pte);
873 		}
874 	} else if (is_device_private_entry(entry)) {
875 		page = pfn_swap_entry_to_page(entry);
876 
877 		/*
878 		 * Update rss count even for unaddressable pages, as
879 		 * they should be treated just like normal pages in this
880 		 * respect.
881 		 *
882 		 * We will likely want to have some new rss counters
883 		 * for unaddressable pages, at some point. But for now
884 		 * keep things as they are.
885 		 */
886 		get_page(page);
887 		rss[mm_counter(page)]++;
888 		page_dup_rmap(page, false);
889 
890 		/*
891 		 * We do not preserve soft-dirty information, because so
892 		 * far, checkpoint/restore is the only feature that
893 		 * requires that. And checkpoint/restore does not work
894 		 * when a device driver is involved (you cannot easily
895 		 * save and restore device driver state).
896 		 */
897 		if (is_writable_device_private_entry(entry) &&
898 		    is_cow_mapping(vm_flags)) {
899 			entry = make_readable_device_private_entry(
900 							swp_offset(entry));
901 			pte = swp_entry_to_pte(entry);
902 			if (pte_swp_uffd_wp(*src_pte))
903 				pte = pte_swp_mkuffd_wp(pte);
904 			set_pte_at(src_mm, addr, src_pte, pte);
905 		}
906 	} else if (is_device_exclusive_entry(entry)) {
907 		/*
908 		 * Make device exclusive entries present by restoring the
909 		 * original entry then copying as for a present pte. Device
910 		 * exclusive entries currently only support private writable
911 		 * (ie. COW) mappings.
912 		 */
913 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
914 		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
915 			return -EBUSY;
916 		return -ENOENT;
917 	}
918 	if (!userfaultfd_wp(dst_vma))
919 		pte = pte_swp_clear_uffd_wp(pte);
920 	set_pte_at(dst_mm, addr, dst_pte, pte);
921 	return 0;
922 }
923 
924 /*
925  * Copy a present and normal page if necessary.
926  *
927  * NOTE! The usual case is that this doesn't need to do
928  * anything, and can just return a positive value. That
929  * will let the caller know that it can just increase
930  * the page refcount and re-use the pte the traditional
931  * way.
932  *
933  * But _if_ we need to copy it because it needs to be
934  * pinned in the parent (and the child should get its own
935  * copy rather than just a reference to the same page),
936  * we'll do that here and return zero to let the caller
937  * know we're done.
938  *
939  * And if we need a pre-allocated page but don't yet have
940  * one, return a negative error to let the preallocation
941  * code know so that it can do so outside the page table
942  * lock.
943  */
944 static inline int
945 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
946 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
947 		  struct page **prealloc, pte_t pte, struct page *page)
948 {
949 	struct page *new_page;
950 
951 	/*
952 	 * What we want to do is to check whether this page may
953 	 * have been pinned by the parent process.  If so,
954 	 * instead of wrprotect the pte on both sides, we copy
955 	 * the page immediately so that we'll always guarantee
956 	 * the pinned page won't be randomly replaced in the
957 	 * future.
958 	 *
959 	 * The page pinning checks are just "has this mm ever
960 	 * seen pinning", along with the (inexact) check of
961 	 * the page count. That might give false positives
962 	 * for pinning, but it will work correctly.
963 	 */
964 	if (likely(!page_needs_cow_for_dma(src_vma, page)))
965 		return 1;
966 
967 	new_page = *prealloc;
968 	if (!new_page)
969 		return -EAGAIN;
970 
971 	/*
972 	 * We have a prealloc page, all good!  Take it
973 	 * over and copy the page & arm it.
974 	 */
975 	*prealloc = NULL;
976 	copy_user_highpage(new_page, page, addr, src_vma);
977 	__SetPageUptodate(new_page);
978 	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
979 	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
980 	rss[mm_counter(new_page)]++;
981 
982 	/* All done, just insert the new page copy in the child */
983 	pte = mk_pte(new_page, dst_vma->vm_page_prot);
984 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
985 	if (userfaultfd_pte_wp(dst_vma, *src_pte))
986 		/* Uffd-wp needs to be delivered to dest pte as well */
987 		pte = pte_wrprotect(pte_mkuffd_wp(pte));
988 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
989 	return 0;
990 }
991 
992 /*
993  * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
994  * is required to copy this pte.
995  */
996 static inline int
997 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
998 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
999 		 struct page **prealloc)
1000 {
1001 	struct mm_struct *src_mm = src_vma->vm_mm;
1002 	unsigned long vm_flags = src_vma->vm_flags;
1003 	pte_t pte = *src_pte;
1004 	struct page *page;
1005 
1006 	page = vm_normal_page(src_vma, addr, pte);
1007 	if (page) {
1008 		int retval;
1009 
1010 		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
1011 					   addr, rss, prealloc, pte, page);
1012 		if (retval <= 0)
1013 			return retval;
1014 
1015 		get_page(page);
1016 		page_dup_rmap(page, false);
1017 		rss[mm_counter(page)]++;
1018 	}
1019 
1020 	/*
1021 	 * If it's a COW mapping, write protect it both
1022 	 * in the parent and the child
1023 	 */
1024 	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
1025 		ptep_set_wrprotect(src_mm, addr, src_pte);
1026 		pte = pte_wrprotect(pte);
1027 	}
1028 
1029 	/*
1030 	 * If it's a shared mapping, mark it clean in
1031 	 * the child
1032 	 */
1033 	if (vm_flags & VM_SHARED)
1034 		pte = pte_mkclean(pte);
1035 	pte = pte_mkold(pte);
1036 
1037 	if (!userfaultfd_wp(dst_vma))
1038 		pte = pte_clear_uffd_wp(pte);
1039 
1040 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
1041 	return 0;
1042 }
1043 
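/*
 * Pre-allocate and charge an anonymous page outside the page table lock,
 * for copy_present_page() to consume on the retry.
 */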
1044 static inline struct page *
1045 page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
1046 		   unsigned long addr)
1047 {
1048 	struct page *new_page;
1049 
1050 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
1051 	if (!new_page)
1052 		return NULL;
1053 
1054 	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
1055 		put_page(new_page);
1056 		return NULL;
1057 	}
1058 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
1059 
1060 	return new_page;
1061 }
1062 
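/*
 * Copy one pte page worth of mappings from src_vma to dst_vma.  Drops the
 * page table locks whenever memory has to be allocated (a pre-allocated
 * page or a swap count continuation) and then retries the remaining range.
 */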
1063 static int
1064 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1065 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1066 	       unsigned long end)
1067 {
1068 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1069 	struct mm_struct *src_mm = src_vma->vm_mm;
1070 	pte_t *orig_src_pte, *orig_dst_pte;
1071 	pte_t *src_pte, *dst_pte;
1072 	spinlock_t *src_ptl, *dst_ptl;
1073 	int progress, ret = 0;
1074 	int rss[NR_MM_COUNTERS];
1075 	swp_entry_t entry = (swp_entry_t){0};
1076 	struct page *prealloc = NULL;
1077 
1078 again:
1079 	progress = 0;
1080 	init_rss_vec(rss);
1081 
1082 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1083 	if (!dst_pte) {
1084 		ret = -ENOMEM;
1085 		goto out;
1086 	}
1087 	src_pte = pte_offset_map(src_pmd, addr);
1088 	src_ptl = pte_lockptr(src_mm, src_pmd);
1089 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1090 	orig_src_pte = src_pte;
1091 	orig_dst_pte = dst_pte;
1092 	arch_enter_lazy_mmu_mode();
1093 
1094 	do {
1095 		/*
1096 		 * We are holding two locks at this point - either of them
1097 		 * could generate latencies in another task on another CPU.
1098 		 */
1099 		if (progress >= 32) {
1100 			progress = 0;
1101 			if (need_resched() ||
1102 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1103 				break;
1104 		}
1105 		if (pte_none(*src_pte)) {
1106 			progress++;
1107 			continue;
1108 		}
1109 		if (unlikely(!pte_present(*src_pte))) {
1110 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1111 						  dst_pte, src_pte,
1112 						  dst_vma, src_vma,
1113 						  addr, rss);
1114 			if (ret == -EIO) {
1115 				entry = pte_to_swp_entry(*src_pte);
1116 				break;
1117 			} else if (ret == -EBUSY) {
1118 				break;
1119 			} else if (!ret) {
1120 				progress += 8;
1121 				continue;
1122 			}
1123 
1124 			/*
1125 			 * Device exclusive entry restored, continue by copying
1126 			 * the now present pte.
1127 			 */
1128 			WARN_ON_ONCE(ret != -ENOENT);
1129 		}
1130 		/* copy_present_pte() will clear `*prealloc' if consumed */
1131 		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
1132 				       addr, rss, &prealloc);
1133 		/*
1134 		 * If we need a pre-allocated page for this pte, drop the
1135 		 * locks, allocate, and try again.
1136 		 */
1137 		if (unlikely(ret == -EAGAIN))
1138 			break;
1139 		if (unlikely(prealloc)) {
1140 			/*
1141 			 * The pre-allocated page cannot be reused for the next pte,
1142 			 * so as to strictly follow mempolicy (e.g., alloc_page_vma()
1143 			 * allocates the page according to address).  This
1144 			 * could only happen if one pinned pte changed.
1145 			 */
1146 			put_page(prealloc);
1147 			prealloc = NULL;
1148 		}
1149 		progress += 8;
1150 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1151 
1152 	arch_leave_lazy_mmu_mode();
1153 	spin_unlock(src_ptl);
1154 	pte_unmap(orig_src_pte);
1155 	add_mm_rss_vec(dst_mm, rss);
1156 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1157 	cond_resched();
1158 
1159 	if (ret == -EIO) {
1160 		VM_WARN_ON_ONCE(!entry.val);
1161 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1162 			ret = -ENOMEM;
1163 			goto out;
1164 		}
1165 		entry.val = 0;
1166 	} else if (ret == -EBUSY) {
1167 		goto out;
1168 	} else if (ret ==  -EAGAIN) {
1169 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
1170 		if (!prealloc)
1171 			return -ENOMEM;
1172 	} else if (ret) {
1173 		VM_WARN_ON_ONCE(1);
1174 	}
1175 
1176 	/* We've captured and resolved the error. Reset, try again. */
1177 	ret = 0;
1178 
1179 	if (addr != end)
1180 		goto again;
1181 out:
1182 	if (unlikely(prealloc))
1183 		put_page(prealloc);
1184 	return ret;
1185 }
1186 
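/*
 * Copy the pte ranges under one pud, letting copy_huge_pmd() handle huge,
 * swap and devmap pmds.
 */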
1187 static inline int
1188 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1189 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1190 	       unsigned long end)
1191 {
1192 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1193 	struct mm_struct *src_mm = src_vma->vm_mm;
1194 	pmd_t *src_pmd, *dst_pmd;
1195 	unsigned long next;
1196 
1197 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1198 	if (!dst_pmd)
1199 		return -ENOMEM;
1200 	src_pmd = pmd_offset(src_pud, addr);
1201 	do {
1202 		next = pmd_addr_end(addr, end);
1203 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1204 			|| pmd_devmap(*src_pmd)) {
1205 			int err;
1206 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1207 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1208 					    addr, dst_vma, src_vma);
1209 			if (err == -ENOMEM)
1210 				return -ENOMEM;
1211 			if (!err)
1212 				continue;
1213 			/* fall through */
1214 		}
1215 		if (pmd_none_or_clear_bad(src_pmd))
1216 			continue;
1217 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1218 				   addr, next))
1219 			return -ENOMEM;
1220 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1221 	return 0;
1222 }
1223 
1224 static inline int
1225 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1226 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1227 	       unsigned long end)
1228 {
1229 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1230 	struct mm_struct *src_mm = src_vma->vm_mm;
1231 	pud_t *src_pud, *dst_pud;
1232 	unsigned long next;
1233 
1234 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1235 	if (!dst_pud)
1236 		return -ENOMEM;
1237 	src_pud = pud_offset(src_p4d, addr);
1238 	do {
1239 		next = pud_addr_end(addr, end);
1240 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1241 			int err;
1242 
1243 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1244 			err = copy_huge_pud(dst_mm, src_mm,
1245 					    dst_pud, src_pud, addr, src_vma);
1246 			if (err == -ENOMEM)
1247 				return -ENOMEM;
1248 			if (!err)
1249 				continue;
1250 			/* fall through */
1251 		}
1252 		if (pud_none_or_clear_bad(src_pud))
1253 			continue;
1254 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1255 				   addr, next))
1256 			return -ENOMEM;
1257 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1258 	return 0;
1259 }
1260 
1261 static inline int
1262 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1263 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1264 	       unsigned long end)
1265 {
1266 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1267 	p4d_t *src_p4d, *dst_p4d;
1268 	unsigned long next;
1269 
1270 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1271 	if (!dst_p4d)
1272 		return -ENOMEM;
1273 	src_p4d = p4d_offset(src_pgd, addr);
1274 	do {
1275 		next = p4d_addr_end(addr, end);
1276 		if (p4d_none_or_clear_bad(src_p4d))
1277 			continue;
1278 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1279 				   addr, next))
1280 			return -ENOMEM;
1281 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1282 	return 0;
1283 }
1284 
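/*
 * Copy the page tables covering one vma from the parent mm to the child at
 * fork time, write-protecting COW mappings in both while the parent's
 * write_protect_seq is held.
 */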
1285 int
1286 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1287 {
1288 	pgd_t *src_pgd, *dst_pgd;
1289 	unsigned long next;
1290 	unsigned long addr = src_vma->vm_start;
1291 	unsigned long end = src_vma->vm_end;
1292 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1293 	struct mm_struct *src_mm = src_vma->vm_mm;
1294 	struct mmu_notifier_range range;
1295 	bool is_cow;
1296 	int ret;
1297 
1298 	/*
1299 	 * Don't copy ptes where a page fault will fill them correctly.
1300 	 * Fork becomes much lighter when there are big shared or private
1301 	 * readonly mappings. The tradeoff is that copy_page_range is more
1302 	 * efficient than faulting.
1303 	 */
1304 	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1305 	    !src_vma->anon_vma)
1306 		return 0;
1307 
1308 	if (is_vm_hugetlb_page(src_vma))
1309 		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
1310 
1311 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1312 		/*
1313 		 * We do not free on error cases below as remove_vma
1314 		 * gets called on error from higher level routine
1315 		 */
1316 		ret = track_pfn_copy(src_vma);
1317 		if (ret)
1318 			return ret;
1319 	}
1320 
1321 	/*
1322 	 * We need to invalidate the secondary MMU mappings only when
1323 	 * there could be a permission downgrade on the ptes of the
1324 	 * parent mm. And a permission downgrade will only happen if
1325 	 * is_cow_mapping() returns true.
1326 	 */
1327 	is_cow = is_cow_mapping(src_vma->vm_flags);
1328 
1329 	if (is_cow) {
1330 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1331 					0, src_vma, src_mm, addr, end);
1332 		mmu_notifier_invalidate_range_start(&range);
1333 		/*
1334 		 * Disabling preemption is not needed for the write side, as
1335 		 * the read side doesn't spin, but goes to the mmap_lock.
1336 		 *
1337 		 * Use the raw variant of the seqcount_t write API to avoid
1338 		 * lockdep complaining about preemptibility.
1339 		 */
1340 		mmap_assert_write_locked(src_mm);
1341 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1342 	}
1343 
1344 	ret = 0;
1345 	dst_pgd = pgd_offset(dst_mm, addr);
1346 	src_pgd = pgd_offset(src_mm, addr);
1347 	do {
1348 		next = pgd_addr_end(addr, end);
1349 		if (pgd_none_or_clear_bad(src_pgd))
1350 			continue;
1351 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1352 					    addr, next))) {
1353 			ret = -ENOMEM;
1354 			break;
1355 		}
1356 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1357 
1358 	if (is_cow) {
1359 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1360 		mmu_notifier_invalidate_range_end(&range);
1361 	}
1362 	return ret;
1363 }
1364 
1365 /* Whether we should zap all COWed (private) pages too */
1366 static inline bool should_zap_cows(struct zap_details *details)
1367 {
1368 	/* By default, zap all pages */
1369 	if (!details)
1370 		return true;
1371 
1372 	/* Or, we zap COWed pages only if the caller wants to */
1373 	return !details->check_mapping;
1374 }
1375 
1376 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1377 				struct vm_area_struct *vma, pmd_t *pmd,
1378 				unsigned long addr, unsigned long end,
1379 				struct zap_details *details)
1380 {
1381 	struct mm_struct *mm = tlb->mm;
1382 	int force_flush = 0;
1383 	int rss[NR_MM_COUNTERS];
1384 	spinlock_t *ptl;
1385 	pte_t *start_pte;
1386 	pte_t *pte;
1387 	swp_entry_t entry;
1388 	int v_ret = 0;
1389 
1390 	tlb_change_page_size(tlb, PAGE_SIZE);
1391 again:
1392 	trace_android_vh_zap_pte_range_tlb_start(&v_ret);
1393 	init_rss_vec(rss);
1394 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1395 	pte = start_pte;
1396 	flush_tlb_batched_pending(mm);
1397 	arch_enter_lazy_mmu_mode();
1398 	do {
1399 		bool flush = false;
1400 		pte_t ptent = *pte;
1401 		if (pte_none(ptent))
1402 			continue;
1403 
1404 		if (need_resched())
1405 			break;
1406 
1407 		if (pte_present(ptent)) {
1408 			struct page *page;
1409 
1410 			page = vm_normal_page(vma, addr, ptent);
1411 			if (unlikely(details) && page) {
1412 				/*
1413 				 * unmap_shared_mapping_pages() wants to
1414 				 * invalidate cache without truncating:
1415 				 * unmap shared but keep private pages.
1416 				 */
1417 				if (details->check_mapping &&
1418 				    details->check_mapping != page_rmapping(page))
1419 					continue;
1420 			}
1421 			ptent = ptep_get_and_clear_full(mm, addr, pte,
1422 							tlb->fullmm);
1423 			tlb_remove_tlb_entry(tlb, pte, addr);
1424 			if (unlikely(!page))
1425 				continue;
1426 
1427 			if (!PageAnon(page)) {
1428 				if (pte_dirty(ptent)) {
1429 					force_flush = 1;
1430 					set_page_dirty(page);
1431 				}
1432 				if (pte_young(ptent) && likely(vma_has_recency(vma)))
1433 					mark_page_accessed(page);
1434 			}
1435 			rss[mm_counter(page)]--;
1436 			page_remove_rmap(page, false);
1437 			if (unlikely(page_mapcount(page) < 0))
1438 				print_bad_pte(vma, addr, ptent, page);
1439 			trace_android_vh_zap_pte_range_tlb_force_flush(page, &flush);
1440 			if (unlikely(__tlb_remove_page(tlb, page)) || flush) {
1441 				force_flush = 1;
1442 				addr += PAGE_SIZE;
1443 				break;
1444 			}
1445 			continue;
1446 		}
1447 
1448 		entry = pte_to_swp_entry(ptent);
1449 		if (is_device_private_entry(entry) ||
1450 		    is_device_exclusive_entry(entry)) {
1451 			struct page *page = pfn_swap_entry_to_page(entry);
1452 
1453 			if (unlikely(details && details->check_mapping)) {
1454 				/*
1455 				 * unmap_shared_mapping_pages() wants to
1456 				 * invalidate cache without truncating:
1457 				 * unmap shared but keep private pages.
1458 				 */
1459 				if (details->check_mapping !=
1460 				    page_rmapping(page))
1461 					continue;
1462 			}
1463 
1464 			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1465 			rss[mm_counter(page)]--;
1466 
1467 			if (is_device_private_entry(entry))
1468 				page_remove_rmap(page, false);
1469 
1470 			put_page(page);
1471 			continue;
1472 		}
1473 
1474 		if (!non_swap_entry(entry)) {
1475 			/* Genuine swap entry, hence a private anon page */
1476 			if (!should_zap_cows(details))
1477 				continue;
1478 			rss[MM_SWAPENTS]--;
1479 		} else if (is_migration_entry(entry)) {
1480 			struct page *page;
1481 
1482 			page = pfn_swap_entry_to_page(entry);
1483 			if (details && details->check_mapping &&
1484 			    details->check_mapping != page_rmapping(page))
1485 				continue;
1486 			rss[mm_counter(page)]--;
1487 		}
1488 		if (unlikely(!free_swap_and_cache(entry)))
1489 			print_bad_pte(vma, addr, ptent, NULL);
1490 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1491 	} while (pte++, addr += PAGE_SIZE, addr != end);
1492 
1493 	add_mm_rss_vec(mm, rss);
1494 	arch_leave_lazy_mmu_mode();
1495 
1496 	/* Do the actual TLB flush before dropping ptl */
1497 	if (force_flush)
1498 		tlb_flush_mmu_tlbonly(tlb);
1499 	pte_unmap_unlock(start_pte, ptl);
1500 
1501 	/*
1502 	 * If we forced a TLB flush (either due to running out of
1503 	 * batch buffers or because we needed to flush dirty TLB
1504 	 * entries before releasing the ptl), free the batched
1505 	 * memory too. Restart if we didn't do everything.
1506 	 */
1507 	if (force_flush) {
1508 		force_flush = 0;
1509 		tlb_flush_mmu(tlb);
1510 	}
1511 
1512 	trace_android_vh_zap_pte_range_tlb_end(&v_ret);
1513 	if (addr != end) {
1514 		cond_resched();
1515 		goto again;
1516 	}
1517 
1518 	return addr;
1519 }
1520 
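/*
 * Zap the pte ranges under one pud, splitting or zapping huge pmds as they
 * are encountered.
 */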
1521 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1522 				struct vm_area_struct *vma, pud_t *pud,
1523 				unsigned long addr, unsigned long end,
1524 				struct zap_details *details)
1525 {
1526 	pmd_t *pmd;
1527 	unsigned long next;
1528 
1529 	pmd = pmd_offset(pud, addr);
1530 	do {
1531 		next = pmd_addr_end(addr, end);
1532 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1533 			if (next - addr != HPAGE_PMD_SIZE)
1534 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1535 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
1536 				goto next;
1537 			/* fall through */
1538 		} else if (details && details->single_page &&
1539 			   PageTransCompound(details->single_page) &&
1540 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1541 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1542 			/*
1543 			 * Take and drop THP pmd lock so that we cannot return
1544 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1545 			 * but not yet decremented compound_mapcount().
1546 			 */
1547 			spin_unlock(ptl);
1548 		}
1549 
1550 		/*
1551 		 * Here there can be other concurrent MADV_DONTNEED or
1552 		 * trans huge page faults running, and if the pmd is
1553 		 * none or trans huge it can change under us. This is
1554 		 * because MADV_DONTNEED holds the mmap_lock in read
1555 		 * mode.
1556 		 */
1557 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1558 			goto next;
1559 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1560 next:
1561 		cond_resched();
1562 	} while (pmd++, addr = next, addr != end);
1563 
1564 	return addr;
1565 }
1566 
1567 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1568 				struct vm_area_struct *vma, p4d_t *p4d,
1569 				unsigned long addr, unsigned long end,
1570 				struct zap_details *details)
1571 {
1572 	pud_t *pud;
1573 	unsigned long next;
1574 
1575 	pud = pud_offset(p4d, addr);
1576 	do {
1577 		next = pud_addr_end(addr, end);
1578 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1579 			if (next - addr != HPAGE_PUD_SIZE) {
1580 				mmap_assert_locked(tlb->mm);
1581 				split_huge_pud(vma, pud, addr);
1582 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1583 				goto next;
1584 			/* fall through */
1585 		}
1586 		if (pud_none_or_clear_bad(pud))
1587 			continue;
1588 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1589 next:
1590 		cond_resched();
1591 	} while (pud++, addr = next, addr != end);
1592 
1593 	return addr;
1594 }
1595 
1596 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1597 				struct vm_area_struct *vma, pgd_t *pgd,
1598 				unsigned long addr, unsigned long end,
1599 				struct zap_details *details)
1600 {
1601 	p4d_t *p4d;
1602 	unsigned long next;
1603 
1604 	p4d = p4d_offset(pgd, addr);
1605 	do {
1606 		next = p4d_addr_end(addr, end);
1607 		if (p4d_none_or_clear_bad(p4d))
1608 			continue;
1609 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1610 	} while (p4d++, addr = next, addr != end);
1611 
1612 	return addr;
1613 }
1614 
1615 void unmap_page_range(struct mmu_gather *tlb,
1616 			     struct vm_area_struct *vma,
1617 			     unsigned long addr, unsigned long end,
1618 			     struct zap_details *details)
1619 {
1620 	pgd_t *pgd;
1621 	unsigned long next;
1622 
1623 	BUG_ON(addr >= end);
1624 	tlb_start_vma(tlb, vma);
1625 	pgd = pgd_offset(vma->vm_mm, addr);
1626 	do {
1627 		next = pgd_addr_end(addr, end);
1628 		if (pgd_none_or_clear_bad(pgd))
1629 			continue;
1630 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1631 	} while (pgd++, addr = next, addr != end);
1632 	tlb_end_vma(tlb, vma);
1633 }
1634 
1635 
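/*
 * Unmap the part of a single vma that overlaps [start_addr, end_addr),
 * taking the hugetlb path where needed.
 */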
1636 static void unmap_single_vma(struct mmu_gather *tlb,
1637 		struct vm_area_struct *vma, unsigned long start_addr,
1638 		unsigned long end_addr,
1639 		struct zap_details *details)
1640 {
1641 	unsigned long start = max(vma->vm_start, start_addr);
1642 	unsigned long end;
1643 
1644 	if (start >= vma->vm_end)
1645 		return;
1646 	end = min(vma->vm_end, end_addr);
1647 	if (end <= vma->vm_start)
1648 		return;
1649 
1650 	if (vma->vm_file)
1651 		uprobe_munmap(vma, start, end);
1652 
1653 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1654 		untrack_pfn(vma, 0, 0);
1655 
1656 	if (start != end) {
1657 		if (unlikely(is_vm_hugetlb_page(vma))) {
1658 			/*
1659 			 * It is undesirable to test vma->vm_file as it
1660 			 * should be non-null for valid hugetlb area.
1661 			 * However, vm_file will be NULL in the error
1662 			 * cleanup path of mmap_region. When
1663 			 * hugetlbfs ->mmap method fails,
1664 			 * mmap_region() nullifies vma->vm_file
1665 			 * before calling this function to clean up.
1666 			 * Since no pte has actually been setup, it is
1667 			 * safe to do nothing in this case.
1668 			 */
1669 			if (vma->vm_file) {
1670 				i_mmap_lock_write(vma->vm_file->f_mapping);
1671 				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1672 				i_mmap_unlock_write(vma->vm_file->f_mapping);
1673 			}
1674 		} else
1675 			unmap_page_range(tlb, vma, start, end, details);
1676 	}
1677 }
1678 
1679 /**
1680  * unmap_vmas - unmap a range of memory covered by a list of vma's
1681  * @tlb: address of the caller's struct mmu_gather
1682  * @vma: the starting vma
1683  * @start_addr: virtual address at which to start unmapping
1684  * @end_addr: virtual address at which to end unmapping
1685  *
1686  * Unmap all pages in the vma list.
1687  *
1688  * Only addresses between @start_addr and @end_addr will be unmapped.
1689  *
1690  * The VMA list must be sorted in ascending virtual address order.
1691  *
1692  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1693  * range after unmap_vmas() returns.  So the only responsibility here is to
1694  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1695  * drops the lock and schedules.
1696  */
1697 void unmap_vmas(struct mmu_gather *tlb,
1698 		struct vm_area_struct *vma, unsigned long start_addr,
1699 		unsigned long end_addr)
1700 {
1701 	struct mmu_notifier_range range;
1702 
1703 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1704 				start_addr, end_addr);
1705 	mmu_notifier_invalidate_range_start(&range);
1706 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1707 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1708 	mmu_notifier_invalidate_range_end(&range);
1709 }
1710 
1711 /**
1712  * zap_page_range - remove user pages in a given range
1713  * @vma: vm_area_struct holding the applicable pages
1714  * @start: starting address of pages to zap
1715  * @size: number of bytes to zap
1716  *
1717  * Caller must protect the VMA list
1718  */
1719 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1720 		unsigned long size)
1721 {
1722 	struct mmu_notifier_range range;
1723 	struct mmu_gather tlb;
1724 
1725 	lru_add_drain();
1726 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1727 				start, start + size);
1728 	tlb_gather_mmu(&tlb, vma->vm_mm);
1729 	update_hiwater_rss(vma->vm_mm);
1730 	mmu_notifier_invalidate_range_start(&range);
1731 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1732 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
1733 	mmu_notifier_invalidate_range_end(&range);
1734 	tlb_finish_mmu(&tlb);
1735 }
1736 
1737 /**
1738  * zap_page_range_single - remove user pages in a given range
1739  * @vma: vm_area_struct holding the applicable pages
1740  * @address: starting address of pages to zap
1741  * @size: number of bytes to zap
1742  * @details: details of shared cache invalidation
1743  *
1744  * The range must fit into one VMA.
1745  */
1746 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1747 		unsigned long size, struct zap_details *details)
1748 {
1749 	struct mmu_notifier_range range;
1750 	struct mmu_gather tlb;
1751 
1752 	lru_add_drain();
1753 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1754 				address, address + size);
1755 	tlb_gather_mmu(&tlb, vma->vm_mm);
1756 	update_hiwater_rss(vma->vm_mm);
1757 	mmu_notifier_invalidate_range_start(&range);
1758 	unmap_single_vma(&tlb, vma, address, range.end, details);
1759 	mmu_notifier_invalidate_range_end(&range);
1760 	tlb_finish_mmu(&tlb);
1761 }
1762 
1763 /**
1764  * zap_vma_ptes - remove ptes mapping the vma
1765  * @vma: vm_area_struct holding ptes to be zapped
1766  * @address: starting address of pages to zap
1767  * @size: number of bytes to zap
1768  *
1769  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1770  *
1771  * The entire address range must be fully contained within the vma.
1772  *
1773  */
1774 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1775 		unsigned long size)
1776 {
1777 	if (address < vma->vm_start || address + size > vma->vm_end ||
1778 	    		!(vma->vm_flags & VM_PFNMAP))
1779 		return;
1780 
1781 	zap_page_range_single(vma, address, size, NULL);
1782 }
1783 EXPORT_SYMBOL_GPL(zap_vma_ptes);
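/*
 * A minimal usage sketch, assuming a hypothetical driver that earlier
 * populated a VM_PFNMAP vma (for example via remap_pfn_range()) and now
 * needs to revoke access when the backing resource goes away; "my_buf"
 * and its fields are illustrative only, zap_vma_ptes() is the only API
 * taken from this file:
 *
 *	static void my_buf_revoke_mapping(struct my_buf *buf)
 *	{
 *		struct vm_area_struct *vma = buf->vma;
 *
 *		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *	}
 */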
1784 
1785 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1786 {
1787 	pgd_t *pgd;
1788 	p4d_t *p4d;
1789 	pud_t *pud;
1790 	pmd_t *pmd;
1791 
1792 	pgd = pgd_offset(mm, addr);
1793 	p4d = p4d_alloc(mm, pgd, addr);
1794 	if (!p4d)
1795 		return NULL;
1796 	pud = pud_alloc(mm, p4d, addr);
1797 	if (!pud)
1798 		return NULL;
1799 	pmd = pmd_alloc(mm, pud, addr);
1800 	if (!pmd)
1801 		return NULL;
1802 
1803 	VM_BUG_ON(pmd_trans_huge(*pmd));
1804 	return pmd;
1805 }
1806 
1807 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1808 			spinlock_t **ptl)
1809 {
1810 	pmd_t *pmd = walk_to_pmd(mm, addr);
1811 
1812 	if (!pmd)
1813 		return NULL;
1814 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1815 }
1816 
1817 static int validate_page_before_insert(struct page *page)
1818 {
1819 	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1820 		return -EINVAL;
1821 	flush_dcache_page(page);
1822 	return 0;
1823 }
1824 
1825 static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1826 			unsigned long addr, struct page *page, pgprot_t prot)
1827 {
1828 	if (!pte_none(*pte))
1829 		return -EBUSY;
1830 	/* Ok, finally just insert the thing.. */
1831 	get_page(page);
1832 	inc_mm_counter_fast(mm, mm_counter_file(page));
1833 	page_add_file_rmap(page, false);
1834 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1835 	return 0;
1836 }
1837 
1838 /*
1839  * This is the old fallback for page remapping.
1840  *
1841  * For historical reasons, it only allows reserved pages. Only
1842  * old drivers should use this, and they needed to mark their
1843  * pages reserved for the old functions anyway.
1844  */
1845 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1846 			struct page *page, pgprot_t prot)
1847 {
1848 	struct mm_struct *mm = vma->vm_mm;
1849 	int retval;
1850 	pte_t *pte;
1851 	spinlock_t *ptl;
1852 
1853 	retval = validate_page_before_insert(page);
1854 	if (retval)
1855 		goto out;
1856 	retval = -ENOMEM;
1857 	pte = get_locked_pte(mm, addr, &ptl);
1858 	if (!pte)
1859 		goto out;
1860 	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
1861 	pte_unmap_unlock(pte, ptl);
1862 out:
1863 	return retval;
1864 }
1865 
1866 #ifdef pte_index
1867 static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
1868 			unsigned long addr, struct page *page, pgprot_t prot)
1869 {
1870 	int err;
1871 
1872 	if (!page_count(page))
1873 		return -EINVAL;
1874 	err = validate_page_before_insert(page);
1875 	if (err)
1876 		return err;
1877 	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
1878 }
1879 
1880 /* insert_pages() amortizes the cost of spinlock operations
1881  * when inserting pages in a loop. Arch *must* define pte_index.
1882  */
1883 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1884 			struct page **pages, unsigned long *num, pgprot_t prot)
1885 {
1886 	pmd_t *pmd = NULL;
1887 	pte_t *start_pte, *pte;
1888 	spinlock_t *pte_lock;
1889 	struct mm_struct *const mm = vma->vm_mm;
1890 	unsigned long curr_page_idx = 0;
1891 	unsigned long remaining_pages_total = *num;
1892 	unsigned long pages_to_write_in_pmd;
1893 	int ret;
1894 more:
1895 	ret = -EFAULT;
1896 	pmd = walk_to_pmd(mm, addr);
1897 	if (!pmd)
1898 		goto out;
1899 
1900 	pages_to_write_in_pmd = min_t(unsigned long,
1901 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1902 
1903 	/* Allocate the PTE if necessary; takes PMD lock once only. */
1904 	ret = -ENOMEM;
1905 	if (pte_alloc(mm, pmd))
1906 		goto out;
1907 
1908 	while (pages_to_write_in_pmd) {
1909 		int pte_idx = 0;
1910 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1911 
1912 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1913 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1914 			int err = insert_page_in_batch_locked(mm, pte,
1915 				addr, pages[curr_page_idx], prot);
1916 			if (unlikely(err)) {
1917 				pte_unmap_unlock(start_pte, pte_lock);
1918 				ret = err;
1919 				remaining_pages_total -= pte_idx;
1920 				goto out;
1921 			}
1922 			addr += PAGE_SIZE;
1923 			++curr_page_idx;
1924 		}
1925 		pte_unmap_unlock(start_pte, pte_lock);
1926 		pages_to_write_in_pmd -= batch_size;
1927 		remaining_pages_total -= batch_size;
1928 	}
1929 	if (remaining_pages_total)
1930 		goto more;
1931 	ret = 0;
1932 out:
1933 	*num = remaining_pages_total;
1934 	return ret;
1935 }
1936 #endif  /* ifdef pte_index */
1937 
1938 /**
1939  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1940  * @vma: user vma to map to
1941  * @addr: target start user address of these pages
1942  * @pages: source kernel pages
1943  * @num: in: number of pages to map. out: number of pages that were *not*
1944  * mapped. (0 means all pages were successfully mapped).
1945  *
1946  * Preferred over vm_insert_page() when inserting multiple pages.
1947  *
1948  * In case of error, we may have mapped a subset of the provided
1949  * pages. It is the caller's responsibility to account for this case.
1950  *
1951  * The same restrictions apply as in vm_insert_page().
1952  */
1953 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1954 			struct page **pages, unsigned long *num)
1955 {
1956 #ifdef pte_index
1957 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1958 
1959 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1960 		return -EFAULT;
1961 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1962 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1963 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1964 		vma->vm_flags |= VM_MIXEDMAP;
1965 	}
1966 	/* Defer page refcount checking till we're about to map that page. */
1967 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1968 #else
1969 	unsigned long idx = 0, pgcount = *num;
1970 	int err = -EINVAL;
1971 
1972 	for (; idx < pgcount; ++idx) {
1973 		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1974 		if (err)
1975 			break;
1976 	}
1977 	*num = pgcount - idx;
1978 	return err;
1979 #endif  /* ifdef pte_index */
1980 }
1981 EXPORT_SYMBOL(vm_insert_pages);
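/*
 * A minimal sketch of batched insertion from a hypothetical driver mmap
 * path; "pages"/"nr" are assumed to describe individually allocated,
 * non-compound kernel pages, and per the contract above the count passed
 * by reference is updated to the number of pages that were *not* mapped:
 *
 *	static int my_dev_map_buffer(struct vm_area_struct *vma,
 *				     struct page **pages, unsigned long nr)
 *	{
 *		unsigned long num = nr;
 *		int err;
 *
 *		err = vm_insert_pages(vma, vma->vm_start, pages, &num);
 *		if (err)
 *			pr_debug("mapped %lu of %lu pages\n", nr - num, nr);
 *		return err;
 *	}
 */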
1982 
1983 /**
1984  * vm_insert_page - insert single page into user vma
1985  * @vma: user vma to map to
1986  * @addr: target user address of this page
1987  * @page: source kernel page
1988  *
1989  * This allows drivers to insert individual pages they've allocated
1990  * into a user vma.
1991  *
1992  * The page has to be a nice clean _individual_ kernel allocation.
1993  * If you allocate a compound page, you need to have marked it as
1994  * such (__GFP_COMP), or manually just split the page up yourself
1995  * (see split_page()).
1996  *
1997  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1998  * took an arbitrary page protection parameter. This doesn't allow
1999  * that. Your vma protection will have to be set up correctly, which
2000  * means that if you want a shared writable mapping, you'd better
2001  * ask for a shared writable mapping!
2002  *
2003  * The page does not need to be reserved.
2004  *
2005  * Usually this function is called from f_op->mmap() handler
2006  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2007  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2008  * function from other places, for example from page-fault handler.
2009  *
2010  * Return: %0 on success, negative error code otherwise.
2011  */
2012 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2013 			struct page *page)
2014 {
2015 	if (addr < vma->vm_start || addr >= vma->vm_end)
2016 		return -EFAULT;
2017 	if (!page_count(page))
2018 		return -EINVAL;
2019 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2020 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2021 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2022 		vma->vm_flags |= VM_MIXEDMAP;
2023 	}
2024 	return insert_page(vma, addr, page, vma->vm_page_prot);
2025 }
2026 EXPORT_SYMBOL(vm_insert_page);
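/*
 * A minimal sketch of the usual calling convention: a hypothetical driver
 * ->mmap handler (running under mmap_lock as described above) exposing a
 * single, individually allocated kernel page; "my_dev" and "info_page"
 * are illustrative names only:
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 *			return -EINVAL;
 *		return vm_insert_page(vma, vma->vm_start, dev->info_page);
 *	}
 */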
2027 
2028 /*
2029  * __vm_map_pages - maps range of kernel pages into user vma
2030  * @vma: user vma to map to
2031  * @pages: pointer to array of source kernel pages
2032  * @num: number of pages in page array
2033  * @offset: user's requested vm_pgoff
2034  *
2035  * This allows drivers to map range of kernel pages into a user vma.
2036  *
2037  * Return: 0 on success and error code otherwise.
2038  */
2039 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2040 				unsigned long num, unsigned long offset)
2041 {
2042 	unsigned long count = vma_pages(vma);
2043 	unsigned long uaddr = vma->vm_start;
2044 	int ret, i;
2045 
2046 	/* Fail if the user requested offset is beyond the end of the object */
2047 	if (offset >= num)
2048 		return -ENXIO;
2049 
2050 	/* Fail if the user requested size exceeds available object size */
2051 	if (count > num - offset)
2052 		return -ENXIO;
2053 
2054 	for (i = 0; i < count; i++) {
2055 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2056 		if (ret < 0)
2057 			return ret;
2058 		uaddr += PAGE_SIZE;
2059 	}
2060 
2061 	return 0;
2062 }
2063 
2064 /**
2065  * vm_map_pages - map a range of kernel pages, starting at a non-zero offset
2066  * @vma: user vma to map to
2067  * @pages: pointer to array of source kernel pages
2068  * @num: number of pages in page array
2069  *
2070  * Maps an object consisting of @num pages, catering for the user's
2071  * requested vm_pgoff
2072  *
2073  * If we fail to insert any page into the vma, the function will return
2074  * immediately leaving any previously inserted pages present.  Callers
2075  * from the mmap handler may immediately return the error as their caller
2076  * will destroy the vma, removing any successfully inserted pages. Other
2077  * callers should make their own arrangements for calling unmap_region().
2078  *
2079  * Context: Process context. Called by mmap handlers.
2080  * Return: 0 on success and error code otherwise.
2081  */
2082 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2083 				unsigned long num)
2084 {
2085 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2086 }
2087 EXPORT_SYMBOL(vm_map_pages);
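/*
 * A minimal sketch of a hypothetical driver ->mmap handler; dev->pages[]
 * and dev->nr_pages are assumed to describe the whole object, and
 * vm_map_pages() itself applies vma->vm_pgoff and rejects out-of-range
 * requests as described above:
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return vm_map_pages(vma, dev->pages, dev->nr_pages);
 *	}
 */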
2088 
2089 /**
2090  * vm_map_pages_zero - map a range of kernel pages, starting at offset zero
2091  * @vma: user vma to map to
2092  * @pages: pointer to array of source kernel pages
2093  * @num: number of pages in page array
2094  *
2095  * Similar to vm_map_pages(), except that it explicitly sets the offset
2096  * to 0. This function is intended for drivers that do not consider
2097  * vm_pgoff.
2098  *
2099  * Context: Process context. Called by mmap handlers.
2100  * Return: 0 on success and error code otherwise.
2101  */
2102 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2103 				unsigned long num)
2104 {
2105 	return __vm_map_pages(vma, pages, num, 0);
2106 }
2107 EXPORT_SYMBOL(vm_map_pages_zero);
2108 
2109 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2110 			pfn_t pfn, pgprot_t prot, bool mkwrite)
2111 {
2112 	struct mm_struct *mm = vma->vm_mm;
2113 	pte_t *pte, entry;
2114 	spinlock_t *ptl;
2115 
2116 	pte = get_locked_pte(mm, addr, &ptl);
2117 	if (!pte)
2118 		return VM_FAULT_OOM;
2119 	if (!pte_none(*pte)) {
2120 		if (mkwrite) {
2121 			/*
2122 			 * For read faults on private mappings the PFN passed
2123 			 * in may not match the PFN we have mapped if the
2124 			 * mapped PFN is a writeable COW page.  In the mkwrite
2125 			 * case we are creating a writable PTE for a shared
2126 			 * mapping and we expect the PFNs to match. If they
2127 			 * don't match, we are likely racing with block
2128 			 * allocation and mapping invalidation so just skip the
2129 			 * update.
2130 			 */
2131 			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
2132 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
2133 				goto out_unlock;
2134 			}
2135 			entry = pte_mkyoung(*pte);
2136 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2137 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2138 				update_mmu_cache(vma, addr, pte);
2139 		}
2140 		goto out_unlock;
2141 	}
2142 
2143 	/* Ok, finally just insert the thing.. */
2144 	if (pfn_t_devmap(pfn))
2145 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2146 	else
2147 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2148 
2149 	if (mkwrite) {
2150 		entry = pte_mkyoung(entry);
2151 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2152 	}
2153 
2154 	set_pte_at(mm, addr, pte, entry);
2155 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2156 
2157 out_unlock:
2158 	pte_unmap_unlock(pte, ptl);
2159 	return VM_FAULT_NOPAGE;
2160 }
2161 
2162 /**
2163  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2164  * @vma: user vma to map to
2165  * @addr: target user address of this page
2166  * @pfn: source kernel pfn
2167  * @pgprot: pgprot flags for the inserted page
2168  *
2169  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2170  * to override pgprot on a per-page basis.
2171  *
2172  * This only makes sense for IO mappings, and it makes no sense for
2173  * COW mappings.  In general, using multiple vmas is preferable;
2174  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2175  * impractical.
2176  *
2177  * See vmf_insert_mixed_prot() for a discussion of the implication of using
2178  * a value of @pgprot different from that of @vma->vm_page_prot.
2179  *
2180  * Context: Process context.  May allocate using %GFP_KERNEL.
2181  * Return: vm_fault_t value.
2182  */
2183 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2184 			unsigned long pfn, pgprot_t pgprot)
2185 {
2186 	/*
2187 	 * Technically, architectures with pte_special can avoid all these
2188 	 * restrictions (same for remap_pfn_range).  However we would like
2189 	 * consistency in testing and feature parity among all, so we should
2190 	 * try to keep these invariants in place for everybody.
2191 	 */
2192 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2193 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2194 						(VM_PFNMAP|VM_MIXEDMAP));
2195 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2196 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2197 
2198 	if (addr < vma->vm_start || addr >= vma->vm_end)
2199 		return VM_FAULT_SIGBUS;
2200 
2201 	if (!pfn_modify_allowed(pfn, pgprot))
2202 		return VM_FAULT_SIGBUS;
2203 
2204 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2205 
2206 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2207 			false);
2208 }
2209 EXPORT_SYMBOL(vmf_insert_pfn_prot);
2210 
2211 /**
2212  * vmf_insert_pfn - insert single pfn into user vma
2213  * @vma: user vma to map to
2214  * @addr: target user address of this page
2215  * @pfn: source kernel pfn
2216  *
2217  * Similar to vm_insert_page, this allows drivers to insert individual pages
2218  * they've allocated into a user vma. Same comments apply.
2219  *
2220  * This function should only be called from a vm_ops->fault handler, and
2221  * in that case the handler should return the result of this function.
2222  *
2223  * vma cannot be a COW mapping.
2224  *
2225  * As this is called only for pages that do not currently exist, we
2226  * do not need to flush old virtual caches or the TLB.
2227  *
2228  * Context: Process context.  May allocate using %GFP_KERNEL.
2229  * Return: vm_fault_t value.
2230  */
2231 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2232 			unsigned long pfn)
2233 {
2234 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2235 }
2236 EXPORT_SYMBOL(vmf_insert_pfn);
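/*
 * A minimal sketch of the intended call site: a hypothetical ->fault
 * handler for a VM_PFNMAP mapping of device memory, returning the result
 * of vmf_insert_pfn() directly as recommended above; "my_dev" and
 * "base_pfn" are illustrative only:
 *
 *	static vm_fault_t my_dev_fault(struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vmf->vma->vm_private_data;
 *
 *		return vmf_insert_pfn(vmf->vma, vmf->address,
 *				      dev->base_pfn + vmf->pgoff);
 *	}
 */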
2237 
2238 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2239 {
2240 	/* these checks mirror the abort conditions in vm_normal_page */
2241 	if (vma->vm_flags & VM_MIXEDMAP)
2242 		return true;
2243 	if (pfn_t_devmap(pfn))
2244 		return true;
2245 	if (pfn_t_special(pfn))
2246 		return true;
2247 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2248 		return true;
2249 	return false;
2250 }
2251 
2252 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2253 		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2254 		bool mkwrite)
2255 {
2256 	int err;
2257 
2258 	BUG_ON(!vm_mixed_ok(vma, pfn));
2259 
2260 	if (addr < vma->vm_start || addr >= vma->vm_end)
2261 		return VM_FAULT_SIGBUS;
2262 
2263 	track_pfn_insert(vma, &pgprot, pfn);
2264 
2265 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2266 		return VM_FAULT_SIGBUS;
2267 
2268 	/*
2269 	 * If we don't have pte special, then we have to use the pfn_valid()
2270 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2271 	 * refcount the page if pfn_valid is true (hence insert_page rather
2272 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2273 	 * without pte special, it would there be refcounted as a normal page.
2274 	 */
2275 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2276 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2277 		struct page *page;
2278 
2279 		/*
2280 		 * At this point we are committed to insert_page()
2281 		 * regardless of whether the caller specified flags that
2282 		 * result in pfn_t_has_page() == false.
2283 		 */
2284 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2285 		err = insert_page(vma, addr, page, pgprot);
2286 	} else {
2287 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2288 	}
2289 
2290 	if (err == -ENOMEM)
2291 		return VM_FAULT_OOM;
2292 	if (err < 0 && err != -EBUSY)
2293 		return VM_FAULT_SIGBUS;
2294 
2295 	return VM_FAULT_NOPAGE;
2296 }
2297 
2298 /**
2299  * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2300  * @vma: user vma to map to
2301  * @addr: target user address of this page
2302  * @pfn: source kernel pfn
2303  * @pgprot: pgprot flags for the inserted page
2304  *
2305  * This is exactly like vmf_insert_mixed(), except that it allows drivers
2306  * to override pgprot on a per-page basis.
2307  *
2308  * Typically this function should be used by drivers to set caching- and
2309  * encryption bits different than those of @vma->vm_page_prot, because
2310  * the caching- or encryption mode may not be known at mmap() time.
2311  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2312  * to set caching and encryption bits for those vmas (except for COW pages).
2313  * This is ensured by core vm only modifying these page table entries using
2314  * functions that don't touch caching- or encryption bits, using pte_modify()
2315  * if needed. (See for example mprotect()).
2316  * Also when new page-table entries are created, this is only done using the
2317  * fault() callback, and never using the value of vma->vm_page_prot,
2318  * except for page-table entries that point to anonymous pages as the result
2319  * of COW.
2320  *
2321  * Context: Process context.  May allocate using %GFP_KERNEL.
2322  * Return: vm_fault_t value.
2323  */
2324 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2325 				 pfn_t pfn, pgprot_t pgprot)
2326 {
2327 	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2328 }
2329 EXPORT_SYMBOL(vmf_insert_mixed_prot);
2330 
2331 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2332 		pfn_t pfn)
2333 {
2334 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2335 }
2336 EXPORT_SYMBOL(vmf_insert_mixed);
2337 
2338 /*
2339  *  If the insertion of PTE failed because someone else already added a
2340  *  different entry in the mean time, we treat that as success as we assume
2341  *  the same entry was actually inserted.
2342  */
2343 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2344 		unsigned long addr, pfn_t pfn)
2345 {
2346 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2347 }
2348 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2349 
2350 /*
2351  * Maps a range of physical memory into the requested pages. The old
2352  * mappings are removed. Any references to nonexistent pages result
2353  * in null mappings (currently treated as "copy-on-access").
2354  */
2355 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2356 			unsigned long addr, unsigned long end,
2357 			unsigned long pfn, pgprot_t prot)
2358 {
2359 	pte_t *pte, *mapped_pte;
2360 	spinlock_t *ptl;
2361 	int err = 0;
2362 
2363 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2364 	if (!pte)
2365 		return -ENOMEM;
2366 	arch_enter_lazy_mmu_mode();
2367 	do {
2368 		BUG_ON(!pte_none(*pte));
2369 		if (!pfn_modify_allowed(pfn, prot)) {
2370 			err = -EACCES;
2371 			break;
2372 		}
2373 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2374 		pfn++;
2375 	} while (pte++, addr += PAGE_SIZE, addr != end);
2376 	arch_leave_lazy_mmu_mode();
2377 	pte_unmap_unlock(mapped_pte, ptl);
2378 	return err;
2379 }
2380 
2381 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2382 			unsigned long addr, unsigned long end,
2383 			unsigned long pfn, pgprot_t prot)
2384 {
2385 	pmd_t *pmd;
2386 	unsigned long next;
2387 	int err;
2388 
2389 	pfn -= addr >> PAGE_SHIFT;
2390 	pmd = pmd_alloc(mm, pud, addr);
2391 	if (!pmd)
2392 		return -ENOMEM;
2393 	VM_BUG_ON(pmd_trans_huge(*pmd));
2394 	do {
2395 		next = pmd_addr_end(addr, end);
2396 		err = remap_pte_range(mm, pmd, addr, next,
2397 				pfn + (addr >> PAGE_SHIFT), prot);
2398 		if (err)
2399 			return err;
2400 	} while (pmd++, addr = next, addr != end);
2401 	return 0;
2402 }
2403 
2404 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2405 			unsigned long addr, unsigned long end,
2406 			unsigned long pfn, pgprot_t prot)
2407 {
2408 	pud_t *pud;
2409 	unsigned long next;
2410 	int err;
2411 
2412 	pfn -= addr >> PAGE_SHIFT;
2413 	pud = pud_alloc(mm, p4d, addr);
2414 	if (!pud)
2415 		return -ENOMEM;
2416 	do {
2417 		next = pud_addr_end(addr, end);
2418 		err = remap_pmd_range(mm, pud, addr, next,
2419 				pfn + (addr >> PAGE_SHIFT), prot);
2420 		if (err)
2421 			return err;
2422 	} while (pud++, addr = next, addr != end);
2423 	return 0;
2424 }
2425 
2426 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2427 			unsigned long addr, unsigned long end,
2428 			unsigned long pfn, pgprot_t prot)
2429 {
2430 	p4d_t *p4d;
2431 	unsigned long next;
2432 	int err;
2433 
2434 	pfn -= addr >> PAGE_SHIFT;
2435 	p4d = p4d_alloc(mm, pgd, addr);
2436 	if (!p4d)
2437 		return -ENOMEM;
2438 	do {
2439 		next = p4d_addr_end(addr, end);
2440 		err = remap_pud_range(mm, p4d, addr, next,
2441 				pfn + (addr >> PAGE_SHIFT), prot);
2442 		if (err)
2443 			return err;
2444 	} while (p4d++, addr = next, addr != end);
2445 	return 0;
2446 }
2447 
2448 /*
2449  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2450  * must have pre-validated the caching bits of the pgprot_t.
2451  */
2452 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2453 		unsigned long pfn, unsigned long size, pgprot_t prot)
2454 {
2455 	pgd_t *pgd;
2456 	unsigned long next;
2457 	unsigned long end = addr + PAGE_ALIGN(size);
2458 	struct mm_struct *mm = vma->vm_mm;
2459 	int err;
2460 
2461 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2462 		return -EINVAL;
2463 
2464 	/*
2465 	 * Physically remapped pages are special. Tell the
2466 	 * rest of the world about it:
2467 	 *   VM_IO tells people not to look at these pages
2468 	 *	(accesses can have side effects).
2469 	 *   VM_PFNMAP tells the core MM that the base pages are just
2470 	 *	raw PFN mappings, and do not have a "struct page" associated
2471 	 *	with them.
2472 	 *   VM_DONTEXPAND
2473 	 *      Disable vma merging and expanding with mremap().
2474 	 *   VM_DONTDUMP
2475 	 *      Omit vma from core dump, even when VM_IO turned off.
2476 	 *
2477 	 * There's a horrible special case to handle copy-on-write
2478 	 * behaviour that some programs depend on. We mark the "original"
2479 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2480 	 * See vm_normal_page() for details.
2481 	 */
2482 	if (is_cow_mapping(vma->vm_flags)) {
2483 		if (addr != vma->vm_start || end != vma->vm_end)
2484 			return -EINVAL;
2485 		vma->vm_pgoff = pfn;
2486 	}
2487 
2488 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2489 
2490 	BUG_ON(addr >= end);
2491 	pfn -= addr >> PAGE_SHIFT;
2492 	pgd = pgd_offset(mm, addr);
2493 	flush_cache_range(vma, addr, end);
2494 	do {
2495 		next = pgd_addr_end(addr, end);
2496 		err = remap_p4d_range(mm, pgd, addr, next,
2497 				pfn + (addr >> PAGE_SHIFT), prot);
2498 		if (err)
2499 			return err;
2500 	} while (pgd++, addr = next, addr != end);
2501 
2502 	return 0;
2503 }
2504 
2505 /**
2506  * remap_pfn_range - remap kernel memory to userspace
2507  * @vma: user vma to map to
2508  * @addr: target page aligned user address to start at
2509  * @pfn: page frame number of kernel physical memory address
2510  * @size: size of mapping area
2511  * @prot: page protection flags for this mapping
2512  *
2513  * Note: this is only safe if the mm semaphore is held when called.
2514  *
2515  * Return: %0 on success, negative error code otherwise.
2516  */
2517 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2518 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2519 {
2520 	int err;
2521 
2522 	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2523 	if (err)
2524 		return -EINVAL;
2525 
2526 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2527 	if (err)
2528 		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
2529 	return err;
2530 }
2531 EXPORT_SYMBOL(remap_pfn_range);
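/*
 * A minimal sketch of the classic use case: a hypothetical driver ->mmap
 * handler mapping a physically contiguous region ("region_phys" and
 * "region_size" are illustrative fields) into userspace in one go; a real
 * handler would normally also validate vma->vm_pgoff:
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		if (size > dev->region_size)
 *			return -EINVAL;
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       dev->region_phys >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 */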
2532 
2533 /**
2534  * vm_iomap_memory - remap memory to userspace
2535  * @vma: user vma to map to
2536  * @start: start of the physical memory to be mapped
2537  * @len: size of area
2538  *
2539  * This is a simplified io_remap_pfn_range() for common driver use. The
2540  * driver just needs to give us the physical memory range to be mapped,
2541  * we'll figure out the rest from the vma information.
2542  *
2543  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set
2544  * up write-combining or similar attributes.
2545  *
2546  * Return: %0 on success, negative error code otherwise.
2547  */
2548 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2549 {
2550 	unsigned long vm_len, pfn, pages;
2551 
2552 	/* Check that the physical memory area passed in looks valid */
2553 	if (start + len < start)
2554 		return -EINVAL;
2555 	/*
2556 	 * You *really* shouldn't map things that aren't page-aligned,
2557 	 * but we've historically allowed it because IO memory might
2558 	 * just have smaller alignment.
2559 	 */
2560 	len += start & ~PAGE_MASK;
2561 	pfn = start >> PAGE_SHIFT;
2562 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2563 	if (pfn + pages < pfn)
2564 		return -EINVAL;
2565 
2566 	/* We start the mapping 'vm_pgoff' pages into the area */
2567 	if (vma->vm_pgoff > pages)
2568 		return -EINVAL;
2569 	pfn += vma->vm_pgoff;
2570 	pages -= vma->vm_pgoff;
2571 
2572 	/* Can we fit all of the mapping? */
2573 	vm_len = vma->vm_end - vma->vm_start;
2574 	if (vm_len >> PAGE_SHIFT > pages)
2575 		return -EINVAL;
2576 
2577 	/* Ok, let it rip */
2578 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2579 }
2580 EXPORT_SYMBOL(vm_iomap_memory);
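/*
 * A minimal sketch of a hypothetical driver ->mmap handler for an MMIO
 * region; the pgprot_noncached() tweak is one example of adjusting
 * vma->vm_page_prot first, as the NOTE above suggests, and "mmio_phys"/
 * "mmio_len" are illustrative fields:
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return vm_iomap_memory(vma, dev->mmio_phys, dev->mmio_len);
 *	}
 */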
2581 
2582 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2583 				     unsigned long addr, unsigned long end,
2584 				     pte_fn_t fn, void *data, bool create,
2585 				     pgtbl_mod_mask *mask)
2586 {
2587 	pte_t *pte, *mapped_pte;
2588 	int err = 0;
2589 	spinlock_t *ptl;
2590 
2591 	if (create) {
2592 		mapped_pte = pte = (mm == &init_mm) ?
2593 			pte_alloc_kernel_track(pmd, addr, mask) :
2594 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2595 		if (!pte)
2596 			return -ENOMEM;
2597 	} else {
2598 		mapped_pte = pte = (mm == &init_mm) ?
2599 			pte_offset_kernel(pmd, addr) :
2600 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2601 	}
2602 
2603 	BUG_ON(pmd_huge(*pmd));
2604 
2605 	arch_enter_lazy_mmu_mode();
2606 
2607 	if (fn) {
2608 		do {
2609 			if (create || !pte_none(*pte)) {
2610 				err = fn(pte++, addr, data);
2611 				if (err)
2612 					break;
2613 			}
2614 		} while (addr += PAGE_SIZE, addr != end);
2615 	}
2616 	*mask |= PGTBL_PTE_MODIFIED;
2617 
2618 	arch_leave_lazy_mmu_mode();
2619 
2620 	if (mm != &init_mm)
2621 		pte_unmap_unlock(mapped_pte, ptl);
2622 	return err;
2623 }
2624 
2625 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2626 				     unsigned long addr, unsigned long end,
2627 				     pte_fn_t fn, void *data, bool create,
2628 				     pgtbl_mod_mask *mask)
2629 {
2630 	pmd_t *pmd;
2631 	unsigned long next;
2632 	int err = 0;
2633 
2634 	BUG_ON(pud_huge(*pud));
2635 
2636 	if (create) {
2637 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2638 		if (!pmd)
2639 			return -ENOMEM;
2640 	} else {
2641 		pmd = pmd_offset(pud, addr);
2642 	}
2643 	do {
2644 		next = pmd_addr_end(addr, end);
2645 		if (pmd_none(*pmd) && !create)
2646 			continue;
2647 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2648 			return -EINVAL;
2649 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2650 			if (!create)
2651 				continue;
2652 			pmd_clear_bad(pmd);
2653 		}
2654 		err = apply_to_pte_range(mm, pmd, addr, next,
2655 					 fn, data, create, mask);
2656 		if (err)
2657 			break;
2658 	} while (pmd++, addr = next, addr != end);
2659 
2660 	return err;
2661 }
2662 
2663 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2664 				     unsigned long addr, unsigned long end,
2665 				     pte_fn_t fn, void *data, bool create,
2666 				     pgtbl_mod_mask *mask)
2667 {
2668 	pud_t *pud;
2669 	unsigned long next;
2670 	int err = 0;
2671 
2672 	if (create) {
2673 		pud = pud_alloc_track(mm, p4d, addr, mask);
2674 		if (!pud)
2675 			return -ENOMEM;
2676 	} else {
2677 		pud = pud_offset(p4d, addr);
2678 	}
2679 	do {
2680 		next = pud_addr_end(addr, end);
2681 		if (pud_none(*pud) && !create)
2682 			continue;
2683 		if (WARN_ON_ONCE(pud_leaf(*pud)))
2684 			return -EINVAL;
2685 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2686 			if (!create)
2687 				continue;
2688 			pud_clear_bad(pud);
2689 		}
2690 		err = apply_to_pmd_range(mm, pud, addr, next,
2691 					 fn, data, create, mask);
2692 		if (err)
2693 			break;
2694 	} while (pud++, addr = next, addr != end);
2695 
2696 	return err;
2697 }
2698 
2699 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2700 				     unsigned long addr, unsigned long end,
2701 				     pte_fn_t fn, void *data, bool create,
2702 				     pgtbl_mod_mask *mask)
2703 {
2704 	p4d_t *p4d;
2705 	unsigned long next;
2706 	int err = 0;
2707 
2708 	if (create) {
2709 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2710 		if (!p4d)
2711 			return -ENOMEM;
2712 	} else {
2713 		p4d = p4d_offset(pgd, addr);
2714 	}
2715 	do {
2716 		next = p4d_addr_end(addr, end);
2717 		if (p4d_none(*p4d) && !create)
2718 			continue;
2719 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2720 			return -EINVAL;
2721 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2722 			if (!create)
2723 				continue;
2724 			p4d_clear_bad(p4d);
2725 		}
2726 		err = apply_to_pud_range(mm, p4d, addr, next,
2727 					 fn, data, create, mask);
2728 		if (err)
2729 			break;
2730 	} while (p4d++, addr = next, addr != end);
2731 
2732 	return err;
2733 }
2734 
2735 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2736 				 unsigned long size, pte_fn_t fn,
2737 				 void *data, bool create)
2738 {
2739 	pgd_t *pgd;
2740 	unsigned long start = addr, next;
2741 	unsigned long end = addr + size;
2742 	pgtbl_mod_mask mask = 0;
2743 	int err = 0;
2744 
2745 	if (WARN_ON(addr >= end))
2746 		return -EINVAL;
2747 
2748 	pgd = pgd_offset(mm, addr);
2749 	do {
2750 		next = pgd_addr_end(addr, end);
2751 		if (pgd_none(*pgd) && !create)
2752 			continue;
2753 		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2754 			return -EINVAL;
2755 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2756 			if (!create)
2757 				continue;
2758 			pgd_clear_bad(pgd);
2759 		}
2760 		err = apply_to_p4d_range(mm, pgd, addr, next,
2761 					 fn, data, create, &mask);
2762 		if (err)
2763 			break;
2764 	} while (pgd++, addr = next, addr != end);
2765 
2766 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2767 		arch_sync_kernel_mappings(start, start + size);
2768 
2769 	return err;
2770 }
2771 
2772 /*
2773  * Scan a region of virtual memory, filling in page tables as necessary
2774  * and calling a provided function on each leaf page table.
2775  */
2776 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2777 			unsigned long size, pte_fn_t fn, void *data)
2778 {
2779 	return __apply_to_page_range(mm, addr, size, fn, data, true);
2780 }
2781 EXPORT_SYMBOL_GPL(apply_to_page_range);
2782 
2783 /*
2784  * Scan a region of virtual memory, calling a provided function on
2785  * each leaf page table where it exists.
2786  *
2787  * Unlike apply_to_page_range, this does _not_ fill in page tables
2788  * where they are absent.
2789  */
2790 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2791 				 unsigned long size, pte_fn_t fn, void *data)
2792 {
2793 	return __apply_to_page_range(mm, addr, size, fn, data, false);
2794 }
2795 EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
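/*
 * A minimal sketch of the callback pattern shared by apply_to_page_range()
 * and apply_to_existing_page_range(): a pte_fn_t callback invoked for each
 * leaf page table entry in the range.  The counting helper below is purely
 * illustrative:
 *
 *	static int my_count_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (!pte_none(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	static unsigned long my_count_mapped(struct mm_struct *mm,
 *					     unsigned long addr,
 *					     unsigned long size)
 *	{
 *		unsigned long count = 0;
 *
 *		apply_to_existing_page_range(mm, addr, size,
 *					     my_count_pte, &count);
 *		return count;
 *	}
 */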
2796 
2797 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
2798 
2799 /*
2800  * speculative_page_walk_begin() ... speculative_page_walk_end() protects
2801  * against races with page table reclamation.
2802  *
2803  * This is similar to what fast GUP does, but fast GUP also needs to
2804  * protect against races with THP page splitting, so it always needs
2805  * to disable interrupts.
2806  * Speculative page faults need to protect against page table reclamation;
2807  * even in the MMU_GATHER_RCU_TABLE_FREE case, the page table removal
2808  * slow-path is not RCU-safe (see the comment inside
2809  * tlb_remove_table_sync_one), so we still have to disable IRQs.
2810  */
2811 #define speculative_page_walk_begin() local_irq_disable()
2812 #define speculative_page_walk_end()   local_irq_enable()
2813 
2814 bool __pte_map_lock(struct vm_fault *vmf)
2815 {
2816 	pmd_t pmdval;
2817 	pte_t *pte = vmf->pte;
2818 	spinlock_t *ptl;
2819 
2820 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
2821 		vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2822 		if (!pte)
2823 			vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
2824 		spin_lock(vmf->ptl);
2825 		return true;
2826 	}
2827 
2828 	speculative_page_walk_begin();
2829 	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
2830 				 SPF_ABORT_PTE_MAP_LOCK_SEQ1))
2831 		goto fail;
2832 	/*
2833 	 * The mmap sequence count check guarantees that the page
2834 	 * tables are still valid at that point, and
2835 	 * speculative_page_walk_begin() ensures that they stay around.
2836 	 */
2837 	/*
2838 	 * We check if the pmd value is still the same to ensure that there
2839  * is not a huge collapse operation in progress behind our back.
2840 	 * It also ensures that pmd was not cleared by pmd_clear in
2841 	 * free_pte_range and ptl is still valid.
2842 	 */
2843 	pmdval = READ_ONCE(*vmf->pmd);
2844 	if (!pmd_same(pmdval, vmf->orig_pmd)) {
2845 		count_vm_spf_event(SPF_ABORT_PTE_MAP_LOCK_PMD);
2846 		goto fail;
2847 	}
2848 	ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
2849 	if (!pte)
2850 		pte = pte_offset_map(&pmdval, vmf->address);
2851 	/*
2852 	 * Try locking the page table.
2853 	 *
2854 	 * Note that we might race against zap_pte_range() which
2855 	 * invalidates TLBs while holding the page table lock.
2856 	 * We are still under the speculative_page_walk_begin() section,
2857 	 * and zap_pte_range() could thus deadlock with us if we tried
2858 	 * using spin_lock() here.
2859 	 *
2860 	 * We also don't want to retry until spin_trylock() succeeds,
2861 	 * because of the starvation potential against a stream of lockers.
2862 	 */
2863 	if (unlikely(!spin_trylock(ptl))) {
2864 		count_vm_spf_event(SPF_ABORT_PTE_MAP_LOCK_PTL);
2865 		goto fail;
2866 	}
2867 	/*
2868 	 * The check below will fail if __pte_map_lock passed its ptl barrier
2869 	 * before we took the ptl lock.
2870 	 */
2871 	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
2872 				 SPF_ABORT_PTE_MAP_LOCK_SEQ2))
2873 		goto unlock_fail;
2874 	speculative_page_walk_end();
2875 	vmf->pte = pte;
2876 	vmf->ptl = ptl;
2877 	return true;
2878 
2879 unlock_fail:
2880 	spin_unlock(ptl);
2881 fail:
2882 	if (pte)
2883 		pte_unmap(pte);
2884 	speculative_page_walk_end();
2885 	return false;
2886 }
2887 
2888 #endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */
2889 
2890 /*
2891  * handle_pte_fault chooses page fault handler according to an entry which was
2892  * read non-atomically.  Before making any commitment, on those architectures
2893  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2894  * parts, do_swap_page must check under lock before unmapping the pte and
2895  * proceeding (but do_wp_page is only called after already making such a check;
2896  * and do_anonymous_page can safely check later on).
2897  */
2898 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2899 				pte_t *page_table, pte_t orig_pte)
2900 {
2901 	int same = 1;
2902 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2903 	if (sizeof(pte_t) > sizeof(unsigned long)) {
2904 		spinlock_t *ptl = pte_lockptr(mm, pmd);
2905 		spin_lock(ptl);
2906 		same = pte_same(*page_table, orig_pte);
2907 		spin_unlock(ptl);
2908 	}
2909 #endif
2910 	pte_unmap(page_table);
2911 	return same;
2912 }
2913 
2914 /*
2915  * Return:
2916  *	0:		copy succeeded
2917  *	-EHWPOISON:	copy failed due to hwpoison in the source page
2918  *	-EAGAIN:	copy failed (some other reason)
2919  */
2920 static inline int cow_user_page(struct page *dst, struct page *src,
2921 				      struct vm_fault *vmf)
2922 {
2923 	int ret;
2924 	void *kaddr;
2925 	void __user *uaddr;
2926 	bool locked = false;
2927 	struct vm_area_struct *vma = vmf->vma;
2928 	struct mm_struct *mm = vma->vm_mm;
2929 	unsigned long addr = vmf->address;
2930 
2931 	if (likely(src)) {
2932 		if (copy_mc_user_highpage(dst, src, addr, vma)) {
2933 			memory_failure_queue(page_to_pfn(src), 0);
2934 			return -EHWPOISON;
2935 		}
2936 		return 0;
2937 	}
2938 
2939 	/*
2940 	 * If the source page was a PFN mapping, we don't have
2941 	 * a "struct page" for it. We do a best-effort copy by
2942 	 * just copying from the original user address. If that
2943 	 * fails, we just zero-fill it. Live with it.
2944 	 */
2945 	kaddr = kmap_atomic(dst);
2946 	uaddr = (void __user *)(addr & PAGE_MASK);
2947 
2948 	/*
2949 	 * On architectures with software "accessed" bits, we would
2950 	 * take a double page fault, so mark it accessed here.
2951 	 */
2952 	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
2953 		pte_t entry;
2954 
2955 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2956 		locked = true;
2957 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2958 			/*
2959 			 * Another thread has already handled the fault;
2960 			 * just update the local TLB.
2961 			 */
2962 			update_mmu_tlb(vma, addr, vmf->pte);
2963 			ret = -EAGAIN;
2964 			goto pte_unlock;
2965 		}
2966 
2967 		entry = pte_mkyoung(vmf->orig_pte);
2968 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2969 			update_mmu_cache(vma, addr, vmf->pte);
2970 	}
2971 
2972 	/*
2973 	 * This really shouldn't fail, because the page is there
2974 	 * in the page tables. But it might just be unreadable,
2975 	 * in which case we just give up and fill the result with
2976 	 * zeroes.
2977 	 */
2978 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2979 		if (locked)
2980 			goto warn;
2981 
2982 		/* Re-validate under PTL if the page is still mapped */
2983 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2984 		locked = true;
2985 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2986 			/* The PTE changed under us, update local tlb */
2987 			update_mmu_tlb(vma, addr, vmf->pte);
2988 			ret = -EAGAIN;
2989 			goto pte_unlock;
2990 		}
2991 
2992 		/*
2993 		 * The same page may have been mapped back in since the last
2994 		 * copy attempt. Try to copy again under the PTL.
2995 		 */
2996 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2997 			/*
2998 			 * Warn in case there is some obscure
2999 			 * use-case that hits this.
3000 			 */
3001 warn:
3002 			WARN_ON_ONCE(1);
3003 			clear_page(kaddr);
3004 		}
3005 	}
3006 
3007 	ret = 0;
3008 
3009 pte_unlock:
3010 	if (locked)
3011 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3012 	kunmap_atomic(kaddr);
3013 	flush_dcache_page(dst);
3014 
3015 	return ret;
3016 }
3017 
3018 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3019 {
3020 	struct file *vm_file = vma->vm_file;
3021 
3022 	if (vm_file)
3023 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3024 
3025 	/*
3026 	 * Special mappings (e.g. VDSO) do not have any file so fake
3027 	 * a default GFP_KERNEL for them.
3028 	 */
3029 	return GFP_KERNEL;
3030 }
3031 
3032 /*
3033  * Notify the address space that the page is about to become writable so that
3034  * it can prohibit this or wait for the page to get into an appropriate state.
3035  *
3036  * We do this without the lock held, so that it can sleep if it needs to.
3037  */
3038 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
3039 {
3040 	vm_fault_t ret;
3041 	struct page *page = vmf->page;
3042 	unsigned int old_flags = vmf->flags;
3043 
3044 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3045 
3046 	if (vmf->vma->vm_file &&
3047 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3048 		return VM_FAULT_SIGBUS;
3049 
3050 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3051 	/* Restore original flags so that caller is not surprised */
3052 	vmf->flags = old_flags;
3053 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3054 		return ret;
3055 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3056 		lock_page(page);
3057 		if (!page->mapping) {
3058 			unlock_page(page);
3059 			return 0; /* retry */
3060 		}
3061 		ret |= VM_FAULT_LOCKED;
3062 	} else
3063 		VM_BUG_ON_PAGE(!PageLocked(page), page);
3064 	return ret;
3065 }
3066 
3067 /*
3068  * Handle dirtying of a page in shared file mapping on a write fault.
3069  *
3070  * The function expects the page to be locked and unlocks it.
3071  */
3072 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3073 {
3074 	struct vm_area_struct *vma = vmf->vma;
3075 	struct address_space *mapping;
3076 	struct page *page = vmf->page;
3077 	bool dirtied;
3078 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3079 
3080 	dirtied = set_page_dirty(page);
3081 	VM_BUG_ON_PAGE(PageAnon(page), page);
3082 	/*
3083 	 * Take a local copy of the address_space - page.mapping may be zeroed
3084 	 * by truncate after unlock_page().   The address_space itself remains
3085 	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
3086 	 * release semantics to prevent the compiler from undoing this copying.
3087 	 */
3088 	mapping = page_rmapping(page);
3089 	unlock_page(page);
3090 
3091 	if (!page_mkwrite)
3092 		file_update_time(vma->vm_file);
3093 
3094 	/*
3095 	 * Throttle page dirtying rate down to writeback speed.
3096 	 *
3097 	 * mapping may be NULL here because some device drivers do not
3098 	 * set page.mapping but still dirty their pages
3099 	 *
3100 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3101 	 * is pinning the mapping, as per above.
3102 	 */
3103 	if ((dirtied || page_mkwrite) && mapping) {
3104 		struct file *fpin;
3105 
3106 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3107 		balance_dirty_pages_ratelimited(mapping);
3108 		if (fpin) {
3109 			fput(fpin);
3110 			return VM_FAULT_RETRY;
3111 		}
3112 	}
3113 
3114 	return 0;
3115 }
3116 
3117 /*
3118  * Handle write page faults for pages that can be reused in the current vma
3119  *
3120  * This can happen either because the mapping has the VM_SHARED flag, or
3121  * because we hold the last remaining reference to the page. In either
3122  * case, all we need to do here is to mark the page as writable and update
3123  * any related book-keeping.
3124  */
3125 static inline void wp_page_reuse(struct vm_fault *vmf)
3126 	__releases(vmf->ptl)
3127 {
3128 	struct vm_area_struct *vma = vmf->vma;
3129 	struct page *page = vmf->page;
3130 	pte_t entry;
3131 	/*
3132 	 * Clear the page's cpupid information as the existing
3133 	 * information potentially belongs to a now completely
3134 	 * unrelated process.
3135 	 */
3136 	if (page)
3137 		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
3138 
3139 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3140 	entry = pte_mkyoung(vmf->orig_pte);
3141 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3142 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3143 		update_mmu_cache(vma, vmf->address, vmf->pte);
3144 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3145 	count_vm_event(PGREUSE);
3146 }
3147 
3148 /*
3149  * Handle the case of a page which we actually need to copy to a new page.
3150  *
3151  * Called with mmap_lock locked and the old page referenced, but
3152  * without the ptl held.
3153  *
3154  * High level logic flow:
3155  *
3156  * - Allocate a page, copy the content of the old page to the new one.
3157  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3158  * - Take the PTL. If the pte changed, bail out and release the allocated page
3159  * - If the pte is still the way we remember it, update the page table and all
3160  *   relevant references. This includes dropping the reference the page-table
3161  *   held to the old page, as well as updating the rmap.
3162  * - In any case, unlock the PTL and drop the reference we took to the old page.
3163  */
3164 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3165 {
3166 	struct vm_area_struct *vma = vmf->vma;
3167 	struct mm_struct *mm = vma->vm_mm;
3168 	struct page *old_page = vmf->page;
3169 	struct page *new_page = NULL;
3170 	pte_t entry;
3171 	int page_copied = 0;
3172 	struct mmu_notifier_range range;
3173 	vm_fault_t ret = VM_FAULT_OOM;
3174 
3175 	if (unlikely(!vma->anon_vma)) {
3176 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3177 			count_vm_spf_event(SPF_ABORT_ANON_VMA);
3178 			ret = VM_FAULT_RETRY;
3179 			goto out;
3180 		}
3181 		if (__anon_vma_prepare(vma))
3182 			goto out;
3183 	}
3184 
3185 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3186 		new_page = alloc_zeroed_user_highpage_movable(vma,
3187 							      vmf->address);
3188 		if (!new_page)
3189 			goto out;
3190 	} else {
3191 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3192 				vmf->address);
3193 		if (!new_page)
3194 			goto out;
3195 
3196 		ret = cow_user_page(new_page, old_page, vmf);
3197 		if (ret) {
3198 			/*
3199 			 * COW failed; if the fault was resolved by another
3200 			 * thread, that's fine. If not, userspace will re-fault
3201 			 * on the same address and we will handle the fault
3202 			 * on the second attempt.
3203 			 * The -EHWPOISON case will not be retried.
3204 			 */
3205 			put_page(new_page);
3206 			if (old_page)
3207 				put_page(old_page);
3208 
3209 			return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3210 		}
3211 	}
3212 
3213 	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
3214 		goto out_free_new;
3215 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
3216 
3217 	__SetPageUptodate(new_page);
3218 
3219 	if ((vmf->flags & FAULT_FLAG_SPECULATIVE) &&
3220 	    !mmu_notifier_trylock(mm)) {
3221 		ret = VM_FAULT_RETRY;
3222 		goto out_free_new;
3223 	}
3224 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
3225 				vmf->address & PAGE_MASK,
3226 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3227 	mmu_notifier_invalidate_range_start(&range);
3228 
3229 	/*
3230 	 * Re-check the pte - we dropped the lock
3231 	 */
3232 	if (!pte_map_lock(vmf)) {
3233 		ret = VM_FAULT_RETRY;
3234 		/* put_page() will uncharge the page */
3235 		goto out_notify;
3236 	}
3237 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
3238 		if (old_page) {
3239 			if (!PageAnon(old_page)) {
3240 				dec_mm_counter_fast(mm,
3241 						mm_counter_file(old_page));
3242 				inc_mm_counter_fast(mm, MM_ANONPAGES);
3243 			}
3244 		} else {
3245 			inc_mm_counter_fast(mm, MM_ANONPAGES);
3246 		}
3247 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3248 		entry = mk_pte(new_page, vma->vm_page_prot);
3249 		entry = pte_sw_mkyoung(entry);
3250 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3251 
3252 		/*
3253 		 * Clear the pte entry and flush it first, before updating the
3254 		 * pte with the new entry, to keep TLBs on different CPUs in
3255 		 * sync. This code used to set the new PTE then flush TLBs, but
3256 		 * that left a window where the new PTE could be loaded into
3257 		 * some TLBs while the old PTE remains in others.
3258 		 */
3259 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
3260 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
3261 		lru_cache_add_inactive_or_unevictable(new_page, vma);
3262 		/*
3263 		 * We call the notify macro here because, when using secondary
3264 		 * mmu page tables (such as kvm shadow page tables), we want the
3265 		 * new page to be mapped directly into the secondary page table.
3266 		 */
3267 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3268 		update_mmu_cache(vma, vmf->address, vmf->pte);
3269 		if (old_page) {
3270 			/*
3271 			 * Only after switching the pte to the new page may
3272 			 * we remove the mapcount here. Otherwise another
3273 			 * process may come and find the rmap count decremented
3274 			 * before the pte is switched to the new page, and
3275 			 * "reuse" the old page writing into it while our pte
3276 			 * here still points into it and can be read by other
3277 			 * threads.
3278 			 *
3279 			 * The critical issue is to order this
3280 			 * page_remove_rmap with the ptep_clear_flush above.
3281 			 * Those stores are ordered by (if nothing else,)
3282 			 * the barrier present in the atomic_add_negative
3283 			 * in page_remove_rmap.
3284 			 *
3285 			 * Then the TLB flush in ptep_clear_flush ensures that
3286 			 * no process can access the old page before the
3287 			 * decremented mapcount is visible. And the old page
3288 			 * cannot be reused until after the decremented
3289 			 * mapcount is visible. So transitively, TLBs to
3290 			 * old page will be flushed before it can be reused.
3291 			 */
3292 			page_remove_rmap(old_page, false);
3293 		}
3294 
3295 		/* Free the old page.. */
3296 		new_page = old_page;
3297 		page_copied = 1;
3298 	} else {
3299 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3300 	}
3301 
3302 	if (new_page)
3303 		put_page(new_page);
3304 
3305 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3306 	/*
3307 	 * No need to call the mmu_notifier->invalidate_range() callback again,
3308 	 * as the above ptep_clear_flush_notify() already called it.
3309 	 */
3310 	mmu_notifier_invalidate_range_only_end(&range);
3311 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3312 		mmu_notifier_unlock(mm);
3313 	if (old_page) {
3314 		/*
3315 		 * Don't let another task, with possibly unlocked vma,
3316 		 * keep the mlocked page.
3317 		 */
3318 		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
3319 			lock_page(old_page);	/* LRU manipulation */
3320 			if (PageMlocked(old_page))
3321 				munlock_vma_page(old_page);
3322 			unlock_page(old_page);
3323 		}
3324 		if (page_copied)
3325 			free_swap_cache(old_page);
3326 		put_page(old_page);
3327 	}
3328 	return page_copied ? VM_FAULT_WRITE : 0;
3329 out_notify:
3330 	mmu_notifier_invalidate_range_only_end(&range);
3331 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3332 		mmu_notifier_unlock(mm);
3333 out_free_new:
3334 	put_page(new_page);
3335 out:
3336 	if (old_page)
3337 		put_page(old_page);
3338 	return ret;
3339 }
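
/*
 * Return values of wp_page_copy(), as implemented above:
 *
 *   VM_FAULT_WRITE    - the copy was installed and the old page unmapped
 *   0                 - the PTE changed under us (or a non-poison copy
 *                       failure was resolved elsewhere); the caller will
 *                       simply re-fault
 *   VM_FAULT_RETRY    - a speculative fault had to bail out (missing
 *                       anon_vma, mmu_notifier trylock or pte_map_lock
 *                       failure)
 *   VM_FAULT_OOM      - page allocation or memcg charge failed
 *   VM_FAULT_HWPOISON - cow_user_page() hit a poisoned source page
 */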
3340 
3341 /**
3342  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3343  *			  writeable once the page is prepared
3344  *
3345  * @vmf: structure describing the fault
3346  *
3347  * This function handles all that is needed to finish a write page fault in a
3348  * shared mapping due to PTE being read-only once the mapped page is prepared.
3349  * It handles locking of PTE and modifying it.
3350  *
3351  * The function expects the page to be locked or other protection against
3352  * concurrent faults / writeback (such as DAX radix tree locks).
3353  *
3354  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3355  * we acquired PTE lock.
3356  */
3357 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3358 {
3359 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3360 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3361 				       &vmf->ptl);
3362 	/*
3363 	 * We might have raced with another page fault while we released the
3364 	 * pte_offset_map_lock.
3365 	 */
3366 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3367 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3368 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3369 		return VM_FAULT_NOPAGE;
3370 	}
3371 	wp_page_reuse(vmf);
3372 	return 0;
3373 }
3374 
3375 /*
3376  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3377  * mapping
3378  */
3379 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3380 {
3381 	struct vm_area_struct *vma = vmf->vma;
3382 
3383 	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
3384 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3385 		vm_fault_t ret;
3386 
3387 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3388 		vmf->flags |= FAULT_FLAG_MKWRITE;
3389 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3390 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3391 			return ret;
3392 		return finish_mkwrite_fault(vmf);
3393 	}
3394 	wp_page_reuse(vmf);
3395 	return VM_FAULT_WRITE;
3396 }
3397 
3398 static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3399 	__releases(vmf->ptl)
3400 {
3401 	struct vm_area_struct *vma = vmf->vma;
3402 	vm_fault_t ret = VM_FAULT_WRITE;
3403 
3404 	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
3405 
3406 	get_page(vmf->page);
3407 
3408 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3409 		vm_fault_t tmp;
3410 
3411 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3412 		tmp = do_page_mkwrite(vmf);
3413 		if (unlikely(!tmp || (tmp &
3414 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3415 			put_page(vmf->page);
3416 			return tmp;
3417 		}
3418 		tmp = finish_mkwrite_fault(vmf);
3419 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3420 			unlock_page(vmf->page);
3421 			put_page(vmf->page);
3422 			return tmp;
3423 		}
3424 	} else {
3425 		wp_page_reuse(vmf);
3426 		lock_page(vmf->page);
3427 	}
3428 	ret |= fault_dirty_shared_page(vmf);
3429 	put_page(vmf->page);
3430 
3431 	return ret;
3432 }
3433 
3434 /*
3435  * This routine handles present pages, when users try to write
3436  * to a shared page. It is done by copying the page to a new address
3437  * and decrementing the shared-page counter for the old page.
3438  *
3439  * Note that this routine assumes that the protection checks have been
3440  * done by the caller (the low-level page fault routine in most cases).
3441  * Thus we can safely just mark it writable once we've done any necessary
3442  * COW.
3443  *
3444  * We also mark the page dirty at this point even though the page will
3445  * change only once the write actually happens. This avoids a few races,
3446  * and potentially makes it more efficient.
3447  *
3448  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3449  * but allow concurrent faults), with pte both mapped and locked.
3450  * We return with mmap_lock still held, but pte unmapped and unlocked.
3451  */
3452 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3453 	__releases(vmf->ptl)
3454 {
3455 	struct vm_area_struct *vma = vmf->vma;
3456 
3457 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3458 		count_vm_spf_event(SPF_ATTEMPT_WP);
3459 
3460 	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3461 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3462 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3463 			count_vm_spf_event(SPF_ABORT_USERFAULTFD);
3464 			return VM_FAULT_RETRY;
3465 		}
3466 		return handle_userfault(vmf, VM_UFFD_WP);
3467 	}
3468 
3469 	/*
3470 	 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3471 	 * is flushed in this case before copying.
3472 	 */
3473 	if (unlikely(userfaultfd_wp(vmf->vma) &&
3474 		     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3475 		flush_tlb_page(vmf->vma, vmf->address);
3476 
3477 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3478 	if (!vmf->page) {
3479 		/*
3480 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3481 		 * VM_PFNMAP VMA.
3482 		 *
3483 		 * We should not cow pages in a shared writeable mapping.
3484 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3485 		 */
3486 		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3487 				     (VM_WRITE|VM_SHARED))
3488 			return wp_pfn_shared(vmf);
3489 
3490 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3491 		vmf->pte = NULL;
3492 		return wp_page_copy(vmf);
3493 	}
3494 
3495 	/*
3496 	 * Take out anonymous pages first, anonymous shared vmas are
3497 	 * not dirty accountable.
3498 	 */
3499 	if (PageAnon(vmf->page)) {
3500 		struct page *page = vmf->page;
3501 
3502 		/* PageKsm() doesn't necessarily raise the page refcount */
3503 		if (PageKsm(page) || page_count(page) != 1)
3504 			goto copy;
3505 		if (!trylock_page(page))
3506 			goto copy;
3507 		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3508 			unlock_page(page);
3509 			goto copy;
3510 		}
3511 		/*
3512 		 * Ok, we've got the only map reference, and the only
3513 		 * page count reference, and the page is locked,
3514 		 * it's dark out, and we're wearing sunglasses. Hit it.
3515 		 */
3516 		unlock_page(page);
3517 		wp_page_reuse(vmf);
3518 		return VM_FAULT_WRITE;
3519 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3520 					(VM_WRITE|VM_SHARED))) {
3521 		return wp_page_shared(vmf);
3522 	}
3523 copy:
3524 	/*
3525 	 * Ok, we need to copy. Oh, well..
3526 	 */
3527 	get_page(vmf->page);
3528 
3529 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3530 	vmf->pte = NULL;
3531 	return wp_page_copy(vmf);
3532 }
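
/*
 * Reuse-vs-copy decision in do_wp_page(), as coded above: an anonymous,
 * non-KSM page with page_count() == 1 and page_mapcount() == 1 (re-checked
 * under the page lock) is reused in place via wp_page_reuse(); shared
 * writable mappings go through wp_page_shared() / wp_pfn_shared() so the
 * filesystem or driver can be notified; everything else falls through to
 * wp_page_copy() for a private copy.
 */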
3533 
3534 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3535 		unsigned long start_addr, unsigned long end_addr,
3536 		struct zap_details *details)
3537 {
3538 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3539 }
3540 
3541 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3542 					    struct zap_details *details)
3543 {
3544 	struct vm_area_struct *vma;
3545 	pgoff_t vba, vea, zba, zea;
3546 
3547 	vma_interval_tree_foreach(vma, root,
3548 			details->first_index, details->last_index) {
3549 
3550 		vba = vma->vm_pgoff;
3551 		vea = vba + vma_pages(vma) - 1;
3552 		zba = details->first_index;
3553 		if (zba < vba)
3554 			zba = vba;
3555 		zea = details->last_index;
3556 		if (zea > vea)
3557 			zea = vea;
3558 
3559 		unmap_mapping_range_vma(vma,
3560 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3561 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3562 				details);
3563 	}
3564 }
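
/*
 * Worked example of the clamping above (illustrative numbers only): a VMA
 * with vm_pgoff == 10 and vma_pages() == 5 covers file pages 10..14, so
 * vba = 10 and vea = 14.  For a zap request with first_index = 12 and
 * last_index = 100, zba is raised to 12 and zea lowered to 14, and the
 * range passed to unmap_mapping_range_vma() is
 * vm_start + 2 * PAGE_SIZE .. vm_start + 5 * PAGE_SIZE.
 */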
3565 
3566 /**
3567  * unmap_mapping_page() - Unmap single page from processes.
3568  * @page: The locked page to be unmapped.
3569  *
3570  * Unmap this page from any userspace process which still has it mmaped.
3571  * Typically, for efficiency, the range of nearby pages has already been
3572  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3573  * truncation or invalidation holds the lock on a page, it may find that
3574  * the page has been remapped again; it then uses unmap_mapping_page()
3575  * to unmap it finally.
3576  */
3577 void unmap_mapping_page(struct page *page)
3578 {
3579 	struct address_space *mapping = page->mapping;
3580 	struct zap_details details = { };
3581 
3582 	VM_BUG_ON(!PageLocked(page));
3583 	VM_BUG_ON(PageTail(page));
3584 
3585 	details.check_mapping = mapping;
3586 	details.first_index = page->index;
3587 	details.last_index = page->index + thp_nr_pages(page) - 1;
3588 	details.single_page = page;
3589 
3590 	i_mmap_lock_write(mapping);
3591 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3592 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3593 	i_mmap_unlock_write(mapping);
3594 }
3595 
3596 /**
3597  * unmap_mapping_pages() - Unmap pages from processes.
3598  * @mapping: The address space containing pages to be unmapped.
3599  * @start: Index of first page to be unmapped.
3600  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3601  * @even_cows: Whether to unmap even private COWed pages.
3602  *
3603  * Unmap the pages in this address space from any userspace process which
3604  * has them mmaped.  Generally, you want to remove COWed pages as well when
3605  * a file is being truncated, but not when invalidating pages from the page
3606  * cache.
3607  */
3608 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3609 		pgoff_t nr, bool even_cows)
3610 {
3611 	struct zap_details details = { };
3612 
3613 	details.check_mapping = even_cows ? NULL : mapping;
3614 	details.first_index = start;
3615 	details.last_index = start + nr - 1;
3616 	if (details.last_index < details.first_index)
3617 		details.last_index = ULONG_MAX;
3618 
3619 	i_mmap_lock_write(mapping);
3620 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3621 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3622 	i_mmap_unlock_write(mapping);
3623 }
3624 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
3625 
3626 /**
3627  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3628  * address_space corresponding to the specified byte range in the underlying
3629  * file.
3630  *
3631  * @mapping: the address space containing mmaps to be unmapped.
3632  * @holebegin: byte in first page to unmap, relative to the start of
3633  * the underlying file.  This will be rounded down to a PAGE_SIZE
3634  * boundary.  Note that this is different from truncate_pagecache(), which
3635  * must keep the partial page.  In contrast, we must get rid of
3636  * partial pages.
3637  * @holelen: size of prospective hole in bytes.  This will be rounded
3638  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3639  * end of the file.
3640  * @even_cows: 1 when truncating a file, to unmap even private COWed pages;
3641  * 0 when invalidating pagecache, to preserve private data.
3642  */
3643 void unmap_mapping_range(struct address_space *mapping,
3644 		loff_t const holebegin, loff_t const holelen, int even_cows)
3645 {
3646 	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
3647 	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
3648 
3649 	/* Check for overflow. */
3650 	if (sizeof(holelen) > sizeof(hlen)) {
3651 		long long holeend =
3652 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3653 		if (holeend & ~(long long)ULONG_MAX)
3654 			hlen = ULONG_MAX - hba + 1;
3655 	}
3656 
3657 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3658 }
3659 EXPORT_SYMBOL(unmap_mapping_range);
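
/*
 * Illustrative use of unmap_mapping_range() (hypothetical caller, shown only
 * to make the byte-to-page conversion above concrete, assuming 4 KiB pages):
 * a filesystem invalidating the mappings of bytes 8192..20479 of a file
 * might call
 *
 *	unmap_mapping_range(inode->i_mapping, 8192, 12288, 1);
 *
 * which gives hba = 2 and hlen = 3, so file pages 2..4 are unmapped from
 * every process, including private COWed copies because even_cows is 1.
 */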
3660 
3661 /*
3662  * Restore a potential device exclusive pte to a working pte entry
3663  */
3664 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3665 {
3666 	struct page *page = vmf->page;
3667 	struct vm_area_struct *vma = vmf->vma;
3668 	struct mmu_notifier_range range;
3669 
3670 	/*
3671 	 * We need a reference to lock the page because we don't hold
3672 	 * the PTL so a racing thread can remove the device-exclusive
3673 	 * entry and unmap it. If the page is free the entry must
3674 	 * have been removed already. If it happens to have already
3675 	 * been re-allocated after being freed all we do is lock and
3676 	 * unlock it.
3677 	 */
3678 	if (!get_page_unless_zero(page))
3679 		return 0;
3680 
3681 	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
3682 		put_page(page);
3683 		return VM_FAULT_RETRY;
3684 	}
3685 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
3686 				vma->vm_mm, vmf->address & PAGE_MASK,
3687 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3688 	mmu_notifier_invalidate_range_start(&range);
3689 
3690 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3691 				&vmf->ptl);
3692 	if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3693 		restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
3694 
3695 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3696 	unlock_page(page);
3697 	put_page(page);
3698 
3699 	mmu_notifier_invalidate_range_end(&range);
3700 	return 0;
3701 }
3702 
3703 /*
3704  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3705  * but allow concurrent faults), and pte mapped but not yet locked.
3706  * We return with pte unmapped and unlocked.
3707  *
3708  * We return with the mmap_lock locked or unlocked in the same cases
3709  * as does filemap_fault().
3710  */
3711 vm_fault_t do_swap_page(struct vm_fault *vmf)
3712 {
3713 	struct vm_area_struct *vma = vmf->vma;
3714 	struct page *page = NULL, *swapcache;
3715 	struct swap_info_struct *si = NULL;
3716 	swp_entry_t entry;
3717 	pte_t pte;
3718 	int locked;
3719 	int exclusive = 0;
3720 	vm_fault_t ret = 0;
3721 	void *shadow = NULL;
3722 
3723 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3724 		bool allow_swap_spf = false;
3725 
3726 		/* ksm_might_need_to_copy() needs a stable VMA, spf can't be used */
3727 #ifndef CONFIG_KSM
3728 		trace_android_vh_do_swap_page_spf(&allow_swap_spf);
3729 #endif
3730 		if (!allow_swap_spf) {
3731 			pte_unmap(vmf->pte);
3732 			count_vm_spf_event(SPF_ABORT_SWAP);
3733 			return VM_FAULT_RETRY;
3734 		}
3735 	}
3736 
3737 	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
3738 		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3739 			ret = VM_FAULT_RETRY;
3740 		goto out;
3741 	}
3742 
3743 	entry = pte_to_swp_entry(vmf->orig_pte);
3744 	if (unlikely(non_swap_entry(entry))) {
3745 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3746 			ret = VM_FAULT_RETRY;
3747 			goto out;
3748 		}
3749 		if (is_migration_entry(entry)) {
3750 			migration_entry_wait(vma->vm_mm, vmf->pmd,
3751 					     vmf->address);
3752 		} else if (is_device_exclusive_entry(entry)) {
3753 			vmf->page = pfn_swap_entry_to_page(entry);
3754 			ret = remove_device_exclusive_entry(vmf);
3755 		} else if (is_device_private_entry(entry)) {
3756 			vmf->page = pfn_swap_entry_to_page(entry);
3757 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3758 		} else if (is_hwpoison_entry(entry)) {
3759 			ret = VM_FAULT_HWPOISON;
3760 		} else {
3761 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3762 			ret = VM_FAULT_SIGBUS;
3763 		}
3764 		goto out;
3765 	}
3766 
3767 	/* Prevent swapoff from happening to us. */
3768 	si = get_swap_device(entry);
3769 	if (unlikely(!si))
3770 		goto out;
3771 
3772 	delayacct_set_flag(current, DELAYACCT_PF_SWAPIN);
3773 	page = lookup_swap_cache(entry, vma, vmf->address);
3774 	swapcache = page;
3775 
3776 	if (!page) {
3777 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3778 		    __swap_count(entry) == 1) {
3779 			/* skip swapcache */
3780 			gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_CMA;
3781 
3782 			trace_android_rvh_set_skip_swapcache_flags(&flags);
3783 			page = alloc_page_vma(flags, vma, vmf->address);
3784 			if (page) {
3785 				__SetPageLocked(page);
3786 				__SetPageSwapBacked(page);
3787 
3788 				if (mem_cgroup_swapin_charge_page(page,
3789 					vma->vm_mm, GFP_KERNEL, entry)) {
3790 					ret = VM_FAULT_OOM;
3791 					goto out_page;
3792 				}
3793 				mem_cgroup_swapin_uncharge_swap(entry);
3794 
3795 				shadow = get_shadow_from_swap_cache(entry);
3796 				if (shadow)
3797 					workingset_refault(page, shadow);
3798 
3799 				lru_cache_add(page);
3800 
3801 				/* To provide entry to swap_readpage() */
3802 				set_page_private(page, entry.val);
3803 				swap_readpage(page, true);
3804 				set_page_private(page, 0);
3805 			}
3806 		} else if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3807 			/*
3808 			 * Don't try readahead during a speculative page fault
3809 			 * as the VMA's boundaries may change behind our back.
3810 			 * If the page is not in the swap cache and synchronous
3811 			 * read is disabled, fall back to the regular page fault
3812 			 * mechanism.
3813 			 */
3814 			delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3815 			ret = VM_FAULT_RETRY;
3816 			goto out;
3817 		} else {
3818 			page = swapin_readahead(entry,
3819 						GFP_HIGHUSER_MOVABLE | __GFP_CMA,
3820 						vmf);
3821 			swapcache = page;
3822 		}
3823 
3824 		if (!page) {
3825 			/*
3826 			 * Back out if somebody else faulted in this pte
3827 			 * while we released the pte lock.
3828 			 */
3829 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3830 					vmf->address, &vmf->ptl);
3831 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3832 				ret = VM_FAULT_OOM;
3833 			delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3834 			goto unlock;
3835 		}
3836 
3837 		/* Had to read the page from swap area: Major fault */
3838 		ret = VM_FAULT_MAJOR;
3839 		count_vm_event(PGMAJFAULT);
3840 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3841 	} else if (PageHWPoison(page)) {
3842 		/*
3843 		 * hwpoisoned dirty swapcache pages are kept for killing
3844 		 * owner processes (which may be unknown at hwpoison time)
3845 		 */
3846 		ret = VM_FAULT_HWPOISON;
3847 		delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3848 		goto out_release;
3849 	}
3850 
3851 	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3852 
3853 	delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3854 	if (!locked) {
3855 		ret |= VM_FAULT_RETRY;
3856 		goto out_release;
3857 	}
3858 
3859 	/*
3860 	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3861 	 * release the swapcache from under us.  The page pin, and pte_same
3862 	 * test below, are not enough to exclude that.  Even if it is still
3863 	 * swapcache, we need to check that the page's swap has not changed.
3864 	 */
3865 	if (unlikely((!PageSwapCache(page) ||
3866 			page_private(page) != entry.val)) && swapcache)
3867 		goto out_page;
3868 
3869 	page = ksm_might_need_to_copy(page, vma, vmf->address);
3870 	if (unlikely(!page)) {
3871 		ret = VM_FAULT_OOM;
3872 		page = swapcache;
3873 		goto out_page;
3874 	}
3875 
3876 	cgroup_throttle_swaprate(page, GFP_KERNEL);
3877 
3878 	/*
3879 	 * Back out if somebody else already faulted in this pte.
3880 	 */
3881 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3882 			&vmf->ptl);
3883 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3884 		goto out_nomap;
3885 
3886 	if (unlikely(!PageUptodate(page))) {
3887 		ret = VM_FAULT_SIGBUS;
3888 		goto out_nomap;
3889 	}
3890 
3891 	/*
3892 	 * The page isn't present yet, go ahead with the fault.
3893 	 *
3894 	 * Be careful about the sequence of operations here.
3895 	 * To get its accounting right, reuse_swap_page() must be called
3896 	 * while the page is counted on swap but not yet in mapcount i.e.
3897 	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3898 	 * must be called after the swap_free(), or it will never succeed.
3899 	 */
3900 
3901 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3902 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3903 	pte = mk_pte(page, vma->vm_page_prot);
3904 	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3905 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3906 		vmf->flags &= ~FAULT_FLAG_WRITE;
3907 		ret |= VM_FAULT_WRITE;
3908 		exclusive = RMAP_EXCLUSIVE;
3909 	}
3910 	flush_icache_page(vma, page);
3911 	if (pte_swp_soft_dirty(vmf->orig_pte))
3912 		pte = pte_mksoft_dirty(pte);
3913 	if (pte_swp_uffd_wp(vmf->orig_pte)) {
3914 		pte = pte_mkuffd_wp(pte);
3915 		pte = pte_wrprotect(pte);
3916 	}
3917 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3918 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3919 	vmf->orig_pte = pte;
3920 
3921 	/* ksm created a completely new copy */
3922 	if (unlikely(page != swapcache && swapcache)) {
3923 		page_add_new_anon_rmap(page, vma, vmf->address, false);
3924 		lru_cache_add_inactive_or_unevictable(page, vma);
3925 	} else {
3926 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3927 	}
3928 
3929 	swap_free(entry);
3930 	if (mem_cgroup_swap_full(page) ||
3931 	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3932 		try_to_free_swap(page);
3933 	unlock_page(page);
3934 	if (page != swapcache && swapcache) {
3935 		/*
3936 		 * Hold the lock to prevent the swap entry from being reused
3937 		 * until we take the PT lock for the pte_same() check
3938 		 * (to avoid false positives from pte_same). For
3939 		 * further safety release the lock after the swap_free
3940 		 * so that the swap count won't change under a
3941 		 * parallel locked swapcache.
3942 		 */
3943 		unlock_page(swapcache);
3944 		put_page(swapcache);
3945 	}
3946 
3947 	if (vmf->flags & FAULT_FLAG_WRITE) {
3948 		ret |= do_wp_page(vmf);
3949 		if (ret & VM_FAULT_ERROR)
3950 			ret &= VM_FAULT_ERROR;
3951 		goto out;
3952 	}
3953 
3954 	/* No need to invalidate - it was non-present before */
3955 	update_mmu_cache(vma, vmf->address, vmf->pte);
3956 unlock:
3957 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3958 out:
3959 	if (si)
3960 		put_swap_device(si);
3961 	return ret;
3962 out_nomap:
3963 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3964 out_page:
3965 	unlock_page(page);
3966 out_release:
3967 	put_page(page);
3968 	if (page != swapcache && swapcache) {
3969 		unlock_page(swapcache);
3970 		put_page(swapcache);
3971 	}
3972 	if (si)
3973 		put_swap_device(si);
3974 	return ret;
3975 }
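
/*
 * The three swap-in paths taken by do_swap_page() above, depending on the
 * state of the entry:
 *
 *  1. Swapcache hit: lookup_swap_cache() returns the already-cached page and
 *     no new read needs to be started here.
 *  2. SWP_SYNCHRONOUS_IO backing device with a swap count of 1: the
 *     swapcache is skipped; a fresh page is allocated, charged, added to the
 *     LRU and read synchronously via swap_readpage().
 *  3. Otherwise: swapin_readahead() brings the page in through the
 *     swapcache.
 *
 * Paths 2 and 3 are accounted as major faults.  A speculative fault that
 * gets this far aborts with VM_FAULT_RETRY instead of taking path 3, since
 * readahead must not run while the VMA may change behind our back.
 */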
3976 
3977 /*
3978  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3979  * but allow concurrent faults), and pte mapped but not yet locked.
3980  * We return with mmap_lock still held, but pte unmapped and unlocked.
3981  */
3982 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3983 {
3984 	struct vm_area_struct *vma = vmf->vma;
3985 	struct page *page = NULL;
3986 	vm_fault_t ret = 0;
3987 	pte_t entry;
3988 
3989 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3990 		count_vm_spf_event(SPF_ATTEMPT_ANON);
3991 
3992 	/* File mapping without ->vm_ops ? */
3993 	if (vma->vm_flags & VM_SHARED)
3994 		return VM_FAULT_SIGBUS;
3995 
3996 	/* Do not check unstable pmd, if it's changed will retry later */
3997 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3998 		goto skip_pmd_checks;
3999 
4000 	/*
4001 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
4002 	 * pte_offset_map() on pmds where a huge pmd might be created
4003 	 * from a different thread.
4004 	 *
4005 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
4006 	 * parallel threads are excluded by other means.
4007 	 *
4008 	 * Here we only have mmap_read_lock(mm).
4009 	 */
4010 	if (pte_alloc(vma->vm_mm, vmf->pmd))
4011 		return VM_FAULT_OOM;
4012 
4013 	/* See comment in __handle_mm_fault() */
4014 	if (unlikely(pmd_trans_unstable(vmf->pmd)))
4015 		return 0;
4016 
4017 skip_pmd_checks:
4018 	/* Use the zero-page for reads */
4019 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4020 			!mm_forbids_zeropage(vma->vm_mm)) {
4021 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4022 						vma->vm_page_prot));
4023 	} else {
4024 		/* Allocate our own private page. */
4025 		if (unlikely(!vma->anon_vma)) {
4026 			if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4027 				count_vm_spf_event(SPF_ABORT_ANON_VMA);
4028 				return VM_FAULT_RETRY;
4029 			}
4030 			if (__anon_vma_prepare(vma))
4031 				goto oom;
4032 		}
4033 		page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
4034 		if (!page)
4035 			goto oom;
4036 
4037 		if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
4038 			goto oom_free_page;
4039 		cgroup_throttle_swaprate(page, GFP_KERNEL);
4040 
4041 		/*
4042 		 * The memory barrier inside __SetPageUptodate makes sure that
4043 		 * preceding stores to the page contents become visible before
4044 		 * the set_pte_at() write.
4045 		 */
4046 		__SetPageUptodate(page);
4047 
4048 		entry = mk_pte(page, vma->vm_page_prot);
4049 		entry = pte_sw_mkyoung(entry);
4050 		if (vma->vm_flags & VM_WRITE)
4051 			entry = pte_mkwrite(pte_mkdirty(entry));
4052 	}
4053 
4054 	if (!pte_map_lock(vmf)) {
4055 		ret = VM_FAULT_RETRY;
4056 		goto release;
4057 	}
4058 	if (!pte_none(*vmf->pte)) {
4059 		update_mmu_tlb(vma, vmf->address, vmf->pte);
4060 		goto unlock;
4061 	}
4062 
4063 	ret = check_stable_address_space(vma->vm_mm);
4064 	if (ret)
4065 		goto unlock;
4066 
4067 	/* Deliver the page fault to userland, check inside PT lock */
4068 	if (userfaultfd_missing(vma)) {
4069 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4070 		if (page)
4071 			put_page(page);
4072 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4073 			count_vm_spf_event(SPF_ABORT_USERFAULTFD);
4074 			return VM_FAULT_RETRY;
4075 		}
4076 		return handle_userfault(vmf, VM_UFFD_MISSING);
4077 	}
4078 
4079 	if (page) {
4080 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4081 		page_add_new_anon_rmap(page, vma, vmf->address, false);
4082 		lru_cache_add_inactive_or_unevictable(page, vma);
4083 	}
4084 
4085 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4086 
4087 	/* No need to invalidate - it was non-present before */
4088 	update_mmu_cache(vma, vmf->address, vmf->pte);
4089 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4090 	return 0;
4091 unlock:
4092 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4093 release:
4094 	if (page)
4095 		put_page(page);
4096 	return ret;
4097 oom_free_page:
4098 	put_page(page);
4099 oom:
4100 	return VM_FAULT_OOM;
4101 }
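
/*
 * do_anonymous_page() above maps the shared zero page (as a special,
 * read-only PTE) for read faults when the mm allows it, and only allocates
 * and charges a private zeroed page for write faults; in both cases the PTE
 * is installed only after re-checking pte_none() under the page-table lock
 * and letting userfaultfd intercept MISSING faults first.
 */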
4102 
4103 /*
4104  * The mmap_lock must have been held on entry, and may have been
4105  * released depending on flags and vma->vm_ops->fault() return value.
4106  * See filemap_fault() and __lock_page_retry().
4107  */
4108 static vm_fault_t __do_fault(struct vm_fault *vmf)
4109 {
4110 	struct vm_area_struct *vma = vmf->vma;
4111 	vm_fault_t ret;
4112 
4113 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
4114 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4115 		rcu_read_lock();
4116 		if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
4117 					 SPF_ABORT_FAULT)) {
4118 			ret = VM_FAULT_RETRY;
4119 		} else {
4120 			/*
4121 			 * The mmap sequence count check guarantees that the
4122 			 * vma we fetched at the start of the fault was still
4123 			 * current at that point in time. The rcu read lock
4124 			 * ensures vmf->vma->vm_file stays valid.
4125 			 */
4126 			ret = vma->vm_ops->fault(vmf);
4127 		}
4128 		rcu_read_unlock();
4129 	} else
4130 #endif
4131 	{
4132 		/*
4133 		 * Preallocate pte before we take page_lock because
4134 		 * this might lead to deadlocks for memcg reclaim
4135 		 * which waits for pages under writeback:
4136 		 *				lock_page(A)
4137 		 *				SetPageWriteback(A)
4138 		 *				unlock_page(A)
4139 		 * lock_page(B)
4140 		 *				lock_page(B)
4141 		 * pte_alloc_one
4142 		 *   shrink_page_list
4143 		 *     wait_on_page_writeback(A)
4144 		 *				SetPageWriteback(B)
4145 		 *				unlock_page(B)
4146 		 *				# flush A, B to clear writeback
4147 		 */
4148 		if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4149 			vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4150 			if (!vmf->prealloc_pte)
4151 				return VM_FAULT_OOM;
4152 			smp_wmb(); /* See comment in __pte_alloc() */
4153 		}
4154 
4155 		ret = vma->vm_ops->fault(vmf);
4156 	}
4157 
4158 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4159 			    VM_FAULT_DONE_COW)))
4160 		return ret;
4161 
4162 	if (unlikely(PageHWPoison(vmf->page))) {
4163 		struct page *page = vmf->page;
4164 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
4165 		if (ret & VM_FAULT_LOCKED) {
4166 			if (page_mapped(page))
4167 				unmap_mapping_pages(page_mapping(page),
4168 						    page->index, 1, false);
4169 			/* Retry if a clean page was removed from the cache. */
4170 			if (invalidate_inode_page(page))
4171 				poisonret = VM_FAULT_NOPAGE;
4172 			unlock_page(page);
4173 		}
4174 		put_page(page);
4175 		vmf->page = NULL;
4176 		return poisonret;
4177 	}
4178 
4179 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
4180 		lock_page(vmf->page);
4181 	else
4182 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
4183 
4184 	return ret;
4185 }
4186 
4187 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4188 static void deposit_prealloc_pte(struct vm_fault *vmf)
4189 {
4190 	struct vm_area_struct *vma = vmf->vma;
4191 
4192 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4193 	/*
4194 	 * We are going to consume the prealloc table,
4195 	 * count that as nr_ptes.
4196 	 */
4197 	mm_inc_nr_ptes(vma->vm_mm);
4198 	vmf->prealloc_pte = NULL;
4199 }
4200 
4201 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4202 {
4203 	struct vm_area_struct *vma = vmf->vma;
4204 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4205 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4206 	pmd_t entry;
4207 	int i;
4208 	vm_fault_t ret = VM_FAULT_FALLBACK;
4209 
4210 	if (!transhuge_vma_suitable(vma, haddr))
4211 		return ret;
4212 
4213 	page = compound_head(page);
4214 	if (compound_order(page) != HPAGE_PMD_ORDER)
4215 		return ret;
4216 
4217 	/*
4218 	 * Just back off if any subpage of a THP is corrupted, otherwise
4219 	 * the corrupted page may be mapped by a PMD silently and escape the
4220 	 * check.  This kind of THP can only be PTE mapped.  Access to
4221 	 * the corrupted subpage should trigger SIGBUS as expected.
4222 	 */
4223 	if (unlikely(PageHasHWPoisoned(page)))
4224 		return ret;
4225 
4226 	/*
4227 	 * Archs like ppc64 need additional space to store information
4228 	 * related to pte entry. Use the preallocated table for that.
4229 	 */
4230 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4231 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4232 		if (!vmf->prealloc_pte)
4233 			return VM_FAULT_OOM;
4234 		smp_wmb(); /* See comment in __pte_alloc() */
4235 	}
4236 
4237 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4238 	if (unlikely(!pmd_none(*vmf->pmd)))
4239 		goto out;
4240 
4241 	for (i = 0; i < HPAGE_PMD_NR; i++)
4242 		flush_icache_page(vma, page + i);
4243 
4244 	entry = mk_huge_pmd(page, vma->vm_page_prot);
4245 	if (write)
4246 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4247 
4248 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
4249 	page_add_file_rmap(page, true);
4250 	/*
4251 	 * deposit and withdraw with pmd lock held
4252 	 */
4253 	if (arch_needs_pgtable_deposit())
4254 		deposit_prealloc_pte(vmf);
4255 
4256 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4257 
4258 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4259 
4260 	/* fault is handled */
4261 	ret = 0;
4262 	count_vm_event(THP_FILE_MAPPED);
4263 out:
4264 	spin_unlock(vmf->ptl);
4265 	return ret;
4266 }
4267 #else
4268 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4269 {
4270 	return VM_FAULT_FALLBACK;
4271 }
4272 #endif
4273 
4274 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
4275 {
4276 	struct vm_area_struct *vma = vmf->vma;
4277 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4278 	bool prefault = vmf->address != addr;
4279 	pte_t entry;
4280 
4281 	flush_icache_page(vma, page);
4282 	entry = mk_pte(page, vma->vm_page_prot);
4283 
4284 	if (prefault && arch_wants_old_prefaulted_pte())
4285 		entry = pte_mkold(entry);
4286 	else
4287 		entry = pte_sw_mkyoung(entry);
4288 
4289 	if (write)
4290 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4291 	/* copy-on-write page */
4292 	if (write && !(vma->vm_flags & VM_SHARED)) {
4293 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4294 		page_add_new_anon_rmap(page, vma, addr, false);
4295 		lru_cache_add_inactive_or_unevictable(page, vma);
4296 	} else {
4297 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
4298 		page_add_file_rmap(page, false);
4299 	}
4300 	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
4301 }
4302 
4303 /**
4304  * finish_fault - finish page fault once we have prepared the page to fault
4305  *
4306  * @vmf: structure describing the fault
4307  *
4308  * This function handles all that is needed to finish a page fault once the
4309  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
4310  * given page, adds reverse page mapping, handles memcg charges and LRU
4311  * addition.
4312  *
4313  * The function expects the page to be locked and on success it consumes a
4314  * reference of a page being mapped (for the PTE which maps it).
4315  *
4316  * Return: %0 on success, %VM_FAULT_ code in case of error.
4317  */
4318 vm_fault_t finish_fault(struct vm_fault *vmf)
4319 {
4320 	struct vm_area_struct *vma = vmf->vma;
4321 	struct page *page;
4322 	vm_fault_t ret;
4323 
4324 	/* Did we COW the page? */
4325 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
4326 		page = vmf->cow_page;
4327 	else
4328 		page = vmf->page;
4329 
4330 	/*
4331 	 * check even for read faults because we might have lost our CoWed
4332 	 * page
4333 	 */
4334 	if (!(vma->vm_flags & VM_SHARED)) {
4335 		ret = check_stable_address_space(vma->vm_mm);
4336 		if (ret)
4337 			return ret;
4338 	}
4339 
4340 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
4341 		if (pmd_none(*vmf->pmd)) {
4342 			if (PageTransCompound(page)) {
4343 				ret = do_set_pmd(vmf, page);
4344 				if (ret != VM_FAULT_FALLBACK)
4345 					return ret;
4346 			}
4347 
4348 			if (vmf->prealloc_pte) {
4349 				vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4350 				if (likely(pmd_none(*vmf->pmd))) {
4351 					mm_inc_nr_ptes(vma->vm_mm);
4352 					pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4353 					vmf->prealloc_pte = NULL;
4354 				}
4355 				spin_unlock(vmf->ptl);
4356 			} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
4357 				return VM_FAULT_OOM;
4358 			}
4359 		}
4360 
4361 		/*
4362 		 * See comment in handle_pte_fault() for how this scenario happens, we
4363 		 * need to return NOPAGE so that we drop this page.
4364 		 */
4365 		if (pmd_devmap_trans_unstable(vmf->pmd))
4366 			return VM_FAULT_NOPAGE;
4367 	}
4368 
4369 	if (!pte_map_lock(vmf))
4370 		return VM_FAULT_RETRY;
4371 	ret = 0;
4372 	/* Re-check under ptl */
4373 	if (likely(pte_none(*vmf->pte)))
4374 		do_set_pte(vmf, page, vmf->address);
4375 	else
4376 		ret = VM_FAULT_NOPAGE;
4377 
4378 	update_mmu_tlb(vma, vmf->address, vmf->pte);
4379 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4380 	return ret;
4381 }
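
/*
 * finish_fault() above is the common tail of the do_*_fault() paths below:
 * it picks either the COWed page or the file page, installs a PMD mapping
 * via do_set_pmd() when the page is a suitably aligned THP, otherwise makes
 * sure a page table exists and installs a PTE with do_set_pte(), returning
 * VM_FAULT_NOPAGE if, for example, someone else already populated the PTE.
 */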
4382 
4383 static unsigned long fault_around_bytes __read_mostly =
4384 	rounddown_pow_of_two(65536);
4385 
4386 #ifdef CONFIG_DEBUG_FS
4387 static int fault_around_bytes_get(void *data, u64 *val)
4388 {
4389 	*val = fault_around_bytes;
4390 	return 0;
4391 }
4392 
4393 /*
4394  * fault_around_bytes must be rounded down to the nearest page order as it's
4395  * what do_fault_around() expects to see.
4396  */
4397 static int fault_around_bytes_set(void *data, u64 val)
4398 {
4399 	if (val / PAGE_SIZE > PTRS_PER_PTE)
4400 		return -EINVAL;
4401 	if (val > PAGE_SIZE)
4402 		fault_around_bytes = rounddown_pow_of_two(val);
4403 	else
4404 		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
4405 	return 0;
4406 }
4407 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
4408 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
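
/*
 * Example of the rounding performed above (illustrative values, assuming a
 * 4 KiB PAGE_SIZE): writing 70000 to the debugfs file stores
 * rounddown_pow_of_two(70000) == 65536; writing 3000 (not larger than
 * PAGE_SIZE) stores PAGE_SIZE; writing more than PTRS_PER_PTE pages' worth
 * of bytes is rejected with -EINVAL.
 */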
4409 
4410 static int __init fault_around_debugfs(void)
4411 {
4412 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4413 				   &fault_around_bytes_fops);
4414 	return 0;
4415 }
4416 late_initcall(fault_around_debugfs);
4417 #endif
4418 
4419 /*
4420  * do_fault_around() tries to map a few pages around the fault address. The hope
4421  * is that the pages will be needed soon and this will lower the number of
4422  * faults to handle.
4423  *
4424  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4425  * not ready to be mapped: not up-to-date, locked, etc.
4426  *
4427  * This function is called with the page table lock taken. In the split ptlock
4428  * case the page table lock only protects those entries which belong to
4429  * the page table corresponding to the fault address.
4430  *
4431  * This function doesn't cross the VMA boundaries, in order to call map_pages()
4432  * only once.
4433  *
4434  * fault_around_bytes defines how many bytes we'll try to map.
4435  * do_fault_around() expects it to be set to a power of two less than or equal
4436  * to PTRS_PER_PTE.
4437  *
4438  * The virtual address of the area that we map is naturally aligned to
4439  * fault_around_bytes rounded down to the machine page size
4440  * (and therefore to page order).  This way it's easier to guarantee
4441  * that we don't cross page table boundaries.
4442  */
4443 static vm_fault_t do_fault_around(struct vm_fault *vmf)
4444 {
4445 	unsigned long address = vmf->address, nr_pages, mask;
4446 	pgoff_t start_pgoff = vmf->pgoff;
4447 	pgoff_t end_pgoff;
4448 	int off;
4449 	vm_fault_t ret;
4450 
4451 	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
4452 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
4453 
4454 	address = max(address & mask, vmf->vma->vm_start);
4455 	off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
4456 	start_pgoff -= off;
4457 
4458 	/*
4459 	 *  end_pgoff is either the end of the page table, the end of
4460 	 *  the vma or nr_pages from start_pgoff, depending on what is nearest.
4461 	 */
4462 	end_pgoff = start_pgoff -
4463 		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
4464 		PTRS_PER_PTE - 1;
4465 	end_pgoff = min3(end_pgoff, vma_data_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
4466 			start_pgoff + nr_pages - 1);
4467 
4468 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
4469 	    pmd_none(*vmf->pmd)) {
4470 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4471 		if (!vmf->prealloc_pte)
4472 			return VM_FAULT_OOM;
4473 		smp_wmb(); /* See comment in __pte_alloc() */
4474 	}
4475 
4476 	rcu_read_lock();
4477 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
4478 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4479 		if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
4480 					 SPF_ABORT_FAULT)) {
4481 			rcu_read_unlock();
4482 			return VM_FAULT_RETRY;
4483 		}
4484 		/*
4485 		 * the mmap sequence check verified that vmf->vma was still
4486 		 * current at that point in time.
4487 		 * The rcu read lock ensures vmf->vma->vm_file stays valid.
4488 		 */
4489 	}
4490 #endif
4491 	ret = vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
4492 	rcu_read_unlock();
4493 	return ret;
4494 }
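
/*
 * Worked example of the arithmetic above (illustrative, assuming 4 KiB
 * pages, PTRS_PER_PTE == 512 and the default fault_around_bytes of 65536):
 * nr_pages = 16 and mask clears the low 16 bits.  For a fault at 0x107000
 * inside a VMA starting below 0x100000, address becomes 0x100000, off = 7,
 * and start_pgoff is moved back by 7 pages so it corresponds to 0x100000.
 * end_pgoff is then limited by min3() to at most start_pgoff + 15, i.e. the
 * 16 pages of the 64 KiB block around the fault (further clamped to the end
 * of the page table and the end of the VMA's data pages).
 */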
4495 
4496 static vm_fault_t do_read_fault(struct vm_fault *vmf)
4497 {
4498 	struct vm_area_struct *vma = vmf->vma;
4499 	vm_fault_t ret = 0;
4500 
4501 	/*
4502 	 * Let's call ->map_pages() first and use ->fault() as fallback
4503 	 * if the page at that offset is not ready to be mapped (cold cache or
4504 	 * something).
4505 	 */
4506 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
4507 		if (likely(!userfaultfd_minor(vmf->vma))) {
4508 			ret = do_fault_around(vmf);
4509 			if (ret)
4510 				return ret;
4511 		}
4512 	}
4513 
4514 	ret = __do_fault(vmf);
4515 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4516 		return ret;
4517 
4518 	ret |= finish_fault(vmf);
4519 	unlock_page(vmf->page);
4520 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4521 		put_page(vmf->page);
4522 	return ret;
4523 }
4524 
4525 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4526 {
4527 	struct vm_area_struct *vma = vmf->vma;
4528 	vm_fault_t ret;
4529 
4530 	if (unlikely(!vma->anon_vma)) {
4531 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
4532 			count_vm_spf_event(SPF_ABORT_ANON_VMA);
4533 			return VM_FAULT_RETRY;
4534 		}
4535 		if (__anon_vma_prepare(vma))
4536 			return VM_FAULT_OOM;
4537 	}
4538 
4539 	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4540 	if (!vmf->cow_page)
4541 		return VM_FAULT_OOM;
4542 
4543 	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4544 		put_page(vmf->cow_page);
4545 		return VM_FAULT_OOM;
4546 	}
4547 	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4548 
4549 	ret = __do_fault(vmf);
4550 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4551 		goto uncharge_out;
4552 	if (ret & VM_FAULT_DONE_COW)
4553 		return ret;
4554 
4555 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4556 	__SetPageUptodate(vmf->cow_page);
4557 
4558 	ret |= finish_fault(vmf);
4559 	unlock_page(vmf->page);
4560 	put_page(vmf->page);
4561 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4562 		goto uncharge_out;
4563 	return ret;
4564 uncharge_out:
4565 	put_page(vmf->cow_page);
4566 	return ret;
4567 }
4568 
4569 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4570 {
4571 	struct vm_area_struct *vma = vmf->vma;
4572 	vm_fault_t ret, tmp;
4573 
4574 	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
4575 
4576 	ret = __do_fault(vmf);
4577 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4578 		return ret;
4579 
4580 	/*
4581 	 * Check if the backing address space wants to know that the page is
4582 	 * about to become writable
4583 	 */
4584 	if (vma->vm_ops->page_mkwrite) {
4585 		unlock_page(vmf->page);
4586 		tmp = do_page_mkwrite(vmf);
4587 		if (unlikely(!tmp ||
4588 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4589 			put_page(vmf->page);
4590 			return tmp;
4591 		}
4592 	}
4593 
4594 	ret |= finish_fault(vmf);
4595 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4596 					VM_FAULT_RETRY))) {
4597 		unlock_page(vmf->page);
4598 		put_page(vmf->page);
4599 		return ret;
4600 	}
4601 
4602 	ret |= fault_dirty_shared_page(vmf);
4603 	return ret;
4604 }
4605 
4606 /*
4607  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4608  * but allow concurrent faults).
4609  * The mmap_lock may have been released depending on flags and our
4610  * return value.  See filemap_fault() and __lock_page_or_retry().
4611  * If mmap_lock is released, vma may become invalid (for example
4612  * by other thread calling munmap()).
4613  */
4614 static vm_fault_t do_fault(struct vm_fault *vmf)
4615 {
4616 	struct vm_area_struct *vma = vmf->vma;
4617 	struct mm_struct *vm_mm = vma->vm_mm;
4618 	vm_fault_t ret;
4619 
4620 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4621 		count_vm_spf_event(SPF_ATTEMPT_FILE);
4622 
4623 	/*
4624 	 * The VMA was not fully populated on mmap() or is missing VM_DONTEXPAND
4625 	 */
4626 	if (!vma->vm_ops->fault) {
4627 		VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
4628 
4629 		/*
4630 		 * If we find a migration pmd entry or a none pmd entry, which
4631 		 * should never happen, return SIGBUS
4632 		 */
4633 		if (unlikely(!pmd_present(*vmf->pmd)))
4634 			ret = VM_FAULT_SIGBUS;
4635 		else {
4636 			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4637 						       vmf->pmd,
4638 						       vmf->address,
4639 						       &vmf->ptl);
4640 			/*
4641 			 * Make sure this is not a temporary clearing of pte
4642 			 * by holding ptl and checking again. A R/M/W update
4643 			 * of the pte involves taking the ptl, clearing the pte
4644 			 * (so that we don't have concurrent modification by
4645 			 * hardware), followed by an update.
4646 			 */
4647 			if (unlikely(pte_none(*vmf->pte)))
4648 				ret = VM_FAULT_SIGBUS;
4649 			else
4650 				ret = VM_FAULT_NOPAGE;
4651 
4652 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4653 		}
4654 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4655 		ret = do_read_fault(vmf);
4656 	else if (!(vma->vm_flags & VM_SHARED))
4657 		ret = do_cow_fault(vmf);
4658 	else
4659 		ret = do_shared_fault(vmf);
4660 
4661 	/* preallocated pagetable is unused: free it */
4662 	if (vmf->prealloc_pte) {
4663 		pte_free(vm_mm, vmf->prealloc_pte);
4664 		vmf->prealloc_pte = NULL;
4665 	}
4666 	return ret;
4667 }
4668 
4669 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4670 		      unsigned long addr, int page_nid, int *flags)
4671 {
4672 	get_page(page);
4673 
4674 	count_vm_numa_event(NUMA_HINT_FAULTS);
4675 	if (page_nid == numa_node_id()) {
4676 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4677 		*flags |= TNF_FAULT_LOCAL;
4678 	}
4679 
4680 	return mpol_misplaced(page, vma, addr);
4681 }
4682 
4683 static vm_fault_t do_numa_page(struct vm_fault *vmf)
4684 {
4685 	struct vm_area_struct *vma = vmf->vma;
4686 	struct page *page = NULL;
4687 	int page_nid = NUMA_NO_NODE;
4688 	int last_cpupid;
4689 	int target_nid;
4690 	pte_t pte, old_pte;
4691 	bool was_writable = pte_savedwrite(vmf->orig_pte);
4692 	int flags = 0;
4693 
4694 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4695 		count_vm_spf_event(SPF_ATTEMPT_NUMA);
4696 
4697 	/*
4698 	 * The "pte" at this point cannot be used safely without
4699 	 * validation through pte_unmap_same(). It's of NUMA type but
4700 	 * the pfn may be screwed if the read is non-atomic.
4701 	 */
4702 	if (!pte_spinlock(vmf))
4703 		return VM_FAULT_RETRY;
4704 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4705 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4706 		goto out;
4707 	}
4708 
4709 	/* Get the normal PTE  */
4710 	old_pte = ptep_get(vmf->pte);
4711 	pte = pte_modify(old_pte, vma->vm_page_prot);
4712 
4713 	page = vm_normal_page(vma, vmf->address, pte);
4714 	if (!page)
4715 		goto out_map;
4716 
4717 	/* TODO: handle PTE-mapped THP */
4718 	if (PageCompound(page))
4719 		goto out_map;
4720 
4721 	/*
4722 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4723 	 * much anyway since they can be in shared cache state. This misses
4724 	 * the case where a mapping is writable but the process never writes
4725 	 * to it, pte_write gets cleared during protection updates, and
4726 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4727 	 * background writeback, dirty balancing and application behaviour.
4728 	 */
4729 	if (!was_writable)
4730 		flags |= TNF_NO_GROUP;
4731 
4732 	/*
4733 	 * Flag if the page is shared between multiple address spaces. This
4734 	 * is later used when determining whether to group tasks together
4735 	 */
4736 	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4737 		flags |= TNF_SHARED;
4738 
4739 	last_cpupid = page_cpupid_last(page);
4740 	page_nid = page_to_nid(page);
4741 	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4742 			&flags);
4743 	if (target_nid == NUMA_NO_NODE) {
4744 		put_page(page);
4745 		goto out_map;
4746 	}
4747 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4748 
4749 	/* Migrate to the requested node */
4750 	if (migrate_misplaced_page(page, vma, target_nid)) {
4751 		page_nid = target_nid;
4752 		flags |= TNF_MIGRATED;
4753 	} else {
4754 		flags |= TNF_MIGRATE_FAIL;
4755 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4756 		spin_lock(vmf->ptl);
4757 		if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4758 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4759 			goto out;
4760 		}
4761 		goto out_map;
4762 	}
4763 
4764 out:
4765 	if (page_nid != NUMA_NO_NODE)
4766 		task_numa_fault(last_cpupid, page_nid, 1, flags);
4767 	return 0;
4768 out_map:
4769 	/*
4770 	 * Make it present again, depending on how arch implements
4771 	 * non-accessible ptes, some can allow access by kernel mode.
4772 	 */
4773 	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4774 	pte = pte_modify(old_pte, vma->vm_page_prot);
4775 	pte = pte_mkyoung(pte);
4776 	if (was_writable)
4777 		pte = pte_mkwrite(pte);
4778 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4779 	update_mmu_cache(vma, vmf->address, vmf->pte);
4780 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4781 	goto out;
4782 }
4783 
4784 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4785 {
4786 	if (vma_is_anonymous(vmf->vma))
4787 		return do_huge_pmd_anonymous_page(vmf);
4788 	if (vmf->vma->vm_ops->huge_fault)
4789 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4790 	return VM_FAULT_FALLBACK;
4791 }
4792 
4793 /* `inline' is required to avoid gcc 4.1.2 build error */
4794 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
4795 {
4796 	if (vma_is_anonymous(vmf->vma)) {
4797 		if (userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
4798 			return handle_userfault(vmf, VM_UFFD_WP);
4799 		return do_huge_pmd_wp_page(vmf);
4800 	}
4801 	if (vmf->vma->vm_ops->huge_fault) {
4802 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4803 
4804 		if (!(ret & VM_FAULT_FALLBACK))
4805 			return ret;
4806 	}
4807 
4808 	/* COW or write-notify handled on pte level: split pmd. */
4809 	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4810 
4811 	return VM_FAULT_FALLBACK;
4812 }
4813 
4814 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4815 {
4816 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4817 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4818 	/* No support for anonymous transparent PUD pages yet */
4819 	if (vma_is_anonymous(vmf->vma))
4820 		return VM_FAULT_FALLBACK;
4821 	if (vmf->vma->vm_ops->huge_fault)
4822 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4823 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4824 	return VM_FAULT_FALLBACK;
4825 }
4826 
4827 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4828 {
4829 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4830 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4831 	/* No support for anonymous transparent PUD pages yet */
4832 	if (vma_is_anonymous(vmf->vma))
4833 		goto split;
4834 	if (vmf->vma->vm_ops->huge_fault) {
4835 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4836 
4837 		if (!(ret & VM_FAULT_FALLBACK))
4838 			return ret;
4839 	}
4840 split:
4841 	/* COW or write-notify not handled on PUD level: split pud.*/
4842 	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4843 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4844 	return VM_FAULT_FALLBACK;
4845 }
4846 
4847 /*
4848  * These routines also need to handle stuff like marking pages dirty
4849  * and/or accessed for architectures that don't do it in hardware (most
4850  * RISC architectures).  The early dirtying is also good on the i386.
4851  *
4852  * There is also a hook called "update_mmu_cache()" that architectures
4853  * with external mmu caches can use to update those (ie the Sparc or
4854  * PowerPC hashed page tables that act as extended TLBs).
4855  *
4856  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4857  * concurrent faults).
4858  *
4859  * The mmap_lock may have been released depending on flags and our return value.
4860  * See filemap_fault() and __lock_page_or_retry().
4861  */
4862 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4863 {
4864 	pte_t entry;
4865 
4866 	if (!vmf->pte) {
4867 		if (vma_is_anonymous(vmf->vma))
4868 			return do_anonymous_page(vmf);
4869 		else
4870 			return do_fault(vmf);
4871 	}
4872 
4873 	if (!pte_present(vmf->orig_pte))
4874 		return do_swap_page(vmf);
4875 
4876 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4877 		return do_numa_page(vmf);
4878 
4879 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4880 		count_vm_spf_event(SPF_ATTEMPT_PTE);
4881 
4882 	if (!pte_spinlock(vmf))
4883 		return VM_FAULT_RETRY;
4884 	entry = vmf->orig_pte;
4885 	if (unlikely(!pte_same(*vmf->pte, entry))) {
4886 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4887 		goto unlock;
4888 	}
4889 	if (vmf->flags & FAULT_FLAG_WRITE) {
4890 		if (!pte_write(entry))
4891 			return do_wp_page(vmf);
4892 		entry = pte_mkdirty(entry);
4893 	}
4894 	entry = pte_mkyoung(entry);
4895 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4896 				vmf->flags & FAULT_FLAG_WRITE)) {
4897 		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4898 	} else {
4899 		/* Skip spurious TLB flush for retried page fault */
4900 		if (vmf->flags & FAULT_FLAG_TRIED)
4901 			goto unlock;
4902 		/*
4903 		 * This is needed only for protection faults but the arch code
4904 		 * is not yet telling us if this is a protection fault or not.
4905 		 * This still avoids useless tlb flushes for .text page faults
4906 		 * with threads.
4907 		 */
4908 		if (vmf->flags & FAULT_FLAG_WRITE)
4909 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4910 	}
4911 unlock:
4912 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4913 	return 0;
4914 }
4915 
4916 /*
4917  * By the time we get here, we already hold the mm semaphore
4918  *
4919  * The mmap_lock may have been released depending on flags and our
4920  * return value.  See filemap_fault() and __lock_page_or_retry().
4921  */
4922 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4923 		unsigned long address, unsigned int flags, unsigned long seq)
4924 {
4925 	struct vm_fault vmf = {
4926 		.vma = vma,
4927 		.address = address & PAGE_MASK,
4928 		.flags = flags,
4929 		.pgoff = linear_page_index(vma, address),
4930 		.gfp_mask = __get_fault_gfp_mask(vma),
4931 	};
4932 	unsigned int dirty = flags & FAULT_FLAG_WRITE;
4933 	struct mm_struct *mm = vma->vm_mm;
4934 	pgd_t *pgd;
4935 	p4d_t *p4d;
4936 	vm_fault_t ret;
4937 
4938 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
4939 	if (flags & FAULT_FLAG_SPECULATIVE) {
4940 		pgd_t pgdval;
4941 		p4d_t p4dval;
4942 		pud_t pudval;
4943 		bool uffd_missing_sigbus = false;
4944 
4945 #ifdef CONFIG_USERFAULTFD
4946 		/*
4947 		 * Only support SPF for SIGBUS+MISSING userfaults in private
4948 		 * anonymous VMAs.
4949 		 */
4950 		uffd_missing_sigbus = vma_is_anonymous(vma) &&
4951 					(vma->vm_flags & VM_UFFD_MISSING) &&
4952 					userfaultfd_using_sigbus(vma);
4953 #endif
4954 
4955 		vmf.seq = seq;
4956 
4957 		speculative_page_walk_begin();
4958 		pgd = pgd_offset(mm, address);
4959 		pgdval = READ_ONCE(*pgd);
4960 		if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval))) {
4961 			count_vm_spf_event(SPF_ABORT_PUD);
4962 			goto spf_fail;
4963 		}
4964 
4965 		p4d = p4d_offset(pgd, address);
4966 		if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval))
4967 			goto spf_fail;
4968 		p4dval = READ_ONCE(*p4d);
4969 		if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval))) {
4970 			count_vm_spf_event(SPF_ABORT_PUD);
4971 			goto spf_fail;
4972 		}
4973 
4974 		vmf.pud = pud_offset(p4d, address);
4975 		if (p4d_val(READ_ONCE(*p4d)) != p4d_val(p4dval))
4976 			goto spf_fail;
4977 		pudval = READ_ONCE(*vmf.pud);
4978 		if (pud_none(pudval) || unlikely(pud_bad(pudval)) ||
4979 		    unlikely(pud_trans_huge(pudval)) ||
4980 		    unlikely(pud_devmap(pudval))) {
4981 			count_vm_spf_event(SPF_ABORT_PUD);
4982 			goto spf_fail;
4983 		}
4984 
4985 		vmf.pmd = pmd_offset(vmf.pud, address);
4986 		if (pud_val(READ_ONCE(*vmf.pud)) != pud_val(pudval))
4987 			goto spf_fail;
4988 		vmf.orig_pmd = READ_ONCE(*vmf.pmd);
4989 
4990 		/*
4991 		 * pmd_none() could mean that a hugepage collapse is in
4992 		 * progress behind our back, as collapse_huge_page() marks
4993 		 * the pmd before invalidating the ptes (which is done once
4994 		 * the IPI has been caught by all CPUs, with interrupts
4995 		 * disabled).  For this reason we cannot handle THP in a
4996 		 * speculative way, since we can't safely identify an
4997 		 * in-progress collapse operation done behind our back on
4998 		 * that PMD.
4999 		 */
5000 		if (unlikely(pmd_none(vmf.orig_pmd) ||
5001 			     is_swap_pmd(vmf.orig_pmd) ||
5002 			     pmd_trans_huge(vmf.orig_pmd) ||
5003 			     pmd_devmap(vmf.orig_pmd))) {
5004 			count_vm_spf_event(SPF_ABORT_PMD);
5005 			goto spf_fail;
5006 		}
5007 
5008 		/*
5009 		 * The above does not allocate/instantiate page-tables because
5010 		 * doing so would lead to the possibility of instantiating
5011 		 * page-tables after free_pgtables() -- and consequently
5012 		 * leaking them.
5013 		 *
5014 		 * The result is that we take at least one non-speculative
5015 		 * fault per PMD in order to instantiate it.
5016 		 */
5017 
5018 		vmf.pte = pte_offset_map(vmf.pmd, address);
5019 		if (pmd_val(READ_ONCE(*vmf.pmd)) != pmd_val(vmf.orig_pmd)) {
5020 			pte_unmap(vmf.pte);
5021 			vmf.pte = NULL;
5022 			goto spf_fail;
5023 		}
5024 		vmf.orig_pte = READ_ONCE(*vmf.pte);
5025 		barrier();
5026 		if (pte_none(vmf.orig_pte)) {
5027 			pte_unmap(vmf.pte);
5028 			vmf.pte = NULL;
5029 		}
5030 
5031 		speculative_page_walk_end();
5032 
5033 		if (!vmf.pte && uffd_missing_sigbus)
5034 			return VM_FAULT_SIGBUS;
5035 
5036 		return handle_pte_fault(&vmf);
5037 
5038 	spf_fail:
5039 		speculative_page_walk_end();
5040 		/*
5041 		 * A failing page-table walk is similar to a missing page, so
5042 		 * give the SIGBUS+MISSING userfault an opportunity to handle
5043 		 * it before retrying with mmap_lock held.
5044 		 */
5045 		return uffd_missing_sigbus ? VM_FAULT_SIGBUS : VM_FAULT_RETRY;
5046 	}
5047 #endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */
5048 
5049 	pgd = pgd_offset(mm, address);
5050 	p4d = p4d_alloc(mm, pgd, address);
5051 	if (!p4d)
5052 		return VM_FAULT_OOM;
5053 
5054 	vmf.pud = pud_alloc(mm, p4d, address);
5055 	if (!vmf.pud)
5056 		return VM_FAULT_OOM;
5057 retry_pud:
5058 	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
5059 		ret = create_huge_pud(&vmf);
5060 		if (!(ret & VM_FAULT_FALLBACK))
5061 			return ret;
5062 	} else {
5063 		pud_t orig_pud = *vmf.pud;
5064 
5065 		barrier();
5066 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5067 
5068 			/* NUMA case for anonymous PUDs would go here */
5069 
5070 			if (dirty && !pud_write(orig_pud)) {
5071 				ret = wp_huge_pud(&vmf, orig_pud);
5072 				if (!(ret & VM_FAULT_FALLBACK))
5073 					return ret;
5074 			} else {
5075 				huge_pud_set_accessed(&vmf, orig_pud);
5076 				return 0;
5077 			}
5078 		}
5079 	}
5080 
5081 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5082 	if (!vmf.pmd)
5083 		return VM_FAULT_OOM;
5084 
5085 	/* Huge pud page fault raced with pmd_alloc? */
5086 	if (pud_trans_unstable(vmf.pud))
5087 		goto retry_pud;
5088 
5089 	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
5090 		ret = create_huge_pmd(&vmf);
5091 		if (!(ret & VM_FAULT_FALLBACK))
5092 			return ret;
5093 	} else {
5094 		vmf.orig_pmd = *vmf.pmd;
5095 
5096 		barrier();
5097 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
5098 			VM_BUG_ON(thp_migration_supported() &&
5099 					  !is_pmd_migration_entry(vmf.orig_pmd));
5100 			if (is_pmd_migration_entry(vmf.orig_pmd))
5101 				pmd_migration_entry_wait(mm, vmf.pmd);
5102 			return 0;
5103 		}
5104 		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
5105 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5106 				return do_huge_pmd_numa_page(&vmf);
5107 
5108 			if (dirty && !pmd_write(vmf.orig_pmd)) {
5109 				ret = wp_huge_pmd(&vmf);
5110 				if (!(ret & VM_FAULT_FALLBACK))
5111 					return ret;
5112 			} else {
5113 				huge_pmd_set_accessed(&vmf);
5114 				return 0;
5115 			}
5116 		}
5117 	}
5118 
5119 	if (unlikely(pmd_none(*vmf.pmd))) {
5120 		/*
5121 		 * Leave __pte_alloc() until later: because vm_ops->fault may
5122 		 * want to allocate huge page, and if we expose page table
5123 		 * for an instant, it will be difficult to retract from
5124 		 * concurrent faults and from rmap lookups.
5125 		 */
5126 		vmf.pte = NULL;
5127 	} else {
5128 		/*
5129 		 * If a huge pmd materialized under us just retry later.  Use
5130 		 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead
5131 		 * of pmd_trans_huge() to ensure the pmd didn't become
5132 		 * pmd_trans_huge under us and then back to pmd_none, as a
5133 		 * result of MADV_DONTNEED running immediately after a huge pmd
5134 		 * fault in a different thread of this mm, in turn leading to a
5135 		 * misleading pmd_trans_huge() retval. All we have to ensure is
5136 		 * that it is a regular pmd that we can walk with
5137 		 * pte_offset_map() and we can do that through an atomic read
5138 		 * in C, which is what pmd_trans_unstable() provides.
5139 		 */
5140 		if (pmd_devmap_trans_unstable(vmf.pmd))
5141 			return 0;
5142 		/*
5143 		 * A regular pmd is established and it can't morph into a huge
5144 		 * pmd from under us anymore at this point because we hold the
5145 		 * mmap_lock read mode and khugepaged takes it in write mode.
5146 		 * So now it's safe to run pte_offset_map().
5147 		 */
5148 		vmf.pte = pte_offset_map(vmf.pmd, vmf.address);
5149 		vmf.orig_pte = *vmf.pte;
5150 
5151 		/*
5152 		 * Some architectures can have ptes larger than the word size,
5153 		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
5154 		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
5155 		 * accesses.  The code below just needs a consistent view
5156 		 * for the ifs and we later double check anyway with the
5157 		 * ptl lock held. So here a barrier will do.
5158 		 */
5159 		barrier();
5160 		if (pte_none(vmf.orig_pte)) {
5161 			pte_unmap(vmf.pte);
5162 			vmf.pte = NULL;
5163 		}
5164 	}
5165 
5166 	return handle_pte_fault(&vmf);
5167 }
5168 
5169 /**
5170  * mm_account_fault - Do page fault accounting
5171  *
5172  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
5173  *        of perf event counters, but we'll still do the per-task accounting to
5174  *        the task that triggered this page fault.
5175  * @address: the faulted address.
5176  * @flags: the fault flags.
5177  * @ret: the fault retcode.
5178  *
5179  * This will take care of most of the page fault accounting.  Meanwhile, it
5180  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
5181  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
5182  * still be in per-arch page fault handlers at the entry of page fault.
5183  */
5184 static inline void mm_account_fault(struct pt_regs *regs,
5185 				    unsigned long address, unsigned int flags,
5186 				    vm_fault_t ret)
5187 {
5188 	bool major;
5189 
5190 	/*
5191 	 * We don't do accounting for some specific faults:
5192 	 *
5193 	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
5194 	 *   includes arch_vma_access_permitted() failing before reaching here.
5195 	 *   So this is not a "this many hardware page faults" counter.  We
5196 	 *   should use the hw profiling for that.
5197 	 *
5198 	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
5199 	 *   once they're completed.
5200 	 */
5201 	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
5202 		return;
5203 
5204 	/*
5205 	 * We define the fault as a major fault when the final successful fault
5206 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
5207 	 * handle it immediately previously).
5208 	 */
5209 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
5210 
5211 	if (major)
5212 		current->maj_flt++;
5213 	else
5214 		current->min_flt++;
5215 
5216 	/*
5217 	 * If the fault is done for GUP, regs will be NULL.  In that case we
5218 	 * only do the per-thread fault accounting for the task that
5219 	 * triggered the fault, and skip the perf event updates.
5220 	 */
5221 	if (!regs)
5222 		return;
5223 
5224 	if (major)
5225 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
5226 	else
5227 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
5228 }
5229 
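/*
 * Illustrative sketch, not part of the original file: how an architecture
 * fault handler typically splits accounting work with mm_account_fault().
 * The arch entry point bumps the overall PERF_COUNT_SW_PAGE_FAULTS counter
 * itself, while the MAJ/MIN split is left to handle_mm_fault() ->
 * mm_account_fault().  The function name below is hypothetical; real
 * handlers live in arch/<arch>/mm/fault.c and differ in detail.
 */
#if 0	/* example only, never compiled */
static vm_fault_t example_arch_fault(struct vm_area_struct *vma,
				     unsigned long address,
				     unsigned int flags,
				     struct pt_regs *regs)
{
	/* Overall fault count stays in the arch entry point... */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/* ...major/minor accounting is done by mm_account_fault(). */
	return handle_mm_fault(vma, address, flags, regs);
}
#endif
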
5230 #ifdef CONFIG_LRU_GEN
5231 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5232 {
5233 	/* the LRU algorithm only applies to accesses with recency */
5234 	current->in_lru_fault = vma_has_recency(vma);
5235 }
5236 
5237 static void lru_gen_exit_fault(void)
5238 {
5239 	current->in_lru_fault = false;
5240 }
5241 #else
5242 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5243 {
5244 }
5245 
5246 static void lru_gen_exit_fault(void)
5247 {
5248 }
5249 #endif /* CONFIG_LRU_GEN */
5250 
5251 /*
5252  * By the time we get here, we already hold the mm semaphore
5253  *
5254  * The mmap_lock may have been released depending on flags and our
5255  * return value.  See filemap_fault() and __lock_page_or_retry().
5256  */
5257 vm_fault_t do_handle_mm_fault(struct vm_area_struct *vma,
5258 		unsigned long address, unsigned int flags,
5259 		unsigned long seq, struct pt_regs *regs)
5260 {
5261 	vm_fault_t ret;
5262 
5263 	VM_BUG_ON((flags & FAULT_FLAG_SPECULATIVE) &&
5264 		  !vma_can_speculate(vma, flags));
5265 
5266 	__set_current_state(TASK_RUNNING);
5267 
5268 	count_vm_event(PGFAULT);
5269 	count_memcg_event_mm(vma->vm_mm, PGFAULT);
5270 
5271 	/* do counter updates before entering really critical section. */
5272 	check_sync_rss_stat(current);
5273 
5274 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5275 					    flags & FAULT_FLAG_INSTRUCTION,
5276 					    flags & FAULT_FLAG_REMOTE))
5277 		return VM_FAULT_SIGSEGV;
5278 
5279 	/*
5280 	 * Enable the memcg OOM handling for faults triggered in user
5281 	 * space.  Kernel faults are handled more gracefully.
5282 	 */
5283 	if (flags & FAULT_FLAG_USER)
5284 		mem_cgroup_enter_user_fault();
5285 
5286 	lru_gen_enter_fault(vma);
5287 
5288 	if (unlikely(is_vm_hugetlb_page(vma))) {
5289 		VM_BUG_ON(flags & FAULT_FLAG_SPECULATIVE);
5290 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5291 	} else {
5292 		ret = __handle_mm_fault(vma, address, flags, seq);
5293 	}
5294 
5295 	lru_gen_exit_fault();
5296 
5297 	if (flags & FAULT_FLAG_USER) {
5298 		mem_cgroup_exit_user_fault();
5299 		/*
5300 		 * The task may have entered a memcg OOM situation but
5301 		 * if the allocation error was handled gracefully (no
5302 		 * VM_FAULT_OOM), there is no need to kill anything.
5303 		 * Just clean up the OOM state peacefully.
5304 		 */
5305 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5306 			mem_cgroup_oom_synchronize(false);
5307 	}
5308 
5309 	mm_account_fault(regs, address, flags, ret);
5310 
5311 	return ret;
5312 }
5313 EXPORT_SYMBOL_GPL(do_handle_mm_fault);
5314 
5315 #ifndef __PAGETABLE_P4D_FOLDED
5316 /*
5317  * Allocate p4d page table.
5318  * We've already handled the fast-path in-line.
5319  */
5320 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
5321 {
5322 	p4d_t *new = p4d_alloc_one(mm, address);
5323 	if (!new)
5324 		return -ENOMEM;
5325 
5326 	smp_wmb(); /* See comment in __pte_alloc */
5327 
5328 	spin_lock(&mm->page_table_lock);
5329 	if (pgd_present(*pgd))		/* Another has populated it */
5330 		p4d_free(mm, new);
5331 	else
5332 		pgd_populate(mm, pgd, new);
5333 	spin_unlock(&mm->page_table_lock);
5334 	return 0;
5335 }
5336 #endif /* __PAGETABLE_P4D_FOLDED */
5337 
5338 #ifndef __PAGETABLE_PUD_FOLDED
5339 /*
5340  * Allocate page upper directory.
5341  * We've already handled the fast-path in-line.
5342  */
5343 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
5344 {
5345 	pud_t *new = pud_alloc_one(mm, address);
5346 	if (!new)
5347 		return -ENOMEM;
5348 
5349 	smp_wmb(); /* See comment in __pte_alloc */
5350 
5351 	spin_lock(&mm->page_table_lock);
5352 	if (!p4d_present(*p4d)) {
5353 		mm_inc_nr_puds(mm);
5354 		p4d_populate(mm, p4d, new);
5355 	} else	/* Another has populated it */
5356 		pud_free(mm, new);
5357 	spin_unlock(&mm->page_table_lock);
5358 	return 0;
5359 }
5360 #endif /* __PAGETABLE_PUD_FOLDED */
5361 
5362 #ifndef __PAGETABLE_PMD_FOLDED
5363 /*
5364  * Allocate page middle directory.
5365  * We've already handled the fast-path in-line.
5366  */
5367 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
5368 {
5369 	spinlock_t *ptl;
5370 	pmd_t *new = pmd_alloc_one(mm, address);
5371 	if (!new)
5372 		return -ENOMEM;
5373 
5374 	smp_wmb(); /* See comment in __pte_alloc */
5375 
5376 	ptl = pud_lock(mm, pud);
5377 	if (!pud_present(*pud)) {
5378 		mm_inc_nr_pmds(mm);
5379 		pud_populate(mm, pud, new);
5380 	} else	/* Another has populated it */
5381 		pmd_free(mm, new);
5382 	spin_unlock(ptl);
5383 	return 0;
5384 }
5385 #endif /* __PAGETABLE_PMD_FOLDED */
5386 
5387 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
5388 			  struct mmu_notifier_range *range, pte_t **ptepp,
5389 			  pmd_t **pmdpp, spinlock_t **ptlp)
5390 {
5391 	pgd_t *pgd;
5392 	p4d_t *p4d;
5393 	pud_t *pud;
5394 	pmd_t *pmd;
5395 	pte_t *ptep;
5396 
5397 	pgd = pgd_offset(mm, address);
5398 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
5399 		goto out;
5400 
5401 	p4d = p4d_offset(pgd, address);
5402 	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
5403 		goto out;
5404 
5405 	pud = pud_offset(p4d, address);
5406 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
5407 		goto out;
5408 
5409 	pmd = pmd_offset(pud, address);
5410 	VM_BUG_ON(pmd_trans_huge(*pmd));
5411 
5412 	if (pmd_huge(*pmd)) {
5413 		if (!pmdpp)
5414 			goto out;
5415 
5416 		if (range) {
5417 			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
5418 						NULL, mm, address & PMD_MASK,
5419 						(address & PMD_MASK) + PMD_SIZE);
5420 			mmu_notifier_invalidate_range_start(range);
5421 		}
5422 		*ptlp = pmd_lock(mm, pmd);
5423 		if (pmd_huge(*pmd)) {
5424 			*pmdpp = pmd;
5425 			return 0;
5426 		}
5427 		spin_unlock(*ptlp);
5428 		if (range)
5429 			mmu_notifier_invalidate_range_end(range);
5430 	}
5431 
5432 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
5433 		goto out;
5434 
5435 	if (range) {
5436 		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
5437 					address & PAGE_MASK,
5438 					(address & PAGE_MASK) + PAGE_SIZE);
5439 		mmu_notifier_invalidate_range_start(range);
5440 	}
5441 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
5442 	if (!pte_present(*ptep))
5443 		goto unlock;
5444 	*ptepp = ptep;
5445 	return 0;
5446 unlock:
5447 	pte_unmap_unlock(ptep, *ptlp);
5448 	if (range)
5449 		mmu_notifier_invalidate_range_end(range);
5450 out:
5451 	return -EINVAL;
5452 }
5453 
5454 /**
5455  * follow_pte - look up PTE at a user virtual address
5456  * @mm: the mm_struct of the target address space
5457  * @address: user virtual address
5458  * @ptepp: location to store found PTE
5459  * @ptlp: location to store the lock for the PTE
5460  *
5461  * On a successful return, the pointer to the PTE is stored in @ptepp;
5462  * the corresponding lock is taken and its location is stored in @ptlp.
5463  * The contents of the PTE are only stable until @ptlp is released;
5464  * any further use, if any, must be protected against invalidation
5465  * with MMU notifiers.
5466  *
5467  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
5468  * should be taken for read.
5469  *
5470  * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
5471  * it is not a good general-purpose API.
5472  *
5473  * Return: zero on success, -ve otherwise.
5474  */
5475 int follow_pte(struct mm_struct *mm, unsigned long address,
5476 	       pte_t **ptepp, spinlock_t **ptlp)
5477 {
5478 	return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
5479 }
5480 EXPORT_SYMBOL_GPL(follow_pte);
5481 
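/*
 * Illustrative sketch, not part of the original file: the expected calling
 * pattern for follow_pte().  The caller holds mmap_lock for read, only
 * dereferences the returned PTE while @ptl is held, and drops both locks
 * before doing anything that would need MMU-notifier protection.  The
 * helper name is hypothetical; follow_pfn() below is the in-tree
 * equivalent when only the PFN is wanted.
 */
#if 0	/* example only, never compiled */
static int example_lookup_pfn(struct vm_area_struct *vma, unsigned long addr,
			      unsigned long *pfn)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	mmap_read_lock(vma->vm_mm);
	ret = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
	if (!ret) {
		*pfn = pte_pfn(*ptep);		/* stable only under ptl */
		pte_unmap_unlock(ptep, ptl);
	}
	mmap_read_unlock(vma->vm_mm);
	return ret;
}
#endif
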
5482 /**
5483  * follow_pfn - look up PFN at a user virtual address
5484  * @vma: memory mapping
5485  * @address: user virtual address
5486  * @pfn: location to store found PFN
5487  *
5488  * Only IO mappings and raw PFN mappings are allowed.
5489  *
5490  * This function does not allow the caller to read the permissions
5491  * of the PTE.  Do not use it.
5492  *
5493  * Return: zero and the pfn at @pfn on success, -ve otherwise.
5494  */
5495 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5496 	unsigned long *pfn)
5497 {
5498 	int ret = -EINVAL;
5499 	spinlock_t *ptl;
5500 	pte_t *ptep;
5501 
5502 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5503 		return ret;
5504 
5505 	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5506 	if (ret)
5507 		return ret;
5508 	*pfn = pte_pfn(*ptep);
5509 	pte_unmap_unlock(ptep, ptl);
5510 	return 0;
5511 }
5512 EXPORT_SYMBOL(follow_pfn);
5513 
5514 #ifdef CONFIG_HAVE_IOREMAP_PROT
5515 int follow_phys(struct vm_area_struct *vma,
5516 		unsigned long address, unsigned int flags,
5517 		unsigned long *prot, resource_size_t *phys)
5518 {
5519 	int ret = -EINVAL;
5520 	pte_t *ptep, pte;
5521 	spinlock_t *ptl;
5522 
5523 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5524 		goto out;
5525 
5526 	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
5527 		goto out;
5528 	pte = *ptep;
5529 
5530 	if ((flags & FOLL_WRITE) && !pte_write(pte))
5531 		goto unlock;
5532 
5533 	*prot = pgprot_val(pte_pgprot(pte));
5534 	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5535 
5536 	ret = 0;
5537 unlock:
5538 	pte_unmap_unlock(ptep, ptl);
5539 out:
5540 	return ret;
5541 }
5542 
5543 /**
5544  * generic_access_phys - generic implementation for iomem mmap access
5545  * @vma: the vma to access
5546  * @addr: userspace address, not relative offset within @vma
5547  * @buf: buffer to read/write
5548  * @len: length of transfer
5549  * @write: set to FOLL_WRITE when writing, otherwise reading
5550  *
5551  * This is a generic implementation for &vm_operations_struct.access for an
5552  * iomem mapping. This callback is used by access_process_vm() when the @vma is
5553  * not page based.
5554  */
5555 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5556 			void *buf, int len, int write)
5557 {
5558 	resource_size_t phys_addr;
5559 	unsigned long prot = 0;
5560 	void __iomem *maddr;
5561 	pte_t *ptep, pte;
5562 	spinlock_t *ptl;
5563 	int offset = offset_in_page(addr);
5564 	int ret = -EINVAL;
5565 
5566 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5567 		return -EINVAL;
5568 
5569 retry:
5570 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5571 		return -EINVAL;
5572 	pte = *ptep;
5573 	pte_unmap_unlock(ptep, ptl);
5574 
5575 	prot = pgprot_val(pte_pgprot(pte));
5576 	phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5577 
5578 	if ((write & FOLL_WRITE) && !pte_write(pte))
5579 		return -EINVAL;
5580 
5581 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5582 	if (!maddr)
5583 		return -ENOMEM;
5584 
5585 	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5586 		goto out_unmap;
5587 
5588 	if (!pte_same(pte, *ptep)) {
5589 		pte_unmap_unlock(ptep, ptl);
5590 		iounmap(maddr);
5591 
5592 		goto retry;
5593 	}
5594 
5595 	if (write)
5596 		memcpy_toio(maddr + offset, buf, len);
5597 	else
5598 		memcpy_fromio(buf, maddr + offset, len);
5599 	ret = len;
5600 	pte_unmap_unlock(ptep, ptl);
5601 out_unmap:
5602 	iounmap(maddr);
5603 
5604 	return ret;
5605 }
5606 EXPORT_SYMBOL_GPL(generic_access_phys);
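
/*
 * Illustrative sketch, not part of the original file: a driver wiring
 * generic_access_phys() into its vm_operations_struct so that
 * access_process_vm() (and therefore ptrace/gdb) can read and write a
 * VM_IO / VM_PFNMAP mapping.  The structure and mmap helper names are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static const struct vm_operations_struct example_phys_vm_ops = {
	.access = generic_access_phys,
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &example_phys_vm_ops;
	/* io_remap_pfn_range() marks the VMA VM_IO | VM_PFNMAP. */
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
#endif
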
5607 #endif
5608 
5609 /*
5610  * Access another process' address space as given in mm.
5611  */
5612 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
5613 		       int len, unsigned int gup_flags)
5614 {
5615 	struct vm_area_struct *vma;
5616 	void *old_buf = buf;
5617 	int write = gup_flags & FOLL_WRITE;
5618 
5619 	if (mmap_read_lock_killable(mm))
5620 		return 0;
5621 
5622 	/* ignore errors, just check how much was successfully transferred */
5623 	while (len) {
5624 		int bytes, ret, offset;
5625 		void *maddr;
5626 		struct page *page = NULL;
5627 
5628 		ret = get_user_pages_remote(mm, addr, 1,
5629 				gup_flags, &page, &vma, NULL);
5630 		if (ret <= 0) {
5631 #ifndef CONFIG_HAVE_IOREMAP_PROT
5632 			break;
5633 #else
5634 			/*
5635 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5636 			 * we can access using slightly different code.
5637 			 */
5638 			vma = vma_lookup(mm, addr);
5639 			if (!vma)
5640 				break;
5641 			if (vma->vm_ops && vma->vm_ops->access)
5642 				ret = vma->vm_ops->access(vma, addr, buf,
5643 							  len, write);
5644 			if (ret <= 0)
5645 				break;
5646 			bytes = ret;
5647 #endif
5648 		} else {
5649 			bytes = len;
5650 			offset = addr & (PAGE_SIZE-1);
5651 			if (bytes > PAGE_SIZE-offset)
5652 				bytes = PAGE_SIZE-offset;
5653 
5654 			maddr = kmap(page);
5655 			if (write) {
5656 				copy_to_user_page(vma, page, addr,
5657 						  maddr + offset, buf, bytes);
5658 				set_page_dirty_lock(page);
5659 			} else {
5660 				copy_from_user_page(vma, page, addr,
5661 						    buf, maddr + offset, bytes);
5662 			}
5663 			kunmap(page);
5664 			put_page(page);
5665 		}
5666 		len -= bytes;
5667 		buf += bytes;
5668 		addr += bytes;
5669 	}
5670 	mmap_read_unlock(mm);
5671 
5672 	return buf - old_buf;
5673 }
5674 
5675 /**
5676  * access_remote_vm - access another process' address space
5677  * @mm:		the mm_struct of the target address space
5678  * @addr:	start address to access
5679  * @buf:	source or destination buffer
5680  * @len:	number of bytes to transfer
5681  * @gup_flags:	flags modifying lookup behaviour
5682  *
5683  * The caller must hold a reference on @mm.
5684  *
5685  * Return: number of bytes copied from source to destination.
5686  */
5687 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
5688 		void *buf, int len, unsigned int gup_flags)
5689 {
5690 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
5691 }
5692 
5693 /*
5694  * Access another process' address space.
5695  * Source/target buffer must be in kernel space.
5696  * Do not walk the page tables directly; use get_user_pages().
5697  */
5698 int access_process_vm(struct task_struct *tsk, unsigned long addr,
5699 		void *buf, int len, unsigned int gup_flags)
5700 {
5701 	struct mm_struct *mm;
5702 	int ret;
5703 
5704 	mm = get_task_mm(tsk);
5705 	if (!mm)
5706 		return 0;
5707 
5708 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
5709 
5710 	mmput(mm);
5711 
5712 	return ret;
5713 }
5714 EXPORT_SYMBOL_GPL(access_process_vm);
5715 
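/*
 * Illustrative sketch, not part of the original file: the ptrace-style use
 * of access_process_vm() to peek at a single word of another task's
 * memory.  FOLL_FORCE lets the access bypass missing read permission the
 * same way PTRACE_PEEKDATA does.  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_peek_word(struct task_struct *child, unsigned long addr,
			     unsigned long *val)
{
	int copied = access_process_vm(child, addr, val, sizeof(*val),
				       FOLL_FORCE);

	return copied == sizeof(*val) ? 0 : -EIO;
}
#endif
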
5716 /*
5717  * Print the name of a VMA.
5718  */
5719 void print_vma_addr(char *prefix, unsigned long ip)
5720 {
5721 	struct mm_struct *mm = current->mm;
5722 	struct vm_area_struct *vma;
5723 
5724 	/*
5725 	 * We might be running from an atomic context, so we cannot sleep.
5726 	 */
5727 	if (!mmap_read_trylock(mm))
5728 		return;
5729 
5730 	vma = find_vma(mm, ip);
5731 	if (vma && vma->vm_file) {
5732 		struct file *f = vma->vm_file;
5733 		char *buf = (char *)__get_free_page(GFP_NOWAIT);
5734 		if (buf) {
5735 			char *p;
5736 
5737 			p = file_path(f, buf, PAGE_SIZE);
5738 			if (IS_ERR(p))
5739 				p = "?";
5740 			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5741 					vma->vm_start,
5742 					vma->vm_end - vma->vm_start);
5743 			free_page((unsigned long)buf);
5744 		}
5745 	}
5746 	mmap_read_unlock(mm);
5747 }
5748 
5749 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5750 void __might_fault(const char *file, int line)
5751 {
5752 	/*
5753 	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
5754 	 * holding the mmap_lock, this is safe because kernel memory doesn't
5755 	 * get paged out, therefore we'll never actually fault, and the
5756 	 * below annotations will generate false positives.
5757 	 */
5758 	if (uaccess_kernel())
5759 		return;
5760 	if (pagefault_disabled())
5761 		return;
5762 	__might_sleep(file, line, 0);
5763 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5764 	if (current->mm)
5765 		might_lock_read(&current->mm->mmap_lock);
5766 #endif
5767 }
5768 EXPORT_SYMBOL(__might_fault);
5769 #endif
5770 
5771 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5772 /*
5773  * Process all subpages of the specified huge page with the specified
5774  * operation.  The target subpage will be processed last to keep its
5775  * cache lines hot.
5776  */
5777 static inline void process_huge_page(
5778 	unsigned long addr_hint, unsigned int pages_per_huge_page,
5779 	void (*process_subpage)(unsigned long addr, int idx, void *arg),
5780 	void *arg)
5781 {
5782 	int i, n, base, l;
5783 	unsigned long addr = addr_hint &
5784 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5785 
5786 	/* Process target subpage last to keep its cache lines hot */
5787 	might_sleep();
5788 	n = (addr_hint - addr) / PAGE_SIZE;
5789 	if (2 * n <= pages_per_huge_page) {
5790 		/* If target subpage in first half of huge page */
5791 		base = 0;
5792 		l = n;
5793 		/* Process subpages at the end of huge page */
5794 		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5795 			cond_resched();
5796 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5797 		}
5798 	} else {
5799 		/* If target subpage in second half of huge page */
5800 		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5801 		l = pages_per_huge_page - n;
5802 		/* Process subpages at the beginning of the huge page */
5803 		for (i = 0; i < base; i++) {
5804 			cond_resched();
5805 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5806 		}
5807 	}
5808 	/*
5809 	 * Process remaining subpages in left-right-left-right pattern
5810 	 * towards the target subpage
5811 	 */
5812 	for (i = 0; i < l; i++) {
5813 		int left_idx = base + i;
5814 		int right_idx = base + 2 * l - 1 - i;
5815 
5816 		cond_resched();
5817 		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5818 		cond_resched();
5819 		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5820 	}
5821 }
5822 
5823 static void clear_gigantic_page(struct page *page,
5824 				unsigned long addr,
5825 				unsigned int pages_per_huge_page)
5826 {
5827 	int i;
5828 	struct page *p = page;
5829 
5830 	might_sleep();
5831 	for (i = 0; i < pages_per_huge_page;
5832 	     i++, p = mem_map_next(p, page, i)) {
5833 		cond_resched();
5834 		clear_user_highpage(p, addr + i * PAGE_SIZE);
5835 	}
5836 }
5837 
5838 static void clear_subpage(unsigned long addr, int idx, void *arg)
5839 {
5840 	struct page *page = arg;
5841 
5842 	clear_user_highpage(page + idx, addr);
5843 }
5844 
5845 void clear_huge_page(struct page *page,
5846 		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5847 {
5848 	unsigned long addr = addr_hint &
5849 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5850 
5851 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5852 		clear_gigantic_page(page, addr, pages_per_huge_page);
5853 		return;
5854 	}
5855 
5856 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5857 }
5858 
5859 static void copy_user_gigantic_page(struct page *dst, struct page *src,
5860 				    unsigned long addr,
5861 				    struct vm_area_struct *vma,
5862 				    unsigned int pages_per_huge_page)
5863 {
5864 	int i;
5865 	struct page *dst_base = dst;
5866 	struct page *src_base = src;
5867 
5868 	for (i = 0; i < pages_per_huge_page; ) {
5869 		cond_resched();
5870 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5871 
5872 		i++;
5873 		dst = mem_map_next(dst, dst_base, i);
5874 		src = mem_map_next(src, src_base, i);
5875 	}
5876 }
5877 
5878 struct copy_subpage_arg {
5879 	struct page *dst;
5880 	struct page *src;
5881 	struct vm_area_struct *vma;
5882 };
5883 
5884 static void copy_subpage(unsigned long addr, int idx, void *arg)
5885 {
5886 	struct copy_subpage_arg *copy_arg = arg;
5887 
5888 	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5889 			   addr, copy_arg->vma);
5890 }
5891 
5892 void copy_user_huge_page(struct page *dst, struct page *src,
5893 			 unsigned long addr_hint, struct vm_area_struct *vma,
5894 			 unsigned int pages_per_huge_page)
5895 {
5896 	unsigned long addr = addr_hint &
5897 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5898 	struct copy_subpage_arg arg = {
5899 		.dst = dst,
5900 		.src = src,
5901 		.vma = vma,
5902 	};
5903 
5904 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5905 		copy_user_gigantic_page(dst, src, addr, vma,
5906 					pages_per_huge_page);
5907 		return;
5908 	}
5909 
5910 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5911 }
5912 
5913 long copy_huge_page_from_user(struct page *dst_page,
5914 				const void __user *usr_src,
5915 				unsigned int pages_per_huge_page,
5916 				bool allow_pagefault)
5917 {
5918 	void *src = (void *)usr_src;
5919 	void *page_kaddr;
5920 	unsigned long i, rc = 0;
5921 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5922 	struct page *subpage = dst_page;
5923 
5924 	for (i = 0; i < pages_per_huge_page;
5925 	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
5926 		if (allow_pagefault)
5927 			page_kaddr = kmap(subpage);
5928 		else
5929 			page_kaddr = kmap_atomic(subpage);
5930 		rc = copy_from_user(page_kaddr,
5931 				(const void __user *)(src + i * PAGE_SIZE),
5932 				PAGE_SIZE);
5933 		if (allow_pagefault)
5934 			kunmap(subpage);
5935 		else
5936 			kunmap_atomic(page_kaddr);
5937 
5938 		ret_val -= (PAGE_SIZE - rc);
5939 		if (rc)
5940 			break;
5941 
5942 		flush_dcache_page(subpage);
5943 
5944 		cond_resched();
5945 	}
5946 	return ret_val;
5947 }
5948 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5949 
5950 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5951 
5952 static struct kmem_cache *page_ptl_cachep;
5953 
5954 void __init ptlock_cache_init(void)
5955 {
5956 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5957 			SLAB_PANIC, NULL);
5958 }
5959 
5960 bool ptlock_alloc(struct page *page)
5961 {
5962 	spinlock_t *ptl;
5963 
5964 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5965 	if (!ptl)
5966 		return false;
5967 	page->ptl = ptl;
5968 	return true;
5969 }
5970 
5971 void ptlock_free(struct page *page)
5972 {
5973 	kmem_cache_free(page_ptl_cachep, page->ptl);
5974 }
5975 #endif
5976