1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MM_H
3 #define _LINUX_MM_H
4 
5 #include <linux/errno.h>
6 #include <linux/mmdebug.h>
7 #include <linux/gfp.h>
8 #include <linux/pgalloc_tag.h>
9 #include <linux/bug.h>
10 #include <linux/list.h>
11 #include <linux/mmzone.h>
12 #include <linux/rbtree.h>
13 #include <linux/atomic.h>
14 #include <linux/debug_locks.h>
15 #include <linux/mm_types.h>
16 #include <linux/mmap_lock.h>
17 #include <linux/range.h>
18 #include <linux/pfn.h>
19 #include <linux/percpu-refcount.h>
20 #include <linux/bit_spinlock.h>
21 #include <linux/shrinker.h>
22 #include <linux/resource.h>
23 #include <linux/page_ext.h>
24 #include <linux/err.h>
25 #include <linux/page-flags.h>
26 #include <linux/page_ref.h>
27 #include <linux/overflow.h>
28 #include <linux/sizes.h>
29 #include <linux/sched.h>
30 #include <linux/pgtable.h>
31 #include <linux/pgsize_migration_inline.h>
32 #include <linux/kasan.h>
33 #include <linux/page_pinner.h>
34 #include <linux/memremap.h>
35 #include <linux/slab.h>
36 #include <linux/rcuwait.h>
37 #include <linux/android_kabi.h>
38 
39 struct mempolicy;
40 struct anon_vma;
41 struct anon_vma_chain;
42 struct user_struct;
43 struct pt_regs;
44 struct folio_batch;
45 
46 extern int sysctl_page_lock_unfairness;
47 
48 void mm_core_init(void);
49 void init_mm_internals(void);
50 
51 #ifndef CONFIG_NUMA		/* Don't use mapnrs, do it properly */
52 extern unsigned long max_mapnr;
53 
set_max_mapnr(unsigned long limit)54 static inline void set_max_mapnr(unsigned long limit)
55 {
56 	max_mapnr = limit;
57 }
58 #else
set_max_mapnr(unsigned long limit)59 static inline void set_max_mapnr(unsigned long limit) { }
60 #endif
61 
62 extern atomic_long_t _totalram_pages;
totalram_pages(void)63 static inline unsigned long totalram_pages(void)
64 {
65 	return (unsigned long)atomic_long_read(&_totalram_pages);
66 }
67 
totalram_pages_inc(void)68 static inline void totalram_pages_inc(void)
69 {
70 	atomic_long_inc(&_totalram_pages);
71 }
72 
totalram_pages_dec(void)73 static inline void totalram_pages_dec(void)
74 {
75 	atomic_long_dec(&_totalram_pages);
76 }
77 
totalram_pages_add(long count)78 static inline void totalram_pages_add(long count)
79 {
80 	atomic_long_add(count, &_totalram_pages);
81 }
82 
83 extern void * high_memory;
84 extern int page_cluster;
85 extern const int page_cluster_max;
86 
87 #ifdef CONFIG_SYSCTL
88 extern int sysctl_legacy_va_layout;
89 #else
90 #define sysctl_legacy_va_layout 0
91 #endif
92 
93 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
94 extern int mmap_rnd_bits_min __ro_after_init;
95 extern int mmap_rnd_bits_max __ro_after_init;
96 extern int mmap_rnd_bits __read_mostly;
97 #endif
98 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
99 extern int mmap_rnd_compat_bits_min __ro_after_init;
100 extern int mmap_rnd_compat_bits_max __ro_after_init;
101 extern int mmap_rnd_compat_bits __read_mostly;
102 #endif
103 
104 #ifndef PHYSMEM_END
105 # ifdef MAX_PHYSMEM_BITS
106 # define PHYSMEM_END	((1ULL << MAX_PHYSMEM_BITS) - 1)
107 # else
108 # define PHYSMEM_END	(((phys_addr_t)-1)&~(1ULL<<63))
109 # endif
110 #endif
111 
112 #include <asm/page.h>
113 #include <asm/processor.h>
114 
115 #ifndef __pa_symbol
116 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
117 #endif
118 
119 #ifndef page_to_virt
120 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
121 #endif
122 
123 #ifndef lm_alias
124 #define lm_alias(x)	__va(__pa_symbol(x))
125 #endif
126 
127 /*
128  * To prevent common memory management code establishing
129  * a zero page mapping on a read fault.
130  * This macro should be defined within <asm/pgtable.h>.
131  * s390 does this to prevent multiplexing of hardware bits
132  * related to the physical page in case of virtualization.
133  */
134 #ifndef mm_forbids_zeropage
135 #define mm_forbids_zeropage(X)	(0)
136 #endif
137 
138 /*
139  * On some architectures it is expensive to call memset() for small sizes.
140  * If an architecture decides to implement their own version of
141  * mm_zero_struct_page they should wrap the defines below in a #ifndef and
142  * define their own version of this macro in <asm/pgtable.h>
143  */
144 #if BITS_PER_LONG == 64
145 /* This function must be updated when the size of struct page grows above 96
146  * or reduces below 56. The idea that compiler optimizes out switch()
147  * statement, and only leaves move/store instructions. Also the compiler can
148  * combine write statements if they are both assignments and can be reordered,
149  * this can result in several of the writes here being dropped.
150  */
151 #define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
__mm_zero_struct_page(struct page * page)152 static inline void __mm_zero_struct_page(struct page *page)
153 {
154 	unsigned long *_pp = (void *)page;
155 
156 	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
157 	BUILD_BUG_ON(sizeof(struct page) & 7);
158 	BUILD_BUG_ON(sizeof(struct page) < 56);
159 	BUILD_BUG_ON(sizeof(struct page) > 96);
160 
161 	switch (sizeof(struct page)) {
162 	case 96:
163 		_pp[11] = 0;
164 		fallthrough;
165 	case 88:
166 		_pp[10] = 0;
167 		fallthrough;
168 	case 80:
169 		_pp[9] = 0;
170 		fallthrough;
171 	case 72:
172 		_pp[8] = 0;
173 		fallthrough;
174 	case 64:
175 		_pp[7] = 0;
176 		fallthrough;
177 	case 56:
178 		_pp[6] = 0;
179 		_pp[5] = 0;
180 		_pp[4] = 0;
181 		_pp[3] = 0;
182 		_pp[2] = 0;
183 		_pp[1] = 0;
184 		_pp[0] = 0;
185 	}
186 }
187 #else
188 #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
189 #endif
190 
191 /*
192  * Default maximum number of active map areas, this limits the number of vmas
193  * per mm struct. Users can overwrite this number by sysctl but there is a
194  * problem.
195  *
196  * When a program's coredump is generated as ELF format, a section is created
197  * per a vma. In ELF, the number of sections is represented in unsigned short.
198  * This means the number of sections should be smaller than 65535 at coredump.
199  * Because the kernel adds some informative sections to a image of program at
200  * generating coredump, we need some margin. The number of extra sections is
201  * 1-3 now and depends on arch. We use "5" as safe margin, here.
202  *
203  * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
204  * not a hard limit any more. Although some userspace tools can be surprised by
205  * that.
206  */
207 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
208 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
209 
210 extern int sysctl_max_map_count;
211 
212 extern unsigned long sysctl_user_reserve_kbytes;
213 extern unsigned long sysctl_admin_reserve_kbytes;
214 
215 extern int sysctl_overcommit_memory;
216 extern int sysctl_overcommit_ratio;
217 extern unsigned long sysctl_overcommit_kbytes;
218 
219 int overcommit_ratio_handler(const struct ctl_table *, int, void *, size_t *,
220 		loff_t *);
221 int overcommit_kbytes_handler(const struct ctl_table *, int, void *, size_t *,
222 		loff_t *);
223 int overcommit_policy_handler(const struct ctl_table *, int, void *, size_t *,
224 		loff_t *);
225 
226 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
227 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
228 #define folio_page_idx(folio, p)	(page_to_pfn(p) - folio_pfn(folio))
229 #else
230 #define nth_page(page,n) ((page) + (n))
231 #define folio_page_idx(folio, p)	((p) - &(folio)->page)
232 #endif
233 
234 /* to align the pointer to the (next) page boundary */
235 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
236 
237 /* to align the pointer to the (prev) page boundary */
238 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
239 
240 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
241 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
242 
lru_to_folio(struct list_head * head)243 static inline struct folio *lru_to_folio(struct list_head *head)
244 {
245 	return list_entry((head)->prev, struct folio, lru);
246 }
247 
248 void setup_initial_init_mm(void *start_code, void *end_code,
249 			   void *end_data, void *brk);
250 
251 /*
252  * Linux kernel virtual memory manager primitives.
253  * The idea being to have a "virtual" mm in the same way
254  * we have a virtual fs - giving a cleaner interface to the
255  * mm details, and allowing different kinds of memory mappings
256  * (from shared memory to executable loading to arbitrary
257  * mmap() functions).
258  */
259 
260 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
261 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
262 void vm_area_free(struct vm_area_struct *);
263 
264 #ifndef CONFIG_MMU
265 extern struct rb_root nommu_region_tree;
266 extern struct rw_semaphore nommu_region_sem;
267 
268 extern unsigned int kobjsize(const void *objp);
269 #endif
270 
271 /*
272  * vm_flags in vm_area_struct, see mm_types.h.
273  * When changing, update also include/trace/events/mmflags.h
274  */
275 #define VM_NONE		0x00000000
276 
277 #define VM_READ		0x00000001	/* currently active flags */
278 #define VM_WRITE	0x00000002
279 #define VM_EXEC		0x00000004
280 #define VM_SHARED	0x00000008
281 
282 /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
283 #define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
284 #define VM_MAYWRITE	0x00000020
285 #define VM_MAYEXEC	0x00000040
286 #define VM_MAYSHARE	0x00000080
287 
288 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
289 #ifdef CONFIG_MMU
290 #define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
291 #else /* CONFIG_MMU */
292 #define VM_MAYOVERLAY	0x00000200	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
293 #define VM_UFFD_MISSING	0
294 #endif /* CONFIG_MMU */
295 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
296 #define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */
297 
298 #define VM_LOCKED	0x00002000
299 #define VM_IO           0x00004000	/* Memory mapped I/O or similar */
300 
301 					/* Used by sys_madvise() */
302 #define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
303 #define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
304 
305 #define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
306 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
307 #define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
308 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
309 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
310 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
311 #define VM_SYNC		0x00800000	/* Synchronous page faults */
312 #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
313 #define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
314 #define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
315 
316 #ifdef CONFIG_MEM_SOFT_DIRTY
317 # define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
318 #else
319 # define VM_SOFTDIRTY	0
320 #endif
321 
322 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
323 #define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
324 #define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
325 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
326 
327 #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
328 #define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
329 #define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
330 #define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
331 #define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
332 #define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
333 #define VM_HIGH_ARCH_BIT_5	37	/* bit only usable on 64-bit architectures */
334 #define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
335 #define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
336 #define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
337 #define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
338 #define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
339 #define VM_HIGH_ARCH_5	BIT(VM_HIGH_ARCH_BIT_5)
340 #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
341 
342 #ifdef CONFIG_ARCH_HAS_PKEYS
343 # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
344 # define VM_PKEY_BIT0  VM_HIGH_ARCH_0
345 # define VM_PKEY_BIT1  VM_HIGH_ARCH_1
346 # define VM_PKEY_BIT2  VM_HIGH_ARCH_2
347 #if CONFIG_ARCH_PKEY_BITS > 3
348 # define VM_PKEY_BIT3  VM_HIGH_ARCH_3
349 #else
350 # define VM_PKEY_BIT3  0
351 #endif
352 #if CONFIG_ARCH_PKEY_BITS > 4
353 # define VM_PKEY_BIT4  VM_HIGH_ARCH_4
354 #else
355 # define VM_PKEY_BIT4  0
356 #endif
357 #endif /* CONFIG_ARCH_HAS_PKEYS */
358 
359 #ifdef CONFIG_X86_USER_SHADOW_STACK
360 /*
361  * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
362  * support core mm.
363  *
364  * These VMAs will get a single end guard page. This helps userspace protect
365  * itself from attacks. A single page is enough for current shadow stack archs
366  * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
367  * for more details on the guard size.
368  */
369 # define VM_SHADOW_STACK	VM_HIGH_ARCH_5
370 #else
371 # define VM_SHADOW_STACK	VM_NONE
372 #endif
373 
374 #if defined(CONFIG_X86)
375 # define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
376 #elif defined(CONFIG_PPC64)
377 # define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
378 #elif defined(CONFIG_PARISC)
379 # define VM_GROWSUP	VM_ARCH_1
380 #elif defined(CONFIG_SPARC64)
381 # define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
382 # define VM_ARCH_CLEAR	VM_SPARC_ADI
383 #elif defined(CONFIG_ARM64)
384 # define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
385 # define VM_ARCH_CLEAR	VM_ARM64_BTI
386 #elif !defined(CONFIG_MMU)
387 # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
388 #endif
389 
390 #if defined(CONFIG_ARM64_MTE)
391 # define VM_MTE		VM_HIGH_ARCH_4	/* Use Tagged memory for access control */
392 # define VM_MTE_ALLOWED	VM_HIGH_ARCH_5	/* Tagged memory permitted */
393 #else
394 # define VM_MTE		VM_NONE
395 # define VM_MTE_ALLOWED	VM_NONE
396 #endif
397 
398 #ifndef VM_GROWSUP
399 # define VM_GROWSUP	VM_NONE
400 #endif
401 
402 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
403 # define VM_UFFD_MINOR_BIT	38
404 # define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
405 #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
406 # define VM_UFFD_MINOR		VM_NONE
407 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
408 
409 /*
410  * This flag is used to connect VFIO to arch specific KVM code. It
411  * indicates that the memory under this VMA is safe for use with any
412  * non-cachable memory type inside KVM. Some VFIO devices, on some
413  * platforms, are thought to be unsafe and can cause machine crashes
414  * if KVM does not lock down the memory type.
415  */
416 #ifdef CONFIG_64BIT
417 #define VM_ALLOW_ANY_UNCACHED_BIT	39
418 #define VM_ALLOW_ANY_UNCACHED		BIT(VM_ALLOW_ANY_UNCACHED_BIT)
419 #else
420 #define VM_ALLOW_ANY_UNCACHED		VM_NONE
421 #endif
422 
423 #ifdef CONFIG_64BIT
424 #define VM_DROPPABLE_BIT	40
425 #define VM_DROPPABLE		BIT(VM_DROPPABLE_BIT)
426 #elif defined(CONFIG_PPC32)
427 #define VM_DROPPABLE		VM_ARCH_1
428 #else
429 #define VM_DROPPABLE		VM_NONE
430 #endif
431 
432 #ifdef CONFIG_64BIT
433 /* VM is sealed, in vm_flags */
434 #define VM_SEALED	_BITUL(63)
435 #endif
436 
437 /* Bits set in the VMA until the stack is in its final location */
438 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
439 
440 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
441 
442 /* Common data flag combinations */
443 #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
444 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
445 #define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
446 				 VM_MAYWRITE | VM_MAYEXEC)
447 #define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
448 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
449 
450 #ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
451 #define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
452 #endif
453 
454 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
455 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
456 #endif
457 
458 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
459 
460 #ifdef CONFIG_STACK_GROWSUP
461 #define VM_STACK	VM_GROWSUP
462 #define VM_STACK_EARLY	VM_GROWSDOWN
463 #else
464 #define VM_STACK	VM_GROWSDOWN
465 #define VM_STACK_EARLY	0
466 #endif
467 
468 #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
469 
470 /* VMA basic access permission flags */
471 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
472 
473 
474 /*
475  * Special vmas that are non-mergable, non-mlock()able.
476  */
477 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
478 
479 /* This mask prevents VMA from being scanned with khugepaged */
480 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
481 
482 /* This mask defines which mm->def_flags a process can inherit its parent */
483 #define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
484 
485 /* This mask represents all the VMA flag bits used by mlock */
486 #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
487 
488 /* Arch-specific flags to clear when updating VM flags on protection change */
489 #ifndef VM_ARCH_CLEAR
490 # define VM_ARCH_CLEAR	VM_NONE
491 #endif
492 #define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
493 
494 /*
495  * mapping from the currently active vm_flags protection bits (the
496  * low four bits) to a page protection mask..
497  */
498 
499 /*
500  * The default fault flags that should be used by most of the
501  * arch-specific page fault handlers.
502  */
503 #define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
504 			     FAULT_FLAG_KILLABLE | \
505 			     FAULT_FLAG_INTERRUPTIBLE)
506 
507 /**
508  * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
509  * @flags: Fault flags.
510  *
511  * This is mostly used for places where we want to try to avoid taking
512  * the mmap_lock for too long a time when waiting for another condition
513  * to change, in which case we can try to be polite to release the
514  * mmap_lock in the first round to avoid potential starvation of other
515  * processes that would also want the mmap_lock.
516  *
517  * Return: true if the page fault allows retry and this is the first
518  * attempt of the fault handling; false otherwise.
519  */
fault_flag_allow_retry_first(enum fault_flag flags)520 static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
521 {
522 	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
523 	    (!(flags & FAULT_FLAG_TRIED));
524 }
525 
526 #define FAULT_FLAG_TRACE \
527 	{ FAULT_FLAG_WRITE,		"WRITE" }, \
528 	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
529 	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
530 	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
531 	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
532 	{ FAULT_FLAG_TRIED,		"TRIED" }, \
533 	{ FAULT_FLAG_USER,		"USER" }, \
534 	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
535 	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
536 	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }, \
537 	{ FAULT_FLAG_VMA_LOCK,		"VMA_LOCK" }
538 
539 /*
540  * vm_fault is filled by the pagefault handler and passed to the vma's
541  * ->fault function. The vma's ->fault is responsible for returning a bitmask
542  * of VM_FAULT_xxx flags that give details about how the fault was handled.
543  *
544  * MM layer fills up gfp_mask for page allocations but fault handler might
545  * alter it if its implementation requires a different allocation context.
546  *
547  * pgoff should be used in favour of virtual_address, if possible.
548  */
549 struct vm_fault {
550 	const struct {
551 		struct vm_area_struct *vma;	/* Target VMA */
552 		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
553 		pgoff_t pgoff;			/* Logical page offset based on vma */
554 		unsigned long address;		/* Faulting virtual address - masked */
555 		unsigned long real_address;	/* Faulting virtual address - unmasked */
556 	};
557 	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
558 					 * XXX: should really be 'const' */
559 	pmd_t *pmd;			/* Pointer to pmd entry matching
560 					 * the 'address' */
561 	pud_t *pud;			/* Pointer to pud entry matching
562 					 * the 'address'
563 					 */
564 	union {
565 		pte_t orig_pte;		/* Value of PTE at the time of fault */
566 		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
567 					 * used by PMD fault only.
568 					 */
569 	};
570 
571 	struct page *cow_page;		/* Page handler may use for COW fault */
572 	struct page *page;		/* ->fault handlers should return a
573 					 * page here, unless VM_FAULT_NOPAGE
574 					 * is set (which is also implied by
575 					 * VM_FAULT_ERROR).
576 					 */
577 	/* These three entries are valid only while holding ptl lock */
578 	pte_t *pte;			/* Pointer to pte entry matching
579 					 * the 'address'. NULL if the page
580 					 * table hasn't been allocated.
581 					 */
582 	spinlock_t *ptl;		/* Page table lock.
583 					 * Protects pte page table if 'pte'
584 					 * is not NULL, otherwise pmd.
585 					 */
586 	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
587 					 * vm_ops->map_pages() sets up a page
588 					 * table from atomic context.
589 					 * do_fault_around() pre-allocates
590 					 * page table to avoid allocation from
591 					 * atomic context.
592 					 */
593 };
594 
595 /*
596  * These are the virtual MM functions - opening of an area, closing and
597  * unmapping it (needed to keep files on disk up-to-date etc), pointer
598  * to the functions called when a no-page or a wp-page exception occurs.
599  */
600 struct vm_operations_struct {
601 	void (*open)(struct vm_area_struct * area);
602 	/**
603 	 * @close: Called when the VMA is being removed from the MM.
604 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
605 	 */
606 	void (*close)(struct vm_area_struct * area);
607 	/* Called any time before splitting to check if it's allowed */
608 	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
609 	int (*mremap)(struct vm_area_struct *area);
610 	/*
611 	 * Called by mprotect() to make driver-specific permission
612 	 * checks before mprotect() is finalised.   The VMA must not
613 	 * be modified.  Returns 0 if mprotect() can proceed.
614 	 */
615 	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
616 			unsigned long end, unsigned long newflags);
617 	vm_fault_t (*fault)(struct vm_fault *vmf);
618 	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
619 	vm_fault_t (*map_pages)(struct vm_fault *vmf,
620 			pgoff_t start_pgoff, pgoff_t end_pgoff);
621 	unsigned long (*pagesize)(struct vm_area_struct * area);
622 
623 	/* notification that a previously read-only page is about to become
624 	 * writable, if an error is returned it will cause a SIGBUS */
625 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
626 
627 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
628 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
629 
630 	/* called by access_process_vm when get_user_pages() fails, typically
631 	 * for use by special VMAs. See also generic_access_phys() for a generic
632 	 * implementation useful for any iomem mapping.
633 	 */
634 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
635 		      void *buf, int len, int write);
636 
637 	/* Called by the /proc/PID/maps code to ask the vma whether it
638 	 * has a special name.  Returning non-NULL will also cause this
639 	 * vma to be dumped unconditionally. */
640 	const char *(*name)(struct vm_area_struct *vma);
641 
642 #ifdef CONFIG_NUMA
643 	/*
644 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
645 	 * to hold the policy upon return.  Caller should pass NULL @new to
646 	 * remove a policy and fall back to surrounding context--i.e. do not
647 	 * install a MPOL_DEFAULT policy, nor the task or system default
648 	 * mempolicy.
649 	 */
650 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
651 
652 	/*
653 	 * get_policy() op must add reference [mpol_get()] to any policy at
654 	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
655 	 * in mm/mempolicy.c will do this automatically.
656 	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
657 	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
658 	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
659 	 * must return NULL--i.e., do not "fallback" to task or system default
660 	 * policy.
661 	 */
662 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
663 					unsigned long addr, pgoff_t *ilx);
664 #endif
665 	/*
666 	 * Called by vm_normal_page() for special PTEs to find the
667 	 * page for @addr.  This is useful if the default behavior
668 	 * (using pte_page()) would not find the correct page.
669 	 */
670 	struct page *(*find_special_page)(struct vm_area_struct *vma,
671 					  unsigned long addr);
672 
673 	ANDROID_KABI_RESERVE(1);
674 	ANDROID_KABI_RESERVE(2);
675 	ANDROID_KABI_RESERVE(3);
676 	ANDROID_KABI_RESERVE(4);
677 };
678 
679 #ifdef CONFIG_NUMA_BALANCING
vma_numab_state_init(struct vm_area_struct * vma)680 static inline void vma_numab_state_init(struct vm_area_struct *vma)
681 {
682 	vma->numab_state = NULL;
683 }
vma_numab_state_free(struct vm_area_struct * vma)684 static inline void vma_numab_state_free(struct vm_area_struct *vma)
685 {
686 	kfree(vma->numab_state);
687 }
688 #else
vma_numab_state_init(struct vm_area_struct * vma)689 static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
vma_numab_state_free(struct vm_area_struct * vma)690 static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
691 #endif /* CONFIG_NUMA_BALANCING */
692 
693 #ifdef CONFIG_PER_VMA_LOCK
vma_lock_init(struct vm_area_struct * vma,bool reset_refcnt)694 static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
695 {
696 #ifdef CONFIG_DEBUG_LOCK_ALLOC
697 	static struct lock_class_key lockdep_key;
698 
699 	lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0);
700 #endif
701 	if (reset_refcnt)
702 		refcount_set(&vma->vm_refcnt, 0);
703 	vma->vm_lock_seq = UINT_MAX;
704 }
705 
is_vma_writer_only(int refcnt)706 static inline bool is_vma_writer_only(int refcnt)
707 {
708 	/*
709 	 * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma
710 	 * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on
711 	 * a detached vma happens only in vma_mark_detached() and is a rare
712 	 * case, therefore most of the time there will be no unnecessary wakeup.
713 	 */
714 	return refcnt & VMA_LOCK_OFFSET && refcnt <= VMA_LOCK_OFFSET + 1;
715 }
716 
vma_refcount_put(struct vm_area_struct * vma)717 static inline void vma_refcount_put(struct vm_area_struct *vma)
718 {
719 	/* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
720 	struct mm_struct *mm = vma->vm_mm;
721 	int oldcnt;
722 
723 	rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
724 	if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) {
725 
726 		if (is_vma_writer_only(oldcnt - 1))
727 			rcuwait_wake_up(&mm->vma_writer_wait);
728 	}
729 }
730 
731 /*
732  * Try to read-lock a vma. The function is allowed to occasionally yield false
733  * locked result to avoid performance overhead, in which case we fall back to
734  * using mmap_lock. The function should never yield false unlocked result.
735  * False locked result is possible if mm_lock_seq overflows or if vma gets
736  * reused and attached to a different mm before we lock it.
737  * Returns the vma on success, NULL on failure to lock and EAGAIN if vma got
738  * detached.
739  */
vma_start_read(struct mm_struct * mm,struct vm_area_struct * vma)740 static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
741 						    struct vm_area_struct *vma)
742 {
743 	int oldcnt;
744 
745 	/*
746 	 * Check before locking. A race might cause false locked result.
747 	 * We can use READ_ONCE() for the mm_lock_seq here, and don't need
748 	 * ACQUIRE semantics, because this is just a lockless check whose result
749 	 * we don't rely on for anything - the mm_lock_seq read against which we
750 	 * need ordering is below.
751 	 */
752 	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence))
753 		return NULL;
754 
755 	/*
756 	 * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire()
757 	 * will fail because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET.
758 	 * Acquire fence is required here to avoid reordering against later
759 	 * vm_lock_seq check and checks inside lock_vma_under_rcu().
760 	 */
761 	if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
762 							      VMA_REF_LIMIT))) {
763 		/* return EAGAIN if vma got detached from under us */
764 		return oldcnt ? NULL : ERR_PTR(-EAGAIN);
765 	}
766 
767 	rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
768 	/*
769 	 * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result.
770 	 * False unlocked result is impossible because we modify and check
771 	 * vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq
772 	 * modification invalidates all existing locks.
773 	 *
774 	 * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
775 	 * racing with vma_end_write_all(), we only start reading from the VMA
776 	 * after it has been unlocked.
777 	 * This pairs with RELEASE semantics in vma_end_write_all().
778 	 */
779 	if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
780 		vma_refcount_put(vma);
781 		return NULL;
782 	}
783 
784 	return vma;
785 }
786 
787 /*
788  * Use only while holding mmap read lock which guarantees that locking will not
789  * fail (nobody can concurrently write-lock the vma). vma_start_read() should
790  * not be used in such cases because it might fail due to mm_lock_seq overflow.
791  * This functionality is used to obtain vma read lock and drop the mmap read lock.
792  */
vma_start_read_locked_nested(struct vm_area_struct * vma,int subclass)793 static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass)
794 {
795 	int oldcnt;
796 
797 	mmap_assert_locked(vma->vm_mm);
798 	if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
799 							      VMA_REF_LIMIT)))
800 		return false;
801 
802 	rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
803 	return true;
804 }
805 
806 /*
807  * Use only while holding mmap read lock which guarantees that locking will not
808  * fail (nobody can concurrently write-lock the vma). vma_start_read() should
809  * not be used in such cases because it might fail due to mm_lock_seq overflow.
810  * This functionality is used to obtain vma read lock and drop the mmap read lock.
811  */
vma_start_read_locked(struct vm_area_struct * vma)812 static inline bool vma_start_read_locked(struct vm_area_struct *vma)
813 {
814 	return vma_start_read_locked_nested(vma, 0);
815 }
816 
vma_end_read(struct vm_area_struct * vma)817 static inline void vma_end_read(struct vm_area_struct *vma)
818 {
819 	vma_refcount_put(vma);
820 }
821 
822 /* WARNING! Can only be used if mmap_lock is expected to be write-locked */
__is_vma_write_locked(struct vm_area_struct * vma,unsigned int * mm_lock_seq)823 static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
824 {
825 	mmap_assert_write_locked(vma->vm_mm);
826 
827 	/*
828 	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
829 	 * mm->mm_lock_seq can't be concurrently modified.
830 	 */
831 	*mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
832 	return (vma->vm_lock_seq == *mm_lock_seq);
833 }
834 
835 void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq);
836 
837 /*
838  * Begin writing to a VMA.
839  * Exclude concurrent readers under the per-VMA lock until the currently
840  * write-locked mmap_lock is dropped or downgraded.
841  */
vma_start_write(struct vm_area_struct * vma)842 static inline void vma_start_write(struct vm_area_struct *vma)
843 {
844 	unsigned int mm_lock_seq;
845 
846 	if (__is_vma_write_locked(vma, &mm_lock_seq))
847 		return;
848 
849 	__vma_start_write(vma, mm_lock_seq);
850 }
851 
vma_assert_write_locked(struct vm_area_struct * vma)852 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
853 {
854 	unsigned int mm_lock_seq;
855 
856 	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
857 }
858 
vma_assert_locked(struct vm_area_struct * vma)859 static inline void vma_assert_locked(struct vm_area_struct *vma)
860 {
861 	unsigned int mm_lock_seq;
862 
863 	VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
864 		      !__is_vma_write_locked(vma, &mm_lock_seq), vma);
865 }
866 
867 /*
868  * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
869  * assertions should be made either under mmap_write_lock or when the object
870  * has been isolated under mmap_write_lock, ensuring no competing writers.
871  */
vma_assert_attached(struct vm_area_struct * vma)872 static inline void vma_assert_attached(struct vm_area_struct *vma)
873 {
874 	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
875 }
876 
vma_assert_detached(struct vm_area_struct * vma)877 static inline void vma_assert_detached(struct vm_area_struct *vma)
878 {
879 	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
880 }
881 
vma_mark_attached(struct vm_area_struct * vma)882 static inline void vma_mark_attached(struct vm_area_struct *vma)
883 {
884 	vma_assert_write_locked(vma);
885 	vma_assert_detached(vma);
886 	refcount_set_release(&vma->vm_refcnt, 1);
887 }
888 
889 void vma_mark_detached(struct vm_area_struct *vma);
890 
release_fault_lock(struct vm_fault * vmf)891 static inline void release_fault_lock(struct vm_fault *vmf)
892 {
893 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
894 		vma_end_read(vmf->vma);
895 	else
896 		mmap_read_unlock(vmf->vma->vm_mm);
897 }
898 
assert_fault_locked(struct vm_fault * vmf)899 static inline void assert_fault_locked(struct vm_fault *vmf)
900 {
901 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
902 		vma_assert_locked(vmf->vma);
903 	else
904 		mmap_assert_locked(vmf->vma->vm_mm);
905 }
906 
907 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
908 					  unsigned long address);
909 
910 #else /* CONFIG_PER_VMA_LOCK */
911 
vma_lock_init(struct vm_area_struct * vma,bool reset_refcnt)912 static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {}
vma_start_read(struct mm_struct * mm,struct vm_area_struct * vma)913 static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
914 						    struct vm_area_struct *vma)
915 		{ return NULL; }
vma_end_read(struct vm_area_struct * vma)916 static inline void vma_end_read(struct vm_area_struct *vma) {}
vma_start_write(struct vm_area_struct * vma)917 static inline void vma_start_write(struct vm_area_struct *vma) {}
vma_assert_write_locked(struct vm_area_struct * vma)918 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
919 		{ mmap_assert_write_locked(vma->vm_mm); }
vma_assert_attached(struct vm_area_struct * vma)920 static inline void vma_assert_attached(struct vm_area_struct *vma) {}
vma_assert_detached(struct vm_area_struct * vma)921 static inline void vma_assert_detached(struct vm_area_struct *vma) {}
vma_mark_attached(struct vm_area_struct * vma)922 static inline void vma_mark_attached(struct vm_area_struct *vma) {}
vma_mark_detached(struct vm_area_struct * vma)923 static inline void vma_mark_detached(struct vm_area_struct *vma) {}
924 
lock_vma_under_rcu(struct mm_struct * mm,unsigned long address)925 static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
926 		unsigned long address)
927 {
928 	return NULL;
929 }
930 
vma_assert_locked(struct vm_area_struct * vma)931 static inline void vma_assert_locked(struct vm_area_struct *vma)
932 {
933 	mmap_assert_locked(vma->vm_mm);
934 }
935 
release_fault_lock(struct vm_fault * vmf)936 static inline void release_fault_lock(struct vm_fault *vmf)
937 {
938 	mmap_read_unlock(vmf->vma->vm_mm);
939 }
940 
assert_fault_locked(struct vm_fault * vmf)941 static inline void assert_fault_locked(struct vm_fault *vmf)
942 {
943 	mmap_assert_locked(vmf->vma->vm_mm);
944 }
945 
946 #endif /* CONFIG_PER_VMA_LOCK */
947 
948 extern const struct vm_operations_struct vma_dummy_vm_ops;
949 
vma_init(struct vm_area_struct * vma,struct mm_struct * mm)950 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
951 {
952 	memset(vma, 0, sizeof(*vma));
953 	vma->vm_mm = mm;
954 	vma->vm_ops = &vma_dummy_vm_ops;
955 	INIT_LIST_HEAD(&vma->anon_vma_chain);
956 	vma_lock_init(vma, false);
957 }
958 
959 /* Use when VMA is not part of the VMA tree and needs no locking */
vm_flags_init(struct vm_area_struct * vma,vm_flags_t flags)960 static inline void vm_flags_init(struct vm_area_struct *vma,
961 				 vm_flags_t flags)
962 {
963 	ACCESS_PRIVATE(vma, __vm_flags) = flags;
964 }
965 
966 /*
967  * Use when VMA is part of the VMA tree and modifications need coordination
968  * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
969  * it should be locked explicitly beforehand.
970  */
vm_flags_reset(struct vm_area_struct * vma,vm_flags_t flags)971 static inline void vm_flags_reset(struct vm_area_struct *vma,
972 				  vm_flags_t flags)
973 {
974 	vma_assert_write_locked(vma);
975 	/* Preserve padding flags */
976 	flags = vma_pad_fixup_flags(vma, flags);
977 	vm_flags_init(vma, flags);
978 }
979 
vm_flags_reset_once(struct vm_area_struct * vma,vm_flags_t flags)980 static inline void vm_flags_reset_once(struct vm_area_struct *vma,
981 				       vm_flags_t flags)
982 {
983 	vma_assert_write_locked(vma);
984 	/* Preserve padding flags */
985 	flags = vma_pad_fixup_flags(vma, flags);
986 	WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
987 }
988 
vm_flags_set(struct vm_area_struct * vma,vm_flags_t flags)989 static inline void vm_flags_set(struct vm_area_struct *vma,
990 				vm_flags_t flags)
991 {
992 	vma_start_write(vma);
993 	ACCESS_PRIVATE(vma, __vm_flags) |= flags;
994 }
995 
vm_flags_clear(struct vm_area_struct * vma,vm_flags_t flags)996 static inline void vm_flags_clear(struct vm_area_struct *vma,
997 				  vm_flags_t flags)
998 {
999 	vma_start_write(vma);
1000 	ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
1001 }
1002 
1003 /*
1004  * Use only if VMA is not part of the VMA tree or has no other users and
1005  * therefore needs no locking.
1006  */
__vm_flags_mod(struct vm_area_struct * vma,vm_flags_t set,vm_flags_t clear)1007 static inline void __vm_flags_mod(struct vm_area_struct *vma,
1008 				  vm_flags_t set, vm_flags_t clear)
1009 {
1010 	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
1011 }
1012 
1013 /*
1014  * Use only when the order of set/clear operations is unimportant, otherwise
1015  * use vm_flags_{set|clear} explicitly.
1016  */
vm_flags_mod(struct vm_area_struct * vma,vm_flags_t set,vm_flags_t clear)1017 static inline void vm_flags_mod(struct vm_area_struct *vma,
1018 				vm_flags_t set, vm_flags_t clear)
1019 {
1020 	vma_start_write(vma);
1021 	__vm_flags_mod(vma, set, clear);
1022 }
1023 
vma_set_anonymous(struct vm_area_struct * vma)1024 static inline void vma_set_anonymous(struct vm_area_struct *vma)
1025 {
1026 	vma->vm_ops = NULL;
1027 }
1028 
vma_is_anonymous(struct vm_area_struct * vma)1029 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1030 {
1031 	return !vma->vm_ops;
1032 }
1033 
1034 /*
1035  * Indicate if the VMA is a heap for the given task; for
1036  * /proc/PID/maps that is the heap of the main task.
1037  */
vma_is_initial_heap(const struct vm_area_struct * vma)1038 static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
1039 {
1040 	return vma->vm_start < vma->vm_mm->brk &&
1041 		vma->vm_end > vma->vm_mm->start_brk;
1042 }
1043 
1044 /*
1045  * Indicate if the VMA is a stack for the given task; for
1046  * /proc/PID/maps that is the stack of the main task.
1047  */
vma_is_initial_stack(const struct vm_area_struct * vma)1048 static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
1049 {
1050 	/*
1051 	 * We make no effort to guess what a given thread considers to be
1052 	 * its "stack".  It's not even well-defined for programs written
1053 	 * languages like Go.
1054 	 */
1055 	return vma->vm_start <= vma->vm_mm->start_stack &&
1056 		vma->vm_end >= vma->vm_mm->start_stack;
1057 }
1058 
vma_is_temporary_stack(struct vm_area_struct * vma)1059 static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
1060 {
1061 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1062 
1063 	if (!maybe_stack)
1064 		return false;
1065 
1066 	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1067 						VM_STACK_INCOMPLETE_SETUP)
1068 		return true;
1069 
1070 	return false;
1071 }
1072 
vma_is_foreign(struct vm_area_struct * vma)1073 static inline bool vma_is_foreign(struct vm_area_struct *vma)
1074 {
1075 	if (!current->mm)
1076 		return true;
1077 
1078 	if (current->mm != vma->vm_mm)
1079 		return true;
1080 
1081 	return false;
1082 }
1083 
vma_is_accessible(struct vm_area_struct * vma)1084 static inline bool vma_is_accessible(struct vm_area_struct *vma)
1085 {
1086 	return vma->vm_flags & VM_ACCESS_FLAGS;
1087 }
1088 
is_shared_maywrite(vm_flags_t vm_flags)1089 static inline bool is_shared_maywrite(vm_flags_t vm_flags)
1090 {
1091 	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
1092 		(VM_SHARED | VM_MAYWRITE);
1093 }
1094 
vma_is_shared_maywrite(struct vm_area_struct * vma)1095 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
1096 {
1097 	return is_shared_maywrite(vma->vm_flags);
1098 }
1099 
1100 static inline
vma_find(struct vma_iterator * vmi,unsigned long max)1101 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
1102 {
1103 	return mas_find(&vmi->mas, max - 1);
1104 }
1105 
vma_next(struct vma_iterator * vmi)1106 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
1107 {
1108 	/*
1109 	 * Uses mas_find() to get the first VMA when the iterator starts.
1110 	 * Calling mas_next() could skip the first entry.
1111 	 */
1112 	return mas_find(&vmi->mas, ULONG_MAX);
1113 }
1114 
1115 static inline
vma_iter_next_range(struct vma_iterator * vmi)1116 struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
1117 {
1118 	return mas_next_range(&vmi->mas, ULONG_MAX);
1119 }
1120 
1121 
vma_prev(struct vma_iterator * vmi)1122 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
1123 {
1124 	return mas_prev(&vmi->mas, 0);
1125 }
1126 
vma_iter_clear_gfp(struct vma_iterator * vmi,unsigned long start,unsigned long end,gfp_t gfp)1127 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1128 			unsigned long start, unsigned long end, gfp_t gfp)
1129 {
1130 	__mas_set_range(&vmi->mas, start, end - 1);
1131 	mas_store_gfp(&vmi->mas, NULL, gfp);
1132 	if (unlikely(mas_is_err(&vmi->mas)))
1133 		return -ENOMEM;
1134 
1135 	return 0;
1136 }
1137 
1138 /* Free any unused preallocations */
vma_iter_free(struct vma_iterator * vmi)1139 static inline void vma_iter_free(struct vma_iterator *vmi)
1140 {
1141 	mas_destroy(&vmi->mas);
1142 }
1143 
vma_iter_bulk_store(struct vma_iterator * vmi,struct vm_area_struct * vma)1144 static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
1145 				      struct vm_area_struct *vma)
1146 {
1147 	vmi->mas.index = vma->vm_start;
1148 	vmi->mas.last = vma->vm_end - 1;
1149 	mas_store(&vmi->mas, vma);
1150 	if (unlikely(mas_is_err(&vmi->mas)))
1151 		return -ENOMEM;
1152 
1153 	vma_mark_attached(vma);
1154 	return 0;
1155 }
1156 
vma_iter_invalidate(struct vma_iterator * vmi)1157 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
1158 {
1159 	mas_pause(&vmi->mas);
1160 }
1161 
vma_iter_set(struct vma_iterator * vmi,unsigned long addr)1162 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
1163 {
1164 	mas_set(&vmi->mas, addr);
1165 }
1166 
1167 #define for_each_vma(__vmi, __vma)					\
1168 	while (((__vma) = vma_next(&(__vmi))) != NULL)
1169 
1170 /* The MM code likes to work with exclusive end addresses */
1171 #define for_each_vma_range(__vmi, __vma, __end)				\
1172 	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
1173 
1174 #ifdef CONFIG_SHMEM
1175 /*
1176  * The vma_is_shmem is not inline because it is used only by slow
1177  * paths in userfault.
1178  */
1179 bool vma_is_shmem(struct vm_area_struct *vma);
1180 bool vma_is_anon_shmem(struct vm_area_struct *vma);
1181 #else
vma_is_shmem(struct vm_area_struct * vma)1182 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
vma_is_anon_shmem(struct vm_area_struct * vma)1183 static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
1184 #endif
1185 
1186 int vma_is_stack_for_current(struct vm_area_struct *vma);
1187 
1188 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
1189 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
1190 
1191 struct mmu_gather;
1192 struct inode;
1193 
1194 /*
1195  * compound_order() can be called without holding a reference, which means
1196  * that niceties like page_folio() don't work.  These callers should be
1197  * prepared to handle wild return values.  For example, PG_head may be
1198  * set before the order is initialised, or this may be a tail page.
1199  * See compaction.c for some good examples.
1200  */
compound_order(struct page * page)1201 static inline unsigned int compound_order(struct page *page)
1202 {
1203 	struct folio *folio = (struct folio *)page;
1204 
1205 	if (!test_bit(PG_head, &folio->flags))
1206 		return 0;
1207 	return folio->_flags_1 & 0xff;
1208 }
1209 
1210 /**
1211  * folio_order - The allocation order of a folio.
1212  * @folio: The folio.
1213  *
1214  * A folio is composed of 2^order pages.  See get_order() for the definition
1215  * of order.
1216  *
1217  * Return: The order of the folio.
1218  */
folio_order(const struct folio * folio)1219 static inline unsigned int folio_order(const struct folio *folio)
1220 {
1221 	if (!folio_test_large(folio))
1222 		return 0;
1223 	return folio->_flags_1 & 0xff;
1224 }
1225 
1226 #include <linux/huge_mm.h>
1227 
1228 /*
1229  * Methods to modify the page usage count.
1230  *
1231  * What counts for a page usage:
1232  * - cache mapping   (page->mapping)
1233  * - private data    (page->private)
1234  * - page mapped in a task's page tables, each mapping
1235  *   is counted separately
1236  *
1237  * Also, many kernel routines increase the page count before a critical
1238  * routine so they can be sure the page doesn't go away from under them.
1239  */
1240 
1241 /*
1242  * Drop a ref, return true if the refcount fell to zero (the page has no users)
1243  */
put_page_testzero(struct page * page)1244 static inline int put_page_testzero(struct page *page)
1245 {
1246 	int ret;
1247 
1248 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
1249 	ret = page_ref_dec_and_test(page);
1250 	page_pinner_put_page(page);
1251 
1252 	return ret;
1253 }
1254 
folio_put_testzero(struct folio * folio)1255 static inline int folio_put_testzero(struct folio *folio)
1256 {
1257 	return put_page_testzero(&folio->page);
1258 }
1259 
1260 /*
1261  * Try to grab a ref unless the page has a refcount of zero, return false if
1262  * that is the case.
1263  * This can be called when MMU is off so it must not access
1264  * any of the virtual mappings.
1265  */
get_page_unless_zero(struct page * page)1266 static inline bool get_page_unless_zero(struct page *page)
1267 {
1268 	return page_ref_add_unless(page, 1, 0);
1269 }
1270 
folio_get_nontail_page(struct page * page)1271 static inline struct folio *folio_get_nontail_page(struct page *page)
1272 {
1273 	if (unlikely(!get_page_unless_zero(page)))
1274 		return NULL;
1275 	return (struct folio *)page;
1276 }
1277 
1278 extern int page_is_ram(unsigned long pfn);
1279 
1280 enum {
1281 	REGION_INTERSECTS,
1282 	REGION_DISJOINT,
1283 	REGION_MIXED,
1284 };
1285 
1286 int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
1287 		      unsigned long desc);
1288 
1289 /* Support for virtually mapped pages */
1290 struct page *vmalloc_to_page(const void *addr);
1291 unsigned long vmalloc_to_pfn(const void *addr);
1292 
1293 /*
1294  * Determine if an address is within the vmalloc range
1295  *
1296  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
1297  * is no special casing required.
1298  */
1299 #ifdef CONFIG_MMU
1300 extern bool is_vmalloc_addr(const void *x);
1301 extern int is_vmalloc_or_module_addr(const void *x);
1302 #else
is_vmalloc_addr(const void * x)1303 static inline bool is_vmalloc_addr(const void *x)
1304 {
1305 	return false;
1306 }
is_vmalloc_or_module_addr(const void * x)1307 static inline int is_vmalloc_or_module_addr(const void *x)
1308 {
1309 	return 0;
1310 }
1311 #endif
1312 
1313 /*
1314  * How many times the entire folio is mapped as a single unit (eg by a
1315  * PMD or PUD entry).  This is probably not what you want, except for
1316  * debugging purposes or implementation of other core folio_*() primitives.
1317  */
folio_entire_mapcount(const struct folio * folio)1318 static inline int folio_entire_mapcount(const struct folio *folio)
1319 {
1320 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1321 	return atomic_read(&folio->_entire_mapcount) + 1;
1322 }
1323 
folio_large_mapcount(const struct folio * folio)1324 static inline int folio_large_mapcount(const struct folio *folio)
1325 {
1326 	VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
1327 	return atomic_read(&folio->_large_mapcount) + 1;
1328 }
1329 
1330 /**
1331  * folio_mapcount() - Number of mappings of this folio.
1332  * @folio: The folio.
1333  *
1334  * The folio mapcount corresponds to the number of present user page table
1335  * entries that reference any part of a folio. Each such present user page
1336  * table entry must be paired with exactly on folio reference.
1337  *
1338  * For ordindary folios, each user page table entry (PTE/PMD/PUD/...) counts
1339  * exactly once.
1340  *
1341  * For hugetlb folios, each abstracted "hugetlb" user page table entry that
1342  * references the entire folio counts exactly once, even when such special
1343  * page table entries are comprised of multiple ordinary page table entries.
1344  *
1345  * Will report 0 for pages which cannot be mapped into userspace, such as
1346  * slab, page tables and similar.
1347  *
1348  * Return: The number of times this folio is mapped.
1349  */
folio_mapcount(const struct folio * folio)1350 static inline int folio_mapcount(const struct folio *folio)
1351 {
1352 	int mapcount;
1353 
1354 	if (likely(!folio_test_large(folio))) {
1355 		mapcount = atomic_read(&folio->_mapcount) + 1;
1356 		if (page_mapcount_is_type(mapcount))
1357 			mapcount = 0;
1358 		return mapcount;
1359 	}
1360 	return folio_large_mapcount(folio);
1361 }
1362 
1363 /**
1364  * folio_mapped - Is this folio mapped into userspace?
1365  * @folio: The folio.
1366  *
1367  * Return: True if any page in this folio is referenced by user page tables.
1368  */
folio_mapped(const struct folio * folio)1369 static inline bool folio_mapped(const struct folio *folio)
1370 {
1371 	return folio_mapcount(folio) >= 1;
1372 }
1373 
1374 /*
1375  * Return true if this page is mapped into pagetables.
1376  * For compound page it returns true if any sub-page of compound page is mapped,
1377  * even if this particular sub-page is not itself mapped by any PTE or PMD.
1378  */
page_mapped(const struct page * page)1379 static inline bool page_mapped(const struct page *page)
1380 {
1381 	return folio_mapped(page_folio(page));
1382 }
1383 
virt_to_head_page(const void * x)1384 static inline struct page *virt_to_head_page(const void *x)
1385 {
1386 	struct page *page = virt_to_page(x);
1387 
1388 	return compound_head(page);
1389 }
1390 
virt_to_folio(const void * x)1391 static inline struct folio *virt_to_folio(const void *x)
1392 {
1393 	struct page *page = virt_to_page(x);
1394 
1395 	return page_folio(page);
1396 }
1397 
1398 void __folio_put(struct folio *folio);
1399 
1400 void put_pages_list(struct list_head *pages);
1401 
1402 void split_page(struct page *page, unsigned int order);
1403 void folio_copy(struct folio *dst, struct folio *src);
1404 int folio_mc_copy(struct folio *dst, struct folio *src);
1405 
1406 unsigned long nr_free_buffer_pages(void);
1407 
1408 /* Returns the number of bytes in this potentially compound page. */
page_size(struct page * page)1409 static inline unsigned long page_size(struct page *page)
1410 {
1411 	return PAGE_SIZE << compound_order(page);
1412 }
1413 
1414 /* Returns the number of bits needed for the number of bytes in a page */
page_shift(struct page * page)1415 static inline unsigned int page_shift(struct page *page)
1416 {
1417 	return PAGE_SHIFT + compound_order(page);
1418 }
1419 
1420 /**
1421  * thp_order - Order of a transparent huge page.
1422  * @page: Head page of a transparent huge page.
1423  */
thp_order(struct page * page)1424 static inline unsigned int thp_order(struct page *page)
1425 {
1426 	VM_BUG_ON_PGFLAGS(PageTail(page), page);
1427 	return compound_order(page);
1428 }
1429 
1430 /**
1431  * thp_size - Size of a transparent huge page.
1432  * @page: Head page of a transparent huge page.
1433  *
1434  * Return: Number of bytes in this page.
1435  */
thp_size(struct page * page)1436 static inline unsigned long thp_size(struct page *page)
1437 {
1438 	return PAGE_SIZE << thp_order(page);
1439 }
1440 
1441 #ifdef CONFIG_MMU
1442 /*
1443  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1444  * servicing faults for write access.  In the normal case, do always want
1445  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1446  * that do not have writing enabled, when used by access_process_vm.
1447  */
maybe_mkwrite(pte_t pte,struct vm_area_struct * vma)1448 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1449 {
1450 	if (likely(vma->vm_flags & VM_WRITE))
1451 		pte = pte_mkwrite(pte, vma);
1452 	return pte;
1453 }
1454 
1455 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
1456 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
1457 		struct page *page, unsigned int nr, unsigned long addr);
1458 
1459 vm_fault_t finish_fault(struct vm_fault *vmf);
1460 #endif
1461 
1462 /*
1463  * Multiple processes may "see" the same page. E.g. for untouched
1464  * mappings of /dev/null, all processes see the same page full of
1465  * zeroes, and text pages of executables and shared libraries have
1466  * only one copy in memory, at most, normally.
1467  *
1468  * For the non-reserved pages, page_count(page) denotes a reference count.
1469  *   page_count() == 0 means the page is free. page->lru is then used for
1470  *   freelist management in the buddy allocator.
1471  *   page_count() > 0  means the page has been allocated.
1472  *
1473  * Pages are allocated by the slab allocator in order to provide memory
1474  * to kmalloc and kmem_cache_alloc. In this case, the management of the
1475  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
1476  * unless a particular usage is carefully commented. (the responsibility of
1477  * freeing the kmalloc memory is the caller's, of course).
1478  *
1479  * A page may be used by anyone else who does a __get_free_page().
1480  * In this case, page_count still tracks the references, and should only
1481  * be used through the normal accessor functions. The top bits of page->flags
1482  * and page->virtual store page management information, but all other fields
1483  * are unused and could be used privately, carefully. The management of this
1484  * page is the responsibility of the one who allocated it, and those who have
1485  * subsequently been given references to it.
1486  *
1487  * The other pages (we may call them "pagecache pages") are completely
1488  * managed by the Linux memory manager: I/O, buffers, swapping etc.
1489  * The following discussion applies only to them.
1490  *
1491  * A pagecache page contains an opaque `private' member, which belongs to the
1492  * page's address_space. Usually, this is the address of a circular list of
1493  * the page's disk buffers. PG_private must be set to tell the VM to call
1494  * into the filesystem to release these pages.
1495  *
1496  * A page may belong to an inode's memory mapping. In this case, page->mapping
1497  * is the pointer to the inode, and page->index is the file offset of the page,
1498  * in units of PAGE_SIZE.
1499  *
1500  * If pagecache pages are not associated with an inode, they are said to be
1501  * anonymous pages. These may become associated with the swapcache, and in that
1502  * case PG_swapcache is set, and page->private is an offset into the swapcache.
1503  *
1504  * In either case (swapcache or inode backed), the pagecache itself holds one
1505  * reference to the page. Setting PG_private should also increment the
1506  * refcount. The each user mapping also has a reference to the page.
1507  *
1508  * The pagecache pages are stored in a per-mapping radix tree, which is
1509  * rooted at mapping->i_pages, and indexed by offset.
1510  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
1511  * lists, we instead now tag pages as dirty/writeback in the radix tree.
1512  *
1513  * All pagecache pages may be subject to I/O:
1514  * - inode pages may need to be read from disk,
1515  * - inode pages which have been modified and are MAP_SHARED may need
1516  *   to be written back to the inode on disk,
1517  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
1518  *   modified may need to be swapped out to swap space and (later) to be read
1519  *   back into memory.
1520  */
1521 
1522 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
1523 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
1524 
1525 bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
put_devmap_managed_folio_refs(struct folio * folio,int refs)1526 static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
1527 {
1528 	if (!static_branch_unlikely(&devmap_managed_key))
1529 		return false;
1530 	if (!folio_is_zone_device(folio))
1531 		return false;
1532 	return __put_devmap_managed_folio_refs(folio, refs);
1533 }
1534 #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
put_devmap_managed_folio_refs(struct folio * folio,int refs)1535 static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
1536 {
1537 	return false;
1538 }
1539 #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
1540 
1541 /* 127: arbitrary random number, small enough to assemble well */
1542 #define folio_ref_zero_or_close_to_overflow(folio) \
1543 	((unsigned int) folio_ref_count(folio) + 127u <= 127u)
1544 
1545 /**
1546  * folio_get - Increment the reference count on a folio.
1547  * @folio: The folio.
1548  *
1549  * Context: May be called in any context, as long as you know that
1550  * you have a refcount on the folio.  If you do not already have one,
1551  * folio_try_get() may be the right interface for you to use.
1552  */
folio_get(struct folio * folio)1553 static inline void folio_get(struct folio *folio)
1554 {
1555 	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
1556 	folio_ref_inc(folio);
1557 }
1558 
get_page(struct page * page)1559 static inline void get_page(struct page *page)
1560 {
1561 	folio_get(page_folio(page));
1562 }
1563 
try_get_page(struct page * page)1564 static inline __must_check bool try_get_page(struct page *page)
1565 {
1566 	page = compound_head(page);
1567 	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1568 		return false;
1569 	page_ref_inc(page);
1570 	return true;
1571 }
1572 
1573 /**
1574  * folio_put - Decrement the reference count on a folio.
1575  * @folio: The folio.
1576  *
1577  * If the folio's reference count reaches zero, the memory will be
1578  * released back to the page allocator and may be used by another
1579  * allocation immediately.  Do not access the memory or the struct folio
1580  * after calling folio_put() unless you can be sure that it wasn't the
1581  * last reference.
1582  *
1583  * Context: May be called in process or interrupt context, but not in NMI
1584  * context.  May be called while holding a spinlock.
1585  */
folio_put(struct folio * folio)1586 static inline void folio_put(struct folio *folio)
1587 {
1588 	if (folio_put_testzero(folio))
1589 		__folio_put(folio);
1590 }
1591 
1592 /**
1593  * folio_put_refs - Reduce the reference count on a folio.
1594  * @folio: The folio.
1595  * @refs: The amount to subtract from the folio's reference count.
1596  *
1597  * If the folio's reference count reaches zero, the memory will be
1598  * released back to the page allocator and may be used by another
1599  * allocation immediately.  Do not access the memory or the struct folio
1600  * after calling folio_put_refs() unless you can be sure that these weren't
1601  * the last references.
1602  *
1603  * Context: May be called in process or interrupt context, but not in NMI
1604  * context.  May be called while holding a spinlock.
1605  */
folio_put_refs(struct folio * folio,int refs)1606 static inline void folio_put_refs(struct folio *folio, int refs)
1607 {
1608 	if (folio_ref_sub_and_test(folio, refs))
1609 		__folio_put(folio);
1610 }
1611 
1612 void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
1613 
1614 /*
1615  * union release_pages_arg - an array of pages or folios
1616  *
1617  * release_pages() releases a simple array of multiple pages, and
1618  * accepts various different forms of said page array: either
1619  * a regular old boring array of pages, an array of folios, or
1620  * an array of encoded page pointers.
1621  *
1622  * The transparent union syntax for this kind of "any of these
1623  * argument types" is all kinds of ugly, so look away.
1624  */
1625 typedef union {
1626 	struct page **pages;
1627 	struct folio **folios;
1628 	struct encoded_page **encoded_pages;
1629 } release_pages_arg __attribute__ ((__transparent_union__));
1630 
1631 void release_pages(release_pages_arg, int nr);
1632 
1633 /**
1634  * folios_put - Decrement the reference count on an array of folios.
1635  * @folios: The folios.
1636  *
1637  * Like folio_put(), but for a batch of folios.  This is more efficient
1638  * than writing the loop yourself as it will optimise the locks which need
1639  * to be taken if the folios are freed.  The folios batch is returned
1640  * empty and ready to be reused for another batch; there is no need to
1641  * reinitialise it.
1642  *
1643  * Context: May be called in process or interrupt context, but not in NMI
1644  * context.  May be called while holding a spinlock.
1645  */
folios_put(struct folio_batch * folios)1646 static inline void folios_put(struct folio_batch *folios)
1647 {
1648 	folios_put_refs(folios, NULL);
1649 }
1650 
put_page(struct page * page)1651 static inline void put_page(struct page *page)
1652 {
1653 	struct folio *folio = page_folio(page);
1654 
1655 	/*
1656 	 * For some devmap managed pages we need to catch refcount transition
1657 	 * from 2 to 1:
1658 	 */
1659 	if (put_devmap_managed_folio_refs(folio, 1))
1660 		return;
1661 	folio_put(folio);
1662 }
1663 
1664 /*
1665  * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
1666  * the page's refcount so that two separate items are tracked: the original page
1667  * reference count, and also a new count of how many pin_user_pages() calls were
1668  * made against the page. ("gup-pinned" is another term for the latter).
1669  *
1670  * With this scheme, pin_user_pages() becomes special: such pages are marked as
1671  * distinct from normal pages. As such, the unpin_user_page() call (and its
1672  * variants) must be used in order to release gup-pinned pages.
1673  *
1674  * Choice of value:
1675  *
1676  * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
1677  * counts with respect to pin_user_pages() and unpin_user_page() becomes
1678  * simpler, due to the fact that adding an even power of two to the page
1679  * refcount has the effect of using only the upper N bits, for the code that
1680  * counts up using the bias value. This means that the lower bits are left for
1681  * the exclusive use of the original code that increments and decrements by one
1682  * (or at least, by much smaller values than the bias value).
1683  *
1684  * Of course, once the lower bits overflow into the upper bits (and this is
1685  * OK, because subtraction recovers the original values), then visual inspection
1686  * no longer suffices to directly view the separate counts. However, for normal
1687  * applications that don't have huge page reference counts, this won't be an
1688  * issue.
1689  *
1690  * Locking: the lockless algorithm described in folio_try_get_rcu()
1691  * provides safe operation for get_user_pages(), folio_mkclean() and
1692  * other calls that race to set up page table entries.
1693  */
1694 #define GUP_PIN_COUNTING_BIAS (1U << 10)
1695 
1696 void unpin_user_page(struct page *page);
1697 void unpin_folio(struct folio *folio);
1698 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1699 				 bool make_dirty);
1700 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1701 				      bool make_dirty);
1702 void unpin_user_pages(struct page **pages, unsigned long npages);
1703 void unpin_user_folio(struct folio *folio, unsigned long npages);
1704 void unpin_folios(struct folio **folios, unsigned long nfolios);
1705 
is_cow_mapping(vm_flags_t flags)1706 static inline bool is_cow_mapping(vm_flags_t flags)
1707 {
1708 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1709 }
1710 
1711 #ifndef CONFIG_MMU
is_nommu_shared_mapping(vm_flags_t flags)1712 static inline bool is_nommu_shared_mapping(vm_flags_t flags)
1713 {
1714 	/*
1715 	 * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
1716 	 * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
1717 	 * a file mapping. R/O MAP_PRIVATE mappings might still modify
1718 	 * underlying memory if ptrace is active, so this is only possible if
1719 	 * ptrace does not apply. Note that there is no mprotect() to upgrade
1720 	 * write permissions later.
1721 	 */
1722 	return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
1723 }
1724 #endif
1725 
1726 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1727 #define SECTION_IN_PAGE_FLAGS
1728 #endif
1729 
1730 /*
1731  * The identification function is mainly used by the buddy allocator for
1732  * determining if two pages could be buddies. We are not really identifying
1733  * the zone since we could be using the section number id if we do not have
1734  * node id available in page flags.
1735  * We only guarantee that it will return the same value for two combinable
1736  * pages in a zone.
1737  */
page_zone_id(struct page * page)1738 static inline int page_zone_id(struct page *page)
1739 {
1740 	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1741 }
1742 
1743 #ifdef NODE_NOT_IN_PAGE_FLAGS
1744 int page_to_nid(const struct page *page);
1745 #else
page_to_nid(const struct page * page)1746 static inline int page_to_nid(const struct page *page)
1747 {
1748 	return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
1749 }
1750 #endif
1751 
folio_nid(const struct folio * folio)1752 static inline int folio_nid(const struct folio *folio)
1753 {
1754 	return page_to_nid(&folio->page);
1755 }
1756 
1757 #ifdef CONFIG_NUMA_BALANCING
1758 /* page access time bits needs to hold at least 4 seconds */
1759 #define PAGE_ACCESS_TIME_MIN_BITS	12
1760 #if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
1761 #define PAGE_ACCESS_TIME_BUCKETS				\
1762 	(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
1763 #else
1764 #define PAGE_ACCESS_TIME_BUCKETS	0
1765 #endif
1766 
1767 #define PAGE_ACCESS_TIME_MASK				\
1768 	(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
1769 
cpu_pid_to_cpupid(int cpu,int pid)1770 static inline int cpu_pid_to_cpupid(int cpu, int pid)
1771 {
1772 	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1773 }
1774 
cpupid_to_pid(int cpupid)1775 static inline int cpupid_to_pid(int cpupid)
1776 {
1777 	return cpupid & LAST__PID_MASK;
1778 }
1779 
cpupid_to_cpu(int cpupid)1780 static inline int cpupid_to_cpu(int cpupid)
1781 {
1782 	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1783 }
1784 
cpupid_to_nid(int cpupid)1785 static inline int cpupid_to_nid(int cpupid)
1786 {
1787 	return cpu_to_node(cpupid_to_cpu(cpupid));
1788 }
1789 
cpupid_pid_unset(int cpupid)1790 static inline bool cpupid_pid_unset(int cpupid)
1791 {
1792 	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1793 }
1794 
cpupid_cpu_unset(int cpupid)1795 static inline bool cpupid_cpu_unset(int cpupid)
1796 {
1797 	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1798 }
1799 
__cpupid_match_pid(pid_t task_pid,int cpupid)1800 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1801 {
1802 	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1803 }
1804 
1805 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1806 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
folio_xchg_last_cpupid(struct folio * folio,int cpupid)1807 static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1808 {
1809 	return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1810 }
1811 
folio_last_cpupid(struct folio * folio)1812 static inline int folio_last_cpupid(struct folio *folio)
1813 {
1814 	return folio->_last_cpupid;
1815 }
page_cpupid_reset_last(struct page * page)1816 static inline void page_cpupid_reset_last(struct page *page)
1817 {
1818 	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1819 }
1820 #else
folio_last_cpupid(struct folio * folio)1821 static inline int folio_last_cpupid(struct folio *folio)
1822 {
1823 	return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1824 }
1825 
1826 int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
1827 
page_cpupid_reset_last(struct page * page)1828 static inline void page_cpupid_reset_last(struct page *page)
1829 {
1830 	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1831 }
1832 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
1833 
folio_xchg_access_time(struct folio * folio,int time)1834 static inline int folio_xchg_access_time(struct folio *folio, int time)
1835 {
1836 	int last_time;
1837 
1838 	last_time = folio_xchg_last_cpupid(folio,
1839 					   time >> PAGE_ACCESS_TIME_BUCKETS);
1840 	return last_time << PAGE_ACCESS_TIME_BUCKETS;
1841 }
1842 
vma_set_access_pid_bit(struct vm_area_struct * vma)1843 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1844 {
1845 	unsigned int pid_bit;
1846 
1847 	pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
1848 	if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
1849 		__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
1850 	}
1851 }
1852 
1853 bool folio_use_access_time(struct folio *folio);
1854 #else /* !CONFIG_NUMA_BALANCING */
folio_xchg_last_cpupid(struct folio * folio,int cpupid)1855 static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1856 {
1857 	return folio_nid(folio); /* XXX */
1858 }
1859 
folio_xchg_access_time(struct folio * folio,int time)1860 static inline int folio_xchg_access_time(struct folio *folio, int time)
1861 {
1862 	return 0;
1863 }
1864 
folio_last_cpupid(struct folio * folio)1865 static inline int folio_last_cpupid(struct folio *folio)
1866 {
1867 	return folio_nid(folio); /* XXX */
1868 }
1869 
cpupid_to_nid(int cpupid)1870 static inline int cpupid_to_nid(int cpupid)
1871 {
1872 	return -1;
1873 }
1874 
cpupid_to_pid(int cpupid)1875 static inline int cpupid_to_pid(int cpupid)
1876 {
1877 	return -1;
1878 }
1879 
cpupid_to_cpu(int cpupid)1880 static inline int cpupid_to_cpu(int cpupid)
1881 {
1882 	return -1;
1883 }
1884 
cpu_pid_to_cpupid(int nid,int pid)1885 static inline int cpu_pid_to_cpupid(int nid, int pid)
1886 {
1887 	return -1;
1888 }
1889 
cpupid_pid_unset(int cpupid)1890 static inline bool cpupid_pid_unset(int cpupid)
1891 {
1892 	return true;
1893 }
1894 
page_cpupid_reset_last(struct page * page)1895 static inline void page_cpupid_reset_last(struct page *page)
1896 {
1897 }
1898 
cpupid_match_pid(struct task_struct * task,int cpupid)1899 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1900 {
1901 	return false;
1902 }
1903 
vma_set_access_pid_bit(struct vm_area_struct * vma)1904 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1905 {
1906 }
folio_use_access_time(struct folio * folio)1907 static inline bool folio_use_access_time(struct folio *folio)
1908 {
1909 	return false;
1910 }
1911 #endif /* CONFIG_NUMA_BALANCING */
1912 
1913 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
1914 
1915 /*
1916  * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
1917  * setting tags for all pages to native kernel tag value 0xff, as the default
1918  * value 0x00 maps to 0xff.
1919  */
1920 
page_kasan_tag(const struct page * page)1921 static inline u8 page_kasan_tag(const struct page *page)
1922 {
1923 	u8 tag = KASAN_TAG_KERNEL;
1924 
1925 	if (kasan_enabled()) {
1926 		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1927 		tag ^= 0xff;
1928 	}
1929 
1930 	return tag;
1931 }
1932 
page_kasan_tag_set(struct page * page,u8 tag)1933 static inline void page_kasan_tag_set(struct page *page, u8 tag)
1934 {
1935 	unsigned long old_flags, flags;
1936 
1937 	if (!kasan_enabled())
1938 		return;
1939 
1940 	tag ^= 0xff;
1941 	old_flags = READ_ONCE(page->flags);
1942 	do {
1943 		flags = old_flags;
1944 		flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1945 		flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1946 	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
1947 }
1948 
page_kasan_tag_reset(struct page * page)1949 static inline void page_kasan_tag_reset(struct page *page)
1950 {
1951 	if (kasan_enabled())
1952 		page_kasan_tag_set(page, KASAN_TAG_KERNEL);
1953 }
1954 
1955 #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1956 
page_kasan_tag(const struct page * page)1957 static inline u8 page_kasan_tag(const struct page *page)
1958 {
1959 	return 0xff;
1960 }
1961 
page_kasan_tag_set(struct page * page,u8 tag)1962 static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
page_kasan_tag_reset(struct page * page)1963 static inline void page_kasan_tag_reset(struct page *page) { }
1964 
1965 #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1966 
page_zone(const struct page * page)1967 static inline struct zone *page_zone(const struct page *page)
1968 {
1969 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1970 }
1971 
page_pgdat(const struct page * page)1972 static inline pg_data_t *page_pgdat(const struct page *page)
1973 {
1974 	return NODE_DATA(page_to_nid(page));
1975 }
1976 
folio_zone(const struct folio * folio)1977 static inline struct zone *folio_zone(const struct folio *folio)
1978 {
1979 	return page_zone(&folio->page);
1980 }
1981 
folio_pgdat(const struct folio * folio)1982 static inline pg_data_t *folio_pgdat(const struct folio *folio)
1983 {
1984 	return page_pgdat(&folio->page);
1985 }
1986 
1987 #ifdef SECTION_IN_PAGE_FLAGS
set_page_section(struct page * page,unsigned long section)1988 static inline void set_page_section(struct page *page, unsigned long section)
1989 {
1990 	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1991 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1992 }
1993 
page_to_section(const struct page * page)1994 static inline unsigned long page_to_section(const struct page *page)
1995 {
1996 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1997 }
1998 #endif
1999 
2000 /**
2001  * folio_pfn - Return the Page Frame Number of a folio.
2002  * @folio: The folio.
2003  *
2004  * A folio may contain multiple pages.  The pages have consecutive
2005  * Page Frame Numbers.
2006  *
2007  * Return: The Page Frame Number of the first page in the folio.
2008  */
folio_pfn(struct folio * folio)2009 static inline unsigned long folio_pfn(struct folio *folio)
2010 {
2011 	return page_to_pfn(&folio->page);
2012 }
2013 
pfn_folio(unsigned long pfn)2014 static inline struct folio *pfn_folio(unsigned long pfn)
2015 {
2016 	return page_folio(pfn_to_page(pfn));
2017 }
2018 
2019 /**
2020  * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
2021  * @folio: The folio.
2022  *
2023  * This function checks if a folio has been pinned via a call to
2024  * a function in the pin_user_pages() family.
2025  *
2026  * For small folios, the return value is partially fuzzy: false is not fuzzy,
2027  * because it means "definitely not pinned for DMA", but true means "probably
2028  * pinned for DMA, but possibly a false positive due to having at least
2029  * GUP_PIN_COUNTING_BIAS worth of normal folio references".
2030  *
2031  * False positives are OK, because: a) it's unlikely for a folio to
2032  * get that many refcounts, and b) all the callers of this routine are
2033  * expected to be able to deal gracefully with a false positive.
2034  *
2035  * For large folios, the result will be exactly correct. That's because
2036  * we have more tracking data available: the _pincount field is used
2037  * instead of the GUP_PIN_COUNTING_BIAS scheme.
2038  *
2039  * For more information, please see Documentation/core-api/pin_user_pages.rst.
2040  *
2041  * Return: True, if it is likely that the folio has been "dma-pinned".
2042  * False, if the folio is definitely not dma-pinned.
2043  */
folio_maybe_dma_pinned(struct folio * folio)2044 static inline bool folio_maybe_dma_pinned(struct folio *folio)
2045 {
2046 	if (folio_test_large(folio))
2047 		return atomic_read(&folio->_pincount) > 0;
2048 
2049 	/*
2050 	 * folio_ref_count() is signed. If that refcount overflows, then
2051 	 * folio_ref_count() returns a negative value, and callers will avoid
2052 	 * further incrementing the refcount.
2053 	 *
2054 	 * Here, for that overflow case, use the sign bit to count a little
2055 	 * bit higher via unsigned math, and thus still get an accurate result.
2056 	 */
2057 	return ((unsigned int)folio_ref_count(folio)) >=
2058 		GUP_PIN_COUNTING_BIAS;
2059 }
2060 
2061 /*
2062  * This should most likely only be called during fork() to see whether we
2063  * should break the cow immediately for an anon page on the src mm.
2064  *
2065  * The caller has to hold the PT lock and the vma->vm_mm->->write_protect_seq.
2066  */
folio_needs_cow_for_dma(struct vm_area_struct * vma,struct folio * folio)2067 static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
2068 					  struct folio *folio)
2069 {
2070 	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
2071 
2072 	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
2073 		return false;
2074 
2075 	return folio_maybe_dma_pinned(folio);
2076 }
2077 
2078 /**
2079  * is_zero_page - Query if a page is a zero page
2080  * @page: The page to query
2081  *
2082  * This returns true if @page is one of the permanent zero pages.
2083  */
is_zero_page(const struct page * page)2084 static inline bool is_zero_page(const struct page *page)
2085 {
2086 	return is_zero_pfn(page_to_pfn(page));
2087 }
2088 
2089 /**
2090  * is_zero_folio - Query if a folio is a zero page
2091  * @folio: The folio to query
2092  *
2093  * This returns true if @folio is one of the permanent zero pages.
2094  */
is_zero_folio(const struct folio * folio)2095 static inline bool is_zero_folio(const struct folio *folio)
2096 {
2097 	return is_zero_page(&folio->page);
2098 }
2099 
2100 /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
2101 #ifdef CONFIG_MIGRATION
2102 extern void _trace_android_vh_mm_customize_longterm_pinnable(struct folio *folio,
2103 		bool *is_longterm_pinnable);
2104 
folio_is_longterm_pinnable(struct folio * folio)2105 static inline bool folio_is_longterm_pinnable(struct folio *folio)
2106 {
2107 	bool is_longterm_pinnable = false;
2108 
2109 	_trace_android_vh_mm_customize_longterm_pinnable(folio, &is_longterm_pinnable);
2110 	if (is_longterm_pinnable)
2111 		return true;
2112 
2113 #ifdef CONFIG_CMA
2114 	int mt = folio_migratetype(folio);
2115 
2116 	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
2117 		return false;
2118 #endif
2119 	/* The zero page can be "pinned" but gets special handling. */
2120 	if (is_zero_folio(folio))
2121 		return true;
2122 
2123 	/* Coherent device memory must always allow eviction. */
2124 	if (folio_is_device_coherent(folio))
2125 		return false;
2126 
2127 	/* Otherwise, non-movable zone folios can be pinned. */
2128 	return !folio_is_zone_movable(folio);
2129 
2130 }
2131 #else
folio_is_longterm_pinnable(struct folio * folio)2132 static inline bool folio_is_longterm_pinnable(struct folio *folio)
2133 {
2134 	return true;
2135 }
2136 #endif
2137 
set_page_zone(struct page * page,enum zone_type zone)2138 static inline void set_page_zone(struct page *page, enum zone_type zone)
2139 {
2140 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
2141 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
2142 }
2143 
set_page_node(struct page * page,unsigned long node)2144 static inline void set_page_node(struct page *page, unsigned long node)
2145 {
2146 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
2147 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
2148 }
2149 
set_page_links(struct page * page,enum zone_type zone,unsigned long node,unsigned long pfn)2150 static inline void set_page_links(struct page *page, enum zone_type zone,
2151 	unsigned long node, unsigned long pfn)
2152 {
2153 	set_page_zone(page, zone);
2154 	set_page_node(page, node);
2155 #ifdef SECTION_IN_PAGE_FLAGS
2156 	set_page_section(page, pfn_to_section_nr(pfn));
2157 #endif
2158 }
2159 
2160 /**
2161  * folio_nr_pages - The number of pages in the folio.
2162  * @folio: The folio.
2163  *
2164  * Return: A positive power of two.
2165  */
folio_nr_pages(const struct folio * folio)2166 static inline long folio_nr_pages(const struct folio *folio)
2167 {
2168 	if (!folio_test_large(folio))
2169 		return 1;
2170 #ifdef CONFIG_64BIT
2171 	return folio->_folio_nr_pages;
2172 #else
2173 	return 1L << (folio->_flags_1 & 0xff);
2174 #endif
2175 }
2176 
2177 /* Only hugetlbfs can allocate folios larger than MAX_ORDER */
2178 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
2179 #define MAX_FOLIO_NR_PAGES	(1UL << PUD_ORDER)
2180 #else
2181 #define MAX_FOLIO_NR_PAGES	MAX_ORDER_NR_PAGES
2182 #endif
2183 
2184 /*
2185  * compound_nr() returns the number of pages in this potentially compound
2186  * page.  compound_nr() can be called on a tail page, and is defined to
2187  * return 1 in that case.
2188  */
compound_nr(struct page * page)2189 static inline unsigned long compound_nr(struct page *page)
2190 {
2191 	struct folio *folio = (struct folio *)page;
2192 
2193 	if (!test_bit(PG_head, &folio->flags))
2194 		return 1;
2195 #ifdef CONFIG_64BIT
2196 	return folio->_folio_nr_pages;
2197 #else
2198 	return 1L << (folio->_flags_1 & 0xff);
2199 #endif
2200 }
2201 
2202 /**
2203  * thp_nr_pages - The number of regular pages in this huge page.
2204  * @page: The head page of a huge page.
2205  */
thp_nr_pages(struct page * page)2206 static inline int thp_nr_pages(struct page *page)
2207 {
2208 	return folio_nr_pages((struct folio *)page);
2209 }
2210 
2211 /**
2212  * folio_next - Move to the next physical folio.
2213  * @folio: The folio we're currently operating on.
2214  *
2215  * If you have physically contiguous memory which may span more than
2216  * one folio (eg a &struct bio_vec), use this function to move from one
2217  * folio to the next.  Do not use it if the memory is only virtually
2218  * contiguous as the folios are almost certainly not adjacent to each
2219  * other.  This is the folio equivalent to writing ``page++``.
2220  *
2221  * Context: We assume that the folios are refcounted and/or locked at a
2222  * higher level and do not adjust the reference counts.
2223  * Return: The next struct folio.
2224  */
folio_next(struct folio * folio)2225 static inline struct folio *folio_next(struct folio *folio)
2226 {
2227 	return (struct folio *)folio_page(folio, folio_nr_pages(folio));
2228 }
2229 
2230 /**
2231  * folio_shift - The size of the memory described by this folio.
2232  * @folio: The folio.
2233  *
2234  * A folio represents a number of bytes which is a power-of-two in size.
2235  * This function tells you which power-of-two the folio is.  See also
2236  * folio_size() and folio_order().
2237  *
2238  * Context: The caller should have a reference on the folio to prevent
2239  * it from being split.  It is not necessary for the folio to be locked.
2240  * Return: The base-2 logarithm of the size of this folio.
2241  */
folio_shift(const struct folio * folio)2242 static inline unsigned int folio_shift(const struct folio *folio)
2243 {
2244 	return PAGE_SHIFT + folio_order(folio);
2245 }
2246 
2247 /**
2248  * folio_size - The number of bytes in a folio.
2249  * @folio: The folio.
2250  *
2251  * Context: The caller should have a reference on the folio to prevent
2252  * it from being split.  It is not necessary for the folio to be locked.
2253  * Return: The number of bytes in this folio.
2254  */
folio_size(const struct folio * folio)2255 static inline size_t folio_size(const struct folio *folio)
2256 {
2257 	return PAGE_SIZE << folio_order(folio);
2258 }
2259 
2260 /**
2261  * folio_likely_mapped_shared - Estimate if the folio is mapped into the page
2262  *				tables of more than one MM
2263  * @folio: The folio.
2264  *
2265  * This function checks if the folio is currently mapped into more than one
2266  * MM ("mapped shared"), or if the folio is only mapped into a single MM
2267  * ("mapped exclusively").
2268  *
2269  * For KSM folios, this function also returns "mapped shared" when a folio is
2270  * mapped multiple times into the same MM, because the individual page mappings
2271  * are independent.
2272  *
2273  * As precise information is not easily available for all folios, this function
2274  * estimates the number of MMs ("sharers") that are currently mapping a folio
2275  * using the number of times the first page of the folio is currently mapped
2276  * into page tables.
2277  *
2278  * For small anonymous folios and anonymous hugetlb folios, the return
2279  * value will be exactly correct: non-KSM folios can only be mapped at most once
2280  * into an MM, and they cannot be partially mapped. KSM folios are
2281  * considered shared even if mapped multiple times into the same MM.
2282  *
2283  * For other folios, the result can be fuzzy:
2284  *    #. For partially-mappable large folios (THP), the return value can wrongly
2285  *       indicate "mapped exclusively" (false negative) when the folio is
2286  *       only partially mapped into at least one MM.
2287  *    #. For pagecache folios (including hugetlb), the return value can wrongly
2288  *       indicate "mapped shared" (false positive) when two VMAs in the same MM
2289  *       cover the same file range.
2290  *
2291  * Further, this function only considers current page table mappings that
2292  * are tracked using the folio mapcount(s).
2293  *
2294  * This function does not consider:
2295  *    #. If the folio might get mapped in the (near) future (e.g., swapcache,
2296  *       pagecache, temporary unmapping for migration).
2297  *    #. If the folio is mapped differently (VM_PFNMAP).
2298  *    #. If hugetlb page table sharing applies. Callers might want to check
2299  *       hugetlb_pmd_shared().
2300  *
2301  * Return: Whether the folio is estimated to be mapped into more than one MM.
2302  */
folio_likely_mapped_shared(struct folio * folio)2303 static inline bool folio_likely_mapped_shared(struct folio *folio)
2304 {
2305 	int mapcount = folio_mapcount(folio);
2306 
2307 	/* Only partially-mappable folios require more care. */
2308 	if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
2309 		return mapcount > 1;
2310 
2311 	/* A single mapping implies "mapped exclusively". */
2312 	if (mapcount <= 1)
2313 		return false;
2314 
2315 	/* If any page is mapped more than once we treat it "mapped shared". */
2316 	if (folio_entire_mapcount(folio) || mapcount > folio_nr_pages(folio))
2317 		return true;
2318 
2319 	/* Let's guess based on the first subpage. */
2320 	return atomic_read(&folio->_mapcount) > 0;
2321 }
2322 
2323 /**
2324  * folio_expected_ref_count - calculate the expected folio refcount
2325  * @folio: the folio
2326  *
2327  * Calculate the expected folio refcount, taking references from the pagecache,
2328  * swapcache, PG_private and page table mappings into account. Useful in
2329  * combination with folio_ref_count() to detect unexpected references (e.g.,
2330  * GUP or other temporary references).
2331  *
2332  * Does currently not consider references from the LRU cache. If the folio
2333  * was isolated from the LRU (which is the case during migration or split),
2334  * the LRU cache does not apply.
2335  *
2336  * Calling this function on an unmapped folio -- !folio_mapped() -- that is
2337  * locked will return a stable result.
2338  *
2339  * Calling this function on a mapped folio will not result in a stable result,
2340  * because nothing stops additional page table mappings from coming (e.g.,
2341  * fork()) or going (e.g., munmap()).
2342  *
2343  * Calling this function without the folio lock will also not result in a
2344  * stable result: for example, the folio might get dropped from the swapcache
2345  * concurrently.
2346  *
2347  * However, even when called without the folio lock or on a mapped folio,
2348  * this function can be used to detect unexpected references early (for example,
2349  * if it makes sense to even lock the folio and unmap it).
2350  *
2351  * The caller must add any reference (e.g., from folio_try_get()) it might be
2352  * holding itself to the result.
2353  *
2354  * Returns the expected folio refcount.
2355  */
folio_expected_ref_count(const struct folio * folio)2356 static inline int folio_expected_ref_count(const struct folio *folio)
2357 {
2358 	const int order = folio_order(folio);
2359 	int ref_count = 0;
2360 
2361 	if (WARN_ON_ONCE(folio_test_slab(folio)))
2362 		return 0;
2363 
2364 	if (folio_test_anon(folio)) {
2365 		/* One reference per page from the swapcache. */
2366 		ref_count += folio_test_swapcache(folio) << order;
2367 	} else if (!((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS)) {
2368 		/* One reference per page from the pagecache. */
2369 		ref_count += !!folio->mapping << order;
2370 		/* One reference from PG_private. */
2371 		ref_count += folio_test_private(folio);
2372 	}
2373 
2374 	/* One reference per page table mapping. */
2375 	return ref_count + folio_mapcount(folio);
2376 }
2377 
2378 #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
arch_make_folio_accessible(struct folio * folio)2379 static inline int arch_make_folio_accessible(struct folio *folio)
2380 {
2381 	return 0;
2382 }
2383 #endif
2384 
2385 /*
2386  * Some inline functions in vmstat.h depend on page_zone()
2387  */
2388 #include <linux/vmstat.h>
2389 
2390 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
2391 #define HASHED_PAGE_VIRTUAL
2392 #endif
2393 
2394 #if defined(WANT_PAGE_VIRTUAL)
page_address(const struct page * page)2395 static inline void *page_address(const struct page *page)
2396 {
2397 	return page->virtual;
2398 }
set_page_address(struct page * page,void * address)2399 static inline void set_page_address(struct page *page, void *address)
2400 {
2401 	page->virtual = address;
2402 }
2403 #define page_address_init()  do { } while(0)
2404 #endif
2405 
2406 #if defined(HASHED_PAGE_VIRTUAL)
2407 void *page_address(const struct page *page);
2408 void set_page_address(struct page *page, void *virtual);
2409 void page_address_init(void);
2410 #endif
2411 
lowmem_page_address(const struct page * page)2412 static __always_inline void *lowmem_page_address(const struct page *page)
2413 {
2414 	return page_to_virt(page);
2415 }
2416 
2417 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
2418 #define page_address(page) lowmem_page_address(page)
2419 #define set_page_address(page, address)  do { } while(0)
2420 #define page_address_init()  do { } while(0)
2421 #endif
2422 
folio_address(const struct folio * folio)2423 static inline void *folio_address(const struct folio *folio)
2424 {
2425 	return page_address(&folio->page);
2426 }
2427 
2428 /*
2429  * Return true only if the page has been allocated with
2430  * ALLOC_NO_WATERMARKS and the low watermark was not
2431  * met implying that the system is under some pressure.
2432  */
page_is_pfmemalloc(const struct page * page)2433 static inline bool page_is_pfmemalloc(const struct page *page)
2434 {
2435 	/*
2436 	 * lru.next has bit 1 set if the page is allocated from the
2437 	 * pfmemalloc reserves.  Callers may simply overwrite it if
2438 	 * they do not need to preserve that information.
2439 	 */
2440 	return (uintptr_t)page->lru.next & BIT(1);
2441 }
2442 
2443 /*
2444  * Return true only if the folio has been allocated with
2445  * ALLOC_NO_WATERMARKS and the low watermark was not
2446  * met implying that the system is under some pressure.
2447  */
folio_is_pfmemalloc(const struct folio * folio)2448 static inline bool folio_is_pfmemalloc(const struct folio *folio)
2449 {
2450 	/*
2451 	 * lru.next has bit 1 set if the page is allocated from the
2452 	 * pfmemalloc reserves.  Callers may simply overwrite it if
2453 	 * they do not need to preserve that information.
2454 	 */
2455 	return (uintptr_t)folio->lru.next & BIT(1);
2456 }
2457 
2458 /*
2459  * Only to be called by the page allocator on a freshly allocated
2460  * page.
2461  */
set_page_pfmemalloc(struct page * page)2462 static inline void set_page_pfmemalloc(struct page *page)
2463 {
2464 	page->lru.next = (void *)BIT(1);
2465 }
2466 
clear_page_pfmemalloc(struct page * page)2467 static inline void clear_page_pfmemalloc(struct page *page)
2468 {
2469 	page->lru.next = NULL;
2470 }
2471 
2472 /*
2473  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
2474  */
2475 extern void pagefault_out_of_memory(void);
2476 
2477 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
2478 #define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))
2479 #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
2480 
2481 /*
2482  * Parameter block passed down to zap_pte_range in exceptional cases.
2483  */
2484 struct zap_details {
2485 	struct folio *single_folio;	/* Locked folio to be unmapped */
2486 	bool even_cows;			/* Zap COWed private pages too? */
2487 	zap_flags_t zap_flags;		/* Extra flags for zapping */
2488 };
2489 
2490 /*
2491  * Whether to drop the pte markers, for example, the uffd-wp information for
2492  * file-backed memory.  This should only be specified when we will completely
2493  * drop the page in the mm, either by truncation or unmapping of the vma.  By
2494  * default, the flag is not set.
2495  */
2496 #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
2497 /* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
2498 #define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
2499 
2500 #ifdef CONFIG_SCHED_MM_CID
2501 void sched_mm_cid_before_execve(struct task_struct *t);
2502 void sched_mm_cid_after_execve(struct task_struct *t);
2503 void sched_mm_cid_fork(struct task_struct *t);
2504 void sched_mm_cid_exit_signals(struct task_struct *t);
task_mm_cid(struct task_struct * t)2505 static inline int task_mm_cid(struct task_struct *t)
2506 {
2507 	return t->mm_cid;
2508 }
2509 #else
sched_mm_cid_before_execve(struct task_struct * t)2510 static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
sched_mm_cid_after_execve(struct task_struct * t)2511 static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
sched_mm_cid_fork(struct task_struct * t)2512 static inline void sched_mm_cid_fork(struct task_struct *t) { }
sched_mm_cid_exit_signals(struct task_struct * t)2513 static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
task_mm_cid(struct task_struct * t)2514 static inline int task_mm_cid(struct task_struct *t)
2515 {
2516 	/*
2517 	 * Use the processor id as a fall-back when the mm cid feature is
2518 	 * disabled. This provides functional per-cpu data structure accesses
2519 	 * in user-space, althrough it won't provide the memory usage benefits.
2520 	 */
2521 	return raw_smp_processor_id();
2522 }
2523 #endif
2524 
2525 #ifdef CONFIG_MMU
2526 extern bool can_do_mlock(void);
2527 #else
can_do_mlock(void)2528 static inline bool can_do_mlock(void) { return false; }
2529 #endif
2530 extern int user_shm_lock(size_t, struct ucounts *);
2531 extern void user_shm_unlock(size_t, struct ucounts *);
2532 
2533 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
2534 			     pte_t pte);
2535 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2536 			     pte_t pte);
2537 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
2538 				  unsigned long addr, pmd_t pmd);
2539 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2540 				pmd_t pmd);
2541 
2542 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2543 		  unsigned long size);
2544 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2545 			   unsigned long size, struct zap_details *details);
zap_vma_pages(struct vm_area_struct * vma)2546 static inline void zap_vma_pages(struct vm_area_struct *vma)
2547 {
2548 	zap_page_range_single(vma, vma->vm_start,
2549 			      vma->vm_end - vma->vm_start, NULL);
2550 }
2551 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
2552 		struct vm_area_struct *start_vma, unsigned long start,
2553 		unsigned long end, unsigned long tree_end, bool mm_wr_locked);
2554 
2555 struct mmu_notifier_range;
2556 
2557 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
2558 		unsigned long end, unsigned long floor, unsigned long ceiling);
2559 int
2560 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
2561 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2562 			void *buf, int len, int write);
2563 
2564 struct follow_pfnmap_args {
2565 	/**
2566 	 * Inputs:
2567 	 * @vma: Pointer to @vm_area_struct struct
2568 	 * @address: the virtual address to walk
2569 	 */
2570 	struct vm_area_struct *vma;
2571 	unsigned long address;
2572 	/**
2573 	 * Internals:
2574 	 *
2575 	 * The caller shouldn't touch any of these.
2576 	 */
2577 	spinlock_t *lock;
2578 	pte_t *ptep;
2579 	/**
2580 	 * Outputs:
2581 	 *
2582 	 * @pfn: the PFN of the address
2583 	 * @pgprot: the pgprot_t of the mapping
2584 	 * @writable: whether the mapping is writable
2585 	 * @special: whether the mapping is a special mapping (real PFN maps)
2586 	 */
2587 	unsigned long pfn;
2588 	pgprot_t pgprot;
2589 	bool writable;
2590 	bool special;
2591 };
2592 int follow_pfnmap_start(struct follow_pfnmap_args *args);
2593 void follow_pfnmap_end(struct follow_pfnmap_args *args);
2594 
2595 extern void truncate_pagecache(struct inode *inode, loff_t new);
2596 extern void truncate_setsize(struct inode *inode, loff_t newsize);
2597 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
2598 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
2599 int generic_error_remove_folio(struct address_space *mapping,
2600 		struct folio *folio);
2601 
2602 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
2603 		unsigned long address, struct pt_regs *regs);
2604 
2605 #ifdef CONFIG_MMU
2606 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2607 				  unsigned long address, unsigned int flags,
2608 				  struct pt_regs *regs);
2609 extern int fixup_user_fault(struct mm_struct *mm,
2610 			    unsigned long address, unsigned int fault_flags,
2611 			    bool *unlocked);
2612 void unmap_mapping_pages(struct address_space *mapping,
2613 		pgoff_t start, pgoff_t nr, bool even_cows);
2614 void unmap_mapping_range(struct address_space *mapping,
2615 		loff_t const holebegin, loff_t const holelen, int even_cows);
2616 #else
handle_mm_fault(struct vm_area_struct * vma,unsigned long address,unsigned int flags,struct pt_regs * regs)2617 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2618 					 unsigned long address, unsigned int flags,
2619 					 struct pt_regs *regs)
2620 {
2621 	/* should never happen if there's no MMU */
2622 	BUG();
2623 	return VM_FAULT_SIGBUS;
2624 }
fixup_user_fault(struct mm_struct * mm,unsigned long address,unsigned int fault_flags,bool * unlocked)2625 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
2626 		unsigned int fault_flags, bool *unlocked)
2627 {
2628 	/* should never happen if there's no MMU */
2629 	BUG();
2630 	return -EFAULT;
2631 }
unmap_mapping_pages(struct address_space * mapping,pgoff_t start,pgoff_t nr,bool even_cows)2632 static inline void unmap_mapping_pages(struct address_space *mapping,
2633 		pgoff_t start, pgoff_t nr, bool even_cows) { }
unmap_mapping_range(struct address_space * mapping,loff_t const holebegin,loff_t const holelen,int even_cows)2634 static inline void unmap_mapping_range(struct address_space *mapping,
2635 		loff_t const holebegin, loff_t const holelen, int even_cows) { }
2636 #endif
2637 
unmap_shared_mapping_range(struct address_space * mapping,loff_t const holebegin,loff_t const holelen)2638 static inline void unmap_shared_mapping_range(struct address_space *mapping,
2639 		loff_t const holebegin, loff_t const holelen)
2640 {
2641 	unmap_mapping_range(mapping, holebegin, holelen, 0);
2642 }
2643 
2644 static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
2645 						unsigned long addr);
2646 
2647 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
2648 		void *buf, int len, unsigned int gup_flags);
2649 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2650 		void *buf, int len, unsigned int gup_flags);
2651 
2652 long get_user_pages_remote(struct mm_struct *mm,
2653 			   unsigned long start, unsigned long nr_pages,
2654 			   unsigned int gup_flags, struct page **pages,
2655 			   int *locked);
2656 long pin_user_pages_remote(struct mm_struct *mm,
2657 			   unsigned long start, unsigned long nr_pages,
2658 			   unsigned int gup_flags, struct page **pages,
2659 			   int *locked);
2660 
2661 /*
2662  * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
2663  */
get_user_page_vma_remote(struct mm_struct * mm,unsigned long addr,int gup_flags,struct vm_area_struct ** vmap)2664 static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
2665 						    unsigned long addr,
2666 						    int gup_flags,
2667 						    struct vm_area_struct **vmap)
2668 {
2669 	struct page *page;
2670 	struct vm_area_struct *vma;
2671 	int got;
2672 
2673 	if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
2674 		return ERR_PTR(-EINVAL);
2675 
2676 	got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
2677 
2678 	if (got < 0)
2679 		return ERR_PTR(got);
2680 
2681 	vma = vma_lookup(mm, addr);
2682 	if (WARN_ON_ONCE(!vma)) {
2683 		put_page(page);
2684 		return ERR_PTR(-EINVAL);
2685 	}
2686 
2687 	*vmap = vma;
2688 	return page;
2689 }
2690 
2691 long get_user_pages(unsigned long start, unsigned long nr_pages,
2692 		    unsigned int gup_flags, struct page **pages);
2693 long pin_user_pages(unsigned long start, unsigned long nr_pages,
2694 		    unsigned int gup_flags, struct page **pages);
2695 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2696 		    struct page **pages, unsigned int gup_flags);
2697 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2698 		    struct page **pages, unsigned int gup_flags);
2699 long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
2700 		      struct folio **folios, unsigned int max_folios,
2701 		      pgoff_t *offset);
2702 
2703 int get_user_pages_fast(unsigned long start, int nr_pages,
2704 			unsigned int gup_flags, struct page **pages);
2705 int pin_user_pages_fast(unsigned long start, int nr_pages,
2706 			unsigned int gup_flags, struct page **pages);
2707 void folio_add_pin(struct folio *folio);
2708 
2709 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
2710 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
2711 			struct task_struct *task, bool bypass_rlim);
2712 
2713 struct kvec;
2714 struct page *get_dump_page(unsigned long addr);
2715 
2716 bool folio_mark_dirty(struct folio *folio);
2717 bool set_page_dirty(struct page *page);
2718 int set_page_dirty_lock(struct page *page);
2719 
2720 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
2721 
2722 /*
2723  * Flags used by change_protection().  For now we make it a bitmap so
2724  * that we can pass in multiple flags just like parameters.  However
2725  * for now all the callers are only use one of the flags at the same
2726  * time.
2727  */
2728 /*
2729  * Whether we should manually check if we can map individual PTEs writable,
2730  * because something (e.g., COW, uffd-wp) blocks that from happening for all
2731  * PTEs automatically in a writable mapping.
2732  */
2733 #define  MM_CP_TRY_CHANGE_WRITABLE	   (1UL << 0)
2734 /* Whether this protection change is for NUMA hints */
2735 #define  MM_CP_PROT_NUMA                   (1UL << 1)
2736 /* Whether this change is for write protecting */
2737 #define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
2738 #define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
2739 #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
2740 					    MM_CP_UFFD_WP_RESOLVE)
2741 
2742 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
2743 			     pte_t pte);
2744 extern long change_protection(struct mmu_gather *tlb,
2745 			      struct vm_area_struct *vma, unsigned long start,
2746 			      unsigned long end, unsigned long cp_flags);
2747 extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
2748 	  struct vm_area_struct *vma, struct vm_area_struct **pprev,
2749 	  unsigned long start, unsigned long end, unsigned long newflags);
2750 
2751 /*
2752  * doesn't attempt to fault and will return short.
2753  */
2754 int get_user_pages_fast_only(unsigned long start, int nr_pages,
2755 			     unsigned int gup_flags, struct page **pages);
2756 
get_user_page_fast_only(unsigned long addr,unsigned int gup_flags,struct page ** pagep)2757 static inline bool get_user_page_fast_only(unsigned long addr,
2758 			unsigned int gup_flags, struct page **pagep)
2759 {
2760 	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
2761 }
2762 /*
2763  * per-process(per-mm_struct) statistics.
2764  */
get_mm_counter(struct mm_struct * mm,int member)2765 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
2766 {
2767 	return percpu_counter_read_positive(&mm->rss_stat[member]);
2768 }
2769 
get_mm_counter_sum(struct mm_struct * mm,int member)2770 static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
2771 {
2772 	return percpu_counter_sum_positive(&mm->rss_stat[member]);
2773 }
2774 
2775 void mm_trace_rss_stat(struct mm_struct *mm, int member);
2776 
add_mm_counter(struct mm_struct * mm,int member,long value)2777 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
2778 {
2779 	percpu_counter_add(&mm->rss_stat[member], value);
2780 
2781 	mm_trace_rss_stat(mm, member);
2782 }
2783 
inc_mm_counter(struct mm_struct * mm,int member)2784 static inline void inc_mm_counter(struct mm_struct *mm, int member)
2785 {
2786 	percpu_counter_inc(&mm->rss_stat[member]);
2787 
2788 	mm_trace_rss_stat(mm, member);
2789 }
2790 
dec_mm_counter(struct mm_struct * mm,int member)2791 static inline void dec_mm_counter(struct mm_struct *mm, int member)
2792 {
2793 	percpu_counter_dec(&mm->rss_stat[member]);
2794 
2795 	mm_trace_rss_stat(mm, member);
2796 }
2797 
2798 /* Optimized variant when folio is already known not to be anon */
mm_counter_file(struct folio * folio)2799 static inline int mm_counter_file(struct folio *folio)
2800 {
2801 	if (folio_test_swapbacked(folio))
2802 		return MM_SHMEMPAGES;
2803 	return MM_FILEPAGES;
2804 }
2805 
mm_counter(struct folio * folio)2806 static inline int mm_counter(struct folio *folio)
2807 {
2808 	if (folio_test_anon(folio))
2809 		return MM_ANONPAGES;
2810 	return mm_counter_file(folio);
2811 }
2812 
get_mm_rss(struct mm_struct * mm)2813 static inline unsigned long get_mm_rss(struct mm_struct *mm)
2814 {
2815 	return get_mm_counter(mm, MM_FILEPAGES) +
2816 		get_mm_counter(mm, MM_ANONPAGES) +
2817 		get_mm_counter(mm, MM_SHMEMPAGES);
2818 }
2819 
get_mm_hiwater_rss(struct mm_struct * mm)2820 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2821 {
2822 	return max(mm->hiwater_rss, get_mm_rss(mm));
2823 }
2824 
get_mm_hiwater_vm(struct mm_struct * mm)2825 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2826 {
2827 	return max(mm->hiwater_vm, mm->total_vm);
2828 }
2829 
update_hiwater_rss(struct mm_struct * mm)2830 static inline void update_hiwater_rss(struct mm_struct *mm)
2831 {
2832 	unsigned long _rss = get_mm_rss(mm);
2833 
2834 	if ((mm)->hiwater_rss < _rss)
2835 		(mm)->hiwater_rss = _rss;
2836 }
2837 
update_hiwater_vm(struct mm_struct * mm)2838 static inline void update_hiwater_vm(struct mm_struct *mm)
2839 {
2840 	if (mm->hiwater_vm < mm->total_vm)
2841 		mm->hiwater_vm = mm->total_vm;
2842 }
2843 
reset_mm_hiwater_rss(struct mm_struct * mm)2844 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2845 {
2846 	mm->hiwater_rss = get_mm_rss(mm);
2847 }
2848 
setmax_mm_hiwater_rss(unsigned long * maxrss,struct mm_struct * mm)2849 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2850 					 struct mm_struct *mm)
2851 {
2852 	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2853 
2854 	if (*maxrss < hiwater_rss)
2855 		*maxrss = hiwater_rss;
2856 }
2857 
2858 #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
pte_special(pte_t pte)2859 static inline int pte_special(pte_t pte)
2860 {
2861 	return 0;
2862 }
2863 
pte_mkspecial(pte_t pte)2864 static inline pte_t pte_mkspecial(pte_t pte)
2865 {
2866 	return pte;
2867 }
2868 #endif
2869 
2870 #ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
pmd_special(pmd_t pmd)2871 static inline bool pmd_special(pmd_t pmd)
2872 {
2873 	return false;
2874 }
2875 
pmd_mkspecial(pmd_t pmd)2876 static inline pmd_t pmd_mkspecial(pmd_t pmd)
2877 {
2878 	return pmd;
2879 }
2880 #endif	/* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
2881 
2882 #ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
pud_special(pud_t pud)2883 static inline bool pud_special(pud_t pud)
2884 {
2885 	return false;
2886 }
2887 
pud_mkspecial(pud_t pud)2888 static inline pud_t pud_mkspecial(pud_t pud)
2889 {
2890 	return pud;
2891 }
2892 #endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
2893 
2894 #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
pte_devmap(pte_t pte)2895 static inline int pte_devmap(pte_t pte)
2896 {
2897 	return 0;
2898 }
2899 #endif
2900 
2901 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2902 			       spinlock_t **ptl);
get_locked_pte(struct mm_struct * mm,unsigned long addr,spinlock_t ** ptl)2903 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2904 				    spinlock_t **ptl)
2905 {
2906 	pte_t *ptep;
2907 	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
2908 	return ptep;
2909 }

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);

#if defined(CONFIG_MMU)

static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
		NULL : pmd_offset(pud, address);
}
#endif /* CONFIG_MMU */
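
/*
 * Illustrative sketch of allocating a full walk down to the PMD level for
 * an address (a hedged example of the usual pattern, compare mm/memory.c;
 * NULL at any level means the allocation failed):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
 *	pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *
 *	if (!pmd)
 *		return -ENOMEM;
 */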

static inline struct ptdesc *virt_to_ptdesc(const void *x)
{
	return page_ptdesc(virt_to_page(x));
}

static inline void *ptdesc_to_virt(const struct ptdesc *pt)
{
	return page_to_virt(ptdesc_page(pt));
}

static inline void *ptdesc_address(const struct ptdesc *pt)
{
	return folio_address(ptdesc_folio(pt));
}

static inline bool pagetable_is_reserved(struct ptdesc *pt)
{
	return folio_test_reserved(ptdesc_folio(pt));
}

/**
 * pagetable_alloc - Allocate pagetables
 * @gfp:    GFP flags
 * @order:  desired pagetable order
 *
 * pagetable_alloc allocates memory for page tables as well as a page table
 * descriptor to describe that memory.
 *
 * Return: The ptdesc describing the allocated page tables.
 */
static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
{
	struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);

	return page_ptdesc(page);
}
#define pagetable_alloc(...)	alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))

/**
 * pagetable_free - Free pagetables
 * @pt:	The page table descriptor
 *
 * pagetable_free frees the memory of all page tables described by a page
 * table descriptor and the memory for the descriptor itself.
 */
static inline void pagetable_free(struct ptdesc *pt)
{
	struct page *page = ptdesc_page(pt);

	__free_pages(page, compound_order(page));
}
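
/*
 * Illustrative pairing (a sketch, not a prescribed sequence): an order-0
 * page-table allocation and its eventual release:
 *
 *	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
 *
 *	if (!ptdesc)
 *		return NULL;
 *	... install and later retract the table ...
 *	pagetable_free(ptdesc);
 */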

#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
bool ptlock_alloc(struct ptdesc *ptdesc);
void ptlock_free(struct ptdesc *ptdesc);

static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
	return ptdesc->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{
	return true;
}

static inline void ptlock_free(struct ptdesc *ptdesc)
{
}

static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
	return &ptdesc->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
}

static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
	BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
	return ptlock_ptr(virt_to_ptdesc(pte));
}

static inline bool ptlock_init(struct ptdesc *ptdesc)
{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * to 0. Make sure nobody has taken it into use in the meantime.
	 *
	 * This can happen if an arch tries to use slab for page table
	 * allocation: slab code uses page->slab_cache, which shares storage
	 * with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
	if (!ptlock_alloc(ptdesc))
		return false;
	spin_lock_init(ptlock_ptr(ptdesc));
	return true;
}

#else	/* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */

static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
{
	struct folio *folio = ptdesc_folio(ptdesc);

	if (!ptlock_init(ptdesc))
		return false;
	__folio_set_pgtable(folio);
	lruvec_stat_add_folio(folio, NR_PAGETABLE);
	return true;
}

static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
{
	struct folio *folio = ptdesc_folio(ptdesc);

	ptlock_free(ptdesc);
	__folio_clear_pgtable(folio);
	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}
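
/*
 * Illustrative sketch of how an architecture's pte_alloc_one() is expected
 * to pair the constructor and destructor (compare the generic helpers in
 * asm-generic/pgalloc.h; GFP_PGTABLE_USER is the usual gfp choice there):
 *
 *	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_USER, 0);
 *
 *	if (!ptdesc)
 *		return NULL;
 *	if (!pagetable_pte_ctor(ptdesc)) {
 *		pagetable_free(ptdesc);
 *		return NULL;
 *	}
 *	return ptdesc_page(ptdesc);
 */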

pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
	return __pte_offset_map(pmd, addr, NULL);
}

pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, spinlock_t **ptlp);
static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, spinlock_t **ptlp)
{
	pte_t *pte;

	__cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
	return pte;
}

pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, spinlock_t **ptlp);
pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, pmd_t *pmdvalp,
				spinlock_t **ptlp);

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
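
/*
 * Typical usage, as an illustrative sketch: map and lock the PTE, operate
 * on it, then release the mapping and the lock in one go. A NULL return
 * means the page table was racily withdrawn and the caller should retry
 * or bail out:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (!pte)
 *		return 0;
 *	... work on *pte under ptl ...
 *	pte_unmap_unlock(pte, ptl);
 */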

#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd) ?			\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ?	\
		NULL : pte_offset_kernel(pmd, address))

#if defined(CONFIG_SPLIT_PMD_PTLOCKS)

static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);

	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
{
	return page_ptdesc(pmd_pgtable_page(pmd));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_ptdesc(pmd));
}

static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptdesc->pmd_huge_pte = NULL;
#endif
	return ptlock_init(ptdesc);
}

static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
#endif
	ptlock_free(ptdesc);
}

#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);

	spin_lock(ptl);
	return ptl;
}
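
/*
 * Illustrative sketch: pmd_lock() is the PMD-level analogue of
 * pte_offset_map_lock(), used for example by huge-page code paths:
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *
 *	if (pmd_trans_huge(*pmd))
 *		... operate on the huge PMD under ptl ...
 *	spin_unlock(ptl);
 */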

static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
{
	struct folio *folio = ptdesc_folio(ptdesc);

	if (!pmd_ptlock_init(ptdesc))
		return false;
	__folio_set_pgtable(folio);
	ptdesc_pmd_pts_init(ptdesc);
	lruvec_stat_add_folio(folio, NR_PAGETABLE);
	return true;
}

static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
{
	struct folio *folio = ptdesc_folio(ptdesc);

	pmd_ptlock_free(ptdesc);
	__folio_clear_pgtable(folio);
	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}

/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as the PMD locks to make it easier if we decide to.  The VM should not be
 * considered ready to switch to split PUD locks yet; there may be places
 * which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}

static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
{
	struct folio *folio = ptdesc_folio(ptdesc);

	__folio_set_pgtable(folio);
	lruvec_stat_add_folio(folio, NR_PAGETABLE);
}

static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
{
	struct folio *folio = ptdesc_folio(ptdesc);

	__folio_clear_pgtable(folio);
	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}

extern void __init pagecache_init(void);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it is within range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, const char *s);

extern void adjust_managed_page_count(struct page *page, long count);

extern void reserve_bootmem_region(phys_addr_t start,
				   phys_addr_t end, int nid);

/* Free the reserved page into the buddy system, so it gets managed. */
void free_reserved_page(struct page *page);
#define free_highmem_page(page) free_reserved_page(page)

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

static inline void free_reserved_ptdesc(struct ptdesc *pt)
{
	free_reserved_page(ptdesc_page(pt));
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it is within
 * range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel image (initmem)");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

/*
 * Using memblock node mappings, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in an
 * architecture independent manner.
 *
 * An architecture is expected to register the ranges of page frames backed
 * by physical memory with memblock_add[_node]() before calling
 * free_area_init() passing in the PFN each zone ends at. In its most basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 *	memblock_add_node(base, size, nid, MEMBLOCK_NONE)
 * free_area_init(max_zone_pfns);
 */
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);

#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif

extern void mem_init(void);
extern void __init mmap_init(void);

extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
static inline void show_mem(void)
{
	__show_mem(0, NULL, MAX_NR_ZONES - 1);
}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo *val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);

unsigned long randomize_stack_top(unsigned long stack_top);
unsigned long randomize_page(unsigned long start, unsigned long range);

unsigned long
__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		    unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);

static inline unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		  unsigned long pgoff, unsigned long flags)
{
	return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
}

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
	struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
			 unsigned long start, size_t len, struct list_head *uf,
			 bool unlock);
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* This takes the mm semaphore itself */
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
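
/*
 * Illustrative sketch (the limit values are assumptions, not requirements):
 * a top-down search for a free gap of len bytes with no special alignment:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 *	info.length = len;
 *	info.low_limit = PAGE_SIZE;
 *	info.high_limit = mm->mmap_base;
 *	addr = vm_unmapped_area(&info);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 */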

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
int expand_downwards(struct vm_area_struct *vma, unsigned long address);

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/*
 * Look up the first VMA which intersects the interval [start_addr, end_addr)
 * NULL if none.  Assume start_addr < end_addr.
 */
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
			unsigned long start_addr, unsigned long end_addr);

/**
 * vma_lookup() - Find a VMA at a specific address
 * @mm: The process address space.
 * @addr: The user address.
 *
 * Return: The vm_area_struct at the given address, %NULL otherwise.
 */
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}
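
/*
 * Unlike find_vma(), which returns the first VMA ending above @addr even
 * when @addr falls in a gap between mappings, vma_lookup() returns a VMA
 * only if @addr actually lies inside it. Illustrative sketch (assumes
 * mmap_lock is held for reading):
 *
 *	vma = vma_lookup(mm, addr);
 *	if (!vma)
 *		return -EFAULT;
 */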

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = vma_lookup(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

void vma_set_file(struct vm_area_struct *vma, struct file *file);

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
		unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
			struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
		unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

#ifndef io_remap_pfn_range
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long addr, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
}
#endif

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	else if (err == -EHWPOISON)
		return VM_FAULT_HWPOISON;
	return VM_FAULT_SIGBUS;
}
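
/*
 * Illustrative sketch: drivers commonly funnel kernel errnos through
 * vmf_error() at the end of a ->fault() handler (the page source here is
 * just an example):
 *
 *	page = shmem_read_mapping_page(mapping, pgoff);
 *	if (IS_ERR(page))
 *		return vmf_error(PTR_ERR(page));
 *	vmf->page = page;
 *	return 0;
 */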

/*
 * Convert errno to return value for ->page_mkwrite() calls.
 *
 * This should eventually be merged with vmf_error() above, but will need a
 * careful audit of all vmf_error() callers.
 */
static inline vm_fault_t vmf_fs_error(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}

static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}

/*
 * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
 * a (NUMA hinting) fault is required.
 */
static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
					   unsigned int flags)
{
	/*
	 * If callers don't want to honor NUMA hinting faults, no need to
	 * determine if we would actually have to trigger a NUMA hinting fault.
	 */
	if (!(flags & FOLL_HONOR_NUMA_FAULT))
		return true;

	/*
	 * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
	 *
	 * Requiring a fault here even for inaccessible VMAs would mean that
	 * FOLL_FORCE cannot make any progress, because handle_mm_fault()
	 * refuses to process NUMA hinting faults in inaccessible VMAs.
	 */
	return !vma_is_accessible(vma);
}

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
extern int apply_to_existing_page_range(struct mm_struct *mm,
				   unsigned long address, unsigned long size,
				   pte_fn_t fn, void *data);
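
/*
 * Illustrative sketch (the callback and its use are hypothetical): a
 * pte_fn_t is invoked once per PTE across the range, and a nonzero return
 * aborts the walk:
 *
 *	static int count_present(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(ptep_get(pte)))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	err = apply_to_existing_page_range(mm, start, size,
 *					   count_present, &count);
 */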

#ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
extern bool _page_poisoning_enabled_early;
DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
static inline bool page_poisoning_enabled(void)
{
	return _page_poisoning_enabled_early;
}
/*
 * For use in fast paths after init_mem_debugging() has run, or when a
 * false negative result is not harmful when called too early.
 */
static inline bool page_poisoning_enabled_static(void)
{
	return static_branch_unlikely(&_page_poisoning_enabled);
}
static inline void kernel_poison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_poison_pages(page, numpages);
}
static inline void kernel_unpoison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_unpoison_pages(page, numpages);
}
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline bool page_poisoning_enabled_static(void) { return false; }
static inline void __kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif

DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
static inline bool want_init_on_alloc(gfp_t flags)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc))
		return true;
	return flags & __GFP_ZERO;
}

DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				   &init_on_free);
}

extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

static inline bool debug_pagealloc_enabled(void)
{
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
		_debug_pagealloc_enabled_early;
}

/*
 * For use in fast paths after mem_debugging_and_hardening_init() has run,
 * or when a false negative result is not harmful when called too early.
 */
static inline bool debug_pagealloc_enabled_static(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
		return false;

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}

/*
 * To support DEBUG_PAGEALLOC, the architecture must ensure that
 * __kernel_map_pages() never fails.
 */
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_DEBUG_PAGEALLOC
static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 1);
}

static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 0);
}

extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
	if (!debug_guardpage_enabled())
		return false;

	return PageGuard(page);
}

bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
static inline bool set_page_guard(struct zone *zone, struct page *page,
				  unsigned int order)
{
	if (!debug_guardpage_enabled())
		return false;
	return __set_page_guard(zone, page, order);
}

void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
static inline void clear_page_guard(struct zone *zone, struct page *page,
				    unsigned int order)
{
	if (!debug_guardpage_enabled())
		return;
	__clear_page_guard(zone, page, order);
}

#else	/* CONFIG_DEBUG_PAGEALLOC */
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order) {}
#endif	/* CONFIG_DEBUG_PAGEALLOC */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(const struct ctl_table *, int, void *, size_t *,
		loff_t *);
#endif

void drop_slab(void);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif

void *sparse_buffer_alloc(unsigned long size);
struct page *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap);
void pud_init(void *addr);
void pmd_init(void *addr);
void kernel_pte_init(void *addr);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
			    struct vmem_altmap *altmap, struct page *reuse);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
			      struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
		     unsigned long addr, unsigned long next);
int vmemmap_check_pmd(pmd_t *pmd, int node,
		      unsigned long addr, unsigned long next);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap);
int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

static inline void vmem_altmap_free(struct vmem_altmap *altmap,
				    unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
#else
static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	return 0;
}

static inline void vmem_altmap_free(struct vmem_altmap *altmap,
				    unsigned long nr_pfns)
{
}
#endif

#define VMEMMAP_RESERVE_NR	2
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
					  struct dev_pagemap *pgmap)
{
	unsigned long nr_pages;
	unsigned long nr_vmemmap_pages;

	if (!pgmap || !is_power_of_2(sizeof(struct page)))
		return false;

	nr_pages = pgmap_vmemmap_nr(pgmap);
	nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
	/*
	 * For vmemmap optimization with DAX we need minimum 2 vmemmap
	 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
	 */
	return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
}
/*
 * If we don't have an architecture override, use the generic rule
 */
#ifndef vmemmap_can_optimize
#define vmemmap_can_optimize __vmemmap_can_optimize
#endif

#else
static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
					   struct dev_pagemap *pgmap)
{
	return false;
}
#endif

void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
	MF_UNPOISON = 1 << 4,
	MF_SW_SIMULATED = 1 << 5,
	MF_NO_RETRY = 1 << 6,
	MF_MEM_PRE_REMOVE = 1 << 7,
};
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
		      unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Sysfs entries for memory failure handling statistics.
 */
extern const struct attribute_group memory_failure_attr_group;
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared);
void num_poisoned_pages_inc(unsigned long pfn);
void num_poisoned_pages_sub(unsigned long pfn, long i);
#else
static inline void memory_failure_queue(unsigned long pfn, int flags)
{
}

static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared)
{
	return 0;
}

static inline void num_poisoned_pages_inc(unsigned long pfn)
{
}

static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
extern void memblk_nr_poison_inc(unsigned long pfn);
extern void memblk_nr_poison_sub(unsigned long pfn, long i);
#else
static inline void memblk_nr_poison_inc(unsigned long pfn)
{
}

static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
{
}
#endif

#ifndef arch_memory_failure
static inline int arch_memory_failure(unsigned long pfn, int flags)
{
	return -ENXIO;
}
#endif

#ifndef arch_is_platform_page
static inline bool arch_is_platform_page(u64 paddr)
{
	return false;
}
#endif

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_GET_HWPOISON,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_DAX,
	MF_MSG_UNSPLIT_THP,
	MF_MSG_ALREADY_POISONED,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
void folio_zero_user(struct folio *folio, unsigned long addr_hint);
int copy_user_large_folio(struct folio *dst, struct folio *src,
			  unsigned long addr_hint,
			  struct vm_area_struct *vma);
long copy_folio_from_user(struct folio *dst_folio,
			   const void __user *usr_src,
			   bool allow_pagefault);

/**
 * vma_is_special_huge - Are transhuge page-table entries considered special?
 * @vma: Pointer to the struct vm_area_struct to consider
 *
 * Whether transhuge page-table entries are considered "special" following
 * the definition in vm_normal_page().
 *
 * Return: true if transhuge page-table entries should be considered special,
 * false otherwise.
 */
static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
{
	return vma_is_dax(vma) || (vma->vm_file &&
				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

extern int memcmp_pages(struct page *page1, struct page *page2);

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end);

unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr);
#endif

extern int sysctl_nr_trim_pages;
extern int reclaim_shmem_address_space(struct address_space *mapping);

#ifdef CONFIG_PRINTK
void mem_dump_obj(void *object);
#else
static inline void mem_dump_obj(void *object) {}
#endif

static inline bool is_write_sealed(int seals)
{
	return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
}

/**
 * is_readonly_sealed - Check whether a mapping is write-sealed but mapped
 *                      read-only, in which case writes should be disallowed
 *                      going forward.
 * @seals: the seals to check
 * @vm_flags: the VMA flags to check
 *
 * Returns whether the mapping is read-only sealed, in which case writes
 * should be disallowed going forward.
 */
static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags)
{
	/*
	 * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
	 * MAP_SHARED and read-only, take care to not allow mprotect to
	 * revert protections on such mappings. Do this only for shared
	 * mappings. For private mappings, there is no need to mask
	 * VM_MAYWRITE as we still want them to be COW-writable.
	 */
	if (is_write_sealed(seals) &&
	    ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED))
		return true;

	return false;
}

/**
 * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
 *                    handle them.
 * @seals: the seals to check
 * @vma: the vma to operate on
 *
 * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do the
 * proper check/handling on the vma flags. Returns 0 if the check passes,
 * or <0 on error.
 */
static inline int seal_check_write(int seals, struct vm_area_struct *vma)
{
	if (!is_write_sealed(seals))
		return 0;

	/*
	 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
	 * write seals are active.
	 */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
		return -EPERM;

	return 0;
}
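
/*
 * Illustrative sketch: an ->mmap() handler for a file that supports sealing
 * would reject writable shared mappings of a write-sealed file up front
 * (the info->seals source here is hypothetical):
 *
 *	ret = seal_check_write(info->seals, vma);
 *	if (ret)
 *		return ret;
 */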

#ifdef CONFIG_ANON_VMA_NAME
int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in,
			  struct anon_vma_name *anon_name);
#else
static inline int
madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
		      unsigned long len_in, struct anon_vma_name *anon_name)
{
	return 0;
}
#endif

#ifdef CONFIG_UNACCEPTED_MEMORY

bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
void accept_memory(phys_addr_t start, unsigned long size);

#else

static inline bool range_contains_unaccepted_memory(phys_addr_t start,
						    unsigned long size)
{
	return false;
}

static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}

#endif

static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
{
	return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
}

void vma_pgtable_walk_begin(struct vm_area_struct *vma);
void vma_pgtable_walk_end(struct vm_area_struct *vma);

int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);

void zone_pageset_high_and_batch_update(struct zone *zone, int new_high_min,
					int new_high_max, int new_batch);

#ifdef CONFIG_64BIT
int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
#else
static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
{
	/* noop on 32 bit */
	return 0;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void free_hpage(struct page *page, int __bitwise fpi_flags);
void prep_new_hpage(struct page *page, gfp_t gfp_flags, unsigned int alloc_flags);
void prep_compound_page(struct page *page, unsigned int order);
#endif

/*
 * Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is
 * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for
 * the head page of compound page and bit 1 for pfmemalloc page.
 * page_is_pfmemalloc() is checked in __page_pool_put_page() to avoid recycling
 * the pfmemalloc page.
 */
#define PP_MAGIC_MASK ~0x3UL

#ifdef CONFIG_PAGE_POOL
static inline bool page_pool_page_is_pp(struct page *page)
{
	return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
}
#else
static inline bool page_pool_page_is_pp(struct page *page)
{
	return false;
}
#endif

#endif /* _LINUX_MM_H */