/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_DONTEXPAND	0x00040000
#define VM_ACCOUNT	0x00100000
#define VM_MIXEDMAP	0x10000000
#define VM_STACK	VM_GROWSDOWN
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)		((x).pgprot)
#define __pgprot(x)		((pgprot_t) { (x) })

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses. */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
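
/*
 * Illustrative sketch (not part of the shim; "mm" and the addresses are
 * hypothetical): because __end is exclusive, a walk over [0x1000, 0x3000)
 * visits a VMA ending at 0x3000 but not one starting at 0x3000:
 *
 *	VMA_ITERATOR(vmi, mm, 0x1000);
 *	struct vm_area_struct *vma;
 *
 *	for_each_vma_range(vmi, vma, 0x3000)
 *		printf("[%lx, %lx)\n", vma->vm_start, vma->vm_end);
 */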

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define TASK_SIZE ((1ul << 47) - PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;

/*
 * The shared stubs do not implement this; either way it amounts to an
 * fprintf(stderr, ...) :)
 */
#define pr_warn_once pr_err

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as an enum, so it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}
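
/*
 * Illustrative sketch (assumes "mm" is an mm_struct whose maple tree the
 * test harness has populated; both names are hypothetical): the iterator
 * starts at the given address and vma_next() yields each VMA in ascending
 * order until the tree is exhausted:
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *	unsigned long pages = 0;
 *
 *	for_each_vma(vmi, vma)
 *		pages += vma_pages(vma);
 */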

struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;	   /* Total pages mapped */
	unsigned long locked_vm;   /* Pages that have PG_mlocked set */
	unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;	   /* VM_STACK */
};

struct file {
	struct address_space	*f_mapping;
};

#define VMA_LOCK_OFFSET	0x40000000

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
#ifdef CONFIG_PER_VMA_LOCK
		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
#endif
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - the vm_refcnt bit at VMA_LOCK_OFFSET
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - the vm_refcnt bit at VMA_LOCK_OFFSET, or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for a pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath. (An illustrative reader-side sketch follows this
	 * structure definition.)
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
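
/*
 * Illustrative reader-side sketch of the vm_lock_seq protocol described in
 * the structure above (not part of the shim). A lockless reader may peek at
 * vm_lock_seq with READ_ONCE() and pessimistically bail to the mmap_lock
 * slowpath when the VMA appears write-locked:
 *
 *	static bool vma_appears_write_locked(struct vm_area_struct *vma)
 *	{
 *		return READ_ONCE(vma->vm_lock_seq) ==
 *		       READ_ONCE(vma->vm_mm->mm_lock_seq);
 *	}
 *
 * The real kernel compares against mm->mm_lock_seq, which this shim's
 * mm_struct does not carry; the comparison is shown purely for illustration.
 */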

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *area);
	/* Called any time before splitting to check if it's allowed. */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.  The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct *area);

	/*
	 * Notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS.
	 */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* Same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP. */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* Called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
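
/*
 * Illustrative sketch: a minimal vm_operations_struct as a test might define
 * one (hypothetical; the shim itself only supplies vma_dummy_vm_ops):
 *
 *	static void test_close(struct vm_area_struct *area)
 *	{
 *	}
 *
 *	static const struct vm_operations_struct test_vm_ops = {
 *		.close = test_close,
 *	};
 *
 * Hooks left NULL are simply never invoked. Note that vma_is_anonymous()
 * below tests vm_ops itself, so even an empty ops table makes a VMA
 * non-anonymous.
 */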

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * A reader must have temporarily raised vm_refcnt, but it
		 * will drop it without using the VMA, since the VMA is
		 * write-locked.
		 */
	}
}
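
/*
 * Illustrative lifecycle sketch (hypothetical): a VMA freshly initialised by
 * vma_init() below has vm_refcnt zeroed, i.e. it starts detached. Attach and
 * detach must then alternate, under the VMA write lock:
 *
 *	vma_start_write(vma);
 *	vma_mark_attached(vma);		(vm_refcnt: 0 -> 1)
 *	...
 *	vma_mark_detached(vma);		(vm_refcnt: 1 -> 0)
 *
 * The assertions above WARN_ON_ONCE() if a VMA is attached twice or detached
 * while already detached.
 */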

extern const struct vm_operations_struct vma_dummy_vm_ops;

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}

static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma = calloc(1, sizeof(struct vm_area_struct));

	if (!vma)
		return NULL;

	vma_init(vma, mm);

	return vma;
}

static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = calloc(1, sizeof(struct vm_area_struct));

	if (!new)
		return NULL;

	memcpy(new, orig, sizeof(*new));
	refcount_set(&new->vm_refcnt, 0);
	new->vm_lock_seq = UINT_MAX;
	INIT_LIST_HEAD(&new->anon_vma_chain);

	return new;
}

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
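
/*
 * Worked example of the classification above (illustrative): flags of
 * VM_READ | VM_WRITE match is_data_mapping() (VM_WRITE set, VM_SHARED and
 * VM_STACK clear), so the pages are accounted to data_vm; VM_READ | VM_EXEC
 * matches is_exec_mapping() and lands in exec_vm; VM_READ | VM_WRITE |
 * VM_STACK is a stack mapping and lands in stack_vm. total_vm is updated
 * regardless of category.
 */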

/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	/* mas_find() takes an inclusive limit, so convert the exclusive max. */
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}
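
/*
 * Illustrative sketch of the lookup semantics (addresses hypothetical):
 * vma_lookup() returns the VMA containing addr, or NULL if there is none,
 * while find_vma_intersection() returns the first VMA overlapping the
 * exclusive-end range. Given a single VMA spanning [0x2000, 0x3000):
 *
 *	vma_lookup(mm, 0x1000);				returns NULL
 *	vma_lookup(mm, 0x2000);				returns the VMA
 *	find_vma_intersection(mm, 0x1000, 0x2800);	returns the VMA
 */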

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *)
{
}

static inline void mpol_put(struct mempolicy *)
{
}

static inline void vm_area_free(struct vm_area_struct *vma)
{
	free(vma);
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}

static inline void update_hiwater_rss(struct mm_struct *)
{
}

static inline void update_hiwater_vm(struct mm_struct *)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
		      struct vm_area_struct *vma, unsigned long start_addr,
		      unsigned long end_addr, unsigned long tree_end,
		      bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)start_addr;
	(void)end_addr;
	(void)tree_end;
	(void)mm_wr_locked;
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)floor;
	(void)ceiling;
	(void)mm_wr_locked;
}

static inline void mapping_unmap_writable(struct address_space *)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *)
{
}

static inline void get_file(struct file *)
{
}

static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes, indicate that the anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}
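
/*
 * Illustrative test-side sketch (hypothetical): since the stub above only
 * bumps vm_lock_seq, a test can check that an operation write-locked a VMA
 * by snapshotting the counter around the call. some_vma_operation() stands
 * in for whatever helper is under test:
 *
 *	unsigned int seq = vma->vm_lock_seq;
 *
 *	some_vma_operation(vma);
 *	assert(vma->vm_lock_seq != seq);
 */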

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)adjust_next;
}

static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *,
						 struct rb_root_cached *)
{
}

static inline void uprobe_mmap(struct vm_area_struct *)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	(void)vma;
	(void)start;
	(void)end;
}

static inline void i_mmap_lock_write(struct address_space *)
{
}

static inline void anon_vma_lock_write(struct anon_vma *)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}

static inline void anon_vma_unlock_write(struct anon_vma *)
{
}

static inline void i_mmap_unlock_write(struct address_space *)
{
}

static inline void anon_vma_merge(struct vm_area_struct *,
				  struct vm_area_struct *)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)unmaps;

	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *)
{
}

static inline void mmap_read_unlock(struct mm_struct *)
{
}

static inline void mmap_write_unlock(struct mm_struct *)
{
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;

	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;
}

static inline void mmap_assert_locked(struct mm_struct *)
{
}

static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	(void)vma;
	(void)vm_flags;
}

static inline bool mapping_can_writeback(struct address_space *)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *)
{
}

static inline void mutex_lock(struct mutex *)
{
}

static inline void mutex_unlock(struct mutex *)
{
}

static inline bool mutex_is_locked(struct mutex *)
{
	return true;
}

static inline bool signal_pending(void *)
{
	return false;
}

#endif	/* __MM_VMA_INTERNAL_H */