1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 
7 #include "maple-shared.h"
8 #include "vma_internal.h"
9 
10 /* Include so that the header guard is set. */
11 #include "../../../mm/vma.h"
12 
13 static bool fail_prealloc;
14 
15 /* Then override vma_iter_prealloc() so we can choose to fail it. */
16 #define vma_iter_prealloc(vmi, vma)					\
17 	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
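/*
 * Tests which want to simulate an -ENOMEM from maple tree preallocation set
 * fail_prealloc = true immediately before the operation under test;
 * cleanup_mm() resets it again afterwards.
 */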
18 
19 /*
20  * Directly import the VMA implementation here. Our vma_internal.h wrapper
21  * provides userland-equivalent functionality for everything vma.c uses.
22  */
23 #include "../../../mm/vma.c"
24 
25 const struct vm_operations_struct vma_dummy_vm_ops;
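/*
 * A single dummy anon_vma shared by the tests below. Its was_cloned and
 * was_unlinked flags are set by the userland stubs pulled in via
 * vma_internal.h, letting tests observe anon_vma duplication and unlinking.
 */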
26 static struct anon_vma dummy_anon_vma;
27 
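/*
 * The ASSERT_*() macros below return false from the enclosing function on
 * failure, so they may only be used within helpers that return bool.
 */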
28 #define ASSERT_TRUE(_expr)						\
29 	do {								\
30 		if (!(_expr)) {						\
31 			fprintf(stderr,					\
32 				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
33 				__FILE__, __LINE__, __FUNCTION__, #_expr); \
34 			return false;					\
35 		}							\
36 	} while (0)
37 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
38 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
39 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
40 
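/* Provide a task object for the 'current' stub used by the imported code. */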
41 static struct task_struct __current;
42 
43 struct task_struct *get_current(void)
44 {
45 	return &__current;
46 }
47 
48 /* Helper function to simply allocate a VMA. */
49 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
50 					unsigned long start,
51 					unsigned long end,
52 					pgoff_t pgoff,
53 					vm_flags_t flags)
54 {
55 	struct vm_area_struct *ret = vm_area_alloc(mm);
56 
57 	if (ret == NULL)
58 		return NULL;
59 
60 	ret->vm_start = start;
61 	ret->vm_end = end;
62 	ret->vm_pgoff = pgoff;
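	/*
	 * The VMA is newly allocated and not yet visible to anybody else, so
	 * write the flags directly via the __vm_flags alias.
	 */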
63 	ret->__vm_flags = flags;
64 	vma_assert_detached(ret);
65 
66 	return ret;
67 }
68 
69 /* Helper function to link an already-allocated VMA to the tree. */
70 static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
71 {
72 	int res;
73 
74 	res = vma_link(mm, vma);
75 	if (!res)
76 		vma_assert_attached(vma);
77 	return res;
78 }
79 
80 /* Helper function to allocate a VMA and link it to the tree. */
81 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
82 						 unsigned long start,
83 						 unsigned long end,
84 						 pgoff_t pgoff,
85 						 vm_flags_t flags)
86 {
87 	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
88 
89 	if (vma == NULL)
90 		return NULL;
91 
92 	if (attach_vma(mm, vma)) {
93 		vm_area_free(vma);
94 		return NULL;
95 	}
96 
97 	/*
98 	 * Reset this counter which we use to track whether writes have
99 	 * begun. Linking to the tree will have caused this to be incremented,
100 	 * which means we will get a false positive otherwise.
101 	 */
102 	vma->vm_lock_seq = UINT_MAX;
103 
104 	return vma;
105 }
106 
107 /* Helper function which provides a wrapper around a merge new VMA operation. */
108 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
109 {
110 	struct vm_area_struct *vma;
111 	/*
112 	 * For convenience, look up the prev and next VMAs, which the new VMA
113 	 * operation requires.
114 	 */
115 	vmg->next = vma_next(vmg->vmi);
116 	vmg->prev = vma_prev(vmg->vmi);
117 	vma_iter_next_range(vmg->vmi);
118 
119 	vma = vma_merge_new_range(vmg);
120 	if (vma)
121 		vma_assert_attached(vma);
122 
123 	return vma;
124 }
125 
126 /*
127  * Helper function which provides a wrapper around a merge existing VMA
128  * operation.
129  */
130 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
131 {
132 	struct vm_area_struct *vma;
133 
134 	vma = vma_merge_existing_range(vmg);
135 	if (vma)
136 		vma_assert_attached(vma);
137 	return vma;
138 }
139 
140 /*
141  * Helper function which provides a wrapper around the expansion of an existing
142  * VMA.
143  */
144 static int expand_existing(struct vma_merge_struct *vmg)
145 {
146 	return vma_expand(vmg);
147 }
148 
149 /*
150  * Helper function to reset the merge state and the associated VMA iterator
151  * to a specified new range.
152  */
153 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
154 			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
155 {
156 	vma_iter_set(vmg->vmi, start);
157 
158 	vmg->prev = NULL;
159 	vmg->next = NULL;
160 	vmg->vma = NULL;
161 
162 	vmg->start = start;
163 	vmg->end = end;
164 	vmg->pgoff = pgoff;
165 	vmg->flags = flags;
166 }
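
/*
 * A typical merge test then drives things as follows (illustrative sketch
 * only, mirroring the tests further down):
 *
 *	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
 *	vmg.prev = vma_prev;
 *	vmg.vma = vma;
 *	ASSERT_EQ(merge_existing(&vmg), vma_prev);
 */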
167 
168 /*
169  * Helper function to try to merge a new VMA.
170  *
171  * Update vmg and the iterator for it and try to merge, otherwise allocate a new
172  * VMA, link it to the maple tree and return it.
173  */
174 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
175 						struct vma_merge_struct *vmg,
176 						unsigned long start, unsigned long end,
177 						pgoff_t pgoff, vm_flags_t flags,
178 						bool *was_merged)
179 {
180 	struct vm_area_struct *merged;
181 
182 	vmg_set_range(vmg, start, end, pgoff, flags);
183 
184 	merged = merge_new(vmg);
185 	if (merged) {
186 		*was_merged = true;
187 		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
188 		return merged;
189 	}
190 
191 	*was_merged = false;
192 
193 	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
194 
195 	return alloc_and_link_vma(mm, start, end, pgoff, flags);
196 }
197 
198 /*
199  * Helper function to reset the dummy anon_vma to indicate it has not been
200  * duplicated.
201  */
202 static void reset_dummy_anon_vma(void)
203 {
204 	dummy_anon_vma.was_cloned = false;
205 	dummy_anon_vma.was_unlinked = false;
206 }
207 
208 /*
209  * Helper function to remove all VMAs and destroy the maple tree associated with
210  * a virtual address space. Returns a count of VMAs in the tree.
211  */
212 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
213 {
214 	struct vm_area_struct *vma;
215 	int count = 0;
216 
217 	fail_prealloc = false;
218 	reset_dummy_anon_vma();
219 
220 	vma_iter_set(vmi, 0);
221 	for_each_vma(*vmi, vma) {
222 		vm_area_free(vma);
223 		count++;
224 	}
225 
226 	mtree_destroy(&mm->mm_mt);
227 	mm->map_count = 0;
228 	return count;
229 }
230 
231 /* Helper function to determine if VMA has had vma_start_write() performed. */
232 static bool vma_write_started(struct vm_area_struct *vma)
233 {
234 	int seq = vma->vm_lock_seq;
235 
236 	/* We reset after each check. */
237 	vma->vm_lock_seq = UINT_MAX;
238 
239 	/* The vma_start_write() stub simply increments this value. */
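	/*
	 * vm_lock_seq was primed to UINT_MAX, which ends up as -1 in the int
	 * local above, so any increment leaves seq >= 0.
	 */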
240 	return seq > -1;
241 }
242 
243 /* Helper function providing a dummy vm_ops->close() method. */
244 static void dummy_close(struct vm_area_struct *)
245 {
246 }
247 
248 static bool test_simple_merge(void)
249 {
250 	struct vm_area_struct *vma;
251 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
252 	struct mm_struct mm = {};
253 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
254 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
255 	VMA_ITERATOR(vmi, &mm, 0x1000);
256 	struct vma_merge_struct vmg = {
257 		.mm = &mm,
258 		.vmi = &vmi,
259 		.start = 0x1000,
260 		.end = 0x2000,
261 		.flags = flags,
262 		.pgoff = 1,
263 	};
264 
265 	ASSERT_FALSE(attach_vma(&mm, vma_left));
266 	ASSERT_FALSE(attach_vma(&mm, vma_right));
267 
268 	vma = merge_new(&vmg);
269 	ASSERT_NE(vma, NULL);
270 
271 	ASSERT_EQ(vma->vm_start, 0);
272 	ASSERT_EQ(vma->vm_end, 0x3000);
273 	ASSERT_EQ(vma->vm_pgoff, 0);
274 	ASSERT_EQ(vma->vm_flags, flags);
275 
276 	vm_area_free(vma);
277 	mtree_destroy(&mm.mm_mt);
278 
279 	return true;
280 }
281 
282 static bool test_simple_modify(void)
283 {
284 	struct vm_area_struct *vma;
285 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
286 	struct mm_struct mm = {};
287 	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
288 	VMA_ITERATOR(vmi, &mm, 0x1000);
289 
290 	ASSERT_FALSE(attach_vma(&mm, init_vma));
291 
292 	/*
293 	 * The flags will not be changed; the vma_modify_flags() function
294 	 * performs the merge/split only.
295 	 */
296 	vma = vma_modify_flags(&vmi, init_vma, init_vma,
297 			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
298 	ASSERT_NE(vma, NULL);
299 	/* We modify the provided VMA, and on split allocate new VMAs. */
300 	ASSERT_EQ(vma, init_vma);
301 
302 	ASSERT_EQ(vma->vm_start, 0x1000);
303 	ASSERT_EQ(vma->vm_end, 0x2000);
304 	ASSERT_EQ(vma->vm_pgoff, 1);
305 
306 	/*
307 	 * Now walk through the three split VMAs and make sure they are as
308 	 * expected.
309 	 */
310 
311 	vma_iter_set(&vmi, 0);
312 	vma = vma_iter_load(&vmi);
313 
314 	ASSERT_EQ(vma->vm_start, 0);
315 	ASSERT_EQ(vma->vm_end, 0x1000);
316 	ASSERT_EQ(vma->vm_pgoff, 0);
317 
318 	vm_area_free(vma);
319 	vma_iter_clear(&vmi);
320 
321 	vma = vma_next(&vmi);
322 
323 	ASSERT_EQ(vma->vm_start, 0x1000);
324 	ASSERT_EQ(vma->vm_end, 0x2000);
325 	ASSERT_EQ(vma->vm_pgoff, 1);
326 
327 	vm_area_free(vma);
328 	vma_iter_clear(&vmi);
329 
330 	vma = vma_next(&vmi);
331 
332 	ASSERT_EQ(vma->vm_start, 0x2000);
333 	ASSERT_EQ(vma->vm_end, 0x3000);
334 	ASSERT_EQ(vma->vm_pgoff, 2);
335 
336 	vm_area_free(vma);
337 	mtree_destroy(&mm.mm_mt);
338 
339 	return true;
340 }
341 
342 static bool test_simple_expand(void)
343 {
344 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
345 	struct mm_struct mm = {};
346 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
347 	VMA_ITERATOR(vmi, &mm, 0);
348 	struct vma_merge_struct vmg = {
349 		.vmi = &vmi,
350 		.vma = vma,
351 		.start = 0,
352 		.end = 0x3000,
353 		.pgoff = 0,
354 	};
355 
356 	ASSERT_FALSE(attach_vma(&mm, vma));
357 
358 	ASSERT_FALSE(expand_existing(&vmg));
359 
360 	ASSERT_EQ(vma->vm_start, 0);
361 	ASSERT_EQ(vma->vm_end, 0x3000);
362 	ASSERT_EQ(vma->vm_pgoff, 0);
363 
364 	vm_area_free(vma);
365 	mtree_destroy(&mm.mm_mt);
366 
367 	return true;
368 }
369 
370 static bool test_simple_shrink(void)
371 {
372 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
373 	struct mm_struct mm = {};
374 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
375 	VMA_ITERATOR(vmi, &mm, 0);
376 
377 	ASSERT_FALSE(attach_vma(&mm, vma));
378 
379 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
380 
381 	ASSERT_EQ(vma->vm_start, 0);
382 	ASSERT_EQ(vma->vm_end, 0x1000);
383 	ASSERT_EQ(vma->vm_pgoff, 0);
384 
385 	vm_area_free(vma);
386 	mtree_destroy(&mm.mm_mt);
387 
388 	return true;
389 }
390 
391 static bool test_merge_new(void)
392 {
393 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
394 	struct mm_struct mm = {};
395 	VMA_ITERATOR(vmi, &mm, 0);
396 	struct vma_merge_struct vmg = {
397 		.mm = &mm,
398 		.vmi = &vmi,
399 	};
400 	struct anon_vma_chain dummy_anon_vma_chain_a = {
401 		.anon_vma = &dummy_anon_vma,
402 	};
403 	struct anon_vma_chain dummy_anon_vma_chain_b = {
404 		.anon_vma = &dummy_anon_vma,
405 	};
406 	struct anon_vma_chain dummy_anon_vma_chain_c = {
407 		.anon_vma = &dummy_anon_vma,
408 	};
409 	struct anon_vma_chain dummy_anon_vma_chain_d = {
410 		.anon_vma = &dummy_anon_vma,
411 	};
412 	const struct vm_operations_struct vm_ops = {
413 		.close = dummy_close,
414 	};
415 	int count;
416 	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
417 	bool merged;
418 
419 	/*
420 	 * 0123456789abc
421 	 * AA B       CC
422 	 */
423 	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
424 	ASSERT_NE(vma_a, NULL);
425 	/* We give each VMA a single avc so we can test anon_vma duplication. */
426 	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
427 	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
428 
429 	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
430 	ASSERT_NE(vma_b, NULL);
431 	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
432 	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
433 
434 	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
435 	ASSERT_NE(vma_c, NULL);
436 	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
437 	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
438 
439 	/*
440 	 * NO merge.
441 	 *
442 	 * 0123456789abc
443 	 * AA B   **  CC
444 	 */
445 	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
446 	ASSERT_NE(vma_d, NULL);
447 	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
448 	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
449 	ASSERT_FALSE(merged);
450 	ASSERT_EQ(mm.map_count, 4);
451 
452 	/*
453 	 * Merge BOTH sides.
454 	 *
455 	 * 0123456789abc
456 	 * AA*B   DD  CC
457 	 */
458 	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
459 	vma_b->anon_vma = &dummy_anon_vma;
460 	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
461 	ASSERT_EQ(vma, vma_a);
462 	/* Merge with A, delete B. */
463 	ASSERT_TRUE(merged);
464 	ASSERT_EQ(vma->vm_start, 0);
465 	ASSERT_EQ(vma->vm_end, 0x4000);
466 	ASSERT_EQ(vma->vm_pgoff, 0);
467 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
468 	ASSERT_TRUE(vma_write_started(vma));
469 	ASSERT_EQ(mm.map_count, 3);
470 
471 	/*
472 	 * Merge to PREVIOUS VMA.
473 	 *
474 	 * 0123456789abc
475 	 * AAAA*  DD  CC
476 	 */
477 	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
478 	ASSERT_EQ(vma, vma_a);
479 	/* Extend A. */
480 	ASSERT_TRUE(merged);
481 	ASSERT_EQ(vma->vm_start, 0);
482 	ASSERT_EQ(vma->vm_end, 0x5000);
483 	ASSERT_EQ(vma->vm_pgoff, 0);
484 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
485 	ASSERT_TRUE(vma_write_started(vma));
486 	ASSERT_EQ(mm.map_count, 3);
487 
488 	/*
489 	 * Merge to NEXT VMA.
490 	 *
491 	 * 0123456789abc
492 	 * AAAAA *DD  CC
493 	 */
494 	vma_d->anon_vma = &dummy_anon_vma;
495 	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
496 	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
497 	ASSERT_EQ(vma, vma_d);
498 	/* Prepend. */
499 	ASSERT_TRUE(merged);
500 	ASSERT_EQ(vma->vm_start, 0x6000);
501 	ASSERT_EQ(vma->vm_end, 0x9000);
502 	ASSERT_EQ(vma->vm_pgoff, 6);
503 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
504 	ASSERT_TRUE(vma_write_started(vma));
505 	ASSERT_EQ(mm.map_count, 3);
506 
507 	/*
508 	 * Merge BOTH sides.
509 	 *
510 	 * 0123456789abc
511 	 * AAAAA*DDD  CC
512 	 */
513 	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
514 	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
515 	ASSERT_EQ(vma, vma_a);
516 	/* Merge with A, delete D. */
517 	ASSERT_TRUE(merged);
518 	ASSERT_EQ(vma->vm_start, 0);
519 	ASSERT_EQ(vma->vm_end, 0x9000);
520 	ASSERT_EQ(vma->vm_pgoff, 0);
521 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
522 	ASSERT_TRUE(vma_write_started(vma));
523 	ASSERT_EQ(mm.map_count, 2);
524 
525 	/*
526 	 * Merge to NEXT VMA.
527 	 *
528 	 * 0123456789abc
529 	 * AAAAAAAAA *CC
530 	 */
531 	vma_c->anon_vma = &dummy_anon_vma;
532 	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
533 	ASSERT_EQ(vma, vma_c);
534 	/* Prepend C. */
535 	ASSERT_TRUE(merged);
536 	ASSERT_EQ(vma->vm_start, 0xa000);
537 	ASSERT_EQ(vma->vm_end, 0xc000);
538 	ASSERT_EQ(vma->vm_pgoff, 0xa);
539 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
540 	ASSERT_TRUE(vma_write_started(vma));
541 	ASSERT_EQ(mm.map_count, 2);
542 
543 	/*
544 	 * Merge BOTH sides.
545 	 *
546 	 * 0123456789abc
547 	 * AAAAAAAAA*CCC
548 	 */
549 	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
550 	ASSERT_EQ(vma, vma_a);
551 	/* Extend A and delete C. */
552 	ASSERT_TRUE(merged);
553 	ASSERT_EQ(vma->vm_start, 0);
554 	ASSERT_EQ(vma->vm_end, 0xc000);
555 	ASSERT_EQ(vma->vm_pgoff, 0);
556 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
557 	ASSERT_TRUE(vma_write_started(vma));
558 	ASSERT_EQ(mm.map_count, 1);
559 
560 	/*
561 	 * Final state.
562 	 *
563 	 * 0123456789abc
564 	 * AAAAAAAAAAAAA
565 	 */
566 
567 	count = 0;
568 	vma_iter_set(&vmi, 0);
569 	for_each_vma(vmi, vma) {
570 		ASSERT_NE(vma, NULL);
571 		ASSERT_EQ(vma->vm_start, 0);
572 		ASSERT_EQ(vma->vm_end, 0xc000);
573 		ASSERT_EQ(vma->vm_pgoff, 0);
574 		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
575 
576 		vm_area_free(vma);
577 		count++;
578 	}
579 
580 	/* Should only have one VMA left (though freed) after all is done. */
581 	ASSERT_EQ(count, 1);
582 
583 	mtree_destroy(&mm.mm_mt);
584 	return true;
585 }
586 
587 static bool test_vma_merge_special_flags(void)
588 {
589 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
590 	struct mm_struct mm = {};
591 	VMA_ITERATOR(vmi, &mm, 0);
592 	struct vma_merge_struct vmg = {
593 		.mm = &mm,
594 		.vmi = &vmi,
595 	};
596 	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
597 	vm_flags_t all_special_flags = 0;
598 	int i;
599 	struct vm_area_struct *vma_left, *vma;
600 
601 	/* Make sure there aren't new VM_SPECIAL flags. */
602 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
603 		all_special_flags |= special_flags[i];
604 	}
605 	ASSERT_EQ(all_special_flags, VM_SPECIAL);
606 
607 	/*
608 	 * 01234
609 	 * AAA
610 	 */
611 	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
612 	ASSERT_NE(vma_left, NULL);
613 
614 	/* 1. Set up new VMA with special flag that would otherwise merge. */
615 
616 	/*
617 	 * 01234
618 	 * AAA*
619 	 *
620 	 * This should merge if not for the VM_SPECIAL flag.
621 	 */
622 	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
623 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
624 		vm_flags_t special_flag = special_flags[i];
625 
626 		vma_left->__vm_flags = flags | special_flag;
627 		vmg.flags = flags | special_flag;
628 		vma = merge_new(&vmg);
629 		ASSERT_EQ(vma, NULL);
630 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
631 	}
632 
633 	/* 2. Modify VMA with special flag that would otherwise merge. */
634 
635 	/*
636 	 * 01234
637 	 * AAAB
638 	 *
639 	 * Create a VMA to modify.
640 	 */
641 	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
642 	ASSERT_NE(vma, NULL);
643 	vmg.vma = vma;
644 
645 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
646 		vm_flags_t special_flag = special_flags[i];
647 
648 		vma_left->__vm_flags = flags | special_flag;
649 		vmg.flags = flags | special_flag;
650 		vma = merge_existing(&vmg);
651 		ASSERT_EQ(vma, NULL);
652 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
653 	}
654 
655 	cleanup_mm(&mm, &vmi);
656 	return true;
657 }
658 
659 static bool test_vma_merge_with_close(void)
660 {
661 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
662 	struct mm_struct mm = {};
663 	VMA_ITERATOR(vmi, &mm, 0);
664 	struct vma_merge_struct vmg = {
665 		.mm = &mm,
666 		.vmi = &vmi,
667 	};
668 	const struct vm_operations_struct vm_ops = {
669 		.close = dummy_close,
670 	};
671 	struct vm_area_struct *vma_prev, *vma_next, *vma;
672 
673 	/*
674 	 * When merging VMAs we are not permitted to remove any VMA that has a
675 	 * vm_ops->close() hook.
676 	 *
677 	 * Considering the two possible adjacent VMAs to which a VMA can be
678 	 * merged:
679 	 *
680 	 * [ prev ][ vma ][ next ]
681 	 *
682 	 * In no case will we need to delete prev. If the operation is
683 	 * mergeable, then prev will be extended with one or both of vma and
684 	 * next deleted.
685 	 *
686 	 * As a result, during initial mergeability checks, only
687 	 * can_vma_merge_before() (which implies the VMA being merged with is
688 	 * 'next' as shown above) bothers to check whether the next VMA
689 	 * has a vm_ops->close() callback that will need to be called when
690 	 * removed.
691 	 *
692 	 * If it does, then we cannot merge as the resources that the close()
693 	 * operation potentially clears down are tied only to the existing VMA
694 	 * range and we have no way of extending those to the newly merged one.
695 	 *
696 	 * We must consider two scenarios:
697 	 *
698 	 * A.
699 	 *
700 	 * vm_ops->close:     -       -    !NULL
701 	 *                 [ prev ][ vma ][ next ]
702 	 *
703 	 * Where prev may or may not be present/mergeable.
704 	 *
705 	 * This is picked up by a specific check in can_vma_merge_before().
706 	 *
707 	 * B.
708 	 *
709 	 * vm_ops->close:     -     !NULL
710 	 *                 [ prev ][ vma ]
711 	 *
712 	 * Where prev and vma are present and mergeable.
713 	 *
714 	 * This is picked up by a specific check in the modified VMA merge.
715 	 *
716 	 * IMPORTANT NOTE: We make the assumption that the following case:
717 	 *
718 	 *    -     !NULL   NULL
719 	 * [ prev ][ vma ][ next ]
720 	 *
721 	 * Cannot occur, because vma->vm_ops being the same implies the same
722 	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
723 	 * would be set too, and thus scenario A would pick this up.
724 	 */
725 
726 	/*
727 	 * The only case of a new VMA merge that results in a VMA being deleted
728 	 * is one where both the previous and next VMAs are merged - in this
729 	 * instance the next VMA is deleted, and the previous VMA is extended.
730 	 *
731 	 * If we are unable to do so, we reduce the operation to simply
732 	 * extending the prev VMA and not merging next.
733 	 *
734 	 * 0123456789
735 	 * PPP**NNNN
736 	 *             ->
737 	 * 0123456789
738 	 * PPPPPPNNN
739 	 */
740 
741 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
742 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
743 	vma_next->vm_ops = &vm_ops;
744 
745 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
746 	ASSERT_EQ(merge_new(&vmg), vma_prev);
747 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
748 	ASSERT_EQ(vma_prev->vm_start, 0);
749 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
750 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
751 
752 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
753 
754 	/*
755 	 * When modifying an existing VMA there are further cases where we
756 	 * delete VMAs.
757 	 *
758 	 *    <>
759 	 * 0123456789
760 	 * PPPVV
761 	 *
762 	 * In this instance, if vma has a close hook, the merge simply cannot
763 	 * proceed.
764 	 */
765 
766 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
767 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
768 	vma->vm_ops = &vm_ops;
769 
770 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
771 	vmg.prev = vma_prev;
772 	vmg.vma = vma;
773 
774 	/*
775 	 * Modifying the VMA in a way that would otherwise merge should also
776 	 * fail.
777 	 */
778 	ASSERT_EQ(merge_existing(&vmg), NULL);
779 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
780 
781 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
782 
783 	/*
784 	 * This case is mirrored if merging with next.
785 	 *
786 	 *    <>
787 	 * 0123456789
788 	 *    VVNNNN
789 	 *
790 	 * In this instance, if vma has a close hook, the merge simply cannot
791 	 * proceed.
792 	 */
793 
794 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
795 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
796 	vma->vm_ops = &vm_ops;
797 
798 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
799 	vmg.vma = vma;
800 	ASSERT_EQ(merge_existing(&vmg), NULL);
801 	/*
802 	 * Initially this is misapprehended as an out of memory report, as the
803 	 * close() check is handled in the same way as anon_vma duplication
804 	 * failures; however, a subsequent patch resolves this.
805 	 */
806 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
807 
808 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
809 
810 	/*
811 	 * Finally, we consider two variants of the case where we modify a VMA
812 	 * to merge with both the previous and next VMAs.
813 	 *
814 	 * The first variant is where vma has a close hook. In this instance, no
815 	 * merge can proceed.
816 	 *
817 	 *    <>
818 	 * 0123456789
819 	 * PPPVVNNNN
820 	 */
821 
822 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
823 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
824 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
825 	vma->vm_ops = &vm_ops;
826 
827 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
828 	vmg.prev = vma_prev;
829 	vmg.vma = vma;
830 
831 	ASSERT_EQ(merge_existing(&vmg), NULL);
832 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
833 
834 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
835 
836 	/*
837 	 * The second variant is where next has a close hook. In this instance,
838 	 * we reduce the operation to a merge between prev and vma.
839 	 *
840 	 *    <>
841 	 * 0123456789
842 	 * PPPVVNNNN
843 	 *            ->
844 	 * 0123456789
845 	 * PPPPPNNNN
846 	 */
847 
848 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
849 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
850 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
851 	vma_next->vm_ops = &vm_ops;
852 
853 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
854 	vmg.prev = vma_prev;
855 	vmg.vma = vma;
856 
857 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
858 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
859 	ASSERT_EQ(vma_prev->vm_start, 0);
860 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
861 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
862 
863 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
864 
865 	return true;
866 }
867 
868 static bool test_vma_merge_new_with_close(void)
869 {
870 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
871 	struct mm_struct mm = {};
872 	VMA_ITERATOR(vmi, &mm, 0);
873 	struct vma_merge_struct vmg = {
874 		.mm = &mm,
875 		.vmi = &vmi,
876 	};
877 	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
878 	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
879 	const struct vm_operations_struct vm_ops = {
880 		.close = dummy_close,
881 	};
882 	struct vm_area_struct *vma;
883 
884 	/*
885 	 * We should allow the partial merge of a proposed new VMA if the
886 	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
887 	 * compatible), e.g.:
888 	 *
889 	 *        New VMA
890 	 *    A  v-------v  B
891 	 * |-----|       |-----|
892 	 *  close         close
893 	 *
894 	 * Since the rule is to not DELETE a VMA with a close operation, this
895 	 * should be permitted, only rather than expanding A and deleting B, we
896 	 * should simply expand A and leave B intact, e.g.:
897 	 *
898 	 *        New VMA
899 	 *       A          B
900 	 * |------------||-----|
901 	 *  close         close
902 	 */
903 
904 	/* Give both prev and next a vm_ops->close() hook. */
905 	vma_prev->vm_ops = &vm_ops;
906 	vma_next->vm_ops = &vm_ops;
907 
908 	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
909 	vma = merge_new(&vmg);
910 	ASSERT_NE(vma, NULL);
911 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
912 	ASSERT_EQ(vma->vm_start, 0);
913 	ASSERT_EQ(vma->vm_end, 0x5000);
914 	ASSERT_EQ(vma->vm_pgoff, 0);
915 	ASSERT_EQ(vma->vm_ops, &vm_ops);
916 	ASSERT_TRUE(vma_write_started(vma));
917 	ASSERT_EQ(mm.map_count, 2);
918 
919 	cleanup_mm(&mm, &vmi);
920 	return true;
921 }
922 
923 static bool test_merge_existing(void)
924 {
925 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
926 	struct mm_struct mm = {};
927 	VMA_ITERATOR(vmi, &mm, 0);
928 	struct vm_area_struct *vma, *vma_prev, *vma_next;
929 	struct vma_merge_struct vmg = {
930 		.mm = &mm,
931 		.vmi = &vmi,
932 	};
933 	const struct vm_operations_struct vm_ops = {
934 		.close = dummy_close,
935 	};
936 
937 	/*
938 	 * Merge right case - partial span.
939 	 *
940 	 *    <->
941 	 * 0123456789
942 	 *   VVVVNNN
943 	 *            ->
944 	 * 0123456789
945 	 *   VNNNNNN
946 	 */
947 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
948 	vma->vm_ops = &vm_ops; /* This should have no impact. */
949 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
950 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
951 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
952 	vmg.vma = vma;
953 	vmg.prev = vma;
954 	vma->anon_vma = &dummy_anon_vma;
955 	ASSERT_EQ(merge_existing(&vmg), vma_next);
956 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
957 	ASSERT_EQ(vma_next->vm_start, 0x3000);
958 	ASSERT_EQ(vma_next->vm_end, 0x9000);
959 	ASSERT_EQ(vma_next->vm_pgoff, 3);
960 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
961 	ASSERT_EQ(vma->vm_start, 0x2000);
962 	ASSERT_EQ(vma->vm_end, 0x3000);
963 	ASSERT_EQ(vma->vm_pgoff, 2);
964 	ASSERT_TRUE(vma_write_started(vma));
965 	ASSERT_TRUE(vma_write_started(vma_next));
966 	ASSERT_EQ(mm.map_count, 2);
967 
968 	/* Clear down and reset. */
969 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
970 
971 	/*
972 	 * Merge right case - full span.
973 	 *
974 	 *   <-->
975 	 * 0123456789
976 	 *   VVVVNNN
977 	 *            ->
978 	 * 0123456789
979 	 *   NNNNNNN
980 	 */
981 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
982 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
983 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
984 	vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
985 	vmg.vma = vma;
986 	vma->anon_vma = &dummy_anon_vma;
987 	ASSERT_EQ(merge_existing(&vmg), vma_next);
988 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
989 	ASSERT_EQ(vma_next->vm_start, 0x2000);
990 	ASSERT_EQ(vma_next->vm_end, 0x9000);
991 	ASSERT_EQ(vma_next->vm_pgoff, 2);
992 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
993 	ASSERT_TRUE(vma_write_started(vma_next));
994 	ASSERT_EQ(mm.map_count, 1);
995 
996 	/* Clear down and reset. We should have deleted vma. */
997 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
998 
999 	/*
1000 	 * Merge left case - partial span.
1001 	 *
1002 	 *    <->
1003 	 * 0123456789
1004 	 * PPPVVVV
1005 	 *            ->
1006 	 * 0123456789
1007 	 * PPPPPPV
1008 	 */
1009 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1010 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1011 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1012 	vma->vm_ops = &vm_ops; /* This should have no impact. */
1013 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
1014 	vmg.prev = vma_prev;
1015 	vmg.vma = vma;
1016 	vma->anon_vma = &dummy_anon_vma;
1017 
1018 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1019 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1020 	ASSERT_EQ(vma_prev->vm_start, 0);
1021 	ASSERT_EQ(vma_prev->vm_end, 0x6000);
1022 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1023 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1024 	ASSERT_EQ(vma->vm_start, 0x6000);
1025 	ASSERT_EQ(vma->vm_end, 0x7000);
1026 	ASSERT_EQ(vma->vm_pgoff, 6);
1027 	ASSERT_TRUE(vma_write_started(vma_prev));
1028 	ASSERT_TRUE(vma_write_started(vma));
1029 	ASSERT_EQ(mm.map_count, 2);
1030 
1031 	/* Clear down and reset. */
1032 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1033 
1034 	/*
1035 	 * Merge left case - full span.
1036 	 *
1037 	 *    <-->
1038 	 * 0123456789
1039 	 * PPPVVVV
1040 	 *            ->
1041 	 * 0123456789
1042 	 * PPPPPPP
1043 	 */
1044 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1045 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1046 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1047 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1048 	vmg.prev = vma_prev;
1049 	vmg.vma = vma;
1050 	vma->anon_vma = &dummy_anon_vma;
1051 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1052 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1053 	ASSERT_EQ(vma_prev->vm_start, 0);
1054 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1055 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1056 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1057 	ASSERT_TRUE(vma_write_started(vma_prev));
1058 	ASSERT_EQ(mm.map_count, 1);
1059 
1060 	/* Clear down and reset. We should have deleted vma. */
1061 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1062 
1063 	/*
1064 	 * Merge both case.
1065 	 *
1066 	 *    <-->
1067 	 * 0123456789
1068 	 * PPPVVVVNNN
1069 	 *             ->
1070 	 * 0123456789
1071 	 * PPPPPPPPPP
1072 	 */
1073 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1074 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1075 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1076 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1077 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1078 	vmg.prev = vma_prev;
1079 	vmg.vma = vma;
1080 	vma->anon_vma = &dummy_anon_vma;
1081 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1082 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1083 	ASSERT_EQ(vma_prev->vm_start, 0);
1084 	ASSERT_EQ(vma_prev->vm_end, 0x9000);
1085 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1086 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1087 	ASSERT_TRUE(vma_write_started(vma_prev));
1088 	ASSERT_EQ(mm.map_count, 1);
1089 
1090 	/* Clear down and reset. We should have deleted prev and next. */
1091 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1092 
1093 	/*
1094 	 * Non-merge ranges. The modified VMA merge operation assumes that the
1095 	 * caller always specifies ranges within the input VMA, so we need only
1096 	 * examine these cases.
1097 	 *
1098 	 *     -
1099 	 *      -
1100 	 *       -
1101 	 *     <->
1102 	 *     <>
1103 	 *      <>
1104 	 * 0123456789a
1105 	 * PPPVVVVVNNN
1106 	 */
1107 
1108 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1109 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1110 	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
1111 
1112 	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
1113 	vmg.prev = vma;
1114 	vmg.vma = vma;
1115 	ASSERT_EQ(merge_existing(&vmg), NULL);
1116 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1117 
1118 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1119 	vmg.prev = vma;
1120 	vmg.vma = vma;
1121 	ASSERT_EQ(merge_existing(&vmg), NULL);
1122 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1123 
1124 	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
1125 	vmg.prev = vma;
1126 	vmg.vma = vma;
1127 	ASSERT_EQ(merge_existing(&vmg), NULL);
1128 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1129 
1130 	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
1131 	vmg.prev = vma;
1132 	vmg.vma = vma;
1133 	ASSERT_EQ(merge_existing(&vmg), NULL);
1134 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1135 
1136 	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1137 	vmg.prev = vma;
1138 	vmg.vma = vma;
1139 	ASSERT_EQ(merge_existing(&vmg), NULL);
1140 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1141 
1142 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1143 	vmg.prev = vma;
1144 	vmg.vma = vma;
1145 	ASSERT_EQ(merge_existing(&vmg), NULL);
1146 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1147 
1148 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1149 
1150 	return true;
1151 }
1152 
1153 static bool test_anon_vma_non_mergeable(void)
1154 {
1155 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1156 	struct mm_struct mm = {};
1157 	VMA_ITERATOR(vmi, &mm, 0);
1158 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1159 	struct vma_merge_struct vmg = {
1160 		.mm = &mm,
1161 		.vmi = &vmi,
1162 	};
1163 	struct anon_vma_chain dummy_anon_vma_chain1 = {
1164 		.anon_vma = &dummy_anon_vma,
1165 	};
1166 	struct anon_vma_chain dummy_anon_vma_chain2 = {
1167 		.anon_vma = &dummy_anon_vma,
1168 	};
1169 
1170 	/*
1171 	 * In the modified VMA merge case, where we merge both left and right
1172 	 * VMAs but prev and next have incompatible anon_vma objects, we revert
1173 	 * to a merge of prev and vma:
1174 	 *
1175 	 *    <-->
1176 	 * 0123456789
1177 	 * PPPVVVVNNN
1178 	 *            ->
1179 	 * 0123456789
1180 	 * PPPPPPPNNN
1181 	 */
1182 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1183 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1184 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1185 
1186 	/*
1187 	 * Give both prev and next a single anon_vma_chain entry, so they will
1188 	 * merge with the NULL vmg->anon_vma.
1189 	 *
1190 	 * However, when prev is compared to next, the merge should fail.
1191 	 */
1192 
1193 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1194 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1195 	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
1196 	vma_prev->anon_vma = &dummy_anon_vma;
1197 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));
1198 
1199 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1200 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1201 	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
1202 	vma_next->anon_vma = (struct anon_vma *)2;
1203 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
1204 
1205 	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));
1206 
1207 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1208 	vmg.prev = vma_prev;
1209 	vmg.vma = vma;
1210 
1211 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1212 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1213 	ASSERT_EQ(vma_prev->vm_start, 0);
1214 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1215 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1216 	ASSERT_TRUE(vma_write_started(vma_prev));
1217 	ASSERT_FALSE(vma_write_started(vma_next));
1218 
1219 	/* Clear down and reset. */
1220 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1221 
1222 	/*
1223 	 * Now consider the new VMA case. This is equivalent, only adding a new
1224 	 * VMA in a gap between prev and next.
1225 	 *
1226 	 *    <-->
1227 	 * 0123456789
1228 	 * PPP****NNN
1229 	 *            ->
1230 	 * 0123456789
1231 	 * PPPPPPPNNN
1232 	 */
1233 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1234 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1235 
1236 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1237 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1238 	vma_prev->anon_vma = (struct anon_vma *)1;
1239 
1240 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1241 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1242 	vma_next->anon_vma = (struct anon_vma *)2;
1243 
1244 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1245 	vmg.prev = vma_prev;
1246 
1247 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1248 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1249 	ASSERT_EQ(vma_prev->vm_start, 0);
1250 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1251 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1252 	ASSERT_TRUE(vma_write_started(vma_prev));
1253 	ASSERT_FALSE(vma_write_started(vma_next));
1254 
1255 	/* Final cleanup. */
1256 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1257 
1258 	return true;
1259 }
1260 
1261 static bool test_dup_anon_vma(void)
1262 {
1263 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1264 	struct mm_struct mm = {};
1265 	VMA_ITERATOR(vmi, &mm, 0);
1266 	struct vma_merge_struct vmg = {
1267 		.mm = &mm,
1268 		.vmi = &vmi,
1269 	};
1270 	struct anon_vma_chain dummy_anon_vma_chain = {
1271 		.anon_vma = &dummy_anon_vma,
1272 	};
1273 	struct vm_area_struct *vma_prev, *vma_next, *vma;
1274 
1275 	reset_dummy_anon_vma();
1276 
1277 	/*
1278 	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
1279 	 * assigns it to the expanded VMA.
1280 	 *
1281 	 * This covers new VMA merging, as these operations amount to a VMA
1282 	 * expand.
1283 	 */
1284 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1285 	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1286 	vma_next->anon_vma = &dummy_anon_vma;
1287 
1288 	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1289 	vmg.vma = vma_prev;
1290 	vmg.next = vma_next;
1291 
1292 	ASSERT_EQ(expand_existing(&vmg), 0);
1293 
1294 	/* Will have been cloned. */
1295 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1296 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1297 
1298 	/* Cleanup ready for next run. */
1299 	cleanup_mm(&mm, &vmi);
1300 
1301 	/*
1302 	 * next has anon_vma, we assign to prev.
1303 	 *
1304 	 *         |<----->|
1305 	 * |-------*********-------|
1306 	 *   prev     vma     next
1307 	 *  extend   delete  delete
1308 	 */
1309 
1310 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1311 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1312 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1313 
1314 	/* Initialise avc so mergeability check passes. */
1315 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1316 	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1317 
1318 	vma_next->anon_vma = &dummy_anon_vma;
1319 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1320 	vmg.prev = vma_prev;
1321 	vmg.vma = vma;
1322 
1323 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1324 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1325 
1326 	ASSERT_EQ(vma_prev->vm_start, 0);
1327 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1328 
1329 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1330 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1331 
1332 	cleanup_mm(&mm, &vmi);
1333 
1334 	/*
1335 	 * vma has anon_vma, we assign to prev.
1336 	 *
1337 	 *         |<----->|
1338 	 * |-------*********-------|
1339 	 *   prev     vma     next
1340 	 *  extend   delete  delete
1341 	 */
1342 
1343 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1344 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1345 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1346 
1347 	vma->anon_vma = &dummy_anon_vma;
1348 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1349 	vmg.prev = vma_prev;
1350 	vmg.vma = vma;
1351 
1352 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1353 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1354 
1355 	ASSERT_EQ(vma_prev->vm_start, 0);
1356 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1357 
1358 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1359 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1360 
1361 	cleanup_mm(&mm, &vmi);
1362 
1363 	/*
1364 	 * vma has anon_vma, we assign to prev.
1365 	 *
1366 	 *         |<----->|
1367 	 * |-------*************
1368 	 *   prev       vma
1369 	 *  extend shrink/delete
1370 	 */
1371 
1372 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1373 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1374 
1375 	vma->anon_vma = &dummy_anon_vma;
1376 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1377 	vmg.prev = vma_prev;
1378 	vmg.vma = vma;
1379 
1380 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1381 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1382 
1383 	ASSERT_EQ(vma_prev->vm_start, 0);
1384 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
1385 
1386 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1387 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1388 
1389 	cleanup_mm(&mm, &vmi);
1390 
1391 	/*
1392 	 * vma has anon_vma, we assign to next.
1393 	 *
1394 	 *     |<----->|
1395 	 * *************-------|
1396 	 *      vma       next
1397 	 * shrink/delete extend
1398 	 */
1399 
1400 	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1401 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1402 
1403 	vma->anon_vma = &dummy_anon_vma;
1404 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1405 	vmg.prev = vma;
1406 	vmg.vma = vma;
1407 
1408 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1409 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1410 
1411 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1412 	ASSERT_EQ(vma_next->vm_end, 0x8000);
1413 
1414 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1415 	ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1416 
1417 	cleanup_mm(&mm, &vmi);
1418 	return true;
1419 }
1420 
1421 static bool test_vmi_prealloc_fail(void)
1422 {
1423 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1424 	struct mm_struct mm = {};
1425 	VMA_ITERATOR(vmi, &mm, 0);
1426 	struct vma_merge_struct vmg = {
1427 		.mm = &mm,
1428 		.vmi = &vmi,
1429 	};
1430 	struct vm_area_struct *vma_prev, *vma;
1431 
1432 	/*
1433 	 * We are merging vma into prev, with vma possessing an anon_vma, which
1434 	 * will be duplicated. We cause the vmi preallocation to fail and assert
1435 	 * the duplicated anon_vma is unlinked.
1436 	 */
1437 
1438 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1439 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1440 	vma->anon_vma = &dummy_anon_vma;
1441 
1442 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1443 	vmg.prev = vma_prev;
1444 	vmg.vma = vma;
1445 
1446 	fail_prealloc = true;
1447 
1448 	/* This will cause the merge to fail. */
1449 	ASSERT_EQ(merge_existing(&vmg), NULL);
1450 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1451 	/* We will already have assigned the anon_vma. */
1452 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1453 	/* And it was both cloned and unlinked. */
1454 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1455 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1456 
1457 	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1458 
1459 	/*
1460 	 * We repeat the same operation for expanding a VMA, which is what new
1461 	 * VMA merging ultimately uses too. This asserts that unlinking is
1462 	 * performed in this case too.
1463 	 */
1464 
1465 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1466 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1467 	vma->anon_vma = &dummy_anon_vma;
1468 
1469 	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1470 	vmg.vma = vma_prev;
1471 	vmg.next = vma;
1472 
1473 	fail_prealloc = true;
1474 	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1475 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1476 
1477 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1478 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1479 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1480 
1481 	cleanup_mm(&mm, &vmi);
1482 	return true;
1483 }
1484 
1485 static bool test_merge_extend(void)
1486 {
1487 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1488 	struct mm_struct mm = {};
1489 	VMA_ITERATOR(vmi, &mm, 0x1000);
1490 	struct vm_area_struct *vma;
1491 
1492 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1493 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1494 
1495 	/*
1496 	 * Extend a VMA into the gap between itself and the following VMA.
1497 	 * This should result in a merge.
1498 	 *
1499 	 * <->
1500 	 * *  *
1501 	 *
1502 	 */
1503 
1504 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1505 	ASSERT_EQ(vma->vm_start, 0);
1506 	ASSERT_EQ(vma->vm_end, 0x4000);
1507 	ASSERT_EQ(vma->vm_pgoff, 0);
1508 	ASSERT_TRUE(vma_write_started(vma));
1509 	ASSERT_EQ(mm.map_count, 1);
1510 
1511 	cleanup_mm(&mm, &vmi);
1512 	return true;
1513 }
1514 
1515 static bool test_copy_vma(void)
1516 {
1517 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1518 	struct mm_struct mm = {};
1519 	bool need_locks = false;
1520 	VMA_ITERATOR(vmi, &mm, 0);
1521 	struct vm_area_struct *vma, *vma_new, *vma_next;
1522 
1523 	/* Move backwards and do not merge. */
1524 
1525 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1526 	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1527 	ASSERT_NE(vma_new, vma);
1528 	ASSERT_EQ(vma_new->vm_start, 0);
1529 	ASSERT_EQ(vma_new->vm_end, 0x2000);
1530 	ASSERT_EQ(vma_new->vm_pgoff, 0);
1531 	vma_assert_attached(vma_new);
1532 
1533 	cleanup_mm(&mm, &vmi);
1534 
1535 	/* Move a VMA into position next to another and merge the two. */
1536 
1537 	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1538 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1539 	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1540 	vma_assert_attached(vma_new);
1541 
1542 	ASSERT_EQ(vma_new, vma_next);
1543 
1544 	cleanup_mm(&mm, &vmi);
1545 	return true;
1546 }
1547 
1548 static bool test_expand_only_mode(void)
1549 {
1550 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1551 	struct mm_struct mm = {};
1552 	VMA_ITERATOR(vmi, &mm, 0);
1553 	struct vm_area_struct *vma_prev, *vma;
1554 	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);
1555 
1556 	/*
1557 	 * Place a VMA prior to the one we're expanding so we assert that we do
1558 	 * not erroneously try to traverse to the previous VMA even though we
1559 	 * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
1560 	 * need to do so.
1561 	 */
1562 	alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1563 
1564 	/*
1565 	 * We will be positioned at the prev VMA, but looking to expand to
1566 	 * 0x9000.
1567 	 */
1568 	vma_iter_set(&vmi, 0x3000);
1569 	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1570 	vmg.prev = vma_prev;
1571 	vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
1572 
1573 	vma = vma_merge_new_range(&vmg);
1574 	ASSERT_NE(vma, NULL);
1575 	ASSERT_EQ(vma, vma_prev);
1576 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1577 	ASSERT_EQ(vma->vm_start, 0x3000);
1578 	ASSERT_EQ(vma->vm_end, 0x9000);
1579 	ASSERT_EQ(vma->vm_pgoff, 3);
1580 	ASSERT_TRUE(vma_write_started(vma));
1581 	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
1582 	vma_assert_attached(vma);
1583 
1584 	cleanup_mm(&mm, &vmi);
1585 	return true;
1586 }
1587 
1588 int main(void)
1589 {
1590 	int num_tests = 0, num_fail = 0;
1591 
1592 	maple_tree_init();
1593 
1594 #define TEST(name)							\
1595 	do {								\
1596 		num_tests++;						\
1597 		if (!test_##name()) {					\
1598 			num_fail++;					\
1599 			fprintf(stderr, "Test " #name " FAILED\n");	\
1600 		}							\
1601 	} while (0)
1602 
1603 	/* Very simple tests to kick the tyres. */
1604 	TEST(simple_merge);
1605 	TEST(simple_modify);
1606 	TEST(simple_expand);
1607 	TEST(simple_shrink);
1608 
1609 	TEST(merge_new);
1610 	TEST(vma_merge_special_flags);
1611 	TEST(vma_merge_with_close);
1612 	TEST(vma_merge_new_with_close);
1613 	TEST(merge_existing);
1614 	TEST(anon_vma_non_mergeable);
1615 	TEST(dup_anon_vma);
1616 	TEST(vmi_prealloc_fail);
1617 	TEST(merge_extend);
1618 	TEST(copy_vma);
1619 	TEST(expand_only_mode);
1620 
1621 #undef TEST
1622 
1623 	printf("%d tests run, %d passed, %d failed.\n",
1624 	       num_tests, num_tests - num_fail, num_fail);
1625 
1626 	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1627 }
1628