1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 #define _GNU_SOURCE
4 #include "../kselftest_harness.h"
5 #include <asm-generic/mman.h> /* Force the import of the tools version. */
6 #include <assert.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <linux/limits.h>
10 #include <linux/userfaultfd.h>
11 #include <setjmp.h>
12 #include <signal.h>
13 #include <stdbool.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/ioctl.h>
18 #include <sys/mman.h>
19 #include <sys/syscall.h>
20 #include <sys/uio.h>
21 #include <unistd.h>
22 #include "vm_util.h"
23
24 #include "../pidfd/pidfd.h"
25
26 /*
27 * Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
28 *
29 * "If the signal occurs other than as the result of calling the abort or raise
30 * function, the behavior is undefined if the signal handler refers to any
31 * object with static storage duration other than by assigning a value to an
32 * object declared as volatile sig_atomic_t"
33 */
34 static volatile sig_atomic_t signal_jump_set;
35 static sigjmp_buf signal_jmp_buf;
36
37 /*
38 * Ignore the checkpatch warning, we must read from x but don't want to do
39 * anything with it in order to trigger a read page fault. We therefore must use
40 * volatile to stop the compiler from optimising this away.
41 */
42 #define FORCE_READ(x) (*(volatile typeof(x) *)x)
43
44 /*
45 * How is the test backing the mapping being tested?
46 */
47 enum backing_type {
48 ANON_BACKED,
49 SHMEM_BACKED,
50 LOCAL_FILE_BACKED,
51 };
52
53 FIXTURE(guard_regions)
54 {
55 unsigned long page_size;
56 char path[PATH_MAX];
57 int fd;
58 };
59
60 FIXTURE_VARIANT(guard_regions)
61 {
62 enum backing_type backing;
63 };
64
65 FIXTURE_VARIANT_ADD(guard_regions, anon)
66 {
67 .backing = ANON_BACKED,
68 };
69
70 FIXTURE_VARIANT_ADD(guard_regions, shmem)
71 {
72 .backing = SHMEM_BACKED,
73 };
74
75 FIXTURE_VARIANT_ADD(guard_regions, file)
76 {
77 .backing = LOCAL_FILE_BACKED,
78 };
79
80 static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant)
81 {
82 switch (variant->backing) {
83 case ANON_BACKED:
84 case SHMEM_BACKED:
85 return true;
86 default:
87 return false;
88 }
89 }
90
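/*
 * Wrapper around mmap() which selects flags and fd according to the backing
 * type under test: anonymous mappings use MAP_PRIVATE | MAP_ANON, while shmem
 * and local file-backed mappings use MAP_SHARED against the fixture's fd.
 */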
91 static void *mmap_(FIXTURE_DATA(guard_regions) * self,
92 const FIXTURE_VARIANT(guard_regions) * variant,
93 void *addr, size_t length, int prot, int extra_flags,
94 off_t offset)
95 {
96 int fd;
97 int flags = extra_flags;
98
99 switch (variant->backing) {
100 case ANON_BACKED:
101 flags |= MAP_PRIVATE | MAP_ANON;
102 fd = -1;
103 break;
104 case SHMEM_BACKED:
105 case LOCAL_FILE_BACKED:
106 flags |= MAP_SHARED;
107 fd = self->fd;
108 break;
109 default:
110 ksft_exit_fail();
111 break;
112 }
113
114 return mmap(addr, length, prot, flags, fd, offset);
115 }
116
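/* Invoke the userfaultfd() system call directly via syscall(2). */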
117 static int userfaultfd(int flags)
118 {
119 return syscall(SYS_userfaultfd, flags);
120 }
121
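/*
 * SIGSEGV handler: if a test has armed a jump target (signal_jump_set),
 * siglongjmp() back to it so the test can observe that a fatal signal arose.
 */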
122 static void handle_fatal(int c)
123 {
124 if (!signal_jump_set)
125 return;
126
127 siglongjmp(signal_jmp_buf, c);
128 }
129
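/* Invoke process_madvise() directly via syscall(2). */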
130 static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
131 size_t n, int advice, unsigned int flags)
132 {
133 return syscall(__NR_process_madvise, pidfd, iovec, n, advice, flags);
134 }
135
136 /*
137 * Enable our signal catcher and try to read/write the specified buffer. The
138 * return value indicates whether the read/write succeeds without a fatal
139 * signal.
140 */
141 static bool try_access_buf(char *ptr, bool write)
142 {
143 bool failed;
144
145 /* Tell signal handler to jump back here on fatal signal. */
146 signal_jump_set = true;
147 /* If a fatal signal arose, we will jump back here and failed is set. */
148 failed = sigsetjmp(signal_jmp_buf, 0) != 0;
149
150 if (!failed) {
151 if (write)
152 *ptr = 'x';
153 else
154 FORCE_READ(ptr);
155 }
156
157 signal_jump_set = false;
158 return !failed;
159 }
160
161 /* Try and read from a buffer, return true if no fatal signal. */
162 static bool try_read_buf(char *ptr)
163 {
164 return try_access_buf(ptr, false);
165 }
166
167 /* Try and write to a buffer, return true if no fatal signal. */
168 static bool try_write_buf(char *ptr)
169 {
170 return try_access_buf(ptr, true);
171 }
172
173 /*
174 * Try and BOTH read from AND write to a buffer, return true if BOTH operations
175 * succeed.
176 */
177 static bool try_read_write_buf(char *ptr)
178 {
179 return try_read_buf(ptr) && try_write_buf(ptr);
180 }
181
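/* Install the SIGSEGV handler used to detect faults on guard regions. */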
182 static void setup_sighandler(void)
183 {
184 struct sigaction act = {
185 .sa_handler = &handle_fatal,
186 .sa_flags = SA_NODEFER,
187 };
188
189 sigemptyset(&act.sa_mask);
190 if (sigaction(SIGSEGV, &act, NULL))
191 ksft_exit_fail_perror("sigaction");
192 }
193
194 static void teardown_sighandler(void)
195 {
196 struct sigaction act = {
197 .sa_handler = SIG_DFL,
198 .sa_flags = SA_NODEFER,
199 };
200
201 sigemptyset(&act.sa_mask);
202 sigaction(SIGSEGV, &act, NULL);
203 }
204
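/*
 * Create a temporary file via mkstemp() under the given directory prefix,
 * storing its path in path and returning its file descriptor.
 */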
205 static int open_file(const char *prefix, char *path)
206 {
207 int fd;
208
209 snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix);
210 fd = mkstemp(path);
211 if (fd < 0)
212 ksft_exit_fail_perror("mkstemp");
213
214 return fd;
215 }
216
217 /* Establish a varying pattern in a buffer. */
218 static void set_pattern(char *ptr, size_t num_pages, size_t page_size)
219 {
220 size_t i;
221
222 for (i = 0; i < num_pages; i++) {
223 char *ptr2 = &ptr[i * page_size];
224
225 memset(ptr2, 'a' + (i % 26), page_size);
226 }
227 }
228
229 /*
230 * Check that a buffer contains the pattern set by set_pattern(), starting at a
231 * page offset of pgoff within the buffer.
232 */
233 static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size,
234 size_t pgoff)
235 {
236 size_t i;
237
238 for (i = 0; i < num_pages * page_size; i++) {
239 size_t offset = pgoff * page_size + i;
240 char actual = ptr[offset];
241 char expected = 'a' + ((offset / page_size) % 26);
242
243 if (actual != expected)
244 return false;
245 }
246
247 return true;
248 }
249
250 /* Check that a buffer contains the pattern set by set_pattern(). */
251 static bool check_pattern(char *ptr, size_t num_pages, size_t page_size)
252 {
253 return check_pattern_offset(ptr, num_pages, page_size, 0);
254 }
255
256 /* Determine if a buffer contains only repetitions of a specified char. */
257 static bool is_buf_eq(char *buf, size_t size, char chr)
258 {
259 size_t i;
260
261 for (i = 0; i < size; i++) {
262 if (buf[i] != chr)
263 return false;
264 }
265
266 return true;
267 }
268
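/*
 * Per-test setup: install the signal handler and, for file/shmem-backed
 * variants, create and size the backing file.
 */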
269 FIXTURE_SETUP(guard_regions)
270 {
271 self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
272 setup_sighandler();
273
274 if (variant->backing == ANON_BACKED)
275 return;
276
277 self->fd = open_file(
278 variant->backing == SHMEM_BACKED ? "/tmp/" : "",
279 self->path);
280
281 /* We truncate the file to at least 100 pages; tests can modify this as needed. */
282 ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0);
283 };
284
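/*
 * Per-test teardown: restore default SIGSEGV handling and clean up any
 * backing file.
 */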
285 FIXTURE_TEARDOWN_PARENT(guard_regions)
286 {
287 teardown_sighandler();
288
289 if (variant->backing == ANON_BACKED)
290 return;
291
292 if (self->fd >= 0)
293 close(self->fd);
294
295 if (self->path[0] != '\0')
296 unlink(self->path);
297 }
298
299 TEST_F(guard_regions, basic)
300 {
301 const unsigned long NUM_PAGES = 10;
302 const unsigned long page_size = self->page_size;
303 char *ptr;
304 int i;
305
306 ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size,
307 PROT_READ | PROT_WRITE, 0, 0);
308 ASSERT_NE(ptr, MAP_FAILED);
309
310 /* Trivially assert we can touch the first page. */
311 ASSERT_TRUE(try_read_write_buf(ptr));
312
313 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
314
315 /* Establish that 1st page SIGSEGV's. */
316 ASSERT_FALSE(try_read_write_buf(ptr));
317
318 /* Ensure we can touch everything else. */
319 for (i = 1; i < NUM_PAGES; i++) {
320 char *curr = &ptr[i * page_size];
321
322 ASSERT_TRUE(try_read_write_buf(curr));
323 }
324
325 /* Establish a guard page at the end of the mapping. */
326 ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
327 MADV_GUARD_INSTALL), 0);
328
329 /* Check that both guard pages result in SIGSEGV. */
330 ASSERT_FALSE(try_read_write_buf(ptr));
331 ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
332
333 /* Remove the first guard page. */
334 ASSERT_FALSE(madvise(ptr, page_size, MADV_GUARD_REMOVE));
335
336 /* Make sure we can touch it. */
337 ASSERT_TRUE(try_read_write_buf(ptr));
338
339 /* Remove the last guard page. */
340 ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
341 MADV_GUARD_REMOVE));
342
343 /* Make sure we can touch it. */
344 ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
345
346 /*
347 * Test setting a _range_ of pages, namely the first 3. The first of
348 * these is already faulted in, so this also tests that we can install
349 * guard pages over backed pages.
350 */
351 ASSERT_EQ(madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL), 0);
352
353 /* Make sure they are all guard pages. */
354 for (i = 0; i < 3; i++) {
355 char *curr = &ptr[i * page_size];
356
357 ASSERT_FALSE(try_read_write_buf(curr));
358 }
359
360 /* Make sure the rest are not. */
361 for (i = 3; i < NUM_PAGES; i++) {
362 char *curr = &ptr[i * page_size];
363
364 ASSERT_TRUE(try_read_write_buf(curr));
365 }
366
367 /* Remove guard pages. */
368 ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
369
370 /* Now make sure we can touch everything. */
371 for (i = 0; i < NUM_PAGES; i++) {
372 char *curr = &ptr[i * page_size];
373
374 ASSERT_TRUE(try_read_write_buf(curr));
375 }
376
377 /*
378 * Now remove all guard pages again (none remain), and make sure doing so
379 * does not zap the existing entries.
380 */
381 ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
382
383 for (i = 0; i < NUM_PAGES * page_size; i += page_size) {
384 char chr = ptr[i];
385
386 ASSERT_EQ(chr, 'x');
387 }
388
389 ASSERT_EQ(munmap(ptr, NUM_PAGES * page_size), 0);
390 }
391
392 /* Assert that operations applied across multiple VMAs work as expected. */
393 TEST_F(guard_regions, multi_vma)
394 {
395 const unsigned long page_size = self->page_size;
396 char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
397 int i;
398
399 /* Reserve a 100 page region over which we can install VMAs. */
400 ptr_region = mmap_(self, variant, NULL, 100 * page_size,
401 PROT_NONE, 0, 0);
402 ASSERT_NE(ptr_region, MAP_FAILED);
403
404 /* Place a VMA of 10 pages size at the start of the region. */
405 ptr1 = mmap_(self, variant, ptr_region, 10 * page_size,
406 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
407 ASSERT_NE(ptr1, MAP_FAILED);
408
409 /* Place a VMA of 5 pages size 50 pages into the region. */
410 ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
411 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
412 ASSERT_NE(ptr2, MAP_FAILED);
413
414 /* Place a VMA of 20 pages size at the end of the region. */
415 ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size,
416 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
417 ASSERT_NE(ptr3, MAP_FAILED);
418
419 /* Unmap gaps. */
420 ASSERT_EQ(munmap(&ptr_region[10 * page_size], 40 * page_size), 0);
421 ASSERT_EQ(munmap(&ptr_region[55 * page_size], 25 * page_size), 0);
422
423 /*
424 * We end up with VMAs like this:
425 *
426 * 0 10 .. 50 55 .. 80 100
427 * [---] [---] [---]
428 */
429
430 /*
431 * Now mark the whole range as guard pages and make sure all VMAs are as
432 * such.
433 */
434
435 /*
436 * madvise() is certifiable and lets you perform operations over gaps,
437 * everything works, but it indicates an error and errno is set to
438 * -ENOMEM. Also if anything runs out of memory it is set to
439 * -ENOMEM. You are meant to guess which is which.
440 */
441 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1);
442 ASSERT_EQ(errno, ENOMEM);
443
444 for (i = 0; i < 10; i++) {
445 char *curr = &ptr1[i * page_size];
446
447 ASSERT_FALSE(try_read_write_buf(curr));
448 }
449
450 for (i = 0; i < 5; i++) {
451 char *curr = &ptr2[i * page_size];
452
453 ASSERT_FALSE(try_read_write_buf(curr));
454 }
455
456 for (i = 0; i < 20; i++) {
457 char *curr = &ptr3[i * page_size];
458
459 ASSERT_FALSE(try_read_write_buf(curr));
460 }
461
462 /* Now remove guard pages over the range and assert the opposite. */
463
464 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1);
465 ASSERT_EQ(errno, ENOMEM);
466
467 for (i = 0; i < 10; i++) {
468 char *curr = &ptr1[i * page_size];
469
470 ASSERT_TRUE(try_read_write_buf(curr));
471 }
472
473 for (i = 0; i < 5; i++) {
474 char *curr = &ptr2[i * page_size];
475
476 ASSERT_TRUE(try_read_write_buf(curr));
477 }
478
479 for (i = 0; i < 20; i++) {
480 char *curr = &ptr3[i * page_size];
481
482 ASSERT_TRUE(try_read_write_buf(curr));
483 }
484
485 /* Now map incompatible VMAs in the gaps. */
486 ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size,
487 PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
488 ASSERT_NE(ptr, MAP_FAILED);
489 ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size,
490 PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
491 ASSERT_NE(ptr, MAP_FAILED);
492
493 /*
494 * We end up with VMAs like this:
495 *
496 * 0 10 .. 50 55 .. 80 100
497 * [---][xxxx][---][xxxx][---]
498 *
499 * Where 'x' signifies VMAs that cannot be merged with those adjacent to
500 * them.
501 */
502
503 /* Multiple VMAs adjacent to one another should result in no error. */
504 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), 0);
505 for (i = 0; i < 100; i++) {
506 char *curr = &ptr_region[i * page_size];
507
508 ASSERT_FALSE(try_read_write_buf(curr));
509 }
510 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), 0);
511 for (i = 0; i < 100; i++) {
512 char *curr = &ptr_region[i * page_size];
513
514 ASSERT_TRUE(try_read_write_buf(curr));
515 }
516
517 /* Cleanup. */
518 ASSERT_EQ(munmap(ptr_region, 100 * page_size), 0);
519 }
520
521 /*
522 * Assert that batched operations performed using process_madvise() work as
523 * expected.
524 */
525 TEST_F(guard_regions, process_madvise)
526 {
527 const unsigned long page_size = self->page_size;
528 char *ptr_region, *ptr1, *ptr2, *ptr3;
529 ssize_t count;
530 struct iovec vec[6];
531
532 /* Reserve region to map over. */
533 ptr_region = mmap_(self, variant, NULL, 100 * page_size,
534 PROT_NONE, 0, 0);
535 ASSERT_NE(ptr_region, MAP_FAILED);
536
537 /*
538 * 10 pages offset 1 page into reserve region. We MAP_POPULATE so that
539 * entries are faulted in ahead of time, allowing us to test the code
540 * path that overwrites existing entries.
541 */
542 ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size,
543 PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0);
544 ASSERT_NE(ptr1, MAP_FAILED);
545 /* We want guard markers at start/end of each VMA. */
546 vec[0].iov_base = ptr1;
547 vec[0].iov_len = page_size;
548 vec[1].iov_base = &ptr1[9 * page_size];
549 vec[1].iov_len = page_size;
550
551 /* 5 pages offset 50 pages into reserve region. */
552 ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
553 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
554 ASSERT_NE(ptr2, MAP_FAILED);
555 vec[2].iov_base = ptr2;
556 vec[2].iov_len = page_size;
557 vec[3].iov_base = &ptr2[4 * page_size];
558 vec[3].iov_len = page_size;
559
560 /* 20 pages offset 79 pages into reserve region. */
561 ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size,
562 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
563 ASSERT_NE(ptr3, MAP_FAILED);
564 vec[4].iov_base = ptr3;
565 vec[4].iov_len = page_size;
566 vec[5].iov_base = &ptr3[19 * page_size];
567 vec[5].iov_len = page_size;
568
569 /* Free surrounding VMAs. */
570 ASSERT_EQ(munmap(ptr_region, page_size), 0);
571 ASSERT_EQ(munmap(&ptr_region[11 * page_size], 39 * page_size), 0);
572 ASSERT_EQ(munmap(&ptr_region[55 * page_size], 24 * page_size), 0);
573 ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);
574
575 /* Now guard in one step. */
576 count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_INSTALL, 0);
577
578 /* OK we don't have permission to do this, skip. */
579 if (count == -1 && errno == EPERM)
580 ksft_exit_skip("No process_madvise() permissions, try running as root.\n");
581
582 /* Returns the number of bytes advised. */
583 ASSERT_EQ(count, 6 * page_size);
584
585 /* Now make sure the guarding was applied. */
586
587 ASSERT_FALSE(try_read_write_buf(ptr1));
588 ASSERT_FALSE(try_read_write_buf(&ptr1[9 * page_size]));
589
590 ASSERT_FALSE(try_read_write_buf(ptr2));
591 ASSERT_FALSE(try_read_write_buf(&ptr2[4 * page_size]));
592
593 ASSERT_FALSE(try_read_write_buf(ptr3));
594 ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));
595
596 /* Now do the same with unguard... */
597 count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_REMOVE, 0);
598
599 /* ...and everything should now succeed. */
600
601 ASSERT_TRUE(try_read_write_buf(ptr1));
602 ASSERT_TRUE(try_read_write_buf(&ptr1[9 * page_size]));
603
604 ASSERT_TRUE(try_read_write_buf(ptr2));
605 ASSERT_TRUE(try_read_write_buf(&ptr2[4 * page_size]));
606
607 ASSERT_TRUE(try_read_write_buf(ptr3));
608 ASSERT_TRUE(try_read_write_buf(&ptr3[19 * page_size]));
609
610 /* Cleanup. */
611 ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
612 ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
613 ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
614 }
615
616 /* Assert that unmapping ranges does not leave guard markers behind. */
617 TEST_F(guard_regions, munmap)
618 {
619 const unsigned long page_size = self->page_size;
620 char *ptr, *ptr_new1, *ptr_new2;
621
622 ptr = mmap_(self, variant, NULL, 10 * page_size,
623 PROT_READ | PROT_WRITE, 0, 0);
624 ASSERT_NE(ptr, MAP_FAILED);
625
626 /* Guard first and last pages. */
627 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
628 ASSERT_EQ(madvise(&ptr[9 * page_size], page_size, MADV_GUARD_INSTALL), 0);
629
630 /* Assert that they are guarded. */
631 ASSERT_FALSE(try_read_write_buf(ptr));
632 ASSERT_FALSE(try_read_write_buf(&ptr[9 * page_size]));
633
634 /* Unmap them. */
635 ASSERT_EQ(munmap(ptr, page_size), 0);
636 ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);
637
638 /* Map over them. */
639 ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE,
640 MAP_FIXED, 0);
641 ASSERT_NE(ptr_new1, MAP_FAILED);
642 ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size,
643 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
644 ASSERT_NE(ptr_new2, MAP_FAILED);
645
646 /* Assert that they are now not guarded. */
647 ASSERT_TRUE(try_read_write_buf(ptr_new1));
648 ASSERT_TRUE(try_read_write_buf(ptr_new2));
649
650 /* Cleanup. */
651 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
652 }
653
654 /* Assert that mprotect() operations have no bearing on guard markers. */
655 TEST_F(guard_regions, mprotect)
656 {
657 const unsigned long page_size = self->page_size;
658 char *ptr;
659 int i;
660
661 ptr = mmap_(self, variant, NULL, 10 * page_size,
662 PROT_READ | PROT_WRITE, 0, 0);
663 ASSERT_NE(ptr, MAP_FAILED);
664
665 /* Guard the middle of the range. */
666 ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
667 MADV_GUARD_INSTALL), 0);
668
669 /* Assert that it is indeed guarded. */
670 ASSERT_FALSE(try_read_write_buf(&ptr[5 * page_size]));
671 ASSERT_FALSE(try_read_write_buf(&ptr[6 * page_size]));
672
673 /* Now make these pages read-only. */
674 ASSERT_EQ(mprotect(&ptr[5 * page_size], 2 * page_size, PROT_READ), 0);
675
676 /* Make sure the range is still guarded. */
677 ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
678 ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
679
680 /* Make sure we can guard again without issue. */
681 ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
682 MADV_GUARD_INSTALL), 0);
683
684 /* Make sure the range is, yet again, still guarded. */
685 ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
686 ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
687
688 /* Now unguard the whole range. */
689 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
690
691 /* Make sure the whole range is readable. */
692 for (i = 0; i < 10; i++) {
693 char *curr = &ptr[i * page_size];
694
695 ASSERT_TRUE(try_read_buf(curr));
696 }
697
698 /* Cleanup. */
699 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
700 }
701
702 /* Split and merge VMAs and make sure guard pages still behave. */
703 TEST_F(guard_regions, split_merge)
704 {
705 const unsigned long page_size = self->page_size;
706 char *ptr, *ptr_new;
707 int i;
708
709 ptr = mmap_(self, variant, NULL, 10 * page_size,
710 PROT_READ | PROT_WRITE, 0, 0);
711 ASSERT_NE(ptr, MAP_FAILED);
712
713 /* Guard the whole range. */
714 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
715
716 /* Make sure the whole range is guarded. */
717 for (i = 0; i < 10; i++) {
718 char *curr = &ptr[i * page_size];
719
720 ASSERT_FALSE(try_read_write_buf(curr));
721 }
722
723 /* Now unmap some pages in the range so we split. */
724 ASSERT_EQ(munmap(&ptr[2 * page_size], page_size), 0);
725 ASSERT_EQ(munmap(&ptr[5 * page_size], page_size), 0);
726 ASSERT_EQ(munmap(&ptr[8 * page_size], page_size), 0);
727
728 /* Make sure the remaining ranges are guarded post-split. */
729 for (i = 0; i < 2; i++) {
730 char *curr = &ptr[i * page_size];
731
732 ASSERT_FALSE(try_read_write_buf(curr));
733 }
734 for (i = 2; i < 5; i++) {
735 char *curr = &ptr[i * page_size];
736
737 ASSERT_FALSE(try_read_write_buf(curr));
738 }
739 for (i = 6; i < 8; i++) {
740 char *curr = &ptr[i * page_size];
741
742 ASSERT_FALSE(try_read_write_buf(curr));
743 }
744 for (i = 9; i < 10; i++) {
745 char *curr = &ptr[i * page_size];
746
747 ASSERT_FALSE(try_read_write_buf(curr));
748 }
749
750 /* Now map them again - the unmap will have cleared the guards. */
751 ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size,
752 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
753 ASSERT_NE(ptr_new, MAP_FAILED);
754 ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size,
755 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
756 ASSERT_NE(ptr_new, MAP_FAILED);
757 ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size,
758 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
759 ASSERT_NE(ptr_new, MAP_FAILED);
760
761 /* Now make sure guard pages are established. */
762 for (i = 0; i < 10; i++) {
763 char *curr = &ptr[i * page_size];
764 bool result = try_read_write_buf(curr);
765 bool expect_true = i == 2 || i == 5 || i == 8;
766
767 ASSERT_TRUE(expect_true ? result : !result);
768 }
769
770 /* Now guard everything again. */
771 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
772
773 /* Make sure the whole range is guarded. */
774 for (i = 0; i < 10; i++) {
775 char *curr = &ptr[i * page_size];
776
777 ASSERT_FALSE(try_read_write_buf(curr));
778 }
779
780 /* Now split the range into three. */
781 ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
782 ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
783
784 /* Make sure the whole range is guarded for read. */
785 for (i = 0; i < 10; i++) {
786 char *curr = &ptr[i * page_size];
787
788 ASSERT_FALSE(try_read_buf(curr));
789 }
790
791 /* Now reset protection bits so we merge the whole thing. */
792 ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
793 ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
794 PROT_READ | PROT_WRITE), 0);
795
796 /* Make sure the whole range is still guarded. */
797 for (i = 0; i < 10; i++) {
798 char *curr = &ptr[i * page_size];
799
800 ASSERT_FALSE(try_read_write_buf(curr));
801 }
802
803 /* Split range into 3 again... */
804 ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
805 ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
806
807 /* ...and unguard the whole range. */
808 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
809
810 /* Make sure the whole range is remedied for read. */
811 for (i = 0; i < 10; i++) {
812 char *curr = &ptr[i * page_size];
813
814 ASSERT_TRUE(try_read_buf(curr));
815 }
816
817 /* Merge them again. */
818 ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
819 ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
820 PROT_READ | PROT_WRITE), 0);
821
822 /* Now ensure the merged range is remedied for read/write. */
823 for (i = 0; i < 10; i++) {
824 char *curr = &ptr[i * page_size];
825
826 ASSERT_TRUE(try_read_write_buf(curr));
827 }
828
829 /* Cleanup. */
830 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
831 }
832
833 /* Assert that MADV_DONTNEED does not remove guard markers. */
834 TEST_F(guard_regions, dontneed)
835 {
836 const unsigned long page_size = self->page_size;
837 char *ptr;
838 int i;
839
840 ptr = mmap_(self, variant, NULL, 10 * page_size,
841 PROT_READ | PROT_WRITE, 0, 0);
842 ASSERT_NE(ptr, MAP_FAILED);
843
844 /* Back the whole range. */
845 for (i = 0; i < 10; i++) {
846 char *curr = &ptr[i * page_size];
847
848 *curr = 'y';
849 }
850
851 /* Guard every other page. */
852 for (i = 0; i < 10; i += 2) {
853 char *curr = &ptr[i * page_size];
854 int res = madvise(curr, page_size, MADV_GUARD_INSTALL);
855
856 ASSERT_EQ(res, 0);
857 }
858
859 /* Indicate that we don't need any of the range. */
860 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);
861
862 /* Check to ensure guard markers are still in place. */
863 for (i = 0; i < 10; i++) {
864 char *curr = &ptr[i * page_size];
865 bool result = try_read_buf(curr);
866
867 if (i % 2 == 0) {
868 ASSERT_FALSE(result);
869 } else {
870 ASSERT_TRUE(result);
871 switch (variant->backing) {
872 case ANON_BACKED:
873 /* If anon, then we get a zero page. */
874 ASSERT_EQ(*curr, '\0');
875 break;
876 default:
877 /* Otherwise, we get the file data. */
878 ASSERT_EQ(*curr, 'y');
879 break;
880 }
881 }
882
883 /* Now write... */
884 result = try_write_buf(&ptr[i * page_size]);
885
886 /* ...and make sure same result. */
887 ASSERT_TRUE(i % 2 != 0 ? result : !result);
888 }
889
890 /* Cleanup. */
891 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
892 }
893
894 /* Assert that mlock()'ed pages work correctly with guard markers. */
895 TEST_F(guard_regions, mlock)
896 {
897 const unsigned long page_size = self->page_size;
898 char *ptr;
899 int i;
900
901 ptr = mmap_(self, variant, NULL, 10 * page_size,
902 PROT_READ | PROT_WRITE, 0, 0);
903 ASSERT_NE(ptr, MAP_FAILED);
904
905 /* Populate. */
906 for (i = 0; i < 10; i++) {
907 char *curr = &ptr[i * page_size];
908
909 *curr = 'y';
910 }
911
912 /* Lock. */
913 ASSERT_EQ(mlock(ptr, 10 * page_size), 0);
914
915 /* Now try to guard, should fail with EINVAL. */
916 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
917 ASSERT_EQ(errno, EINVAL);
918
919 /* OK unlock. */
920 ASSERT_EQ(munlock(ptr, 10 * page_size), 0);
921
922 /* Guard first half of range, should now succeed. */
923 ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
924
925 /* Make sure guard works. */
926 for (i = 0; i < 10; i++) {
927 char *curr = &ptr[i * page_size];
928 bool result = try_read_write_buf(curr);
929
930 if (i < 5) {
931 ASSERT_FALSE(result);
932 } else {
933 ASSERT_TRUE(result);
934 ASSERT_EQ(*curr, 'x');
935 }
936 }
937
938 /*
939 * Now lock the latter part of the range. We can't lock the guard pages,
940 * as this would result in the pages being populated and the guarding
941 * would cause this to error out.
942 */
943 ASSERT_EQ(mlock(&ptr[5 * page_size], 5 * page_size), 0);
944
945 /*
946 * Now remove guard pages, we permit mlock()'d ranges to have guard
947 * pages removed as it is a non-destructive operation.
948 */
949 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
950
951 /* Now check that no guard pages remain. */
952 for (i = 0; i < 10; i++) {
953 char *curr = &ptr[i * page_size];
954
955 ASSERT_TRUE(try_read_write_buf(curr));
956 }
957
958 /* Cleanup. */
959 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
960 }
961
962 /*
963 * Assert that moving, extending and shrinking memory via mremap() retains
964 * guard markers where possible.
965 *
966 * - Moving a mapping alone should retain markers as they are.
967 */
968 TEST_F(guard_regions, mremap_move)
969 {
970 const unsigned long page_size = self->page_size;
971 char *ptr, *ptr_new;
972
973 /* Map 5 pages. */
974 ptr = mmap_(self, variant, NULL, 5 * page_size,
975 PROT_READ | PROT_WRITE, 0, 0);
976 ASSERT_NE(ptr, MAP_FAILED);
977
978 /* Place guard markers at both ends of the 5 page span. */
979 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
980 ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
981
982 /* Make sure the guard pages are in effect. */
983 ASSERT_FALSE(try_read_write_buf(ptr));
984 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
985
986 /* Map a new region we will move this range into. Doing this ensures
987 * that we have reserved a range to map into.
988 */
989 ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0);
990 ASSERT_NE(ptr_new, MAP_FAILED);
991
992 ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
993 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new), ptr_new);
994
995 /* Make sure the guard markers are retained. */
996 ASSERT_FALSE(try_read_write_buf(ptr_new));
997 ASSERT_FALSE(try_read_write_buf(&ptr_new[4 * page_size]));
998
999 /*
1000 * Clean up - we only need reference the new pointer as we overwrote the
1001 * PROT_NONE range and moved the existing one.
1002 */
1003 munmap(ptr_new, 5 * page_size);
1004 }
1005
1006 /*
1007 * Assert that moving, extending and shrinking memory via mremap() retains
1008 * guard markers where possible.
1009 *
1010 * Expanding should retain guard pages, only now in a different position. The user
1011 * will have to remove guard pages manually to fix up (they'd have to do the
1012 * same if it were a PROT_NONE mapping).
1013 */
1014 TEST_F(guard_regions, mremap_expand)
1015 {
1016 const unsigned long page_size = self->page_size;
1017 char *ptr, *ptr_new;
1018
1019 /* Map 10 pages... */
1020 ptr = mmap_(self, variant, NULL, 10 * page_size,
1021 PROT_READ | PROT_WRITE, 0, 0);
1022 ASSERT_NE(ptr, MAP_FAILED);
1023 /* ...But unmap the last 5 so we can ensure we can expand into them. */
1024 ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
1025
1026 /* Place guard markers at both ends of the 5 page span. */
1027 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1028 ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1029
1030 /* Make sure the guarding is in effect. */
1031 ASSERT_FALSE(try_read_write_buf(ptr));
1032 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1033
1034 /* Now expand to 10 pages. */
1035 ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
1036 ASSERT_NE(ptr, MAP_FAILED);
1037
1038 /*
1039 * Make sure the guard markers are retained in their original positions.
1040 */
1041 ASSERT_FALSE(try_read_write_buf(ptr));
1042 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1043
1044 /* Reserve a region which we can move to and expand into. */
1045 ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0);
1046 ASSERT_NE(ptr_new, MAP_FAILED);
1047
1048 /* Now move and expand into it. */
1049 ptr = mremap(ptr, 10 * page_size, 20 * page_size,
1050 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
1051 ASSERT_EQ(ptr, ptr_new);
1052
1053 /*
1054 * Again, make sure the guard markers are retained in their original positions.
1055 */
1056 ASSERT_FALSE(try_read_write_buf(ptr));
1057 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1058
1059 /*
1060 * A real user would have to remove guard markers, but would reasonably
1061 * expect all characteristics of the mapping to be retained, including
1062 * guard markers.
1063 */
1064
1065 /* Cleanup. */
1066 munmap(ptr, 20 * page_size);
1067 }
1068 /*
1069 * Assert that moving, extending and shrinking memory via mremap() retains
1070 * guard markers where possible.
1071 *
1072 * Shrinking will result in markers that are shrunk over being removed. Again,
1073 * if the user were using a PROT_NONE mapping they'd have to manually fix this
1074 * up also so this is OK.
1075 */
1076 TEST_F(guard_regions, mremap_shrink)
1077 {
1078 const unsigned long page_size = self->page_size;
1079 char *ptr;
1080 int i;
1081
1082 /* Map 5 pages. */
1083 ptr = mmap_(self, variant, NULL, 5 * page_size,
1084 PROT_READ | PROT_WRITE, 0, 0);
1085 ASSERT_NE(ptr, MAP_FAILED);
1086
1087 /* Place guard markers at both ends of the 5 page span. */
1088 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1089 ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1090
1091 /* Make sure the guarding is in effect. */
1092 ASSERT_FALSE(try_read_write_buf(ptr));
1093 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1094
1095 /* Now shrink to 3 pages. */
1096 ptr = mremap(ptr, 5 * page_size, 3 * page_size, MREMAP_MAYMOVE);
1097 ASSERT_NE(ptr, MAP_FAILED);
1098
1099 /* We expect the guard marker at the start to be retained... */
1100 ASSERT_FALSE(try_read_write_buf(ptr));
1101
1102 /* ...But remaining pages will not have guard markers. */
1103 for (i = 1; i < 3; i++) {
1104 char *curr = &ptr[i * page_size];
1105
1106 ASSERT_TRUE(try_read_write_buf(curr));
1107 }
1108
1109 /*
1110 * As with expansion, a real user would have to remove guard pages and
1111 * fixup. But you'd have to do similar manual things with PROT_NONE
1112 * mappings too.
1113 */
1114
1115 /*
1116 * If we expand back to the original size, the end marker will, of
1117 * course, no longer be present.
1118 */
1119 ptr = mremap(ptr, 3 * page_size, 5 * page_size, 0);
1120 ASSERT_NE(ptr, MAP_FAILED);
1121
1122 /* Again, we expect the guard marker at the start to be retained... */
1123 ASSERT_FALSE(try_read_write_buf(ptr));
1124
1125 /* ...But remaining pages will not have guard markers. */
1126 for (i = 1; i < 5; i++) {
1127 char *curr = &ptr[i * page_size];
1128
1129 ASSERT_TRUE(try_read_write_buf(curr));
1130 }
1131
1132 /* Cleanup. */
1133 munmap(ptr, 5 * page_size);
1134 }
1135
1136 /*
1137 * Assert that forking a process with VMAs that do not have VM_WIPEONFORK set
1138 * retain guard pages.
1139 */
1140 TEST_F(guard_regions, fork)
1141 {
1142 const unsigned long page_size = self->page_size;
1143 char *ptr;
1144 pid_t pid;
1145 int i;
1146
1147 /* Map 10 pages. */
1148 ptr = mmap_(self, variant, NULL, 10 * page_size,
1149 PROT_READ | PROT_WRITE, 0, 0);
1150 ASSERT_NE(ptr, MAP_FAILED);
1151
1152 /* Establish guard pages in the first 5 pages. */
1153 ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1154
1155 pid = fork();
1156 ASSERT_NE(pid, -1);
1157 if (!pid) {
1158 /* This is the child process now. */
1159
1160 /* Assert that the guarding is in effect. */
1161 for (i = 0; i < 10; i++) {
1162 char *curr = &ptr[i * page_size];
1163 bool result = try_read_write_buf(curr);
1164
1165 ASSERT_TRUE(i >= 5 ? result : !result);
1166 }
1167
1168 /* Now unguard the range. */
1169 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1170
1171 exit(0);
1172 }
1173
1174 /* Parent process. */
1175
1176 /* Parent simply waits on child. */
1177 waitpid(pid, NULL, 0);
1178
1179 /* Child unguard does not impact parent page table state. */
1180 for (i = 0; i < 10; i++) {
1181 char *curr = &ptr[i * page_size];
1182 bool result = try_read_write_buf(curr);
1183
1184 ASSERT_TRUE(i >= 5 ? result : !result);
1185 }
1186
1187 /* Cleanup. */
1188 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1189 }
1190
1191 /*
1192 * Assert expected behaviour after we fork populated ranges of anonymous memory
1193 * and then guard and unguard the range.
1194 */
1195 TEST_F(guard_regions, fork_cow)
1196 {
1197 const unsigned long page_size = self->page_size;
1198 char *ptr;
1199 pid_t pid;
1200 int i;
1201
1202 if (variant->backing != ANON_BACKED)
1203 SKIP(return, "CoW only supported on anon mappings");
1204
1205 /* Map 10 pages. */
1206 ptr = mmap_(self, variant, NULL, 10 * page_size,
1207 PROT_READ | PROT_WRITE, 0, 0);
1208 ASSERT_NE(ptr, MAP_FAILED);
1209
1210 /* Populate range. */
1211 for (i = 0; i < 10 * page_size; i++) {
1212 char chr = 'a' + (i % 26);
1213
1214 ptr[i] = chr;
1215 }
1216
1217 pid = fork();
1218 ASSERT_NE(pid, -1);
1219 if (!pid) {
1220 /* This is the child process now. */
1221
1222 /* Ensure the range is as expected. */
1223 for (i = 0; i < 10 * page_size; i++) {
1224 char expected = 'a' + (i % 26);
1225 char actual = ptr[i];
1226
1227 ASSERT_EQ(actual, expected);
1228 }
1229
1230 /* Establish guard pages across the whole range. */
1231 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1232 /* Remove it. */
1233 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1234
1235 /*
1236 * By removing the guard pages, the page tables will be
1237 * cleared. Assert that we are looking at the zero page now.
1238 */
1239 for (i = 0; i < 10 * page_size; i++) {
1240 char actual = ptr[i];
1241
1242 ASSERT_EQ(actual, '\0');
1243 }
1244
1245 exit(0);
1246 }
1247
1248 /* Parent process. */
1249
1250 /* Parent simply waits on child. */
1251 waitpid(pid, NULL, 0);
1252
1253 /* Ensure the range is unchanged in parent anon range. */
1254 for (i = 0; i < 10 * page_size; i++) {
1255 char expected = 'a' + (i % 26);
1256 char actual = ptr[i];
1257
1258 ASSERT_EQ(actual, expected);
1259 }
1260
1261 /* Cleanup. */
1262 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1263 }
1264
1265 /*
1266 * Assert that forking a process with VMAs that do have VM_WIPEONFORK set
1267 * behave as expected.
1268 */
1269 TEST_F(guard_regions, fork_wipeonfork)
1270 {
1271 const unsigned long page_size = self->page_size;
1272 char *ptr;
1273 pid_t pid;
1274 int i;
1275
1276 if (variant->backing != ANON_BACKED)
1277 SKIP(return, "Wipe on fork only supported on anon mappings");
1278
1279 /* Map 10 pages. */
1280 ptr = mmap_(self, variant, NULL, 10 * page_size,
1281 PROT_READ | PROT_WRITE, 0, 0);
1282 ASSERT_NE(ptr, MAP_FAILED);
1283
1284 /* Mark wipe on fork. */
1285 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_WIPEONFORK), 0);
1286
1287 /* Guard the first 5 pages. */
1288 ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1289
1290 pid = fork();
1291 ASSERT_NE(pid, -1);
1292 if (!pid) {
1293 /* This is the child process now. */
1294
1295 /* Guard will have been wiped. */
1296 for (i = 0; i < 10; i++) {
1297 char *curr = &ptr[i * page_size];
1298
1299 ASSERT_TRUE(try_read_write_buf(curr));
1300 }
1301
1302 exit(0);
1303 }
1304
1305 /* Parent process. */
1306
1307 waitpid(pid, NULL, 0);
1308
1309 /* Guard markers should be in effect. */
1310 for (i = 0; i < 10; i++) {
1311 char *curr = &ptr[i * page_size];
1312 bool result = try_read_write_buf(curr);
1313
1314 ASSERT_TRUE(i >= 5 ? result : !result);
1315 }
1316
1317 /* Cleanup. */
1318 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1319 }
1320
1321 /* Ensure that MADV_FREE retains guard entries as expected. */
1322 TEST_F(guard_regions, lazyfree)
1323 {
1324 const unsigned long page_size = self->page_size;
1325 char *ptr;
1326 int i;
1327
1328 if (variant->backing != ANON_BACKED)
1329 SKIP(return, "MADV_FREE only supported on anon mappings");
1330
1331 /* Map 10 pages. */
1332 ptr = mmap_(self, variant, NULL, 10 * page_size,
1333 PROT_READ | PROT_WRITE, 0, 0);
1334 ASSERT_NE(ptr, MAP_FAILED);
1335
1336 /* Guard range. */
1337 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1338
1339 /* Ensure guarded. */
1340 for (i = 0; i < 10; i++) {
1341 char *curr = &ptr[i * page_size];
1342
1343 ASSERT_FALSE(try_read_write_buf(curr));
1344 }
1345
1346 /* Lazyfree range. */
1347 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0);
1348
1349 /* This should leave the guard markers in place. */
1350 for (i = 0; i < 10; i++) {
1351 char *curr = &ptr[i * page_size];
1352
1353 ASSERT_FALSE(try_read_write_buf(curr));
1354 }
1355
1356 /* Cleanup. */
1357 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1358 }
1359
1360 /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
1361 TEST_F(guard_regions, populate)
1362 {
1363 const unsigned long page_size = self->page_size;
1364 char *ptr;
1365
1366 /* Map 10 pages. */
1367 ptr = mmap_(self, variant, NULL, 10 * page_size,
1368 PROT_READ | PROT_WRITE, 0, 0);
1369 ASSERT_NE(ptr, MAP_FAILED);
1370
1371 /* Guard range. */
1372 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1373
1374 /* Populate read should error out... */
1375 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1);
1376 ASSERT_EQ(errno, EFAULT);
1377
1378 /* ...as should populate write. */
1379 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1);
1380 ASSERT_EQ(errno, EFAULT);
1381
1382 /* Cleanup. */
1383 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1384 }
1385
1386 /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
1387 TEST_F(guard_regions, cold_pageout)
1388 {
1389 const unsigned long page_size = self->page_size;
1390 char *ptr;
1391 int i;
1392
1393 /* Map 10 pages. */
1394 ptr = mmap_(self, variant, NULL, 10 * page_size,
1395 PROT_READ | PROT_WRITE, 0, 0);
1396 ASSERT_NE(ptr, MAP_FAILED);
1397
1398 /* Guard range. */
1399 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1400
1401 /* Ensure guarded. */
1402 for (i = 0; i < 10; i++) {
1403 char *curr = &ptr[i * page_size];
1404
1405 ASSERT_FALSE(try_read_write_buf(curr));
1406 }
1407
1408 /* Now mark cold. This should have no impact on guard markers. */
1409 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0);
1410
1411 /* Should remain guarded. */
1412 for (i = 0; i < 10; i++) {
1413 char *curr = &ptr[i * page_size];
1414
1415 ASSERT_FALSE(try_read_write_buf(curr));
1416 }
1417
1418 /* OK, now page out. This should equally have no effect on markers. */
1419 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1420
1421 /* Should remain guarded. */
1422 for (i = 0; i < 10; i++) {
1423 char *curr = &ptr[i * page_size];
1424
1425 ASSERT_FALSE(try_read_write_buf(curr));
1426 }
1427
1428 /* Cleanup. */
1429 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1430 }
1431
1432 /* Ensure that guard pages do not break userfaultfd. */
1433 TEST_F(guard_regions, uffd)
1434 {
1435 const unsigned long page_size = self->page_size;
1436 int uffd;
1437 char *ptr;
1438 int i;
1439 struct uffdio_api api = {
1440 .api = UFFD_API,
1441 .features = 0,
1442 };
1443 struct uffdio_register reg;
1444 struct uffdio_range range;
1445
1446 if (!is_anon_backed(variant))
1447 SKIP(return, "uffd only works on anon backing");
1448
1449 /* Set up uffd. */
1450 uffd = userfaultfd(0);
1451 if (uffd == -1 && errno == EPERM)
1452 ksft_exit_skip("No userfaultfd permissions, try running as root.\n");
1453 ASSERT_NE(uffd, -1);
1454
1455 ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);
1456
1457 /* Map 10 pages. */
1458 ptr = mmap_(self, variant, NULL, 10 * page_size,
1459 PROT_READ | PROT_WRITE, 0, 0);
1460 ASSERT_NE(ptr, MAP_FAILED);
1461
1462 /* Register the range with uffd. */
1463 range.start = (unsigned long)ptr;
1464 range.len = 10 * page_size;
1465 reg.range = range;
1466 reg.mode = UFFDIO_REGISTER_MODE_MISSING;
1467 ASSERT_EQ(ioctl(uffd, UFFDIO_REGISTER, &reg), 0);
1468
1469 /* Guard the range. This should not trigger the uffd. */
1470 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1471
1472 /* The guarding should behave as usual with no uffd intervention. */
1473 for (i = 0; i < 10; i++) {
1474 char *curr = &ptr[i * page_size];
1475
1476 ASSERT_FALSE(try_read_write_buf(curr));
1477 }
1478
1479 /* Cleanup. */
1480 ASSERT_EQ(ioctl(uffd, UFFDIO_UNREGISTER, &range), 0);
1481 close(uffd);
1482 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1483 }
1484
1485 /*
1486 * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we
1487 * aggressively read-ahead, then install guard regions and assert that it
1488 * behaves correctly.
1489 *
1490 * We page out using MADV_PAGEOUT before checking guard regions so we drop page
1491 * cache folios, meaning we maximise the possibility of some broken readahead.
1492 */
1493 TEST_F(guard_regions, madvise_sequential)
1494 {
1495 char *ptr;
1496 int i;
1497 const unsigned long page_size = self->page_size;
1498
1499 if (variant->backing == ANON_BACKED)
1500 SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed");
1501
1502 ptr = mmap_(self, variant, NULL, 10 * page_size,
1503 PROT_READ | PROT_WRITE, 0, 0);
1504 ASSERT_NE(ptr, MAP_FAILED);
1505
1506 /* Establish a pattern of data in the file. */
1507 set_pattern(ptr, 10, page_size);
1508 ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1509
1510 /* Mark it as being accessed sequentially. */
1511 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0);
1512
1513 /* Mark every other page a guard page. */
1514 for (i = 0; i < 10; i += 2) {
1515 char *ptr2 = &ptr[i * page_size];
1516
1517 ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0);
1518 }
1519
1520 /* Now page it out. */
1521 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1522
1523 /* Now make sure pages are as expected. */
1524 for (i = 0; i < 10; i++) {
1525 char *chrp = &ptr[i * page_size];
1526
1527 if (i % 2 == 0) {
1528 bool result = try_read_write_buf(chrp);
1529
1530 ASSERT_FALSE(result);
1531 } else {
1532 ASSERT_EQ(*chrp, 'a' + i);
1533 }
1534 }
1535
1536 /* Now remove guard pages. */
1537 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1538
1539 /* Now make sure all data is as expected. */
1540 ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1542
1543 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1544 }
1545
1546 /*
1547 * Check that file-backed mappings implement guard regions with MAP_PRIVATE
1548 * correctly.
1549 */
1550 TEST_F(guard_regions, map_private)
1551 {
1552 const unsigned long page_size = self->page_size;
1553 char *ptr_shared, *ptr_private;
1554 int i;
1555
1556 if (variant->backing == ANON_BACKED)
1557 SKIP(return, "MAP_PRIVATE test specific to file-backed");
1558
1559 ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
1560 ASSERT_NE(ptr_shared, MAP_FAILED);
1561
1562 /* Manually mmap(), do not use mmap_() wrapper so we can force MAP_PRIVATE. */
1563 ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0);
1564 ASSERT_NE(ptr_private, MAP_FAILED);
1565
1566 /* Set pattern in shared mapping. */
1567 set_pattern(ptr_shared, 10, page_size);
1568
1569 /* Install guard regions in every other page in the shared mapping. */
1570 for (i = 0; i < 10; i += 2) {
1571 char *ptr = &ptr_shared[i * page_size];
1572
1573 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1574 }
1575
1576 for (i = 0; i < 10; i++) {
1577 /* Every even shared page should be guarded. */
1578 ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1579 /* Private mappings should always be readable. */
1580 ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1581 }
1582
1583 /* Install guard regions in every other page in the private mapping. */
1584 for (i = 0; i < 10; i += 2) {
1585 char *ptr = &ptr_private[i * page_size];
1586
1587 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1588 }
1589
1590 for (i = 0; i < 10; i++) {
1591 /* Every even shared page should be guarded. */
1592 ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1593 /* Every even private page should be guarded. */
1594 ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1595 }
1596
1597 /* Remove guard regions from shared mapping. */
1598 ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0);
1599
1600 for (i = 0; i < 10; i++) {
1601 /* Shared mappings should always be readable. */
1602 ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1603 /* Every even private page should be guarded. */
1604 ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1605 }
1606
1607 /* Remove guard regions from private mapping. */
1608 ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1609
1610 for (i = 0; i < 10; i++) {
1611 /* Shared mappings should always be readable. */
1612 ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1613 /* Private mappings should always be readable. */
1614 ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1615 }
1616
1617 /* Ensure patterns are intact. */
1618 ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size));
1619 ASSERT_TRUE(check_pattern(ptr_private, 10, page_size));
1620
1621 /* Now write out every other page to MAP_PRIVATE. */
1622 for (i = 0; i < 10; i += 2) {
1623 char *ptr = &ptr_private[i * page_size];
1624
1625 memset(ptr, 'a' + i, page_size);
1626 }
1627
1628 /*
1629 * At this point the mapping is:
1630 *
1631 * 0123456789
1632 * SPSPSPSPSP
1633 *
1634 * Where S = shared, P = private mappings.
1635 */
1636
1637 /* Now mark the beginning of the mapping guarded. */
1638 ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0);
1639
1640 /*
1641 * This renders the mapping:
1642 *
1643 * 0123456789
1644 * xxxxxPSPSP
1645 */
1646
1647 for (i = 0; i < 10; i++) {
1648 char *ptr = &ptr_private[i * page_size];
1649
1650 /* Ensure guard regions as expected. */
1651 ASSERT_EQ(try_read_buf(ptr), i >= 5);
1652 /* The shared mapping should always succeed. */
1653 ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1654 }
1655
1656 /* Remove the guard regions altogether. */
1657 ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1658
1659 /*
1660 *
1661 * We now expect the mapping to be:
1662 *
1663 * 0123456789
1664 * SSSSSPSPSP
1665 *
1666 * As we removed guard regions, the private pages in the first 5 will
1667 * have been zapped, so a fault will reestablish the shared mapping.
1668 */
1669
1670 for (i = 0; i < 10; i++) {
1671 char *ptr = &ptr_private[i * page_size];
1672
1673 /*
1674 * Assert that shared mappings in the MAP_PRIVATE mapping match
1675 * the shared mapping.
1676 */
1677 if (i < 5 || i % 2 == 0) {
1678 char *ptr_s = &ptr_shared[i * page_size];
1679
1680 ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0);
1681 continue;
1682 }
1683
1684 /* Everything else is a private mapping. */
1685 ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i));
1686 }
1687
1688 ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0);
1689 ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0);
1690 }
1691
1692 /* Test that guard regions established over a read-only mapping function correctly. */
1693 TEST_F(guard_regions, readonly_file)
1694 {
1695 const unsigned long page_size = self->page_size;
1696 char *ptr;
1697 int i;
1698
1699 if (variant->backing == ANON_BACKED)
1700 SKIP(return, "Read-only test specific to file-backed");
1701
1702 /* Map shared so we can populate with pattern, populate it, unmap. */
1703 ptr = mmap_(self, variant, NULL, 10 * page_size,
1704 PROT_READ | PROT_WRITE, 0, 0);
1705 ASSERT_NE(ptr, MAP_FAILED);
1706 set_pattern(ptr, 10, page_size);
1707 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1708 /* Close the fd so we can re-open read-only. */
1709 ASSERT_EQ(close(self->fd), 0);
1710
1711 /* Re-open read-only. */
1712 self->fd = open(self->path, O_RDONLY);
1713 ASSERT_NE(self->fd, -1);
1714 /* Re-map read-only. */
1715 ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
1716 ASSERT_NE(ptr, MAP_FAILED);
1717
1718 /* Mark every other page guarded. */
1719 for (i = 0; i < 10; i += 2) {
1720 char *ptr_pg = &ptr[i * page_size];
1721
1722 ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0);
1723 }
1724
1725 /* Assert that the guard regions are in place. */
1726 for (i = 0; i < 10; i++) {
1727 char *ptr_pg = &ptr[i * page_size];
1728
1729 ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0);
1730 }
1731
1732 /* Remove guard regions. */
1733 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1734
1735 /* Ensure the data is as expected. */
1736 ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1737
1738 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1739 }
1740
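/*
 * Assert that fault-around, triggered by faulting in file-backed pages
 * adjacent to guard regions, does not populate the guarded pages.
 */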
1741 TEST_F(guard_regions, fault_around)
1742 {
1743 const unsigned long page_size = self->page_size;
1744 char *ptr;
1745 int i;
1746
1747 if (variant->backing == ANON_BACKED)
1748 SKIP(return, "Fault-around test specific to file-backed");
1749
1750 ptr = mmap_(self, variant, NULL, 10 * page_size,
1751 PROT_READ | PROT_WRITE, 0, 0);
1752 ASSERT_NE(ptr, MAP_FAILED);
1753
1754 /* Establish a pattern in the backing file. */
1755 set_pattern(ptr, 10, page_size);
1756
1757 /*
1758 * Now drop it from the page cache so we get major faults when next we
1759 * map it.
1760 */
1761 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1762
1763 /* Unmap and remap 'to be sure'. */
1764 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1765 ptr = mmap_(self, variant, NULL, 10 * page_size,
1766 PROT_READ | PROT_WRITE, 0, 0);
1767 ASSERT_NE(ptr, MAP_FAILED);
1768
1769 /* Now make every even page guarded. */
1770 for (i = 0; i < 10; i += 2) {
1771 char *ptr_p = &ptr[i * page_size];
1772
1773 ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1774 }
1775
1776 /* Now fault in every odd page. This should trigger fault-around. */
1777 for (i = 1; i < 10; i += 2) {
1778 char *ptr_p = &ptr[i * page_size];
1779
1780 ASSERT_TRUE(try_read_buf(ptr_p));
1781 }
1782
1783 /* Finally, ensure that guard regions are intact as expected. */
1784 for (i = 0; i < 10; i++) {
1785 char *ptr_p = &ptr[i * page_size];
1786
1787 ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
1788 }
1789
1790 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1791 }
1792
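/* Assert that truncating the backing file leaves guard markers intact. */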
1793 TEST_F(guard_regions, truncation)
1794 {
1795 const unsigned long page_size = self->page_size;
1796 char *ptr;
1797 int i;
1798
1799 if (variant->backing == ANON_BACKED)
1800 SKIP(return, "Truncation test specific to file-backed");
1801
1802 ptr = mmap_(self, variant, NULL, 10 * page_size,
1803 PROT_READ | PROT_WRITE, 0, 0);
1804 ASSERT_NE(ptr, MAP_FAILED);
1805
1806 /*
1807 * Establish a pattern in the backing file, just so there is data
1808 * there.
1809 */
1810 set_pattern(ptr, 10, page_size);
1811
1812 /* Now make every even page guarded. */
1813 for (i = 0; i < 10; i += 2) {
1814 char *ptr_p = &ptr[i * page_size];
1815
1816 ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1817 }
1818
1819 /* Now assert things are as expected. */
1820 for (i = 0; i < 10; i++) {
1821 char *ptr_p = &ptr[i * page_size];
1822
1823 ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1824 }
1825
1826 /* Now truncate to the size actually in use (the file was initialised to 100 pages). */
1827 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1828
1829 /* Here the guard regions will remain intact. */
1830 for (i = 0; i < 10; i++) {
1831 char *ptr_p = &ptr[i * page_size];
1832
1833 ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1834 }
1835
1836 /* Now truncate to half the size, then truncate again to the full size. */
1837 ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0);
1838 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1839
1840 /* Again, guard pages will remain intact. */
1841 for (i = 0; i < 10; i++) {
1842 char *ptr_p = &ptr[i * page_size];
1843
1844 ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1845 }
1846
1847 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1848 }
1849
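/* Assert that hole punching (MADV_REMOVE) leaves guard markers intact. */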
1850 TEST_F(guard_regions, hole_punch)
1851 {
1852 const unsigned long page_size = self->page_size;
1853 char *ptr;
1854 int i;
1855
1856 if (variant->backing == ANON_BACKED)
1857 SKIP(return, "Hole punch test specific to file-backed");
1858
1859 /* Establish pattern in mapping. */
1860 ptr = mmap_(self, variant, NULL, 10 * page_size,
1861 PROT_READ | PROT_WRITE, 0, 0);
1862 ASSERT_NE(ptr, MAP_FAILED);
1863 set_pattern(ptr, 10, page_size);
1864
1865 /* Install a guard region in the middle of the mapping. */
1866 ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
1867 MADV_GUARD_INSTALL), 0);
1868
1869 /*
1870 * The buffer will now be:
1871 *
1872 * 0123456789
1873 * ***xxxx***
1874 *
1875 * Where * is data and x is the guard region.
1876 */
1877
1878 /* Ensure established. */
1879 for (i = 0; i < 10; i++) {
1880 char *ptr_p = &ptr[i * page_size];
1881
1882 ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
1883 }
1884
1885 /* Now hole punch the guarded region. */
1886 ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
1887 MADV_REMOVE), 0);
1888
1889 /* Ensure guard regions remain. */
1890 for (i = 0; i < 10; i++) {
1891 char *ptr_p = &ptr[i * page_size];
1892
1893 ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
1894 }
1895
1896 /* Now remove guard region throughout. */
1897 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1898
1899 /* Check that the pattern exists in non-hole punched region. */
1900 ASSERT_TRUE(check_pattern(ptr, 3, page_size));
1901 /* Check that hole punched region is zeroed. */
1902 ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0'));
1903 /* Check that the pattern exists in the remainder of the file. */
1904 ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7));
1905
1906 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1907 }
1908
1909 /*
1910 * Ensure that a memfd works correctly with guard regions, that we can write
1911 * seal it then open the mapping read-only and still establish guard regions
1912 * within, remove those guard regions and have everything work correctly.
1913 */
1914 TEST_F(guard_regions, memfd_write_seal)
1915 {
1916 const unsigned long page_size = self->page_size;
1917 char *ptr;
1918 int i;
1919
1920 if (variant->backing != SHMEM_BACKED)
1921 SKIP(return, "memfd write seal test specific to shmem");
1922
1923 /* OK, we need a memfd, so close existing one. */
1924 ASSERT_EQ(close(self->fd), 0);
1925
1926 /* Create and truncate memfd. */
1927 self->fd = memfd_create("guard_regions_memfd_seals_test",
1928 MFD_ALLOW_SEALING);
1929 ASSERT_NE(self->fd, -1);
1930 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1931
1932 /* Map, set pattern, unmap. */
1933 ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
1934 ASSERT_NE(ptr, MAP_FAILED);
1935 set_pattern(ptr, 10, page_size);
1936 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1937
1938 /* Write-seal the memfd. */
1939 ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0);
1940
1941 /* Now map the memfd readonly. */
1942 ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
1943 ASSERT_NE(ptr, MAP_FAILED);
1944
1945 /* Ensure pattern is as expected. */
1946 ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1947
1948 /* Now make every even page guarded. */
1949 for (i = 0; i < 10; i += 2) {
1950 char *ptr_p = &ptr[i * page_size];
1951
1952 ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1953 }
1954
1955 /* Now assert things are as expected. */
1956 for (i = 0; i < 10; i++) {
1957 char *ptr_p = &ptr[i * page_size];
1958
1959 ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
1960 }
1961
1962 /* Now remove guard regions. */
1963 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1964
1965 /* Ensure pattern is as expected. */
1966 ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1967
1968 /* Ensure write seal intact. */
1969 for (i = 0; i < 10; i++) {
1970 char *ptr_p = &ptr[i * page_size];
1971
1972 ASSERT_FALSE(try_write_buf(ptr_p));
1973 }
1974
1975 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1976 }
1977
1978
1979 /*
1980 * Since we are now permitted to establish guard regions in read-only anonymous
1981 * mappings, for the sake of thoroughness, though it probably has no practical
1982 * use, test that guard regions function with a mapping to the anonymous zero
1983 * page.
1984 */
1985 TEST_F(guard_regions, anon_zeropage)
1986 {
1987 const unsigned long page_size = self->page_size;
1988 char *ptr;
1989 int i;
1990
1991 if (!is_anon_backed(variant))
1992 SKIP(return, "anon zero page test specific to anon/shmem");
1993
1994 /* Obtain a read-only i.e. anon zero page mapping. */
1995 ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
1996 ASSERT_NE(ptr, MAP_FAILED);
1997
1998 /* Now make every even page guarded. */
1999 for (i = 0; i < 10; i += 2) {
2000 char *ptr_p = &ptr[i * page_size];
2001
2002 ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2003 }
2004
2005 /* Now assert things are as expected. */
2006 for (i = 0; i < 10; i++) {
2007 char *ptr_p = &ptr[i * page_size];
2008
2009 ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
2010 }
2011
2012 /* Now remove all guard regions. */
2013 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
2014
2015 /* Now assert things are as expected. */
2016 for (i = 0; i < 10; i++) {
2017 char *ptr_p = &ptr[i * page_size];
2018
2019 ASSERT_TRUE(try_read_buf(ptr_p));
2020 }
2021
2022 /* Ensure zero page... */
2023 ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0'));
2024
2025 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2026 }
2027
2028 /*
2029 * Assert that /proc/$pid/pagemap correctly identifies guard region ranges.
2030 */
2031 TEST_F(guard_regions, pagemap)
2032 {
2033 const unsigned long page_size = self->page_size;
2034 int proc_fd;
2035 char *ptr;
2036 int i;
2037
2038 proc_fd = open("/proc/self/pagemap", O_RDONLY);
2039 ASSERT_NE(proc_fd, -1);
2040
2041 ptr = mmap_(self, variant, NULL, 10 * page_size,
2042 PROT_READ | PROT_WRITE, 0, 0);
2043 ASSERT_NE(ptr, MAP_FAILED);
2044
2045 /* Read from pagemap, and assert no guard regions are detected. */
2046 for (i = 0; i < 10; i++) {
2047 char *ptr_p = &ptr[i * page_size];
2048 unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
2049 unsigned long masked = entry & PM_GUARD_REGION;
2050
2051 ASSERT_EQ(masked, 0);
2052 }
2053
2054 /* Install a guard region in every other page. */
2055 for (i = 0; i < 10; i += 2) {
2056 char *ptr_p = &ptr[i * page_size];
2057
2058 ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2059 }
2060
2061 /* Re-read from pagemap, and assert guard regions are detected. */
2062 for (i = 0; i < 10; i++) {
2063 char *ptr_p = &ptr[i * page_size];
2064 unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
2065 unsigned long masked = entry & PM_GUARD_REGION;
2066
2067 ASSERT_EQ(masked, i % 2 == 0 ? PM_GUARD_REGION : 0);
2068 }
2069
2070 ASSERT_EQ(close(proc_fd), 0);
2071 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2072 }
2073
2074 TEST_HARNESS_MAIN
2075