// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2005-2006 IBM Corporation.
 * Author: David Gibson & Adam Litke
 */

/*\
 * [Description]
 *
 * Tests copy-on-write semantics of large pages where a number of threads
 * map the same file with the MAP_PRIVATE flag. The threads then write
 * into their copy of the mapping and recheck the contents to ensure they
 * were not corrupted by the other threads.
 */

#include "hugetlb.h"

#define THREADS 5
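/* One huge page for the parent's shared mapping plus one per worker's private copy */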
#define NR_HUGEPAGES 6
#define MNTPOINT "hugetlbfs/"

static int fd = -1;
static long hpage_size;

static void do_work(int thread, size_t size, int fd)
{
	char *addr;
	size_t i;
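	/* Each worker writes a distinct ASCII letter: 'A' + thread index */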
	char pattern = thread+65;

	addr = SAFE_MMAP(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);

	tst_res(TINFO, "Thread %d (pid %d): Mapped at address %p",
	       thread, getpid(), addr);

	for (i = 0; i < size; i++)
		memcpy((char *)addr+i, &pattern, 1);

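	/* A failing msync() is treated as a broken test (TBROK), not as a COW failure */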
	if (msync(addr, size, MS_SYNC))
		tst_brk(TBROK | TERRNO, "Thread %d (pid %d): msync() failed",
				thread, getpid());

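	/* Verify the private copy: writes from the other workers must not be visible here */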
	for (i = 0; i < size; i++) {
		if (addr[i] != pattern) {
			tst_res(TFAIL, "Thread %d (pid %d): Corruption at %p; "
				   "Got %c, Expected %c", thread, getpid(),
				   &addr[i], addr[i], pattern);
			goto cleanup;
		}
	}

	tst_res(TINFO, "Thread %d (pid %d): Pattern verified",
			thread, getpid());

cleanup:
	SAFE_MUNMAP(addr, size);
	exit(0);
}

static void run_test(void)
{
	int i, pid;
	char *addr;
	size_t size, itr;
	pid_t *wait_list;

	wait_list = SAFE_MALLOC(THREADS * sizeof(pid_t));
	size = (NR_HUGEPAGES / (THREADS+1)) * hpage_size;

	/*
	 * First, mmap the file with MAP_SHARED and fill it with data.
	 * If this is not done, the fault handler will not be called
	 * in the kernel, since private mappings will be created for
	 * the children at prefault time.
	 */
	addr = SAFE_MMAP(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (itr = 0; itr < size; itr += 8)
		memcpy(addr+itr, "deadbeef", 8);

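	/* Fork the workers; each child privately maps the file, writes its pattern and verifies it */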
	for (i = 0; i < THREADS; i++) {
		pid = SAFE_FORK();

		if (pid == 0)
			do_work(i, size, fd);

		wait_list[i] = pid;
	}
	tst_reap_children();

	SAFE_MUNMAP(addr, size);
	free(wait_list);
	tst_res(TPASS, "mmap COW working as expected.");
}

static void setup(void)
{
	hpage_size = tst_get_hugepage_size();
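	/* Back the mappings with an unlinked temporary file on the hugetlbfs mount */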
	fd = tst_creat_unlinked(MNTPOINT, 0);
}

static void cleanup(void)
{
	if (fd >= 0)
		SAFE_CLOSE(fd);
}

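/* The library sets up NR_HUGEPAGES huge pages before the test runs (TST_NEEDS) */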
static struct tst_test test = {
	.needs_root = 1,
	.mntpoint = MNTPOINT,
	.needs_hugetlbfs = 1,
	.needs_tmpdir = 1,
	.forks_child = 1,
	.setup = setup,
	.cleanup = cleanup,
	.test_all = run_test,
	.hugepages = {NR_HUGEPAGES, TST_NEEDS},
};