// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2009 IBM Corporation.
 * Author: David Gibson
 */

/*\
 * [Description]
 *
 * The kernel has a bug in mremap() on some architectures. mremap() can
 * cause crashes on architectures with holes in the address space (like
 * ia64) and on powerpc with its distinct page size slices.
 *
 * This test performs mremap() with normal and huge pages around the
 * powerpc slice boundary.
 */

#define _GNU_SOURCE
#include "hugetlb.h"

#define RANDOM_CONSTANT 0x1234ABCD
#define MNTPOINT "hugetlbfs/"

static int  fd = -1;
static unsigned long slice_boundary;
static long hpage_size, page_size;

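/*
 * Search for a slice boundary with two free neighbouring slices by trying
 * to MAP_FIXED-map four huge pages straddling each candidate boundary; the
 * first boundary where the probe mapping succeeds is kept for the test.
 */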
static int init_slice_boundary(int fd)
{
	unsigned long slice_size;
	void *p, *heap;
	int i;
#if defined(__LP64__) && !defined(__aarch64__)
	/* powerpc: 1TB slices starting at 1TB */
	slice_boundary = 0x10000000000;
	slice_size = 0x10000000000;
#else
	/* powerpc: 256MB slices up to 4GB */
	slice_boundary = 0x00000000;
	slice_size = 0x10000000;
#endif

	/* dummy malloc so we know where the heap is */
	heap = malloc(1);
	free(heap);

	/* Find 2 neighbouring slices with a couple of huge pages free
	 * around the slice boundary.
	 * 16 is the maximum number of slices (low/high).
	 */
	for (i = 0; i < 16-1; i++) {
		slice_boundary += slice_size;
		p = mmap((void *)(slice_boundary-2*hpage_size), 4*hpage_size,
			PROT_READ, MAP_SHARED | MAP_FIXED, fd, 0);
		if (p == MAP_FAILED) {
			tst_res(TINFO|TERRNO, "can't use slice_boundary: 0x%lx",
					slice_boundary);
		} else {
			SAFE_MUNMAP(p, 4*hpage_size);
			break;
		}
	}

	if (p == MAP_FAILED) {
		tst_res(TFAIL|TERRNO, "couldn't find 2 free neighbouring slices");
		return -1;
	}

	tst_res(TINFO, "using slice_boundary: 0x%lx", slice_boundary);

	return 0;
}

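/*
 * do_readback() fills a mapping with an offset-dependent pattern and then
 * verifies it, reporting the given stage name on mismatch. It is assumed to
 * be provided by the shared hugetlb test library pulled in via hugetlb.h;
 * the block below is only a hedged sketch of such a helper and stays
 * disabled behind a hypothetical guard macro that is not an LTP define.
 */
#ifdef NEED_LOCAL_DO_READBACK	/* hypothetical guard, not defined by LTP */
static int do_readback(void *p, size_t size, char *stage)
{
	unsigned int *q = p;
	size_t i;

	tst_res(TINFO, "do_readback(%p, 0x%lx, \"%s\")", p,
		(unsigned long)size, stage);

	/* Write an offset-dependent pattern into the mapping... */
	for (i = 0; i < size / sizeof(*q); i++)
		q[i] = RANDOM_CONSTANT ^ i;

	/* ...then verify that every word reads back unchanged. */
	for (i = 0; i < size / sizeof(*q); i++) {
		if (q[i] != (unsigned int)(RANDOM_CONSTANT ^ i)) {
			tst_res(TFAIL,
				"%s: mismatch at offset 0x%lx: 0x%x instead of 0x%lx",
				stage, (unsigned long)i, q[i],
				(unsigned long)(RANDOM_CONSTANT ^ i));
			return -1;
		}
	}

	return 0;
}
#endif
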
static void run_test(void)
{
	void *p = NULL, *q = NULL, *r;
	long p_size, q_size;
	int ret;

	fd = tst_creat_unlinked(MNTPOINT, 0);
	ret = init_slice_boundary(fd);
	if (ret)
		goto cleanup;

	/* First, hugepages above, normal below */
	tst_res(TINFO, "Testing with hpage above & normal below the slice_boundary");
	p_size = hpage_size;
	p = SAFE_MMAP((void *)(slice_boundary + hpage_size), p_size,
		 PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0);

	ret = do_readback(p, p_size, "huge above");
	if (ret)
		goto cleanup;

	q_size = page_size;
	q = SAFE_MMAP((void *)(slice_boundary - page_size), q_size,
		 PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	ret = do_readback(q, q_size, "normal below");
	if (ret)
		goto cleanup;

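	/*
	 * Try to grow the normal mapping past the slice boundary, into the
	 * slice that already holds the hugepage mapping. With flags == 0
	 * (no MREMAP_MAYMOVE) the kernel must either expand the mapping in
	 * place or fail; relocating it would be a bug.
	 */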
	r = mremap(q, page_size, 2*page_size, 0);
	if (r == MAP_FAILED) {
		tst_res(TINFO, "mremap(%p, %lu, %lu, 0) disallowed",
				q, page_size, 2*page_size);
	} else {
		q_size = 2*page_size;
		if (r != q) {
			tst_res(TFAIL, "mremap() moved without MREMAP_MAYMOVE!?");
			ret = -1;
		} else
			ret = do_readback(q, 2*page_size, "normal below expanded");
	}

	SAFE_MUNMAP(p, p_size);
	SAFE_MUNMAP(q, q_size);
	if (ret)
		goto cleanup_fd;

	/* Next, normal pages above, huge below */
	tst_res(TINFO, "Testing with normal above & hpage below the slice_boundary");
	p_size = page_size;
	p = SAFE_MMAP((void *)(slice_boundary + hpage_size), p_size,
		 PROT_READ|PROT_WRITE,
		 MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0);

	ret = do_readback(p, p_size, "normal above");
	if (ret)
		goto cleanup;

	q_size = hpage_size;
	q = SAFE_MMAP((void *)(slice_boundary - hpage_size),
		 q_size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0);

	ret = do_readback(q, q_size, "huge below");
	if (ret)
		goto cleanup;

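	/*
	 * Same check in the opposite direction: grow the hugepage mapping
	 * past the slice boundary towards the normal mapping, again without
	 * MREMAP_MAYMOVE.
	 */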
	r = mremap(q, hpage_size, 2*hpage_size, 0);
	if (r == MAP_FAILED) {
		tst_res(TINFO, "mremap(%p, %lu, %lu, 0) disallowed",
				q, hpage_size, 2*hpage_size);
	} else {
		q_size = 2*hpage_size;
		if (r != q) {
			tst_res(TFAIL, "mremap() moved without MREMAP_MAYMOVE!?");
			ret = -1;
		} else
			ret = do_readback(q, 2*hpage_size, "huge below expanded");
	}
	if (ret)
		goto cleanup;

	tst_res(TPASS, "Successful");

cleanup:
	if (p)
		SAFE_MUNMAP(p, p_size);
	if (q)
		SAFE_MUNMAP(q, q_size);
cleanup_fd:
	SAFE_CLOSE(fd);
}

static void setup(void)
{
	hpage_size = tst_get_hugepage_size();
	page_size = getpagesize();
}

static void cleanup(void)
{
	if (fd >= 0)
		SAFE_CLOSE(fd);
}

static struct tst_test test = {
	.needs_root = 1,
	.mntpoint = MNTPOINT,
	.needs_hugetlbfs = 1,
	.needs_tmpdir = 1,
	.setup = setup,
	.cleanup = cleanup,
	.test_all = run_test,
	.hugepages = {4, TST_NEEDS},
};