1 // SPDX-License-Identifier: LGPL-2.1-or-later
2 /*
3 * Copyright (C) 2009 IBM Corporation.
4 * Author: David Gibson
5 */
6
7 /*\
8 * [Description]
9 *
 * The kernel has a bug in mremap() on some architectures. mremap() can
 * cause crashes on architectures with holes in the address space (like
 * ia64) and on powerpc with its distinct page size slices.
 *
 * This test performs mremap() with normal and huge pages around the
 * powerpc slice boundary.
16 */
17
18 #define _GNU_SOURCE
19 #include "hugetlb.h"
20
21 #define RANDOM_CONSTANT 0x1234ABCD
22 #define MNTPOINT "hugetlbfs/"
23
/* hugetlbfs-backed test file; opened in run_test(), closed in cleanup() */
static int fd = -1;
/* probed slice-boundary address, set by init_slice_boundary() */
static unsigned long slice_boundary;
/* default huge page size and base page size, set in setup() */
static unsigned long hpage_size, page_size;
27
/*
 * Probe for a usable slice boundary: an address around which four huge
 * pages can be MAP_FIXED-mapped (two below, two above the boundary), so
 * the test can place mappings in two neighbouring slices.
 *
 * fd is the hugetlbfs file to probe with (it shadows the file-scope fd,
 * but run_test() passes the same descriptor).
 *
 * On success, sets the file-scope slice_boundary and returns 0;
 * returns -1 if no free pair of neighbouring slices was found.
 */
static int init_slice_boundary(int fd)
{
	unsigned long slice_size;
	void *p, *heap;
	int i;
#if defined(__LP64__) && !defined(__aarch64__)
	/* powerpc: 1TB slices starting at 1 TB */
	slice_boundary = 0x10000000000;
	slice_size = 0x10000000000;
#else
	/* powerpc: 256MB slices up to 4GB */
	slice_boundary = 0x00000000;
	slice_size = 0x10000000;
#endif

	/* dummy malloc so we know where is heap */
	heap = malloc(1);
	free(heap);

	/* Avoid underflow on systems with large huge pages.
	 * The additionally plus heap address is to reduce the possibility
	 * of MAP_FIXED stomp over existing mappings.
	 */
	while (slice_boundary + slice_size < (unsigned long)heap + 2*hpage_size)
		slice_boundary += slice_size;

	/* Find 2 neighbour slices with couple huge pages free
	 * around slice boundary.
	 * 16 is the maximum number of slices (low/high)
	 */
	for (i = 0; i < 16-1; i++) {
		slice_boundary += slice_size;
		/* Try mapping 2 huge pages below and 2 above the boundary;
		 * MAP_FAILED here means this boundary is occupied, so move on.
		 */
		p = mmap((void *)(slice_boundary-2*hpage_size), 4*hpage_size,
			 PROT_READ, MAP_SHARED | MAP_FIXED, fd, 0);
		if (p == MAP_FAILED) {
			tst_res(TINFO|TERRNO, "can't use slice_boundary: 0x%lx",
					slice_boundary);
		} else {
			/* Probe mapping succeeded; release it for the real test. */
			SAFE_MUNMAP(p, 4*hpage_size);
			break;
		}
	}

	if (p == MAP_FAILED) {
		tst_res(TFAIL|TERRNO, "couldn't find 2 free neighbour slices");
		return -1;
	}

	tst_res(TINFO, "using slice_boundary: 0x%lx", slice_boundary);

	return 0;
}
80
run_test(void)81 static void run_test(void)
82 {
83 void *p = NULL, *q = NULL, *r;
84 long p_size, q_size;
85 int ret;
86
87 fd = tst_creat_unlinked(MNTPOINT, 0);
88 ret = init_slice_boundary(fd);
89 if (ret)
90 goto cleanup;
91
92 /* First, hugepages above, normal below */
93 tst_res(TINFO, "Testing with hpage above & normal below the slice_boundary");
94 p_size = hpage_size;
95 p = SAFE_MMAP((void *)(slice_boundary + hpage_size), p_size,
96 PROT_READ | PROT_WRITE,
97 MAP_SHARED | MAP_FIXED, fd, 0);
98
99 ret = do_readback(p, p_size, "huge above");
100 if (ret)
101 goto cleanup;
102
103 q_size = page_size;
104 q = SAFE_MMAP((void *)(slice_boundary - page_size), q_size,
105 PROT_READ | PROT_WRITE,
106 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
107
108 ret = do_readback(q, q_size, "normal below");
109 if (ret)
110 goto cleanup;
111
112 r = mremap(q, page_size, 2*page_size, 0);
113 if (r == MAP_FAILED) {
114 tst_res(TINFO, "mremap(%p, %lu, %lu, 0) disallowed",
115 q, page_size, 2*page_size);
116 } else {
117 q_size = 2*page_size;
118 if (r != q) {
119 tst_res(TFAIL, "mremap() moved without MREMAP_MAYMOVE!?");
120 ret = -1;
121 } else
122 ret = do_readback(q, 2*page_size, "normal below expanded");
123 }
124
125 SAFE_MUNMAP(p, p_size);
126 SAFE_MUNMAP(q, q_size);
127 if (ret)
128 goto cleanup_fd;
129
130 /* Next, normal pages above, huge below */
131 tst_res(TINFO, "Testing with normal above & hpage below the slice_boundary");
132 p_size = page_size;
133 p = SAFE_MMAP((void *)(slice_boundary + hpage_size), p_size,
134 PROT_READ|PROT_WRITE,
135 MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
136
137 ret = do_readback(p, p_size, "normal above");
138 if (ret)
139 goto cleanup;
140
141 q_size = hpage_size;
142 q = SAFE_MMAP((void *)(slice_boundary - hpage_size),
143 q_size, PROT_READ | PROT_WRITE,
144 MAP_SHARED | MAP_FIXED, fd, 0);
145
146 ret = do_readback(q, q_size, "huge below");
147 if (ret)
148 goto cleanup;
149
150 r = mremap(q, hpage_size, 2*hpage_size, 0);
151 if (r == MAP_FAILED) {
152 tst_res(TINFO, "mremap(%p, %lu, %lu, 0) disallowed",
153 q, hpage_size, 2*hpage_size);
154 } else {
155 q_size = 2*hpage_size;
156 if (r != q) {
157 tst_res(TFAIL, "mremap() moved without MREMAP_MAYMOVE!?");
158 ret = -1;
159 } else
160 ret = do_readback(q, 2*hpage_size, "huge below expanded");
161 }
162 if (ret)
163 goto cleanup;
164
165 tst_res(TPASS, "Successful");
166
167 cleanup:
168 if (p)
169 SAFE_MUNMAP(p, p_size);
170 if (q)
171 SAFE_MUNMAP(q, q_size);
172 cleanup_fd:
173 SAFE_CLOSE(fd);
174 }
175
setup(void)176 static void setup(void)
177 {
178 hpage_size = tst_get_hugepage_size();
179 page_size = getpagesize();
180 }
181
cleanup(void)182 static void cleanup(void)
183 {
184 if (fd >= 0)
185 SAFE_CLOSE(fd);
186 }
187
/*
 * LTP test definition: needs root, hugetlbfs mounted at MNTPOINT, a
 * temporary directory, and 4 reserved huge pages for the probe/test
 * mappings.
 */
static struct tst_test test = {
	.needs_root = 1,
	.mntpoint = MNTPOINT,
	.needs_hugetlbfs = 1,
	.needs_tmpdir = 1,
	.setup = setup,
	.cleanup = cleanup,
	.test_all = run_test,
	.hugepages = {4, TST_NEEDS},
};
198