/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <sys/mman.h>

#include <memory>

#include "common_runtime_test.h"
#include "base/memory_tool.h"
#include "base/unix_file/fd_file.h"

namespace art {

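// Test fixture for MemMap. MemMapTest is a friend of MemMap, so the helpers
// below can read the private base address and size of the underlying mapping.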
class MemMapTest : public CommonRuntimeTest {
 public:
  static uint8_t* BaseBegin(MemMap* mem_map) {
    return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
  }

  static size_t BaseSize(MemMap* mem_map) {
    return mem_map->base_size_;
  }

  static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
    // Find a valid map address and unmap it before returning.
    std::string error_msg;
    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
                                                     nullptr,
                                                     size,
                                                     PROT_READ,
                                                     low_4gb,
                                                     /*reuse*/ false,
                                                     &error_msg));
    CHECK(map != nullptr);
    return map->Begin();
  }

  static void RemapAtEndTest(bool low_4gb) {
    std::string error_msg;
    // Cast the page size to size_t.
    const size_t page_size = static_cast<size_t>(kPageSize);
    // Map a two-page memory region.
    MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
                                      nullptr,
                                      2 * page_size,
                                      PROT_READ | PROT_WRITE,
                                      low_4gb,
                                      /*reuse*/ false,
                                      &error_msg);
    // Check its state and write to it.
    uint8_t* base0 = m0->Begin();
    ASSERT_TRUE(base0 != nullptr) << error_msg;
    size_t size0 = m0->Size();
    EXPECT_EQ(m0->Size(), 2 * page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), size0);
    memset(base0, 42, 2 * page_size);
    // Remap the latter half into a second MemMap.
    MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
                                "MemMapTest_RemapAtEndTest_map1",
                                PROT_READ | PROT_WRITE,
                                &error_msg);
    // Check the states of the two maps.
    EXPECT_EQ(m0->Begin(), base0) << error_msg;
    EXPECT_EQ(m0->Size(), page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), page_size);
    uint8_t* base1 = m1->Begin();
    size_t size1 = m1->Size();
    EXPECT_EQ(base1, base0 + page_size);
    EXPECT_EQ(size1, page_size);
    EXPECT_EQ(BaseBegin(m1), base1);
    EXPECT_EQ(BaseSize(m1), size1);
    // Write to the second region.
    memset(base1, 43, page_size);
    // Check the contents of the two regions.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base0[i], 42);
    }
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Unmap the first region.
    delete m0;
    // Make sure the second region is still accessible after the first
    // region is unmapped.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    delete m1;
  }

  void CommonInit() {
    MemMap::Init();
  }

#if defined(__LP64__) && !defined(__x86_64__)
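  // Where MemMap's linear scan for a free address will resume (next_mem_pos_).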
  static uintptr_t GetLinearScanPos() {
    return MemMap::next_mem_pos_;
  }
#endif
};

#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on the maximum input, the result should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif

TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   0,
                                                   PROT_READ,
                                                   /*low_4gb*/ false,
                                                   /*reuse*/ false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
                                 nullptr,
                                 kPageSize,
                                 PROT_READ | PROT_WRITE,
                                 /*low_4gb*/ false,
                                 /*reuse*/ false,
                                 &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousFailNullError) {
  CommonInit();
  // Test that we don't crash with a null error_str when mapping at an invalid location.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
                                                   reinterpret_cast<uint8_t*>(kPageSize),
                                                   0x20000,
                                                   PROT_READ | PROT_WRITE,
                                                   /*low_4gb*/ false,
                                                   /*reuse*/ false,
                                                   nullptr));
  ASSERT_EQ(nullptr, map.get());
}

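// The low_4gb flag is only meaningful in a 64-bit process: it requests that
// the mapping be placed below 4 GiB.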
#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   kPageSize,
                                                   PROT_READ | PROT_WRITE,
                                                   /*low_4gb*/ true,
                                                   /*reuse*/ false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}

TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  constexpr size_t kMapSize = kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
  std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/ kMapSize,
                                              PROT_READ,
                                              MAP_PRIVATE,
                                              scratch_file.GetFd(),
                                              /*start*/ 0,
                                              /*low_4gb*/ true,
                                              scratch_file.GetFilename().c_str(),
                                              &error_msg));
  ASSERT_TRUE(map != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map->Size(), kMapSize);
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
#endif

TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/ false);
  // Map at the just-vacated address, which should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    valid_address,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    /*low_4gb*/ false,
                                                    /*reuse*/ false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0->BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    /*low_4gb*/ false,
                                                    /*reuse*/ false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    /*low_4gb*/ false,
                                                    /*reuse*/ false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}

TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(/*low_4gb*/ false);
}

#ifdef __LP64__
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(/*low_4gb*/ true);
}
#endif

TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  CommonInit();
  // This test may not work under Valgrind or other memory tools.
  if (RUNNING_ON_MEMORY_TOOL == 0) {
    constexpr size_t size = 0x100000;
    // Try all addresses starting from 2GB to 4GB.
    size_t start_addr = 2 * GB;
    std::string error_msg;
    std::unique_ptr<MemMap> map;
    for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
      map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                     reinterpret_cast<uint8_t*>(start_addr),
                                     size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb*/ true,
                                     /*reuse*/ false,
                                     &error_msg));
      if (map != nullptr) {
        break;
      }
    }
    ASSERT_TRUE(map.get() != nullptr) << error_msg;
    ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
    ASSERT_TRUE(error_msg.empty());
    ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
  }
}

TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= kPageSize;  // Now it's close to the top.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
                                                   reinterpret_cast<uint8_t*>(ptr),
                                                   2 * kPageSize,  // Brings it over the top.
                                                   PROT_READ | PROT_WRITE,
                                                   /*low_4gb*/ false,
                                                   /*reuse*/ false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           kPageSize,
                           PROT_READ | PROT_WRITE,
                           /*low_4gb*/ true,
                           /*reuse*/ false,
                           &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                                   reinterpret_cast<uint8_t*>(0xF0000000),
                                                   0x20000000,
                                                   PROT_READ | PROT_WRITE,
                                                   /*low_4gb*/ true,
                                                   /*reuse*/ false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}
#endif

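// Passing /*reuse*/ true maps over an address range that the first map already
// reserves, so the overlapping request below is expected to succeed.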
TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
                                                   nullptr,
                                                   0x20000,
                                                   PROT_READ | PROT_WRITE,
                                                   /*low_4gb*/ false,
                                                   /*reuse*/ false,
                                                   &error_msg));
  ASSERT_NE(nullptr, map.get());
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
                                                    reinterpret_cast<uint8_t*>(map->BaseBegin()),
                                                    0x10000,
                                                    PROT_READ | PROT_WRITE,
                                                    /*low_4gb*/ false,
                                                    /*reuse*/ true,
                                                    &error_msg));
  ASSERT_NE(nullptr, map2.get());
  ASSERT_TRUE(error_msg.empty());
}

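// CheckNoGaps(begin_map, end_map) verifies that the maps from begin_map
// through end_map cover one contiguous address range with no holes.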
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   /*low_4gb*/ false,
                                                   /*reuse*/ false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    /*low_4gb*/ false,
                                                    /*reuse*/ false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    /*low_4gb*/ false,
                                                    /*reuse*/ false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    /*low_4gb*/ false,
                                                    /*reuse*/ false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two- or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}

TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  std::string error_msg;
  // Cast the page size to size_t.
  const size_t page_size = static_cast<size_t>(kPageSize);
  // Map a 14-page region.
  std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                                  nullptr,
                                                  14 * page_size,
                                                  PROT_READ | PROT_WRITE,
                                                  /*low_4gb*/ false,
                                                  /*reuse*/ false,
                                                  &error_msg));
  uint8_t* base0 = m0->Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0->Size(), 14 * page_size);
  ASSERT_EQ(BaseBegin(m0.get()), base0);
  ASSERT_EQ(BaseSize(m0.get()), m0->Size());

  // Break it into four regions of 3, 4, 3, and 4 pages by using RemapAtEnd.
  std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base1 = m1->Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0->Size(), 3 * page_size);

  std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
                                            "MemMapTest_AlignByTest_map2",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base2 = m2->Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1->Size(), 4 * page_size);

  std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map3",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base3 = m3->Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2->Size(), 3 * page_size);
  ASSERT_EQ(m3->Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0->Size();
  uint8_t* end1 = base1 + m1->Size();
  uint8_t* end2 = base2 + m2->Size();
  uint8_t* end3 = base3 + m3->Size();

  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

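  // The four regions are 3, 4, 3, and 4 pages, so their boundaries fall at
  // page offsets 0, 3, 7, 10, and 14 from base0; which boundaries are 2-page
  // aligned therefore depends on whether base0 itself is.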
  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align each region by 2 * page_size.
  m0->AlignBy(2 * page_size);
  m1->AlignBy(2 * page_size);
  m2->AlignBy(2 * page_size);
  m3->AlignBy(2 * page_size);

  EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));

  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0->Begin(), base0);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
    EXPECT_EQ(m1->Begin(), base1 + page_size);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
    EXPECT_EQ(m2->Begin(), base2 + page_size);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2);
    EXPECT_EQ(m3->Begin(), base3);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3);
  } else {
    EXPECT_EQ(m0->Begin(), base0 + page_size);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0);
    EXPECT_EQ(m1->Begin(), base1);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1);
    EXPECT_EQ(m2->Begin(), base2);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
    EXPECT_EQ(m3->Begin(), base3 + page_size);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
  }
}

}  // namespace art