/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <memory>
#include <random>

#include "bit_utils.h"
#include "common_art_test.h"
#include "logging.h"
#include "memory_tool.h"
#include "mman.h"
#include "unix_file/fd_file.h"

namespace art {

class MemMapTest : public CommonArtTest {
 public:
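  // Probe whether `addr` is currently mapped by msync()ing a single byte:
  // msync() fails with ENOMEM when the range contains unmapped pages.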
  static bool IsAddressMapped(void* addr) {
    bool res = msync(addr, 1, MS_SYNC) == 0;
    if (!res && errno != ENOMEM) {
      PLOG(FATAL) << "Unexpected error occurred on msync";
    }
    return res;
  }

  // Return `size` bytes of random data.
  static std::vector<uint8_t> RandomData(size_t size) {
    std::random_device rd;
    // Note: uint8_t is not a valid IntType for std::uniform_int_distribution;
    // draw ints in [0, 255] and narrow instead.
    std::uniform_int_distribution<int> dist(0, 255);
    std::vector<uint8_t> res;
    res.resize(size);
    for (size_t i = 0; i < size; i++) {
      res[i] = static_cast<uint8_t>(dist(rd));
    }
    return res;
  }

  static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
    // Find a valid map address; the mapping is released when `map` goes out
    // of scope, so only the address survives.
    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("temp",
                                      size,
                                      PROT_READ,
                                      low_4gb,
                                      &error_msg);
    CHECK(map.IsValid()) << error_msg;
    return map.Begin();
  }

  static void RemapAtEndTest(bool low_4gb) {
    std::string error_msg;
    // Cache the page size.
    const size_t page_size = MemMap::GetPageSize();
    // Map a two-page memory region.
    MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
                                     2 * page_size,
                                     PROT_READ | PROT_WRITE,
                                     low_4gb,
                                     &error_msg);
    // Check its state and write to it.
    ASSERT_TRUE(m0.IsValid()) << error_msg;
    uint8_t* base0 = m0.Begin();
    ASSERT_TRUE(base0 != nullptr) << error_msg;
    size_t size0 = m0.Size();
    EXPECT_EQ(m0.Size(), 2 * page_size);
    EXPECT_EQ(m0.BaseBegin(), base0);
    EXPECT_EQ(m0.BaseSize(), size0);
    memset(base0, 42, 2 * page_size);
    // Remap the latter half into a second MemMap.
    MemMap m1 = m0.RemapAtEnd(base0 + page_size,
                              "MemMapTest_RemapAtEndTest_map1",
                              PROT_READ | PROT_WRITE,
                              &error_msg);
    // Check the states of the two maps.
    EXPECT_EQ(m0.Begin(), base0) << error_msg;
    EXPECT_EQ(m0.Size(), page_size);
    EXPECT_EQ(m0.BaseBegin(), base0);
    EXPECT_EQ(m0.BaseSize(), page_size);
    uint8_t* base1 = m1.Begin();
    size_t size1 = m1.Size();
    EXPECT_EQ(base1, base0 + page_size);
    EXPECT_EQ(size1, page_size);
    EXPECT_EQ(m1.BaseBegin(), base1);
    EXPECT_EQ(m1.BaseSize(), size1);
    // Write to the second region.
    memset(base1, 43, page_size);
    // Check the contents of the two regions.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base0[i], 42);
    }
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Unmap the first region.
    m0.Reset();
    // Make sure the second region is still accessible after the first
    // region is unmapped.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
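    // Remap at the very beginning of m1: the tail covers the whole region,
    // so ownership is expected to transfer to m2, leaving m1 invalid.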
    MemMap m2 = m1.RemapAtEnd(m1.Begin(),
                              "MemMapTest_RemapAtEndTest_map1",
                              PROT_READ | PROT_WRITE,
                              &error_msg);
    ASSERT_TRUE(m2.IsValid()) << error_msg;
    ASSERT_FALSE(m1.IsValid());
  }

  void CommonInit() {
    MemMap::Init();
  }

#if defined(__LP64__) && !defined(__x86_64__)
  static uintptr_t GetLinearScanPos() {
    return MemMap::next_mem_pos_;
  }
#endif
};

#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input, uint64_t page_size);
#endif

TEST_F(MemMapTest, PageSize) {
  const size_t page_size = MemMap::GetPageSize();
  EXPECT_EQ(page_size, GetPageSizeSlow());
}

TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  const size_t page_size = MemMap::GetPageSize();
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * page_size, page_size);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0, page_size), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif

// We need mremap to be able to test ReplaceMapping at all.
#if HAVE_MREMAP_SYSCALL
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     page_size,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       page_size,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  void* source_addr = source.Begin();
  void* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  std::vector<uint8_t> data = RandomData(page_size);
  memcpy(source.Begin(), data.data(), data.size());

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(dest.Size(), static_cast<size_t>(page_size));

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     5 * page_size,  // Need to make it larger initially so we
                                                     // know there won't be mappings in the way
                                                     // when we move source.
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       3 * page_size,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * page_size);
  memcpy(source.Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest.SetSize(page_size);

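  // SetSize() is expected to release the trailing pages; the checks below
  // verify that the tail is no longer mapped before the replacement.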
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * page_size));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(page_size));

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * page_size));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * page_size));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     3 * page_size,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       page_size,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * page_size));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * page_size));

  std::vector<uint8_t> data = RandomData(page_size);
  memcpy(source.Begin(), data.data(), page_size);

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(page_size));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * page_size));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap dest =
      MemMap::MapAnonymous(
          "MapAnonymousEmpty-atomic-replace-dest",
          3 * page_size,  // Need to make it larger initially so we know there won't be mappings
                          // in the way when we move source.
          PROT_READ | PROT_WRITE,
          /*low_4gb=*/ false,
          &error_msg);
  ASSERT_TRUE(dest.IsValid());
  // Resize down to 1 page so we can remap the rest.
  dest.SetSize(page_size);
  // Create source from the last 2 pages.
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       dest.Begin() + page_size,
                                       2 * page_size,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       /*reuse=*/ false,
                                       /*reservation=*/ nullptr,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  ASSERT_EQ(dest.Begin() + page_size, source.Begin());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and dest with random data.
  std::vector<uint8_t> data = RandomData(2 * page_size);
  memcpy(source.Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(page_size);
  memcpy(dest.Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(page_size));

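  // The replacement would cover [dest_addr, dest_addr + 2 * page_size),
  // which overlaps source itself, so ReplaceWith is expected to fail and
  // leave both mappings intact.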
  ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source.Size(), data.size());
  ASSERT_EQ(dest.Size(), dest_data.size());

  ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
}
#endif  // HAVE_MREMAP_SYSCALL

TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /*byte_count=*/ 0,
                                    PROT_READ,
                                    /*low_4gb=*/ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             page_size,
                             PROT_READ | PROT_WRITE,
                             /*low_4gb=*/ false,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousFailNullError) {
  // Host system's mmap_min_addr configuration could allow for arbitrarily low addresses to be
  // successfully mapped, breaking the expectation that the MapAnonymous call should fail.
  TEST_DISABLED_FOR_HOST();

  CommonInit();
  uint8_t* invalid_page[16];  // Use this address as mmap hint address.
  const size_t page_size = MemMap::GetPageSize();
  // Test that we don't crash with a null error_str when mapping at an invalid location.
  MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
                                    reinterpret_cast<uint8_t*>(AlignDown(invalid_page, page_size)),
                                    0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    nullptr);
  ASSERT_FALSE(map.IsValid());
}

#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /*byte_count=*/ 0,
                                    PROT_READ,
                                    /*low_4gb=*/ true,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             page_size,
                             PROT_READ | PROT_WRITE,
                             /*low_4gb=*/ true,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}

TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  const size_t map_size = MemMap::GetPageSize();
  std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));
  MemMap map = MemMap::MapFile(/*byte_count=*/ map_size,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start=*/ 0,
                               /*low_4gb=*/ true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), map_size);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
#endif

TEST_F(MemMapTest, MapAnonymousExactAddr) {
  // TODO: The semantics of the MemMap::MapAnonymous() with a given address but without
  // `reuse == true` or `reservation != nullptr` are weird. We should either drop support
  // for it, or take it only as a hint and allow the result to be mapped elsewhere.
  // Currently we're seeing failures with ASAN. b/118408378
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(page_size, /*low_4gb=*/ false);
  // Map at an address that should be available; this should succeed.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     valid_address,
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0.BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1.BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     reinterpret_cast<uint8_t*>(map1.BaseBegin()),
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_FALSE(map2.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif

TEST_F(MemMapTest, RemapFileViewAtEnd) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  ScratchFile scratch_file;

  // Create a scratch file 3 pages large, with a distinct fill byte per page.
  const size_t map_size = 3 * page_size;
  std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
  memset(&data[0], 0x55, page_size);
  memset(&data[page_size], 0x5a, page_size);
  memset(&data[2 * page_size], 0xaa, page_size);
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));

  MemMap map = MemMap::MapFile(/*byte_count=*/ map_size,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start=*/ 0,
                               /*low_4gb=*/ true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), map_size);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
  ASSERT_EQ(data[0], *map.Begin());
  ASSERT_EQ(data[page_size], *(map.Begin() + page_size));
  ASSERT_EQ(data[2 * page_size], *(map.Begin() + 2 * page_size));

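  // Split one page off the end of the mapping at a time, checking that each
  // tail view matches the file contents at the corresponding offset.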
  for (size_t offset = 2 * page_size; offset > 0; offset -= page_size) {
    MemMap tail = map.RemapAtEnd(map.Begin() + offset,
                                 "bad_offset_map",
                                 PROT_READ,
                                 MAP_PRIVATE | MAP_FIXED,
                                 scratch_file.GetFd(),
                                 offset,
                                 &error_msg);
    ASSERT_TRUE(tail.IsValid()) << error_msg;
    ASSERT_TRUE(error_msg.empty());
    ASSERT_EQ(offset, map.Size());
    ASSERT_EQ(static_cast<size_t>(page_size), tail.Size());
    ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
    ASSERT_EQ(data[offset], *tail.Begin());
  }
}

TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // This test does not work under AddressSanitizer.
  // Historical note: This test did not work under Valgrind either.
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  constexpr size_t size = 0x100000;
  // Try all addresses starting from 2GB to 4GB.
  size_t start_addr = 2 * GB;
  std::string error_msg;
  MemMap map;
  for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
    map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                               reinterpret_cast<uint8_t*>(start_addr),
                               size,
                               PROT_READ | PROT_WRITE,
                               /*low_4gb=*/ true,
                               /*reuse=*/ false,
                               /*reservation=*/ nullptr,
                               &error_msg);
    if (map.IsValid()) {
      break;
    }
  }
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
}

TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= page_size;  // Now it's close to the top.
  MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
                                    reinterpret_cast<uint8_t*>(ptr),
                                    2 * page_size,  // Brings it over the top.
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}

#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap map =
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           page_size,
                           PROT_READ | PROT_WRITE,
                           /*low_4gb=*/ true,
                           /*reuse=*/ false,
                           /*reservation=*/ nullptr,
                           &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                    /*addr=*/ reinterpret_cast<uint8_t*>(0xF0000000),
                                    /*byte_count=*/ 0x20000000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ true,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
#endif

TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
                                    /*byte_count=*/ 0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    &error_msg);
  ASSERT_TRUE(map.IsValid());
  ASSERT_TRUE(error_msg.empty());
  MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
                                     /*addr=*/ reinterpret_cast<uint8_t*>(map.BaseBegin()),
                                     /*byte_count=*/ 0x10000,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ true,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid());
  ASSERT_TRUE(error_msg.empty());
}

TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  MemMap reservation = MemMap::MapAnonymous("MapAnonymous0",
                                            page_size * kNumPages,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ false,
                                            &error_msg);
  ASSERT_TRUE(reservation.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(reservation.BaseBegin());

  // Map at the same address, taking from the `reservation`.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map_base, map0.Begin());
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map_base + page_size, map1.Begin());
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map_base + 2 * page_size, map2.Begin());
  ASSERT_FALSE(reservation.IsValid());  // The entire reservation was used.

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));

  // Two- or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));

  // Unmap the middle one.
  map1.Reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
}

TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  // Map a region.
  MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                   14 * page_size,
                                   PROT_READ | PROT_WRITE,
                                   /*low_4gb=*/ false,
                                   &error_msg);
  ASSERT_TRUE(m0.IsValid());
  uint8_t* base0 = m0.Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0.Size(), 14 * page_size);
  ASSERT_EQ(m0.BaseBegin(), base0);
  ASSERT_EQ(m0.BaseSize(), m0.Size());

  // Break it into several regions by using RemapAtEnd.
  MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
                            "MemMapTest_AlignByTest_map1",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base1 = m1.Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0.Size(), 3 * page_size);

  MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
                            "MemMapTest_AlignByTest_map2",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base2 = m2.Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1.Size(), 4 * page_size);

  MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
                            "MemMapTest_AlignByTest_map3",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base3 = m3.Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2.Size(), 3 * page_size);
  ASSERT_EQ(m3.Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0.Size();
  uint8_t* end1 = base1 + m1.Size();
  uint8_t* end2 = base2 + m2.Size();
  uint8_t* end3 = base3 + m3.Size();

  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size.
  m0.AlignBy(2 * page_size);
  m1.AlignBy(2 * page_size);
  m2.AlignBy(2 * page_size);
  m3.AlignBy(2 * page_size);

  EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));

  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0.Begin(), base0);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
    EXPECT_EQ(m1.Begin(), base1 + page_size);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
    EXPECT_EQ(m2.Begin(), base2 + page_size);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2);
    EXPECT_EQ(m3.Begin(), base3);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3);
  } else {
    EXPECT_EQ(m0.Begin(), base0 + page_size);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0);
    EXPECT_EQ(m1.Begin(), base1);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1);
    EXPECT_EQ(m2.Begin(), base2);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
    EXPECT_EQ(m3.Begin(), base3 + page_size);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
  }
}

TEST_F(MemMapTest, Reservation) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  ScratchFile scratch_file;
  const size_t map_size = 5 * page_size;
  std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));

  MemMap reservation = MemMap::MapAnonymous("Test reservation",
                                            map_size,
                                            PROT_NONE,
                                            /*low_4gb=*/ false,
                                            &error_msg);
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_TRUE(error_msg.empty());

  // Map first part of the reservation.
  const size_t chunk1_size = page_size - 1u;
  ASSERT_LT(chunk1_size, map_size) << "We want to split the reservation.";
  uint8_t* addr1 = reservation.Begin();
  MemMap map1 = MemMap::MapFileAtAddress(addr1,
                                         /*byte_count=*/ chunk1_size,
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         scratch_file.GetFd(),
                                         /*start=*/ 0,
                                         /*low_4gb=*/ false,
                                         scratch_file.GetFilename().c_str(),
                                         /*reuse=*/ false,
                                         &reservation,
                                         &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map1.Size(), chunk1_size);
  ASSERT_EQ(addr1, map1.Begin());
  ASSERT_TRUE(reservation.IsValid());
  // Entire pages are taken from the `reservation`.
  ASSERT_LT(map1.End(), map1.BaseEnd());
  ASSERT_EQ(map1.BaseEnd(), reservation.Begin());

  // Map second part as an anonymous mapping.
  const size_t chunk2_size = 2 * page_size;
  DCHECK_LT(chunk2_size, reservation.Size());  // We want to split the reservation.
  uint8_t* addr2 = reservation.Begin();
  MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
                                     addr2,
                                     /*byte_count=*/ chunk2_size,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2.Size(), chunk2_size);
  ASSERT_EQ(addr2, map2.Begin());
  ASSERT_EQ(map2.End(), map2.BaseEnd());  // chunk2_size is page aligned.
  ASSERT_EQ(map2.BaseEnd(), reservation.Begin());

  // Map the rest of the reservation except the last byte.
  const size_t chunk3_size = reservation.Size() - 1u;
  uint8_t* addr3 = reservation.Begin();
  MemMap map3 = MemMap::MapFileAtAddress(addr3,
                                         /*byte_count=*/ chunk3_size,
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         scratch_file.GetFd(),
                                         /*start=*/ dchecked_integral_cast<size_t>(addr3 - addr1),
                                         /*low_4gb=*/ false,
                                         scratch_file.GetFilename().c_str(),
                                         /*reuse=*/ false,
                                         &reservation,
                                         &error_msg);
  ASSERT_TRUE(map3.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map3.Size(), chunk3_size);
  ASSERT_EQ(addr3, map3.Begin());
  // Entire pages are taken from the `reservation`, so it's now exhausted.
  ASSERT_FALSE(reservation.IsValid());

  // Now split the MiddleReservation.
  const size_t chunk2a_size = page_size - 1u;
  DCHECK_LT(chunk2a_size, map2.Size());  // We want to split the reservation.
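  // TakeReservedMemory() carves pages off the start of map2; the requested
  // byte count is rounded up to a page boundary, as the BaseEnd() checks
  // below verify.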
  MemMap map2a = map2.TakeReservedMemory(chunk2a_size);
  ASSERT_TRUE(map2a.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2a.Size(), chunk2a_size);
  ASSERT_EQ(addr2, map2a.Begin());
  ASSERT_TRUE(map2.IsValid());
  ASSERT_LT(map2a.End(), map2a.BaseEnd());
  ASSERT_EQ(map2a.BaseEnd(), map2.Begin());

  // And take the rest of the middle reservation.
  const size_t chunk2b_size = map2.Size() - 1u;
  uint8_t* addr2b = map2.Begin();
  MemMap map2b = map2.TakeReservedMemory(chunk2b_size);
  ASSERT_TRUE(map2b.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2b.Size(), chunk2b_size);
  ASSERT_EQ(addr2b, map2b.Begin());
  ASSERT_FALSE(map2.IsValid());
}

}  // namespace art

namespace {

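// Test listener that dumps /proc/self/maps on fatal test failures, to
// capture the address-space state at the point of failure.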
class DumpMapsOnFailListener : public ::testing::EmptyTestEventListener {
  void OnTestPartResult(const ::testing::TestPartResult& result) override {
    switch (result.type()) {
      case ::testing::TestPartResult::kFatalFailure:
        art::PrintFileToLog("/proc/self/maps", android::base::LogSeverity::ERROR);
        break;

      // TODO: Could consider logging on EXPECT failures.
      case ::testing::TestPartResult::kNonFatalFailure:
      case ::testing::TestPartResult::kSkip:
      case ::testing::TestPartResult::kSuccess:
        break;
    }
  }
};

}  // namespace

// Inject our listener into the test runner.
extern "C"
__attribute__((visibility("default"))) __attribute__((used))
void ArtTestGlobalInit() {
  ::testing::UnitTest::GetInstance()->listeners().Append(new DumpMapsOnFailListener());
}