/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <memory>
#include <random>

#include "common_art_test.h"
#include "common_runtime_test.h"  // For TEST_DISABLED_FOR_MIPS().
#include "logging.h"
#include "memory_tool.h"
#include "mman.h"
#include "unix_file/fd_file.h"

namespace art {

class MemMapTest : public CommonArtTest {
 public:
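  // Probe whether `addr` is backed by a mapping: msync() on a mapped page
  // succeeds, while an unmapped page fails with ENOMEM. Any other errno is
  // unexpected and fatal.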
  static bool IsAddressMapped(void* addr) {
    bool res = msync(addr, 1, MS_SYNC) == 0;
    if (!res && errno != ENOMEM) {
      PLOG(FATAL) << "Unexpected error occurred on msync";
    }
    return res;
  }

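  // Fill a buffer with random bytes; used to verify that mapping contents
  // survive a ReplaceWith(). Note: std::uniform_int_distribution is not
  // defined for 8-bit types, so we draw int values and narrow to uint8_t.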
  static std::vector<uint8_t> RandomData(size_t size) {
    std::random_device rd;
    std::uniform_int_distribution<int> dist(0, 255);
    std::vector<uint8_t> res;
    res.resize(size);
    for (size_t i = 0; i < size; i++) {
      res[i] = static_cast<uint8_t>(dist(rd));
    }
    return res;
  }

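  // Find an address where a mapping of `size` bytes can currently be placed.
  // The temporary mapping is released when `map` goes out of scope, so the
  // returned address is only a hint: nothing prevents the range from being
  // taken again before the caller maps it.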
  static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
    // Find a valid map address and unmap it before returning.
    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("temp",
                                      size,
                                      PROT_READ,
                                      low_4gb,
                                      &error_msg);
    CHECK(map.IsValid());
    return map.Begin();
  }

  static void RemapAtEndTest(bool low_4gb) {
    std::string error_msg;
    // Cast the page size to size_t.
    const size_t page_size = static_cast<size_t>(kPageSize);
    // Map a two-page memory region.
    MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
                                     2 * page_size,
                                     PROT_READ | PROT_WRITE,
                                     low_4gb,
                                     &error_msg);
    // Check its state and write to it.
    ASSERT_TRUE(m0.IsValid());
    uint8_t* base0 = m0.Begin();
    ASSERT_TRUE(base0 != nullptr) << error_msg;
    size_t size0 = m0.Size();
    EXPECT_EQ(m0.Size(), 2 * page_size);
    EXPECT_EQ(m0.BaseBegin(), base0);
    EXPECT_EQ(m0.BaseSize(), size0);
    memset(base0, 42, 2 * page_size);
    // Remap the latter half into a second MemMap.
    MemMap m1 = m0.RemapAtEnd(base0 + page_size,
                              "MemMapTest_RemapAtEndTest_map1",
                              PROT_READ | PROT_WRITE,
                              &error_msg);
    // Check the states of the two maps.
    EXPECT_EQ(m0.Begin(), base0) << error_msg;
    EXPECT_EQ(m0.Size(), page_size);
    EXPECT_EQ(m0.BaseBegin(), base0);
    EXPECT_EQ(m0.BaseSize(), page_size);
    uint8_t* base1 = m1.Begin();
    size_t size1 = m1.Size();
    EXPECT_EQ(base1, base0 + page_size);
    EXPECT_EQ(size1, page_size);
    EXPECT_EQ(m1.BaseBegin(), base1);
    EXPECT_EQ(m1.BaseSize(), size1);
    // Write to the second region.
    memset(base1, 43, page_size);
    // Check the contents of the two regions.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base0[i], 42);
    }
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Unmap the first region.
    m0.Reset();
    // Make sure the second region is still accessible after the first
    // region is unmapped.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Remap all of m1 into a new MemMap; this empties m1 and invalidates it.
    MemMap m2 = m1.RemapAtEnd(m1.Begin(),
                              "MemMapTest_RemapAtEndTest_map2",
                              PROT_READ | PROT_WRITE,
                              &error_msg);
    ASSERT_TRUE(m2.IsValid()) << error_msg;
    ASSERT_FALSE(m1.IsValid());
  }

  void CommonInit() {
    MemMap::Init();
  }

#if defined(__LP64__) && !defined(__x86_64__)
  static uintptr_t GetLinearScanPos() {
    return MemMap::next_mem_pos_;
  }
#endif
};

#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

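// Check that the linear scan position used for low-memory placement starts in
// a sane range: above the first 64 KB (traditionally kept unmapped to catch
// null-pointer dereferences) and below ART_BASE_ADDRESS, where the boot image
// is expected to live.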
TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif

// We need mremap to be able to test ReplaceMapping at all.
#if HAVE_MREMAP_SYSCALL
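// ReplaceWith() moves the source mapping over the destination's address range
// (on Linux this is built on mremap() with MREMAP_MAYMOVE | MREMAP_FIXED), so
// on success the source MemMap becomes invalid and its old range is unmapped.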
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     kPageSize,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  void* source_addr = source.Begin();
  void* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source.Begin(), data.data(), data.size());

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  std::string error_msg;
  // Make the destination larger initially so we know there won't be mappings
  // in the way when we move the source.
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     5 * kPageSize,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       3 * kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * kPageSize);
  memcpy(source.Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest.SetSize(kPageSize);

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     3 * kPageSize,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));

  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source.Begin(), data.data(), kPageSize);

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

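// When the source overlaps the destination, ReplaceWith() must fail and leave
// both mappings in place with their contents untouched.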
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  std::string error_msg;
  // Make the destination larger initially so we know there won't be mappings
  // in the way when we move the source.
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     3 * kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  // Resize down to 1 page so we can remap the rest.
  dest.SetSize(kPageSize);
  // Create the source from the last 2 pages.
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       dest.Begin() + kPageSize,
                                       2 * kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       /*reuse=*/ false,
                                       /*reservation=*/ nullptr,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and the dest with random data.
  std::vector<uint8_t> data = RandomData(2 * kPageSize);
  memcpy(source.Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(kPageSize);
  memcpy(dest.Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source.Size(), data.size());
  ASSERT_EQ(dest.Size(), dest_data.size());

  ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
}
#endif  // HAVE_MREMAP_SYSCALL

TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  std::string error_msg;
  // A zero-length request must fail and report an error.
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /*byte_count=*/ 0,
                                    PROT_READ,
                                    /*low_4gb=*/ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             kPageSize,
                             PROT_READ | PROT_WRITE,
                             /*low_4gb=*/ false,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousFailNullError) {
  CommonInit();
  // Test that we don't crash with a null error_str when mapping at an invalid location.
  MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
                                    reinterpret_cast<uint8_t*>(kPageSize),
                                    0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    nullptr);
  ASSERT_FALSE(map.IsValid());
}

#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /*byte_count=*/ 0,
                                    PROT_READ,
                                    /*low_4gb=*/ true,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             kPageSize,
                             PROT_READ | PROT_WRITE,
                             /*low_4gb=*/ true,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}

TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  constexpr size_t kMapSize = kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
  MemMap map = MemMap::MapFile(/*byte_count=*/ kMapSize,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start=*/ 0,
                               /*low_4gb=*/ true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), kMapSize);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
#endif

TEST_F(MemMapTest, MapAnonymousExactAddr) {
  // TODO: The semantics of the MemMap::MapAnonymous() with a given address but without
  // `reuse == true` or `reservation != nullptr` is weird. We should either drop support
  // for it, or take it only as a hint and allow the result to be mapped elsewhere.
  // Currently we're seeing failures with ASAN. b/118408378
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb=*/ false);
  // Map at an address that should work, which should succeed.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     valid_address,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0.BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1.BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     reinterpret_cast<uint8_t*>(map1.BaseBegin()),
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_FALSE(map2.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(/*low_4gb=*/ false);
}

#ifdef __LP64__
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(/*low_4gb=*/ true);
}
#endif

TEST_F(MemMapTest, RemapFileViewAtEnd) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;

  // Create a scratch file 3 pages large, with a distinct fill byte per page.
  constexpr size_t kMapSize = 3 * kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  memset(&data[0], 0x55, kPageSize);
  memset(&data[kPageSize], 0x5a, kPageSize);
  memset(&data[2 * kPageSize], 0xaa, kPageSize);
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));

  MemMap map = MemMap::MapFile(/*byte_count=*/ kMapSize,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start=*/ 0,
                               /*low_4gb=*/ true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), kMapSize);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
  ASSERT_EQ(data[0], *map.Begin());
  ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
  ASSERT_EQ(data[2 * kPageSize], *(map.Begin() + 2 * kPageSize));

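  // Split off the last page as a new file view, one page at a time, and check
  // that each view sees the matching page of the file. Each `tail` map is
  // unmapped when it goes out of scope at the end of the iteration, and `map`
  // shrinks by one page per RemapAtEnd() call.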
  for (size_t offset = 2 * kPageSize; offset > 0; offset -= kPageSize) {
    MemMap tail = map.RemapAtEnd(map.Begin() + offset,
                                 "bad_offset_map",
                                 PROT_READ,
                                 MAP_PRIVATE | MAP_FIXED,
                                 scratch_file.GetFd(),
                                 offset,
                                 &error_msg);
    ASSERT_TRUE(tail.IsValid()) << error_msg;
    ASSERT_TRUE(error_msg.empty());
    ASSERT_EQ(offset, map.Size());
    ASSERT_EQ(static_cast<size_t>(kPageSize), tail.Size());
    ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
    ASSERT_EQ(data[offset], *tail.Begin());
  }
}

TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  // This test does not work under AddressSanitizer.
  // Historical note: This test did not work under Valgrind either.
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  constexpr size_t size = 0x100000;
  // Try all addresses starting from 2GB to 4GB.
  size_t start_addr = 2 * GB;
  std::string error_msg;
  MemMap map;
  for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
    map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                               reinterpret_cast<uint8_t*>(start_addr),
                               size,
                               PROT_READ | PROT_WRITE,
                               /*low_4gb=*/ true,
                               /*reuse=*/ false,
                               /*reservation=*/ nullptr,
                               &error_msg);
    if (map.IsValid()) {
      break;
    }
  }
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
}

TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= kPageSize;  // Now it's close to the top.
  MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
                                    reinterpret_cast<uint8_t*>(ptr),
                                    2 * kPageSize,  // Brings it over the top.
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}

#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map =
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           kPageSize,
                           PROT_READ | PROT_WRITE,
                           /*low_4gb=*/ true,
                           /*reuse=*/ false,
                           /*reservation=*/ nullptr,
                           &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                    /*addr=*/ reinterpret_cast<uint8_t*>(0xF0000000),
                                    /*byte_count=*/ 0x20000000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ true,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
#endif

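// With `reuse == true` the caller declares that the target range is already
// mapped and may safely be mapped over, so creating a second MemMap on top of
// an existing one is expected to succeed instead of failing on the overlap.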
TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
                                    /*byte_count=*/ 0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    &error_msg);
  ASSERT_TRUE(map.IsValid());
  ASSERT_TRUE(error_msg.empty());
  MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
                                     /*addr=*/ reinterpret_cast<uint8_t*>(map.BaseBegin()),
                                     /*byte_count=*/ 0x10000,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ true,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid());
  ASSERT_TRUE(error_msg.empty());
}

TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  MemMap reservation = MemMap::MapAnonymous("MapAnonymous0",
                                            kPageSize * kNumPages,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ false,
                                            &error_msg);
  ASSERT_TRUE(reservation.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(reservation.BaseBegin());

  // Map at the same address, taking pages from the `reservation`.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map_base, map0.Begin());
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map_base + kPageSize, map1.Begin());
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map_base + 2 * kPageSize, map2.Begin());
  ASSERT_FALSE(reservation.IsValid());  // The entire reservation was used.

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));

  // Two- or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));

  // Unmap the middle one.
  map1.Reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
}

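// AlignBy(n) trims a map in place so that both its begin and end are
// n-aligned, releasing any partial pages cut off at either side. The checks
// below cover both possible parities of the initial base address.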
TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  std::string error_msg;
  // Cast the page size to size_t.
  const size_t page_size = static_cast<size_t>(kPageSize);
  // Map a region.
  MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                   14 * page_size,
                                   PROT_READ | PROT_WRITE,
                                   /*low_4gb=*/ false,
                                   &error_msg);
  ASSERT_TRUE(m0.IsValid());
  uint8_t* base0 = m0.Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0.Size(), 14 * page_size);
  ASSERT_EQ(m0.BaseBegin(), base0);
  ASSERT_EQ(m0.BaseSize(), m0.Size());

  // Break it into several regions by using RemapAtEnd.
  MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
                            "MemMapTest_AlignByTest_map1",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base1 = m1.Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0.Size(), 3 * page_size);

  MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
                            "MemMapTest_AlignByTest_map2",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base2 = m2.Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1.Size(), 4 * page_size);

  MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
                            "MemMapTest_AlignByTest_map3",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base3 = m3.Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2.Size(), 3 * page_size);
  ASSERT_EQ(m3.Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0.Size();
  uint8_t* end1 = base1 + m1.Size();
  uint8_t* end2 = base2 + m2.Size();
  uint8_t* end3 = base3 + m3.Size();

  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size.
  m0.AlignBy(2 * page_size);
  m1.AlignBy(2 * page_size);
  m2.AlignBy(2 * page_size);
  m3.AlignBy(2 * page_size);

  EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));

  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0.Begin(), base0);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
    EXPECT_EQ(m1.Begin(), base1 + page_size);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
    EXPECT_EQ(m2.Begin(), base2 + page_size);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2);
    EXPECT_EQ(m3.Begin(), base3);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3);
  } else {
    EXPECT_EQ(m0.Begin(), base0 + page_size);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0);
    EXPECT_EQ(m1.Begin(), base1);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1);
    EXPECT_EQ(m2.Begin(), base2);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
    EXPECT_EQ(m3.Begin(), base3 + page_size);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
  }
}

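// A PROT_NONE reservation can be consumed piecewise: MapFileAtAddress() and
// MapAnonymous() with a `reservation` argument carve whole pages off its
// front (even for byte counts that are not page multiples), and
// TakeReservedMemory() splits an existing reserved map the same way. Once the
// last page is taken, the reservation MemMap becomes invalid.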
TEST_F(MemMapTest, Reservation) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  constexpr size_t kMapSize = 5 * kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));

  MemMap reservation = MemMap::MapAnonymous("Test reservation",
                                            kMapSize,
                                            PROT_NONE,
                                            /*low_4gb=*/ false,
                                            &error_msg);
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_TRUE(error_msg.empty());

  // Map the first part of the reservation.
  constexpr size_t kChunk1Size = kPageSize - 1u;
  static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
  uint8_t* addr1 = reservation.Begin();
  MemMap map1 = MemMap::MapFileAtAddress(addr1,
                                         /*byte_count=*/ kChunk1Size,
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         scratch_file.GetFd(),
                                         /*start=*/ 0,
                                         /*low_4gb=*/ false,
                                         scratch_file.GetFilename().c_str(),
                                         /*reuse=*/ false,
                                         &reservation,
                                         &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map1.Size(), kChunk1Size);
  ASSERT_EQ(addr1, map1.Begin());
  ASSERT_TRUE(reservation.IsValid());
  // Entire pages are taken from the `reservation`.
  ASSERT_LT(map1.End(), map1.BaseEnd());
  ASSERT_EQ(map1.BaseEnd(), reservation.Begin());

  // Map the second part as an anonymous mapping.
  constexpr size_t kChunk2Size = 2 * kPageSize;
  DCHECK_LT(kChunk2Size, reservation.Size());  // We want to split the reservation.
  uint8_t* addr2 = reservation.Begin();
  MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
                                     addr2,
                                     /*byte_count=*/ kChunk2Size,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2.Size(), kChunk2Size);
  ASSERT_EQ(addr2, map2.Begin());
  ASSERT_EQ(map2.End(), map2.BaseEnd());  // kChunk2Size is page aligned.
  ASSERT_EQ(map2.BaseEnd(), reservation.Begin());

  // Map the rest of the reservation except the last byte.
  const size_t kChunk3Size = reservation.Size() - 1u;
  uint8_t* addr3 = reservation.Begin();
  MemMap map3 = MemMap::MapFileAtAddress(addr3,
                                         /*byte_count=*/ kChunk3Size,
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         scratch_file.GetFd(),
                                         /*start=*/ dchecked_integral_cast<size_t>(addr3 - addr1),
                                         /*low_4gb=*/ false,
                                         scratch_file.GetFilename().c_str(),
                                         /*reuse=*/ false,
                                         &reservation,
                                         &error_msg);
  ASSERT_TRUE(map3.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map3.Size(), kChunk3Size);
  ASSERT_EQ(addr3, map3.Begin());
  // Entire pages are taken from the `reservation`, so it's now exhausted.
  ASSERT_FALSE(reservation.IsValid());

  // Now split the MiddleReservation.
  constexpr size_t kChunk2ASize = kPageSize - 1u;
  DCHECK_LT(kChunk2ASize, map2.Size());  // We want to split the reservation.
  MemMap map2a = map2.TakeReservedMemory(kChunk2ASize);
  ASSERT_TRUE(map2a.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2a.Size(), kChunk2ASize);
  ASSERT_EQ(addr2, map2a.Begin());
  ASSERT_TRUE(map2.IsValid());
  ASSERT_LT(map2a.End(), map2a.BaseEnd());
  ASSERT_EQ(map2a.BaseEnd(), map2.Begin());

  // And take the rest of the middle reservation, again except the last byte.
  const size_t kChunk2BSize = map2.Size() - 1u;
  uint8_t* addr2b = map2.Begin();
  MemMap map2b = map2.TakeReservedMemory(kChunk2BSize);
  ASSERT_TRUE(map2b.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2b.Size(), kChunk2BSize);
  ASSERT_EQ(addr2b, map2b.Begin());
  ASSERT_FALSE(map2.IsValid());
}

}  // namespace art

namespace {

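// On a fatal test failure, dump /proc/self/maps: nearly every failure mode in
// these tests comes down to unexpected address-space layout.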
class DumpMapsOnFailListener : public testing::EmptyTestEventListener {
  void OnTestPartResult(const testing::TestPartResult& result) override {
    switch (result.type()) {
      case testing::TestPartResult::kFatalFailure:
        art::PrintFileToLog("/proc/self/maps", android::base::LogSeverity::ERROR);
        break;

      // TODO: Could consider logging on EXPECT failures.
      case testing::TestPartResult::kNonFatalFailure:
      case testing::TestPartResult::kSkip:
      case testing::TestPartResult::kSuccess:
        break;
    }
  }
};

}  // namespace

// Inject our listener into the test runner.
extern "C"
__attribute__((visibility("default"))) __attribute__((used))
void ArtTestGlobalInit() {
  LOG(ERROR) << "Installing listener";
  testing::UnitTest::GetInstance()->listeners().Append(new DumpMapsOnFailListener());
}