/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <memory>

#include "gtest/gtest.h"

namespace art {

class MemMapTest : public testing::Test {
 public:
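  // Accessors for MemMap's internal base address and size, used by the checks below.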
  static byte* BaseBegin(MemMap* mem_map) {
    return reinterpret_cast<byte*>(mem_map->base_begin_);
  }
  static size_t BaseSize(MemMap* mem_map) {
    return mem_map->base_size_;
  }

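  // Shared body for the RemapAtEnd tests: map two pages, split the second page
  // off into its own MemMap with RemapAtEnd(), and verify that both halves
  // remain independently usable, even after the first one is unmapped.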
  static void RemapAtEndTest(bool low_4gb) {
    std::string error_msg;
    // Cast the page size to size_t.
    const size_t page_size = static_cast<size_t>(kPageSize);
    // Map a two-page memory region.
    MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
                                      nullptr,
                                      2 * page_size,
                                      PROT_READ | PROT_WRITE,
                                      low_4gb,
                                      &error_msg);
    ASSERT_TRUE(m0 != nullptr) << error_msg;
    // Check its state and write to it.
    byte* base0 = m0->Begin();
    ASSERT_TRUE(base0 != nullptr) << error_msg;
    size_t size0 = m0->Size();
    EXPECT_EQ(m0->Size(), 2 * page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), size0);
    memset(base0, 42, 2 * page_size);
    // Remap the latter half into a second MemMap.
    MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
                                "MemMapTest_RemapAtEndTest_map1",
                                PROT_READ | PROT_WRITE,
                                &error_msg);
    ASSERT_TRUE(m1 != nullptr) << error_msg;
    // Check the states of the two maps.
    EXPECT_EQ(m0->Begin(), base0) << error_msg;
    EXPECT_EQ(m0->Size(), page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), page_size);
    byte* base1 = m1->Begin();
    size_t size1 = m1->Size();
    EXPECT_EQ(base1, base0 + page_size);
    EXPECT_EQ(size1, page_size);
    EXPECT_EQ(BaseBegin(m1), base1);
    EXPECT_EQ(BaseSize(m1), size1);
    // Write to the second region.
    memset(base1, 43, page_size);
    // Check the contents of the two regions.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base0[i], 42);
    }
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Unmap the first region.
    delete m0;
    // Make sure the second region is still accessible after the first
    // region is unmapped.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    delete m1;
  }

  void CommonInit() {
    MemMap::Init();
  }

#if defined(__LP64__) && !defined(__x86_64__)
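  // Exposes MemMap's internal linear-scan position (next_mem_pos_), used when
  // choosing map addresses on 64-bit, non-x86-64 targets.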
  static uintptr_t GetLinearScanPos() {
    return MemMap::next_mem_pos_;
  }
#endif
};

#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

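// The initial linear-scan position should be at least 64 KB and below
// ART_BASE_ADDRESS; on bionic, CreateStartPos() should also produce values
// that stay below ART_BASE_ADDRESS.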
TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a series of values; consecutive results should differ.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif

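// Mapping zero bytes is allowed and should yield a valid (empty) MemMap; a
// subsequent one-page mapping through the same pointer should also succeed.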
TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   0,
                                                   PROT_READ,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
                                 nullptr,
                                 kPageSize,
                                 PROT_READ | PROT_WRITE,
                                 false,
                                 &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}

#ifdef __LP64__
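// With low_4gb set, the resulting mapping must begin below the 4 GiB boundary.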
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   kPageSize,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
#endif

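// Mapping at an explicitly requested address succeeds while the range is free
// and fails once another MemMap already occupies it.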
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // Map at an address that should be available; this should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    reinterpret_cast<byte*>(ART_BASE_ADDRESS),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0->BaseBegin() == reinterpret_cast<void*>(ART_BASE_ADDRESS));
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<byte*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif

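// A large low_4gb mapping at an explicit address above ART_BASE_ADDRESS should
// land exactly at the requested address.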
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
  std::string error_msg;
  CommonInit();
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                                   reinterpret_cast<byte*>(start_addr),
                                                   0x21000000,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), start_addr);
}

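// A request whose end address would wrap around the top of the address space
// must be rejected.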
TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= kPageSize;  // Now it's close to the top.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
                                                   reinterpret_cast<byte*>(ptr),
                                                   2 * kPageSize,  // brings it over the top.
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

#ifdef __LP64__
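// A low_4gb request whose expected address sits at the 4 GiB boundary cannot
// be satisfied.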
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                                                   reinterpret_cast<byte*>(UINT64_C(0x100000000)),
                                                   kPageSize,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

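// A low_4gb request whose range would extend past the 4 GiB boundary must
// also fail.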
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                                   reinterpret_cast<byte*>(0xF0000000),
                                                   0x20000000,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}
#endif

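// CheckNoGaps() should report true only while the given maps cover a
// contiguous address range with no hole between them.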
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  byte* map_base = reinterpret_cast<byte*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two- or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}

}  // namespace art