/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17 #include "common_runtime_test.h"
18 #include "gc/collector/immune_spaces.h"
19 #include "gc/space/image_space.h"
20 #include "gc/space/space-inl.h"
21 #include "oat_file.h"
22 #include "thread-inl.h"
23
24 namespace art {
25 namespace mirror {
26 class Object;
27 } // namespace mirror
28 namespace gc {
29 namespace collector {
30
31 class DummyOatFile : public OatFile {
32 public:
DummyOatFile(uint8_t * begin,uint8_t * end)33 DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
34 begin_ = begin;
35 end_ = end;
36 }
37 };
38
39 class DummyImageSpace : public space::ImageSpace {
40 public:
DummyImageSpace(MemMap * map,accounting::ContinuousSpaceBitmap * live_bitmap,std::unique_ptr<DummyOatFile> && oat_file,std::unique_ptr<MemMap> && oat_map)41 DummyImageSpace(MemMap* map,
42 accounting::ContinuousSpaceBitmap* live_bitmap,
43 std::unique_ptr<DummyOatFile>&& oat_file,
44 std::unique_ptr<MemMap>&& oat_map)
45 : ImageSpace("DummyImageSpace",
46 /*image_location*/"",
47 map,
48 live_bitmap,
49 map->End()),
50 oat_map_(std::move(oat_map)) {
51 oat_file_ = std::move(oat_file);
52 oat_file_non_owned_ = oat_file_.get();
53 }
54
55 private:
56 std::unique_ptr<MemMap> oat_map_;
57 };
58
59 class ImmuneSpacesTest : public CommonRuntimeTest {
60 static constexpr size_t kMaxBitmaps = 10;
61
62 public:
ImmuneSpacesTest()63 ImmuneSpacesTest() {}
64
ReserveBitmaps()65 void ReserveBitmaps() {
66 // Create a bunch of dummy bitmaps since these are required to create image spaces. The bitmaps
67 // do not need to cover the image spaces though.
68 for (size_t i = 0; i < kMaxBitmaps; ++i) {
69 std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
70 accounting::ContinuousSpaceBitmap::Create("bitmap",
71 reinterpret_cast<uint8_t*>(kPageSize),
72 kPageSize));
73 CHECK(bitmap != nullptr);
74 live_bitmaps_.push_back(std::move(bitmap));
75 }
76 }
77
78 // Create an image space, the oat file is optional.
CreateImageSpace(uint8_t * image_begin,size_t image_size,uint8_t * oat_begin,size_t oat_size)79 DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
80 size_t image_size,
81 uint8_t* oat_begin,
82 size_t oat_size) {
83 std::string error_str;
84 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
85 image_begin,
86 image_size,
87 PROT_READ | PROT_WRITE,
88 /*low_4gb*/true,
89 /*reuse*/false,
90 &error_str));
91 if (map == nullptr) {
92 LOG(ERROR) << error_str;
93 return nullptr;
94 }
95 CHECK(!live_bitmaps_.empty());
96 std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
97 live_bitmaps_.pop_back();
98 std::unique_ptr<MemMap> oat_map(MemMap::MapAnonymous("OatMap",
99 oat_begin,
100 oat_size,
101 PROT_READ | PROT_WRITE,
102 /*low_4gb*/true,
103 /*reuse*/false,
104 &error_str));
105 if (oat_map == nullptr) {
106 LOG(ERROR) << error_str;
107 return nullptr;
108 }
109 std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map->Begin(), oat_map->End()));
110 // Create image header.
111 ImageSection sections[ImageHeader::kSectionCount];
112 new (map->Begin()) ImageHeader(
113 /*image_begin*/PointerToLowMemUInt32(map->Begin()),
114 /*image_size*/map->Size(),
115 sections,
116 /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
117 /*oat_checksum*/0u,
118 // The oat file data in the header is always right after the image space.
119 /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
120 /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
121 /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
122 /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
123 /*boot_image_begin*/0u,
124 /*boot_image_size*/0u,
125 /*boot_oat_begin*/0u,
126 /*boot_oat_size*/0u,
127 /*pointer_size*/sizeof(void*),
128 /*compile_pic*/false,
129 /*is_pic*/false,
130 ImageHeader::kStorageModeUncompressed,
131 /*storage_size*/0u);
132 return new DummyImageSpace(map.release(),
133 live_bitmap.release(),
134 std::move(oat_file),
135 std::move(oat_map));
136 }
137
138 // Does not reserve the memory, the caller needs to be sure no other threads will map at the
139 // returned address.
GetContinuousMemoryRegion(size_t size)140 static uint8_t* GetContinuousMemoryRegion(size_t size) {
141 std::string error_str;
142 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("reserve",
143 nullptr,
144 size,
145 PROT_READ | PROT_WRITE,
146 /*low_4gb*/true,
147 /*reuse*/false,
148 &error_str));
149 if (map == nullptr) {
150 LOG(ERROR) << "Failed to allocate memory region " << error_str;
151 return nullptr;
152 }
153 return map->Begin();
154 }
155
156 private:
157 // Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
158 // them to randomly get placed somewhere where we want an image space.
159 std::vector<std::unique_ptr<accounting::ContinuousSpaceBitmap>> live_bitmaps_;
160 };
161
162 class DummySpace : public space::ContinuousSpace {
163 public:
DummySpace(uint8_t * begin,uint8_t * end)164 DummySpace(uint8_t* begin, uint8_t* end)
165 : ContinuousSpace("DummySpace",
166 space::kGcRetentionPolicyNeverCollect,
167 begin,
168 end,
169 /*limit*/end) {}
170
GetType() const171 space::SpaceType GetType() const OVERRIDE {
172 return space::kSpaceTypeMallocSpace;
173 }
174
CanMoveObjects() const175 bool CanMoveObjects() const OVERRIDE {
176 return false;
177 }
178
GetLiveBitmap() const179 accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
180 return nullptr;
181 }
182
GetMarkBitmap() const183 accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
184 return nullptr;
185 }
186 };
187
// Two adjacent spaces should coalesce into a single immune region that spans
// both of them.
TEST_F(ImmuneSpacesTest, AppendBasic) {
  ImmuneSpaces spaces;
  uint8_t* const base = reinterpret_cast<uint8_t*>(0x1000);
  DummySpace space_a(base, base + 45 * KB);
  DummySpace space_b(space_a.Limit(), space_a.Limit() + 813 * KB);
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&space_a);
    spaces.AddSpace(&space_b);
  }
  EXPECT_TRUE(spaces.ContainsSpace(&space_a));
  EXPECT_TRUE(spaces.ContainsSpace(&space_b));
  // The spaces are adjacent, so the largest immune region covers both.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space_a.Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space_b.Limit());
}
203
// Tests [image][oat][space] producing a single large immune region.
TEST_F(ImmuneSpacesTest, AppendAfterImage) {
  ReserveBitmaps();
  ImmuneSpaces spaces;
  constexpr size_t kImageSize = 123 * kPageSize;
  constexpr size_t kImageOatSize = 321 * kPageSize;
  constexpr size_t kOtherSpaceSize = 100 * kPageSize;

  // Lay out [image][oat][other space] back to back in one region.
  uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize);

  std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory,
                                                                kImageSize,
                                                                memory + kImageSize,
                                                                kImageOatSize));
  ASSERT_TRUE(image_space != nullptr);
  const ImageHeader& image_header = image_space->GetImageHeader();
  DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);

  // Sanity-check the fabricated header and oat file.
  EXPECT_EQ(image_header.GetImageSize(), kImageSize);
  EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
            kImageOatSize);
  EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize);
  // With only the image added, the immune region must stop at the image limit
  // and not swallow the trailing oat file.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(image_space.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            image_space->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            image_space->Limit());
  // Add another space right after the oat file and ensure it gets appended.
  EXPECT_NE(image_space->Limit(), space.Begin());
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&space);
  }
  EXPECT_TRUE(spaces.ContainsSpace(image_space.get()));
  EXPECT_TRUE(spaces.ContainsSpace(&space));
  // CreateLargestImmuneRegion should have coalesced the two spaces: the oat
  // code sandwiched between them prevents any gap, yielding one continuous
  // region from the image begin to the other space's limit.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            image_space->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit());
}
250
// Test [image1][image2][image1 oat][image2 oat][image3][image3 oat] producing a single large
// immune region.
TEST_F(ImmuneSpacesTest, MultiImage) {
  ReserveBitmaps();
  // Image 2 needs to be smaller or else it may be chosen for the immune region.
  constexpr size_t kImage1Size = kPageSize * 17;
  constexpr size_t kImage2Size = kPageSize * 13;
  constexpr size_t kImage3Size = kPageSize * 3;
  constexpr size_t kImage1OatSize = kPageSize * 5;
  constexpr size_t kImage2OatSize = kPageSize * 8;
  constexpr size_t kImage3OatSize = kPageSize;
  constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
  constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
  // Carve the region into [image1][image2][image1 oat][image2 oat][image3].
  uint8_t* memory = GetContinuousMemoryRegion(kMemorySize);
  uint8_t* space1_begin = memory;
  memory += kImage1Size;
  uint8_t* space2_begin = memory;
  memory += kImage2Size;
  uint8_t* space1_oat_begin = memory;
  memory += kImage1OatSize;
  uint8_t* space2_oat_begin = memory;
  memory += kImage2OatSize;
  uint8_t* space3_begin = memory;

  std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin,
                                                           kImage1Size,
                                                           space1_oat_begin,
                                                           kImage1OatSize));
  ASSERT_TRUE(space1 != nullptr);

  std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin,
                                                           kImage2Size,
                                                           space2_oat_begin,
                                                           kImage2OatSize));
  ASSERT_TRUE(space2 != nullptr);

  // Finally put a 3rd image space (with its oat directly after it).
  std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin,
                                                           kImage3Size,
                                                           space3_begin + kImage3Size,
                                                           kImage3OatSize));
  ASSERT_TRUE(space3 != nullptr);

  // Check that we do not include the oat if there is no space after.
  ImmuneSpaces spaces;
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin());
    spaces.AddSpace(space1.get());
    LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin());
    spaces.AddSpace(space2.get());
  }
  // There are no more heap bytes, the immune region should only be the first 2
  // image spaces and should exclude the image oat files.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space2->Limit());

  // Add another space after the oat files, now it should contain the entire memory region.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin());
    spaces.AddSpace(space3.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a smaller non-adjacent space and ensure it does not become part of the
  // immune region. Image size is kImageBytes - kPageSize, oat size is
  // kPageSize. Guard pages ensure it is not adjacent to an existing immune
  // region. Layout: [guard page][image][oat][guard page]
  constexpr size_t kGuardSize = kPageSize;
  constexpr size_t kImage4Size = kImageBytes - kPageSize;
  constexpr size_t kImage4OatSize = kPageSize;
  uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2);
  std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize,
                                                           kImage4Size,
                                                           memory2 + kGuardSize + kImage4Size,
                                                           kImage4OatSize));
  ASSERT_TRUE(space4 != nullptr);
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
    spaces.AddSpace(space4.get());
  }
  // Space4 is smaller than the existing region, so the region is unchanged.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a larger non-adjacent space and ensure it becomes the new largest
  // immune region. Image size is kImageBytes + kPageSize, oat size is
  // kPageSize. Guard pages ensure it is not adjacent to an existing immune
  // region. Layout: [guard page][image][oat][guard page]
  constexpr size_t kImage5Size = kImageBytes + kPageSize;
  constexpr size_t kImage5OatSize = kPageSize;
  uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2);
  std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize,
                                                           kImage5Size,
                                                           memory3 + kGuardSize + kImage5Size,
                                                           kImage5OatSize));
  ASSERT_TRUE(space5 != nullptr);
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
    spaces.AddSpace(space5.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit());
}
366
367 } // namespace collector
368 } // namespace gc
369 } // namespace art
370