1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <sys/mman.h>
18
19 #include "base/common_art_test.h"
20 #include "base/utils.h"
21 #include "gc/collector/immune_spaces.h"
22 #include "gc/space/image_space.h"
23 #include "gc/space/space-inl.h"
24 #include "oat_file.h"
25 #include "thread-current-inl.h"
26
27 namespace art {
28 namespace mirror {
29 class Object;
30 } // namespace mirror
31 namespace gc {
32 namespace collector {
33
34 class FakeOatFile : public OatFile {
35 public:
FakeOatFile(uint8_t * begin,uint8_t * end)36 FakeOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
37 begin_ = begin;
38 end_ = end;
39 }
40 };
41
42 class FakeImageSpace : public space::ImageSpace {
43 public:
FakeImageSpace(MemMap && map,accounting::ContinuousSpaceBitmap && live_bitmap,std::unique_ptr<FakeOatFile> && oat_file,MemMap && oat_map)44 FakeImageSpace(MemMap&& map,
45 accounting::ContinuousSpaceBitmap&& live_bitmap,
46 std::unique_ptr<FakeOatFile>&& oat_file,
47 MemMap&& oat_map)
48 : ImageSpace("FakeImageSpace",
49 /*image_location=*/"",
50 /*profile_files=*/{},
51 std::move(map),
52 std::move(live_bitmap),
53 map.End()),
54 oat_map_(std::move(oat_map)) {
55 oat_file_ = std::move(oat_file);
56 oat_file_non_owned_ = oat_file_.get();
57 }
58
59 private:
60 MemMap oat_map_;
61 };
62
63 class ImmuneSpacesTest : public CommonArtTest {
64 static constexpr size_t kMaxBitmaps = 10;
65
66 public:
ImmuneSpacesTest()67 ImmuneSpacesTest() {}
68
ReserveBitmaps()69 void ReserveBitmaps() {
70 // Create a bunch of fake bitmaps since these are required to create image spaces. The bitmaps
71 // do not need to cover the image spaces though.
72 for (size_t i = 0; i < kMaxBitmaps; ++i) {
73 accounting::ContinuousSpaceBitmap bitmap(
74 accounting::ContinuousSpaceBitmap::Create("bitmap",
75 reinterpret_cast<uint8_t*>(kPageSize),
76 kPageSize));
77 CHECK(bitmap.IsValid());
78 live_bitmaps_.push_back(std::move(bitmap));
79 }
80 }
81
82 // Create an image space, the oat file is optional.
CreateImageSpace(size_t image_size,size_t oat_size,MemMap * image_reservation,MemMap * oat_reservation)83 FakeImageSpace* CreateImageSpace(size_t image_size,
84 size_t oat_size,
85 MemMap* image_reservation,
86 MemMap* oat_reservation) {
87 DCHECK(image_reservation != nullptr);
88 DCHECK(oat_reservation != nullptr);
89 std::string error_str;
90 MemMap image_map = MemMap::MapAnonymous("FakeImageSpace",
91 image_size,
92 PROT_READ | PROT_WRITE,
93 /*low_4gb=*/ true,
94 /*reservation=*/ image_reservation,
95 &error_str);
96 if (!image_map.IsValid()) {
97 LOG(ERROR) << error_str;
98 return nullptr;
99 }
100 CHECK(!live_bitmaps_.empty());
101 accounting::ContinuousSpaceBitmap live_bitmap(std::move(live_bitmaps_.back()));
102 live_bitmaps_.pop_back();
103 MemMap oat_map = MemMap::MapAnonymous("OatMap",
104 oat_size,
105 PROT_READ | PROT_WRITE,
106 /*low_4gb=*/ true,
107 /*reservation=*/ oat_reservation,
108 &error_str);
109 if (!oat_map.IsValid()) {
110 LOG(ERROR) << error_str;
111 return nullptr;
112 }
113 std::unique_ptr<FakeOatFile> oat_file(new FakeOatFile(oat_map.Begin(), oat_map.End()));
114 // Create image header.
115 ImageSection sections[ImageHeader::kSectionCount];
116 new (image_map.Begin()) ImageHeader(
117 /*image_reservation_size=*/ image_size,
118 /*component_count=*/ 1u,
119 /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
120 /*image_size=*/ image_size,
121 sections,
122 /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
123 /*oat_checksum=*/ 0u,
124 // The oat file data in the header is always right after the image space.
125 /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
126 /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
127 /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
128 /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
129 /*boot_image_begin=*/ 0u,
130 /*boot_image_size=*/ 0u,
131 /*boot_image_component_count=*/ 0u,
132 /*boot_image_checksum=*/ 0u,
133 /*pointer_size=*/ sizeof(void*));
134 return new FakeImageSpace(std::move(image_map),
135 std::move(live_bitmap),
136 std::move(oat_file),
137 std::move(oat_map));
138 }
139
140 private:
141 // Bitmap pool for pre-allocated fake bitmaps. We need to pre-allocate them since we don't want
142 // them to randomly get placed somewhere where we want an image space.
143 std::vector<accounting::ContinuousSpaceBitmap> live_bitmaps_;
144 };
145
146 class FakeSpace : public space::ContinuousSpace {
147 public:
FakeSpace(uint8_t * begin,uint8_t * end)148 FakeSpace(uint8_t* begin, uint8_t* end)
149 : ContinuousSpace("FakeSpace",
150 space::kGcRetentionPolicyNeverCollect,
151 begin,
152 end,
153 /*limit=*/end) {}
154
GetType() const155 space::SpaceType GetType() const override {
156 return space::kSpaceTypeMallocSpace;
157 }
158
CanMoveObjects() const159 bool CanMoveObjects() const override {
160 return false;
161 }
162
GetLiveBitmap()163 accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
164 return nullptr;
165 }
166
GetMarkBitmap()167 accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
168 return nullptr;
169 }
170 };
171
// Two adjacent spaces added to ImmuneSpaces should coalesce into one immune
// region spanning both.
TEST_F(ImmuneSpacesTest, AppendBasic) {
  ImmuneSpaces spaces;
  uint8_t* const base = reinterpret_cast<uint8_t*>(0x1000);
  FakeSpace a(base, base + 45 * KB);
  FakeSpace b(a.Limit(), a.Limit() + 813 * KB);  // b starts exactly where a ends.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&a);
    spaces.AddSpace(&b);
  }
  EXPECT_TRUE(spaces.ContainsSpace(&a));
  EXPECT_TRUE(spaces.ContainsSpace(&b));
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), a.Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), b.Limit());
}
187
188 // Tests [image][oat][space] producing a single large immune region.
TEST_F(ImmuneSpacesTest,AppendAfterImage)189 TEST_F(ImmuneSpacesTest, AppendAfterImage) {
190 ReserveBitmaps();
191 ImmuneSpaces spaces;
192 constexpr size_t kImageSize = 123 * kPageSize;
193 constexpr size_t kImageOatSize = 321 * kPageSize;
194 constexpr size_t kOtherSpaceSize = 100 * kPageSize;
195
196 std::string error_str;
197 MemMap reservation = MemMap::MapAnonymous("reserve",
198 kImageSize + kImageOatSize + kOtherSpaceSize,
199 PROT_READ | PROT_WRITE,
200 /*low_4gb=*/ true,
201 &error_str);
202 ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
203 MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
204 ASSERT_TRUE(image_reservation.IsValid());
205 ASSERT_TRUE(reservation.IsValid());
206
207 std::unique_ptr<FakeImageSpace> image_space(CreateImageSpace(kImageSize,
208 kImageOatSize,
209 &image_reservation,
210 &reservation));
211 ASSERT_TRUE(image_space != nullptr);
212 ASSERT_FALSE(image_reservation.IsValid());
213 ASSERT_TRUE(reservation.IsValid());
214
215 const ImageHeader& image_header = image_space->GetImageHeader();
216 FakeSpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
217
218 EXPECT_EQ(image_header.GetImageSize(), kImageSize);
219 EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
220 kImageOatSize);
221 EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize);
222 // Check that we do not include the oat if there is no space after.
223 {
224 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
225 spaces.AddSpace(image_space.get());
226 }
227 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
228 image_space->Begin());
229 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
230 image_space->Limit());
231 // Add another space and ensure it gets appended.
232 EXPECT_NE(image_space->Limit(), space.Begin());
233 {
234 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
235 spaces.AddSpace(&space);
236 }
237 EXPECT_TRUE(spaces.ContainsSpace(image_space.get()));
238 EXPECT_TRUE(spaces.ContainsSpace(&space));
239 // CreateLargestImmuneRegion should have coalesced the two spaces since the oat code after the
240 // image prevents gaps.
241 // Check that we have a continuous region.
242 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
243 image_space->Begin());
244 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit());
245 }
246
247 // Test [image1][image2][image1 oat][image2 oat][image3] producing a single large immune region.
TEST_F(ImmuneSpacesTest,MultiImage)248 TEST_F(ImmuneSpacesTest, MultiImage) {
249 ReserveBitmaps();
250 // Image 2 needs to be smaller or else it may be chosen for immune region.
251 constexpr size_t kImage1Size = kPageSize * 17;
252 constexpr size_t kImage2Size = kPageSize * 13;
253 constexpr size_t kImage3Size = kPageSize * 3;
254 constexpr size_t kImage1OatSize = kPageSize * 5;
255 constexpr size_t kImage2OatSize = kPageSize * 8;
256 constexpr size_t kImage3OatSize = kPageSize;
257 constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
258 constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
259 std::string error_str;
260 MemMap reservation = MemMap::MapAnonymous("reserve",
261 kMemorySize,
262 PROT_READ | PROT_WRITE,
263 /*low_4gb=*/ true,
264 &error_str);
265 ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
266 MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
267 ASSERT_TRUE(image_reservation.IsValid());
268 ASSERT_TRUE(reservation.IsValid());
269
270 std::unique_ptr<FakeImageSpace> space1(CreateImageSpace(kImage1Size,
271 kImage1OatSize,
272 &image_reservation,
273 &reservation));
274 ASSERT_TRUE(space1 != nullptr);
275 ASSERT_TRUE(image_reservation.IsValid());
276 ASSERT_TRUE(reservation.IsValid());
277
278 std::unique_ptr<FakeImageSpace> space2(CreateImageSpace(kImage2Size,
279 kImage2OatSize,
280 &image_reservation,
281 &reservation));
282 ASSERT_TRUE(space2 != nullptr);
283 ASSERT_FALSE(image_reservation.IsValid());
284 ASSERT_TRUE(reservation.IsValid());
285
286 // Finally put a 3rd image space.
287 image_reservation = reservation.TakeReservedMemory(kImage3Size);
288 ASSERT_TRUE(image_reservation.IsValid());
289 ASSERT_TRUE(reservation.IsValid());
290 std::unique_ptr<FakeImageSpace> space3(CreateImageSpace(kImage3Size,
291 kImage3OatSize,
292 &image_reservation,
293 &reservation));
294 ASSERT_TRUE(space3 != nullptr);
295 ASSERT_FALSE(image_reservation.IsValid());
296 ASSERT_FALSE(reservation.IsValid());
297
298 // Check that we do not include the oat if there is no space after.
299 ImmuneSpaces spaces;
300 {
301 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
302 LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin());
303 spaces.AddSpace(space1.get());
304 LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin());
305 spaces.AddSpace(space2.get());
306 }
307 // There are no more heap bytes, the immune region should only be the first 2 image spaces and
308 // should exclude the image oat files.
309 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
310 space1->Begin());
311 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
312 space2->Limit());
313
314 // Add another space after the oat files, now it should contain the entire memory region.
315 {
316 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
317 LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin());
318 spaces.AddSpace(space3.get());
319 }
320 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
321 space1->Begin());
322 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
323 space3->Limit());
324
325 // Add a smaller non-adjacent space and ensure it does not become part of the immune region.
326 // Image size is kImageBytes - kPageSize
327 // Oat size is kPageSize.
328 // Guard pages to ensure it is not adjacent to an existing immune region.
329 // Layout: [guard page][image][oat][guard page]
330 constexpr size_t kGuardSize = kPageSize;
331 constexpr size_t kImage4Size = kImageBytes - kPageSize;
332 constexpr size_t kImage4OatSize = kPageSize;
333
334 reservation = MemMap::MapAnonymous("reserve",
335 kImage4Size + kImage4OatSize + kGuardSize * 2,
336 PROT_READ | PROT_WRITE,
337 /*low_4gb=*/ true,
338 &error_str);
339 ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
340 MemMap guard = reservation.TakeReservedMemory(kGuardSize);
341 ASSERT_TRUE(guard.IsValid());
342 ASSERT_TRUE(reservation.IsValid());
343 guard.Reset(); // Release the guard memory.
344 image_reservation = reservation.TakeReservedMemory(kImage4Size);
345 ASSERT_TRUE(image_reservation.IsValid());
346 ASSERT_TRUE(reservation.IsValid());
347 std::unique_ptr<FakeImageSpace> space4(CreateImageSpace(kImage4Size,
348 kImage4OatSize,
349 &image_reservation,
350 &reservation));
351 ASSERT_TRUE(space4 != nullptr);
352 ASSERT_FALSE(image_reservation.IsValid());
353 ASSERT_TRUE(reservation.IsValid());
354 ASSERT_EQ(reservation.Size(), kGuardSize);
355 reservation.Reset(); // Release the guard memory.
356 {
357 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
358 LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
359 spaces.AddSpace(space4.get());
360 }
361 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
362 space1->Begin());
363 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
364 space3->Limit());
365
366 // Add a larger non-adjacent space and ensure it becomes the new largest immune region.
367 // Image size is kImageBytes + kPageSize
368 // Oat size is kPageSize.
369 // Guard pages to ensure it is not adjacent to an existing immune region.
370 // Layout: [guard page][image][oat][guard page]
371 constexpr size_t kImage5Size = kImageBytes + kPageSize;
372 constexpr size_t kImage5OatSize = kPageSize;
373 reservation = MemMap::MapAnonymous("reserve",
374 kImage5Size + kImage5OatSize + kGuardSize * 2,
375 PROT_READ | PROT_WRITE,
376 /*low_4gb=*/ true,
377 &error_str);
378 ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
379 guard = reservation.TakeReservedMemory(kGuardSize);
380 ASSERT_TRUE(guard.IsValid());
381 ASSERT_TRUE(reservation.IsValid());
382 guard.Reset(); // Release the guard memory.
383 image_reservation = reservation.TakeReservedMemory(kImage5Size);
384 ASSERT_TRUE(image_reservation.IsValid());
385 ASSERT_TRUE(reservation.IsValid());
386 std::unique_ptr<FakeImageSpace> space5(CreateImageSpace(kImage5Size,
387 kImage5OatSize,
388 &image_reservation,
389 &reservation));
390 ASSERT_TRUE(space5 != nullptr);
391 ASSERT_FALSE(image_reservation.IsValid());
392 ASSERT_TRUE(reservation.IsValid());
393 ASSERT_EQ(reservation.Size(), kGuardSize);
394 reservation.Reset(); // Release the guard memory.
395 {
396 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
397 LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
398 spaces.AddSpace(space5.get());
399 }
400 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin());
401 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit());
402 }
403
404 } // namespace collector
405 } // namespace gc
406 } // namespace art
407