1 /*
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <gtest/gtest.h>
17
18 #include "runtime/include/runtime.h"
19 #include "runtime/include/panda_vm.h"
20 #include "runtime/include/class_linker.h"
21 #include "runtime/include/thread_scopes.h"
22 #include "runtime/mem/vm_handle.h"
23 #include "runtime/handle_scope-inl.h"
24 #include "runtime/include/coretypes/array.h"
25 #include "runtime/include/coretypes/string.h"
26 #include "runtime/mem/gc/card_table.h"
27 #include "runtime/mem/gc/g1/g1-allocator.h"
28 #include "runtime/mem/rem_set-inl.h"
29 #include "runtime/mem/region_space.h"
30 #include "runtime/mem/object_helpers.h"
31
32 #include "test_utils.h"
33
34 namespace panda::mem {
35
36 class G1GCTest : public testing::Test {
37 public:
G1GCTest(size_t promotion_region_alive_rate=100)38 explicit G1GCTest(size_t promotion_region_alive_rate = 100)
39 {
40 RuntimeOptions options;
41 options.SetBootClassSpaces({"core"});
42 options.SetRuntimeType("core");
43 options.SetGcType("g1-gc");
44 options.SetRunGcInPlace(true);
45 options.SetCompilerEnableJit(false);
46 options.SetGcWorkersCount(0);
47 options.SetG1PromotionRegionAliveRate(promotion_region_alive_rate);
48 options.SetGcTriggerType("debug-never");
49 options.SetShouldLoadBootPandaFiles(false);
50 options.SetShouldInitializeIntrinsics(false);
51
52 Runtime::Create(options);
53 }
54
~G1GCTest()55 ~G1GCTest()
56 {
57 Runtime::Destroy();
58 }
59
GetHumongousStringLength()60 static constexpr size_t GetHumongousStringLength()
61 {
62 // Total string size will be DEFAULT_REGION_SIZE + sizeof(String).
63 // It is enought to make it humongous.
64 return DEFAULT_REGION_SIZE;
65 }
66
GetHumongousArrayLength(ClassRoot class_root)67 size_t GetHumongousArrayLength(ClassRoot class_root)
68 {
69 Runtime *runtime = Runtime::GetCurrent();
70 LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
71 auto *array_class = runtime->GetClassLinker()->GetExtension(ctx)->GetClassRoot(class_root);
72 EXPECT_TRUE(array_class->IsArrayClass());
73 if (!array_class->IsArrayClass()) {
74 return 0;
75 }
76 // Total array size will be DEFAULT_REGION_SIZE * elem_size + sizeof(Array).
77 // It is enought to make it humongous.
78 size_t elem_size = array_class->GetComponentSize();
79 return DEFAULT_REGION_SIZE / elem_size + 1;
80 }
81
AllocArray(size_t length,ClassRoot class_root,bool nonmovable)82 coretypes::Array *AllocArray(size_t length, ClassRoot class_root, bool nonmovable)
83 {
84 ObjectAllocator object_allocator;
85 return object_allocator.AllocArray(length, class_root, nonmovable);
86 }
87
AllocString(size_t length)88 coretypes::String *AllocString(size_t length)
89 {
90 ObjectAllocator object_allocator;
91 return object_allocator.AllocString(length);
92 }
93
AllocObjectInYoung()94 ObjectHeader *AllocObjectInYoung()
95 {
96 ObjectAllocator object_allocator;
97 return object_allocator.AllocObjectInYoung();
98 }
99
GetAllocator()100 ObjectAllocatorG1<> *GetAllocator()
101 {
102 Runtime *runtime = Runtime::GetCurrent();
103 GC *gc = runtime->GetPandaVM()->GetGC();
104 return static_cast<ObjectAllocatorG1<> *>(gc->GetObjectAllocator());
105 }
106 };
107
108 class RemSetChecker : public GCListener {
109 public:
RemSetChecker(ObjectHeader * obj,ObjectHeader * ref)110 explicit RemSetChecker(ObjectHeader *obj, ObjectHeader *ref)
111 : obj_(MTManagedThread::GetCurrent(), obj), ref_(MTManagedThread::GetCurrent(), ref)
112 {
113 }
114
GCPhaseStarted(GCPhase phase)115 void GCPhaseStarted(GCPhase phase) override
116 {
117 if (phase == GCPhase::GC_PHASE_MARK_YOUNG) {
118 Check();
119 }
120 }
121
GCPhaseFinished(GCPhase phase)122 void GCPhaseFinished(GCPhase phase) override
123 {
124 if (phase == GCPhase::GC_PHASE_COLLECT_YOUNG_AND_MOVE) {
125 Check();
126 }
127 }
128
129 private:
Check()130 void Check()
131 {
132 RemSet<> *remset = ObjectToRegion(ref_.GetPtr())->GetRemSet();
133 ASSERT_NE(nullptr, remset);
134 bool has_object = false;
135 ObjectHeader *object = obj_.GetPtr();
136 remset->VisitMarkedCards([object, &has_object](ObjectHeader *obj) { has_object |= obj == object; });
137 ASSERT_TRUE(has_object);
138 }
139
140 private:
141 VMHandle<ObjectHeader> obj_;
142 VMHandle<ObjectHeader> ref_;
143 };
144
// AddrToRegion must resolve addresses of young, non-movable and humongous
// objects to the same regions that ObjectToRegion reports, and those regions
// must actually contain the objects.
TEST_F(G1GCTest, TestAddrToRegion)
{
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    size_t humongous_len = GetHumongousArrayLength(ClassRoot::ARRAY_U8);
    ScopedManagedCodeThread managed(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<ObjectHeader> young(thread, AllocArray(0, ClassRoot::ARRAY_U8, false));
    ASSERT_NE(nullptr, young.GetPtr());
    VMHandle<ObjectHeader> nonmovable(thread, AllocArray(0, ClassRoot::ARRAY_U8, true));
    ASSERT_NE(nullptr, nonmovable.GetPtr());
    VMHandle<ObjectHeader> humongous(thread, AllocArray(humongous_len, ClassRoot::ARRAY_U8, false));
    ASSERT_NE(nullptr, humongous.GetPtr());

    // Local helper: true iff iterating the region's objects visits `target`.
    auto region_contains = [](Region *region, ObjectHeader *target) {
        bool found = false;
        region->IterateOverObjects([&found, target](ObjectHeader *visited) { found |= visited == target; });
        return found;
    };

    Region *young_region = ObjectToRegion(young.GetPtr());
    ASSERT_NE(nullptr, young_region);
    ASSERT_EQ(young_region, AddrToRegion(young.GetPtr()));
    ASSERT_TRUE(region_contains(young_region, young.GetPtr()));

    Region *nonmovable_region = ObjectToRegion(nonmovable.GetPtr());
    ASSERT_NE(nullptr, nonmovable_region);
    ASSERT_EQ(nonmovable_region, AddrToRegion(nonmovable.GetPtr()));
    ASSERT_TRUE(nonmovable_region->GetLiveBitmap()->Test(nonmovable.GetPtr()));

    Region *humongous_region = ObjectToRegion(humongous.GetPtr());
    ASSERT_NE(nullptr, humongous_region);
    ASSERT_EQ(humongous_region, AddrToRegion(humongous.GetPtr()));
    // An address inside the humongous object's body maps to the same region.
    ASSERT_EQ(humongous_region, AddrToRegion(ToVoidPtr(ToUintPtr(humongous.GetPtr()) + DEFAULT_REGION_SIZE)));
    ASSERT_TRUE(region_contains(humongous_region, humongous.GetPtr()));
}
181
// An array longer than a regular region can hold must be allocated in a
// region flagged as a large-object (humongous) region.
TEST_F(G1GCTest, TestAllocHumongousArray)
{
    MTManagedThread *current = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread managed(current);
    size_t length = GetHumongousArrayLength(ClassRoot::ARRAY_U8);
    ObjectHeader *array = AllocArray(length, ClassRoot::ARRAY_U8, false);
    Region *region = ObjectToRegion(array);
    ASSERT_TRUE(region->HasFlag(RegionFlag::IS_LARGE_OBJECT));
}
189
// Check that a reference from a non-movable object to a young object is
// tracked in the remembered set (verified by RemSetChecker) and that a young
// GC moves the young object and updates the reference.
TEST_F(G1GCTest, NonMovable2YoungRef)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    ClassLinker *class_linker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    static constexpr size_t array_length = 100;
    coretypes::Array *non_movable_obj = nullptr;
    uintptr_t prev_young_addr = 0;
    Class *klass = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);
    non_movable_obj = coretypes::Array::Create(klass, array_length, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    coretypes::String *young_obj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM());
    // Store the young reference into the non-movable array and remember the
    // young object's address so we can detect that GC moved it.
    non_movable_obj->Set(0, young_obj);
    prev_young_addr = ToUintPtr(young_obj);
    VMHandle<coretypes::Array> non_movable_obj_ptr(thread, non_movable_obj);

    // Trigger GC
    RemSetChecker listener(non_movable_obj, non_movable_obj->Get<ObjectHeader *>(0));
    gc->AddListener(&listener);

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    auto young_obj_2 = static_cast<coretypes::String *>(non_movable_obj_ptr->Get<ObjectHeader *>(0));
    // Check GC has moved the young obj
    ASSERT_NE(prev_young_addr, ToUintPtr(young_obj_2));
    // Check young object is accessible
    ASSERT_EQ(0, young_obj_2->GetLength());
}
228
// Check that a reference from a humongous object to a young object is tracked
// in the remembered set (verified by RemSetChecker) and updated when a young
// GC moves the young object.
TEST_F(G1GCTest, Humongous2YoungRef)
{
    Runtime *runtime = Runtime::GetCurrent();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    uintptr_t prev_young_addr = 0;
    size_t array_length = GetHumongousArrayLength(ClassRoot::ARRAY_STRING);
    VMHandle<coretypes::Array> humongous_obj(thread, AllocArray(array_length, ClassRoot::ARRAY_STRING, false));
    ObjectHeader *young_obj = AllocObjectInYoung();
    // Store the young reference into the humongous array and remember the
    // young object's address so we can detect that GC moved it.
    humongous_obj->Set(0, young_obj);
    prev_young_addr = ToUintPtr(young_obj);

    // Trigger GC
    RemSetChecker listener(humongous_obj.GetPtr(), humongous_obj->Get<ObjectHeader *>(0));
    gc->AddListener(&listener);

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    young_obj = static_cast<ObjectHeader *>(humongous_obj->Get<ObjectHeader *>(0));
    // Check GC has moved the young obj
    ASSERT_NE(prev_young_addr, ToUintPtr(young_obj));
    // Check the young object is accessible
    ASSERT_NE(nullptr, young_obj->ClassAddr<Class>());
}
259
// Check that a young object referenced from humongous and non-movable holders
// is propagated to tenured by a young GC, and then moved again to another
// tenured region by a full GC, with the references updated each time.
TEST_F(G1GCTest, TestCollectTenured)
{
    Runtime *runtime = Runtime::GetCurrent();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> hs(thread);

    VMHandle<coretypes::Array> humongous;
    VMHandle<coretypes::Array> nonmovable;
    ObjectHeader *obj;
    uintptr_t obj_addr;

    humongous = VMHandle<coretypes::Array>(
        thread, AllocArray(GetHumongousArrayLength(ClassRoot::ARRAY_STRING), ClassRoot::ARRAY_STRING, false));
    nonmovable = VMHandle<coretypes::Array>(thread, AllocArray(1, ClassRoot::ARRAY_STRING, true));
    obj = AllocObjectInYoung();
    // Reference the young object from both holders.
    humongous->Set(0, obj);
    nonmovable->Set(0, obj);
    obj_addr = ToUintPtr(obj);

    // Each listener verifies obj is present in the respective holder's remset.
    RemSetChecker listener1(humongous.GetPtr(), obj);
    RemSetChecker listener2(nonmovable.GetPtr(), obj);
    gc->AddListener(&listener1);
    gc->AddListener(&listener2);
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    // Check the obj was propagated to tenured
    obj = humongous->Get<ObjectHeader *>(0);
    ASSERT_NE(obj_addr, ToUintPtr(obj));
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_OLD));

    obj_addr = ToUintPtr(obj);
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task1(GCTaskCause::EXPLICIT_CAUSE);  // run full GC to collect all regions
        task1.Run(*gc);
    }

    // Check the tenured obj was propagated to another tenured region
    obj = humongous->Get<ObjectHeader *>(0);
    ASSERT_NE(obj_addr, ToUintPtr(obj));
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_OLD));

    // Check the object is accessible
    ASSERT_NE(nullptr, obj->ClassAddr<Class>());
}
310
// Test that we don't have a remset entry from the humongous space after the
// humongous object is reclaimed.
TEST_F(G1GCTest, CheckRemsetToHumongousAfterReclaimHumongousObject)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    ClassLinker *class_linker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope_for_young_obj(thread);

    // 1MB array
    static constexpr size_t humongous_array_length = 262144LU;
    // Array length derived from the region size — NOTE(review): presumably
    // chosen so the young array fits in a regular region; confirm if changed.
    static constexpr size_t young_array_length = ((DEFAULT_REGION_SIZE - Region::HeadSize()) / 4U) - 16U;
    coretypes::Array *humongous_obj;
    coretypes::Array *young_arr;

    auto *gc = runtime->GetPandaVM()->GetGC();
    auto card_table = gc->GetCardTable();

    Class *klass;

    klass = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);

    young_arr = coretypes::Array::Create(klass, young_array_length);
    ASSERT_NE(young_arr, nullptr);
    auto *region = ObjectToRegion(young_arr);
    ASSERT_NE(region, nullptr);

    VMHandle<coretypes::Array> young_obj_ptr(thread, young_arr);
    GCTask task(GCTaskCause::EXPLICIT_CAUSE);
    {
        // The humongous object is only rooted inside this scope, so it becomes
        // garbage once the scope is left.
        [[maybe_unused]] HandleScope<ObjectHeader *> scope_for_humongous_obj(thread);

        humongous_obj = coretypes::Array::Create(klass, humongous_array_length);

        ASSERT_NE(humongous_obj, nullptr);
        // add humongous object to our remset
        humongous_obj->Set(0, young_obj_ptr.GetPtr());

        ASSERT_EQ(gc->GetType(), GCType::G1_GC);
        {
            VMHandle<coretypes::Array> humongous_obj_ptr(thread, humongous_obj);
            {
                ScopedNativeCodeThread sn(thread);
                task.Run(*gc);
            }

            // Collect the marked cards referencing the young array's region.
            auto array_region = ObjectToRegion(young_obj_ptr.GetPtr());
            PandaVector<CardTable::CardPtr> cards;
            array_region->GetRemSet()->ProceedMarkedCards(
                [&cards](CardTable::CardPtr card, [[maybe_unused]] Region *region_unused) { cards.push_back(card); });
            ASSERT_EQ(1U, cards.size());  // we have reference only from 1 humongous space
            uintptr_t card_addr = card_table->GetCardStartAddress(cards[0]);
            ASSERT_EQ(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT,
                      PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(ToVoidPtr(card_addr)));
        }
    }
    /*
     * humongous object is dead now
     * need one fake GC because we marked humongous in concurrent in the first GC before we removed Scoped, need to
     * unmark it
     */
    {
        ScopedNativeCodeThread sn(thread);
        task.Run(*gc);
        task.Run(*gc);  // humongous object should be reclaimed
    }

    auto array_region = ObjectToRegion(young_obj_ptr.GetPtr());
    PandaVector<CardTable::CardPtr> cards;
    array_region->GetRemSet()->ProceedMarkedCards(
        [&cards](CardTable::CardPtr card, [[maybe_unused]] Region *region_unused) { cards.push_back(card); });
    ASSERT_EQ(0, cards.size());  // we have no references from the humongous space
}
388
// GC listener which allocates new non-movable and humongous objects when the
// concurrent mark phase (GC_PHASE_MARK) starts. TestNewObjectsSATB uses it to
// verify that objects created during concurrent marking survive the GC.
class NewObjectsListener : public GCListener {
public:
    explicit NewObjectsListener(G1GCTest *test) : test_(test) {}

    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK) {
            return;
        }
        MTManagedThread *thread = MTManagedThread::GetCurrent();

        // Allocate quite large object to make allocator to create a separate region
        size_t nonmovable_len = 9 * DEFAULT_REGION_SIZE / 10;
        ObjectHeader *dummy = test_->AllocArray(nonmovable_len, ClassRoot::ARRAY_U8, true);
        Region *dummy_region = ObjectToRegion(dummy);
        EXPECT_TRUE(dummy_region->HasFlag(RegionFlag::IS_NONMOVABLE));
        nonmovable_ = VMHandle<ObjectHeader>(thread, test_->AllocArray(nonmovable_len, ClassRoot::ARRAY_U8, true));
        Region *nonmovable_region = ObjectToRegion(nonmovable_.GetPtr());
        EXPECT_TRUE(nonmovable_region->HasFlag(RegionFlag::IS_NONMOVABLE));
        // The second allocation must land in a fresh non-movable region.
        EXPECT_NE(nonmovable_region, dummy_region);
        // Record the mark bitmap address for later inspection by tests.
        nonmovable_mark_bitmap_addr_ = ToUintPtr(nonmovable_region->GetMarkBitmap());

        size_t humongous_len = test_->GetHumongousArrayLength(ClassRoot::ARRAY_U8);
        humongous_ = VMHandle<ObjectHeader>(thread, test_->AllocArray(humongous_len, ClassRoot::ARRAY_U8, false));
        Region *humongous_region = ObjectToRegion(humongous_.GetPtr());
        humongous_mark_bitmap_addr_ = ToUintPtr(humongous_region->GetMarkBitmap());
    }

    // Non-movable object allocated during marking; asserts it was allocated.
    ObjectHeader *GetNonMovable()
    {
        ASSERT(nonmovable_.GetPtr() != nullptr);
        return nonmovable_.GetPtr();
    }

    uintptr_t GetNonMovableMarkBitmapAddr()
    {
        return nonmovable_mark_bitmap_addr_;
    }

    // Humongous object allocated during marking; may be null if the
    // GC_PHASE_MARK callback was never invoked.
    ObjectHeader *GetHumongous()
    {
        return humongous_.GetPtr();
    }

    uintptr_t GetHumongousMarkBitmapAddr()
    {
        return humongous_mark_bitmap_addr_;
    }

private:
    G1GCTest *test_;
    VMHandle<ObjectHeader> nonmovable_;
    uintptr_t nonmovable_mark_bitmap_addr_ {0};
    VMHandle<ObjectHeader> humongous_;
    uintptr_t humongous_mark_bitmap_addr_ {0};
};
445
// Test the new objects created during concurrent marking are alive
TEST_F(G1GCTest, TestNewObjectsSATB)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    // The listener allocates non-movable and humongous objects when the
    // concurrent mark phase starts.
    NewObjectsListener listener(this);
    gc->AddListener(&listener);

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);  // threshold cause should trigger concurrent marking
        task.Run(*runtime->GetPandaVM()->GetGC());
    }
    // nullptr means we cannot allocate an object or concurrent phase wasn't triggered or
    // the listener wasn't called.
    ASSERT_NE(nullptr, listener.GetNonMovable());
    ASSERT_NE(nullptr, listener.GetHumongous());

    // Check the objects are alive
    Region *nonmovable_region = ObjectToRegion(listener.GetNonMovable());
    ASSERT_NE(nullptr, nonmovable_region->GetLiveBitmap());
    ASSERT_TRUE(nonmovable_region->GetLiveBitmap()->Test(listener.GetNonMovable()));
    ASSERT_FALSE(listener.GetNonMovable()->IsMarkedForGC());  // mark should be done using mark bitmap
    Region *humongous_region = ObjectToRegion(listener.GetHumongous());
    ASSERT_NE(nullptr, humongous_region->GetLiveBitmap());
    ASSERT_TRUE(humongous_region->GetLiveBitmap()->Test(listener.GetHumongous()));
    ASSERT_FALSE(listener.GetHumongous()->IsMarkedForGC());  // mark should be done using mark bitmap
}
478
479 class CollectionSetChecker : public GCListener {
480 public:
CollectionSetChecker(ObjectAllocatorG1<> * allocator)481 CollectionSetChecker(ObjectAllocatorG1<> *allocator) : allocator_(allocator) {}
482
SetExpectedRegions(const std::initializer_list<Region * > & expected_regions)483 void SetExpectedRegions(const std::initializer_list<Region *> &expected_regions)
484 {
485 expected_regions_ = expected_regions;
486 }
487
GCPhaseStarted(GCPhase phase)488 void GCPhaseStarted(GCPhase phase) override
489 {
490 if (phase == GCPhase::GC_PHASE_MARK_YOUNG) {
491 EXPECT_EQ(expected_regions_, GetCollectionSet());
492 expected_regions_.clear();
493 }
494 }
495
496 private:
GetCollectionSet()497 PandaSet<Region *> GetCollectionSet()
498 {
499 PandaSet<Region *> collection_set;
500 for (Region *region : allocator_->GetAllRegions()) {
501 if (region->HasFlag(RegionFlag::IS_COLLECTION_SET)) {
502 collection_set.insert(region);
503 }
504 }
505 return collection_set;
506 }
507
508 private:
509 ObjectAllocatorG1<> *allocator_;
510 PandaSet<Region *> expected_regions_;
511 };
512
// Check that a young-cause GC's collection set contains exactly all young
// regions (one region per object here, because each object occupies more
// than half a region).
TEST_F(G1GCTest, TestGetCollectibleRegionsHasAllYoungRegions)
{
    // The object will occupy more than half of region.
    // So expect the allocator allocates a separate young region for each object.
    size_t young_len = DEFAULT_REGION_SIZE / 2 + sizeof(coretypes::Array);

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ObjectAllocatorG1<> *allocator = GetAllocator();
    MTManagedThread *thread = MTManagedThread::GetCurrent();

    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    {
        ScopedManagedCodeThread s(thread);
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> young1;
        VMHandle<ObjectHeader> young2;
        VMHandle<ObjectHeader> young3;

        young1 = VMHandle<ObjectHeader>(thread, AllocArray(young_len, ClassRoot::ARRAY_U8, false));
        young2 = VMHandle<ObjectHeader>(thread, AllocArray(young_len, ClassRoot::ARRAY_U8, false));
        young3 = VMHandle<ObjectHeader>(thread, AllocArray(young_len, ClassRoot::ARRAY_U8, false));

        Region *yregion1 = ObjectToRegion(young1.GetPtr());
        Region *yregion2 = ObjectToRegion(young2.GetPtr());
        Region *yregion3 = ObjectToRegion(young3.GetPtr());
        // Check all 3 objects are in different regions
        ASSERT_NE(yregion1, yregion2);
        ASSERT_NE(yregion2, yregion3);
        ASSERT_NE(yregion1, yregion3);
        checker.SetExpectedRegions({yregion1, yregion2, yregion3});
    }
    GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
    task.Run(*gc);
}
549
// Check that an explicit (full) GC first runs a young-only collection whose
// collection set contains only the young regions; the tenured part of the
// full GC does not go through GC_PHASE_MARK_YOUNG.
TEST_F(G1GCTest, TestGetCollectibleRegionsHasAllRegionsInCaseOfFull)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ObjectAllocatorG1<> *allocator = GetAllocator();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<ObjectHeader> young;
    VMHandle<ObjectHeader> tenured;
    VMHandle<ObjectHeader> humongous;
    tenured = VMHandle<ObjectHeader>(thread, AllocObjectInYoung());

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young to tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    young = VMHandle<ObjectHeader>(thread, AllocObjectInYoung());
    humongous = VMHandle<ObjectHeader>(
        thread, AllocArray(GetHumongousArrayLength(ClassRoot::ARRAY_U8), ClassRoot::ARRAY_U8, false));

    Region *yregion = ObjectToRegion(young.GetPtr());
    [[maybe_unused]] Region *tregion = ObjectToRegion(tenured.GetPtr());
    [[maybe_unused]] Region *hregion = ObjectToRegion(humongous.GetPtr());

    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    // Even though it's full, currently we split it into two parts, the 1st one is young-only collection.
    // And the tenured collection part doesn't use GC_PHASE_MARK_YOUNG
    checker.SetExpectedRegions({yregion});
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task1(GCTaskCause::EXPLICIT_CAUSE);
        task1.Run(*gc);
    }
}
590
// Check mixed GC: after concurrent marking finds tenured regions whose
// garbage exceeds G1RegionGarbageRateThreshold, subsequent young GCs include
// those regions in the collection set, largest-garbage regions first.
TEST_F(G1GCTest, TestMixedCollections)
{
    uint32_t garbage_rate = Runtime::GetOptions().GetG1RegionGarbageRateThreshold();
    // The object will occupy more than half of region.
    // So expect the allocator allocates a separate young region for each object.
    size_t big_len = garbage_rate * DEFAULT_REGION_SIZE / 100 + sizeof(coretypes::String);
    size_t big_len1 = (garbage_rate + 1) * DEFAULT_REGION_SIZE / 100 + sizeof(coretypes::String);
    size_t big_len2 = (garbage_rate + 2) * DEFAULT_REGION_SIZE / 100 + sizeof(coretypes::String);
    size_t small_len = DEFAULT_REGION_SIZE / 2 + sizeof(coretypes::String);

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ObjectAllocatorG1<> *allocator = GetAllocator();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> holder;
    VMHandle<ObjectHeader> young;

    // Allocate objects of different sizes.
    // Mixed regions should be chosen according to the largest garbage.
    holder = VMHandle<coretypes::Array>(thread, AllocArray(4, ClassRoot::ARRAY_STRING, false));
    holder->Set(0, AllocString(big_len));
    ASSERT_TRUE(ObjectToRegion(holder->Get<ObjectHeader *>(0))->HasFlag(RegionFlag::IS_EDEN));
    holder->Set(1, AllocString(big_len1));
    ASSERT_TRUE(ObjectToRegion(holder->Get<ObjectHeader *>(1))->HasFlag(RegionFlag::IS_EDEN));
    holder->Set(2, AllocString(big_len2));
    ASSERT_TRUE(ObjectToRegion(holder->Get<ObjectHeader *>(2))->HasFlag(RegionFlag::IS_EDEN));
    holder->Set(3, AllocString(small_len));
    ASSERT_TRUE(ObjectToRegion(holder->Get<ObjectHeader *>(3))->HasFlag(RegionFlag::IS_EDEN));

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young objects -> tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    // GC doesn't include current tenured region to the collection set.
    // Now we don't know which tenured region is current.
    // So propagate one big young object to tenured to make the latter current.
    VMHandle<ObjectHeader> current;
    current = VMHandle<ObjectHeader>(thread, AllocArray(small_len, ClassRoot::ARRAY_U8, false));

    // Propagate 'current' object -> tenured and prepare for mixed GC
    // Release 'big1', 'big2' and 'small' objects to make them garbage
    Region *region0 = ObjectToRegion(holder->Get<ObjectHeader *>(0));
    Region *region1 = ObjectToRegion(holder->Get<ObjectHeader *>(1));
    Region *region2 = ObjectToRegion(holder->Get<ObjectHeader *>(2));
    holder->Set(0, static_cast<ObjectHeader *>(nullptr));
    holder->Set(1, static_cast<ObjectHeader *>(nullptr));
    holder->Set(2, static_cast<ObjectHeader *>(nullptr));
    holder->Set(3, static_cast<ObjectHeader *>(nullptr));
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task1(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task1.Run(*gc);
    }

    // Now the region with 'current' is current and it will not be included into the collection set.

    young = VMHandle<ObjectHeader>(thread, AllocObjectInYoung());

    Region *yregion = ObjectToRegion(young.GetPtr());
    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    checker.SetExpectedRegions({region1, region2, yregion});
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task2(GCTaskCause::YOUNG_GC_CAUSE);  // should run mixed GC
        task2.Run(*gc);
    }

    // Run GC one more time because we still have garbage regions.
    // Check we collect them.
    young = VMHandle<ObjectHeader>(thread, AllocObjectInYoung());
    yregion = ObjectToRegion(young.GetPtr());
    checker.SetExpectedRegions({region0, yregion});
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task3(GCTaskCause::YOUNG_GC_CAUSE);  // should run mixed GC
        task3.Run(*gc);
    }
}
// G1GCTest variant which sets G1PromotionRegionAliveRate to PROMOTE_RATE,
// used to test the promotion of young regions with many alive objects.
class G1GCPromotionTest : public G1GCTest {
public:
    G1GCPromotionTest() : G1GCTest(PROMOTE_RATE) {}

    // Percentage threshold passed to the G1PromotionRegionAliveRate option.
    static constexpr size_t PROMOTE_RATE = 50;
};
681
// Check the promotion workflow: a young region whose alive objects exceed
// PROMOTE_RATE percent is promoted in place (object addresses unchanged,
// region flagged IS_PROMOTED), a region below the rate is compacted (objects
// move), and a later full GC compacts the promoted region as well.
TEST_F(G1GCPromotionTest, TestCorrectPromotionYoungRegion)
{
    // We will create a humongous object with a links to two young regions
    // and check promotion workflow
    static constexpr size_t HUMONGOUS_STRING_LEN = G1GCPromotionTest::GetHumongousStringLength();
    // Consume more than 50% of region size
    static constexpr size_t FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT =
        DEFAULT_REGION_SIZE / sizeof(coretypes::String) * 2 / 3 + 1;
    // Consume less than 50% of region size
    static constexpr size_t SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT = 1;
    ASSERT(FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT <= HUMONGOUS_STRING_LEN);
    ASSERT((FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT * sizeof(coretypes::String) * 100 / DEFAULT_REGION_SIZE) >
           G1GCPromotionTest::PROMOTE_RATE);
    ASSERT((SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT * sizeof(coretypes::String) * 100 / DEFAULT_REGION_SIZE) <
           G1GCPromotionTest::PROMOTE_RATE);

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    // Run Full GC to compact all existed young regions:
    GCTask task0(GCTaskCause::EXPLICIT_CAUSE);
    task0.Run(*gc);

    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> first_holder;
    VMHandle<coretypes::Array> second_holder;
    VMHandle<ObjectHeader> young;
    // Raw pointers recorded before GC so address changes can be detected.
    std::array<ObjectHeader *, FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT> first_region_object_links;
    std::array<ObjectHeader *, SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT> second_region_object_links;
    // Check Promotion for young region:

    first_holder = VMHandle<coretypes::Array>(thread, AllocArray(HUMONGOUS_STRING_LEN, ClassRoot::ARRAY_STRING, false));
    Region *first_region = ObjectToRegion(AllocObjectInYoung());
    ASSERT_TRUE(first_region->HasFlag(RegionFlag::IS_EDEN));
    for (size_t i = 0; i < FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        first_region_object_links[i] = AllocObjectInYoung();
        ASSERT_TRUE(first_region_object_links[i] != nullptr);
        first_holder->Set(i, first_region_object_links[i]);
        ASSERT_TRUE(ObjectToRegion(first_region_object_links[i]) == first_region);
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Promote young objects in one region -> tenured
        GCTask task1(GCTaskCause::YOUNG_GC_CAUSE);
        task1.Run(*gc);
    }
    // Check that we didn't change the links for young objects from the first region:
    for (size_t i = 0; i < FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        ASSERT_EQ(first_region_object_links[i], first_holder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(ObjectToRegion(first_holder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_OLD));
        ASSERT_TRUE(ObjectToRegion(first_holder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_PROMOTED));
    }

    second_holder =
        VMHandle<coretypes::Array>(thread, AllocArray(HUMONGOUS_STRING_LEN, ClassRoot::ARRAY_STRING, false));
    Region *second_region = ObjectToRegion(AllocObjectInYoung());
    ASSERT_TRUE(second_region->HasFlag(RegionFlag::IS_EDEN));
    for (size_t i = 0; i < SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        second_region_object_links[i] = AllocObjectInYoung();
        ASSERT_TRUE(second_region_object_links[i] != nullptr);
        second_holder->Set(i, second_region_object_links[i]);
        ASSERT_TRUE(ObjectToRegion(second_region_object_links[i]) == second_region);
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Compact young objects in one region -> tenured
        GCTask task2(GCTaskCause::YOUNG_GC_CAUSE);
        task2.Run(*gc);
    }
    // Check that we changed the links for young objects from the second region:
    for (size_t i = 0; i < SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        ASSERT_NE(second_region_object_links[i], second_holder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(ObjectToRegion(second_holder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(second_holder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_PROMOTED));
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Run Full GC to compact all tenured regions:
        GCTask task3(GCTaskCause::EXPLICIT_CAUSE);
        task3.Run(*gc);
    }
    // Now we should have updated links in the humongous object to first region objects:
    for (size_t i = 0; i < FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        ASSERT_NE(first_region_object_links[i], first_holder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(ObjectToRegion(first_holder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(first_holder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_PROMOTED));
    }
}
776
777 class InterruptGCListener : public GCListener {
778 public:
InterruptGCListener(G1GCTest * test,VMHandle<coretypes::Array> * array)779 InterruptGCListener(G1GCTest *test, VMHandle<coretypes::Array> *array) : test_(test), array_(array) {}
780
GCPhaseStarted(GCPhase phase)781 void GCPhaseStarted(GCPhase phase) override
782 {
783 if (phase != GCPhase::GC_PHASE_MARK) {
784 return;
785 }
786 // Allocate an object to add it into SATB buffer
787 test_->AllocObjectInYoung();
788 // Set interrupt flag
789 GC *gc = Runtime::GetCurrent()->GetPandaVM()->GetGC();
790 gc->OnWaitForIdleFail();
791 }
792
GCPhaseFinished(GCPhase phase)793 void GCPhaseFinished(GCPhase phase) override
794 {
795 if (phase != GCPhase::GC_PHASE_MARK) {
796 return;
797 }
798 Region *region = ObjectToRegion((*array_)->Get<ObjectHeader *>(0));
799 // Check the object array[0] is not marked
800 EXPECT_FALSE(region->GetMarkBitmap()->Test((*array_)->Get<ObjectHeader *>(0)));
801 // Check GC haven't calculated live bytes for the region
802 EXPECT_EQ(0, region->GetLiveBytes());
803 // Check GC has cleared SATB buffer
804 MTManagedThread *thread = MTManagedThread::GetCurrent();
805 EXPECT_NE(nullptr, thread->GetPreBuff());
806 EXPECT_EQ(0, thread->GetPreBuff()->size());
807 }
808
809 private:
810 G1GCTest *test_;
811 VMHandle<coretypes::Array> *array_;
812 };
813
// Checks that interrupting concurrent marking leaves no partial marking state
// behind (verified by InterruptGCListener during the marking phase).
TEST_F(G1GCTest, TestInterruptConcurrentMarking)
{
    GC *gc = Runtime::GetCurrent()->GetPandaVM()->GetGC();

    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread managed_scope(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> array(thread, AllocArray(1, ClassRoot::ARRAY_STRING, false));
    array->Set(0, AllocString(1));

    {
        ScopedNativeCodeThread native_scope(thread);
        // Propagate young objects -> tenured
        GCTask young_task(GCTaskCause::YOUNG_GC_CAUSE);
        young_task.Run(*gc);

        // Reset live bytes so we can detect whether the interrupted
        // concurrent marking recalculates them
        Region *region = ObjectToRegion(array->Get<ObjectHeader *>(0));
        ASSERT_TRUE(region != nullptr);
        region->SetLiveBytes(0);

        InterruptGCListener listener(this, &array);
        gc->AddListener(&listener);
        // Trigger concurrent marking; the listener interrupts it mid-phase
        GCTask concurrent_task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        concurrent_task.Run(*gc);
    }
}
845
846 class NullRefListener : public GCListener {
847 public:
NullRefListener(VMHandle<coretypes::Array> * array)848 explicit NullRefListener(VMHandle<coretypes::Array> *array) : array_(array) {}
849
GCPhaseStarted(GCPhase phase)850 void GCPhaseStarted(GCPhase phase) override
851 {
852 if (phase != GCPhase::GC_PHASE_MARK) {
853 return;
854 }
855 (*array_)->Set(0, static_cast<ObjectHeader *>(nullptr));
856 }
857
858 private:
859 VMHandle<coretypes::Array> *array_;
860 };
861
// Checks that a tenured region's live/garbage byte counters reflect the object
// which survived (the array) and the one which died (the string) during
// concurrent marking.
TEST_F(G1GCTest, TestGarbageBytesCalculation)
{
    GC *gc = Runtime::GetCurrent()->GetPandaVM()->GetGC();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread managed_scope(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    // Allocate objects of different sizes.
    // Mixed regions should be chosen according to the largest garbage.
    // An array of length 2 is used because the array's size must be 8 bytes aligned
    VMHandle<coretypes::Array> array(thread, AllocArray(2, ClassRoot::ARRAY_STRING, false));
    ASSERT_TRUE(ObjectToRegion(array.GetPtr())->HasFlag(RegionFlag::IS_EDEN));
    // The same for the string: its instance size must be 8-bytes aligned.
    array->Set(0, AllocString(8));
    ASSERT_TRUE(ObjectToRegion(array->Get<ObjectHeader *>(0))->HasFlag(RegionFlag::IS_EDEN));

    size_t array_size = GetObjectSize(array.GetPtr());
    size_t str_size = GetObjectSize(array->Get<ObjectHeader *>(0));

    {
        ScopedNativeCodeThread native_scope(thread);
        // Propagate young objects -> tenured
        GCTask young_task(GCTaskCause::YOUNG_GC_CAUSE);
        young_task.Run(*gc);
    }
    // Both objects must have been evacuated into the same tenured region
    ASSERT_EQ(ObjectToRegion(array.GetPtr()), ObjectToRegion(array->Get<ObjectHeader *>(0)));
    ASSERT_TRUE(ObjectToRegion(array.GetPtr())->HasFlag(RegionFlag::IS_OLD));

    AllocObjectInYoung();

    // The listener clears array[0] when marking starts, so the string becomes garbage
    NullRefListener listener(&array);
    gc->AddListener(&listener);
    {
        ScopedNativeCodeThread native_scope(thread);
        // Prepare for mixed GC, start concurrent marking and calculate garbage for regions
        GCTask concurrent_task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        concurrent_task.Run(*gc);
    }

    Region *region = ObjectToRegion(array.GetPtr());
    ASSERT_EQ(array_size, region->GetLiveBytes());
    ASSERT_EQ(str_size, region->GetGarbageBytes());
}
909
// Checks that a non-movable object is reclaimed by the concurrent phase once
// its handle goes out of scope, while a still-referenced non-movable object
// survives. Also verifies a young object referenced from non-movable space is
// moved (its address changes) but stays reachable through the updated link.
TEST_F(G1GCTest, NonMovableClearingDuringConcurrentPhaseTest)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    auto obj_allocator = runtime->GetPandaVM()->GetHeapManager()->GetObjectAllocator().AsObjectAllocator();
    ClassLinker *class_linker = runtime->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    // Length slightly below the humongous threshold, so the arrays land in the
    // non-movable space instead of becoming humongous objects
    size_t array_length = GetHumongousArrayLength(ClassRoot::ARRAY_STRING) - 50;

    Class *klass = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);
    coretypes::Array *first_non_movable_obj =
        coretypes::Array::Create(klass, array_length, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    coretypes::Array *second_non_movable_obj =
        coretypes::Array::Create(klass, array_length, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    ASSERT_TRUE(ObjectToRegion(first_non_movable_obj)->HasFlag(RegionFlag::IS_NONMOVABLE));
    ASSERT_TRUE(ObjectToRegion(second_non_movable_obj)->HasFlag(RegionFlag::IS_NONMOVABLE));
    coretypes::String *young_obj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM());
    first_non_movable_obj->Set(0, young_obj);
    uintptr_t prev_young_addr = ToUintPtr(young_obj);

    VMHandle<coretypes::Array> second_non_movable_obj_ptr(thread, second_non_movable_obj);

    {
        [[maybe_unused]] HandleScope<ObjectHeader *> first_scope(thread);
        VMHandle<coretypes::Array> first_non_movable_obj_ptr(thread, first_non_movable_obj);
        {
            ScopedNativeCodeThread sn(thread);
            GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
            task.Run(*gc);
        }

        auto young_obj_2 = static_cast<coretypes::String *>(first_non_movable_obj_ptr->Get<ObjectHeader *>(0));
        // Check GC has moved the young obj
        ASSERT_NE(prev_young_addr, ToUintPtr(young_obj_2));
        // Check young object is accessible
        ASSERT_EQ(0U, young_obj_2->GetLength());
    }

    // Check that all objects are alive
    ASSERT_TRUE(obj_allocator->ContainObject(first_non_movable_obj));
    ASSERT_TRUE(obj_allocator->ContainObject(second_non_movable_obj));
    ASSERT_TRUE(obj_allocator->IsLive(first_non_movable_obj));
    ASSERT_TRUE(obj_allocator->IsLive(second_non_movable_obj));
    // Check that the first object is accessible
    bool found_first_object = false;
    obj_allocator->IterateOverObjects([&first_non_movable_obj, &found_first_object](ObjectHeader *object) {
        if (first_non_movable_obj == object) {
            found_first_object = true;
        }
    });
    ASSERT_TRUE(found_first_object);

    // The first object's handle is gone now, so try to remove it:
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task.Run(*gc);
    }

    // Check that the second object is still alive
    ASSERT_TRUE(obj_allocator->ContainObject(second_non_movable_obj));
    ASSERT_TRUE(obj_allocator->IsLive(second_non_movable_obj));
    // Check that the first object is dead (its address is only compared, never dereferenced)
    obj_allocator->IterateOverObjects(
        [&first_non_movable_obj](ObjectHeader *object) { ASSERT_NE(first_non_movable_obj, object); });
}
984
// Checks that a humongous object is reclaimed by the concurrent phase once its
// handle goes out of scope, while a still-referenced humongous object survives.
// Also verifies a young object referenced from a humongous object is moved (its
// address changes) but stays reachable through the updated link.
TEST_F(G1GCTest, HumongousClearingDuringConcurrentPhaseTest)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    auto obj_allocator = runtime->GetPandaVM()->GetHeapManager()->GetObjectAllocator().AsObjectAllocator();
    ClassLinker *class_linker = runtime->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    // Full humongous length: the arrays get dedicated humongous regions
    size_t array_length = GetHumongousArrayLength(ClassRoot::ARRAY_STRING);

    Class *klass = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);
    coretypes::Array *first_humongous_obj =
        coretypes::Array::Create(klass, array_length, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    coretypes::Array *second_humongous_obj =
        coretypes::Array::Create(klass, array_length, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    ASSERT_TRUE(ObjectToRegion(first_humongous_obj)->HasFlag(RegionFlag::IS_LARGE_OBJECT));
    ASSERT_TRUE(ObjectToRegion(second_humongous_obj)->HasFlag(RegionFlag::IS_LARGE_OBJECT));
    coretypes::String *young_obj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM());
    first_humongous_obj->Set(0, young_obj);
    uintptr_t prev_young_addr = ToUintPtr(young_obj);

    VMHandle<coretypes::Array> second_humongous_obj_ptr(thread, second_humongous_obj);

    {
        [[maybe_unused]] HandleScope<ObjectHeader *> first_scope(thread);
        VMHandle<coretypes::Array> first_humongous_obj_ptr(thread, first_humongous_obj);
        {
            ScopedNativeCodeThread sn(thread);
            GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
            task.Run(*gc);
        }

        auto young_obj_2 = static_cast<coretypes::String *>(first_humongous_obj_ptr->Get<ObjectHeader *>(0));
        // Check GC has moved the young obj
        ASSERT_NE(prev_young_addr, ToUintPtr(young_obj_2));
        // Check young object is accessible
        ASSERT_EQ(0U, young_obj_2->GetLength());
    }

    // Check that all objects are alive
    ASSERT_TRUE(obj_allocator->ContainObject(first_humongous_obj));
    ASSERT_TRUE(obj_allocator->ContainObject(second_humongous_obj));
    ASSERT_TRUE(obj_allocator->IsLive(first_humongous_obj));
    ASSERT_TRUE(obj_allocator->IsLive(second_humongous_obj));
    // Check that the first object is accessible
    bool found_first_object = false;
    obj_allocator->IterateOverObjects([&first_humongous_obj, &found_first_object](ObjectHeader *object) {
        if (first_humongous_obj == object) {
            found_first_object = true;
        }
    });
    ASSERT_TRUE(found_first_object);

    {
        ScopedNativeCodeThread sn(thread);
        // The first object's handle is gone now, so try to remove it:
        GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task.Run(*gc);
    }

    // Check that the second object is still alive
    ASSERT_TRUE(obj_allocator->ContainObject(second_humongous_obj));
    ASSERT_TRUE(obj_allocator->IsLive(second_humongous_obj));
    // Check that the first object is dead (its address is only compared, never dereferenced)
    obj_allocator->IterateOverObjects(
        [&first_humongous_obj](ObjectHeader *object) { ASSERT_NE(first_humongous_obj, object); });
}
1059
1060 } // namespace panda::mem
1061