/**
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <array>

#include "runtime/include/object_header.h"
#include "runtime/mem/tlab.h"
#include "runtime/include/runtime.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/class_linker.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/mem/vm_handle.h"
#include "runtime/handle_scope-inl.h"
#include "runtime/include/coretypes/array.h"
#include "runtime/include/coretypes/string.h"
#include "runtime/mem/gc/card_table.h"
#include "runtime/mem/gc/g1/g1-allocator.h"
#include "runtime/mem/rem_set-inl.h"
#include "runtime/mem/region_space.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/gc/g1/g1-gc.h"

#include "test_utils.h"

namespace ark::mem {

class G1GCTest : public testing::Test {
public:
    explicit G1GCTest() : G1GCTest(CreateDefaultOptions()) {}

    explicit G1GCTest(const RuntimeOptions &options)
    {
        Runtime::Create(options);
    }

    ~G1GCTest() override
    {
        Runtime::Destroy();
    }

    NO_COPY_SEMANTIC(G1GCTest);
    NO_MOVE_SEMANTIC(G1GCTest);

    static RuntimeOptions CreateDefaultOptions()
    {
        RuntimeOptions options;
        options.SetLoadRuntimes({"core"});
        options.SetGcType("g1-gc");
        options.SetRunGcInPlace(true);
        options.SetCompilerEnableJit(false);
        options.SetGcWorkersCount(0);
        options.SetAdaptiveTlabSize(false);
        // NOLINTNEXTLINE(readability-magic-numbers)
        options.SetG1PromotionRegionAliveRate(100U);
        options.SetGcTriggerType("debug-never");
        options.SetShouldLoadBootPandaFiles(false);
        options.SetShouldInitializeIntrinsics(false);
        options.SetExplicitConcurrentGcEnabled(false);
        options.SetG1NumberOfTenuredRegionsAtMixedCollection(2U);
        return options;
    }

    static constexpr size_t GetHumongousStringLength()
    {
        // Total string size will be DEFAULT_REGION_SIZE + sizeof(String).
        // It is enough to make it humongous.
        return DEFAULT_REGION_SIZE;
    }

    static constexpr size_t StringLengthFitIntoRegion(size_t numRegions)
    {
        return numRegions * DEFAULT_REGION_SIZE - sizeof(coretypes::String) - Region::HeadSize();
    }

    static size_t GetHumongousArrayLength(ClassRoot classRoot)
    {
        Runtime *runtime = Runtime::GetCurrent();
        LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
        auto *arrayClass = runtime->GetClassLinker()->GetExtension(ctx)->GetClassRoot(classRoot);
        EXPECT_TRUE(arrayClass->IsArrayClass());
        if (!arrayClass->IsArrayClass()) {
            return 0;
        }
        // Total array size will be about DEFAULT_REGION_SIZE + elem_size + sizeof(Array).
        // It is enough to make it humongous.
        size_t elemSize = arrayClass->GetComponentSize();
        ASSERT(elemSize != 0);
        return DEFAULT_REGION_SIZE / elemSize + 1;
    }

    ObjectAllocatorG1<> *GetAllocator()
    {
        Runtime *runtime = Runtime::GetCurrent();
        GC *gc = runtime->GetPandaVM()->GetGC();
        return static_cast<ObjectAllocatorG1<> *>(gc->GetObjectAllocator());
    }

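    // Helper: drains the dirty-card queue synchronously. The concurrent scope routine
    // is stopped first so the drain does not race with the update-remset worker.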
    void ProcessDirtyCards(G1GC<PandaAssemblyLanguageConfig> *gc)
    {
        gc->EndConcurrentScopeRoutine();
        gc->ProcessDirtyCards();
        gc->StartConcurrentScopeRoutine();
    }
};

class RemSetChecker : public GCListener {
public:
    explicit RemSetChecker(GC *gc, ObjectHeader *obj, ObjectHeader *ref)
        : gc_(static_cast<G1GC<PandaAssemblyLanguageConfig> *>(gc)),
          obj_(MTManagedThread::GetCurrent(), obj),
          ref_(MTManagedThread::GetCurrent(), ref)
    {
    }

    void GCPhaseStarted([[maybe_unused]] GCPhase phase) override {}

    void GCPhaseFinished(GCPhase phase) override
    {
        if (phase == GCPhase::GC_PHASE_MARK_YOUNG) {
            // We check after the phase, not at its start, because refs from the remset
            // are collected during the marking stage
            Check();
        }
        if (phase == GCPhase::GC_PHASE_COLLECT_YOUNG_AND_MOVE) {
            // The remset is not fully updated during mixed collection
            gc_->ProcessDirtyCards();
            Check();
        }
    }

private:
    void Check()
    {
        RemSet<> *remset = ObjectToRegion(ref_.GetPtr())->GetRemSet();
        ASSERT_NE(nullptr, remset);
        bool hasObject = false;
        ObjectHeader *object = obj_.GetPtr();
        remset->IterateOverObjects([object, &hasObject](ObjectHeader *obj) { hasObject |= object == obj; });
        // The remset is not fully updated during mixed collection, so also check the set of dirty objects
        ASSERT_TRUE(hasObject || gc_->HasRefFromRemset(object));
    }

private:
    G1GC<PandaAssemblyLanguageConfig> *gc_;
    VMHandle<ObjectHeader> obj_;
    VMHandle<ObjectHeader> ref_;
};

TEST_F(G1GCTest, TestAddrToRegion)
{
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    size_t humongousLen = GetHumongousArrayLength(ClassRoot::ARRAY_U8);
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<ObjectHeader> young(thread, ObjectAllocator::AllocArray(0, ClassRoot::ARRAY_U8, false));
    ASSERT_NE(nullptr, young.GetPtr());
    VMHandle<ObjectHeader> nonmovable(thread, ObjectAllocator::AllocArray(0, ClassRoot::ARRAY_U8, true));
    ASSERT_NE(nullptr, nonmovable.GetPtr());
    VMHandle<ObjectHeader> humongous(thread, ObjectAllocator::AllocArray(humongousLen, ClassRoot::ARRAY_U8, false));
    ASSERT_NE(nullptr, humongous.GetPtr());

    Region *youngRegion = ObjectToRegion(young.GetPtr());
    ASSERT_NE(nullptr, youngRegion);
    ASSERT_EQ(youngRegion, AddrToRegion(young.GetPtr()));
    bool hasYoungObj = false;
    youngRegion->IterateOverObjects(
        [&hasYoungObj, &young](ObjectHeader *obj) { hasYoungObj |= obj == young.GetPtr(); });
    ASSERT_TRUE(hasYoungObj);

    Region *nonmovableRegion = ObjectToRegion(nonmovable.GetPtr());
    ASSERT_NE(nullptr, nonmovableRegion);
    ASSERT_EQ(nonmovableRegion, AddrToRegion(nonmovable.GetPtr()));
    ASSERT_TRUE(nonmovableRegion->GetLiveBitmap()->Test(nonmovable.GetPtr()));

    Region *humongousRegion = ObjectToRegion(humongous.GetPtr());
    ASSERT_NE(nullptr, humongousRegion);
    ASSERT_EQ(humongousRegion, AddrToRegion(humongous.GetPtr()));
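    // An address inside the humongous object, one region past its start, must still
    // resolve to the same humongous region.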
    ASSERT_EQ(humongousRegion, AddrToRegion(ToVoidPtr(ToUintPtr(humongous.GetPtr()) + DEFAULT_REGION_SIZE)));
    bool hasHumongousObj = false;
    humongousRegion->IterateOverObjects(
        [&hasHumongousObj, &humongous](ObjectHeader *obj) { hasHumongousObj |= obj == humongous.GetPtr(); });
    ASSERT_TRUE(hasHumongousObj);
}

TEST_F(G1GCTest, TestAllocHumongousArray)
{
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    ObjectHeader *obj =
        ObjectAllocator::AllocArray(GetHumongousArrayLength(ClassRoot::ARRAY_U8), ClassRoot::ARRAY_U8, false);
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_LARGE_OBJECT));
}

TEST_F(G1GCTest, NonMovable2YoungRef)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    ClassLinker *classLinker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    static constexpr size_t ARRAY_LENGTH = 100;
    coretypes::Array *nonMovableObj = nullptr;
    uintptr_t prevYoungAddr = 0;
    Class *klass = classLinker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);
    nonMovableObj = coretypes::Array::Create(klass, ARRAY_LENGTH, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    coretypes::String *youngObj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM());
    nonMovableObj->Set(0, youngObj);
    prevYoungAddr = ToUintPtr(youngObj);
    VMHandle<coretypes::Array> nonMovableObjPtr(thread, nonMovableObj);

    // Trigger GC
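    // The checker verifies at GC phase boundaries that the referencing non-movable
    // object appears in the remset of the region holding the (moved) young object.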
    RemSetChecker listener(gc, nonMovableObj, nonMovableObj->Get<ObjectHeader *>(0));
    gc->AddListener(&listener);

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    auto youngObj2 = static_cast<coretypes::String *>(nonMovableObjPtr->Get<ObjectHeader *>(0));
    // Check GC has moved the young obj
    ASSERT_NE(prevYoungAddr, ToUintPtr(youngObj2));
    // Check the young object is accessible
    ASSERT_EQ(0, youngObj2->GetLength());
}

TEST_F(G1GCTest, Humongous2YoungRef)
{
    Runtime *runtime = Runtime::GetCurrent();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    uintptr_t prevYoungAddr = 0;
    size_t arrayLength = GetHumongousArrayLength(ClassRoot::ARRAY_STRING);
    VMHandle<coretypes::Array> humongousObj(thread,
                                            ObjectAllocator::AllocArray(arrayLength, ClassRoot::ARRAY_STRING, false));
    ObjectHeader *youngObj = ObjectAllocator::AllocObjectInYoung();
    humongousObj->Set(0, youngObj);
    prevYoungAddr = ToUintPtr(youngObj);

    // Trigger GC
    RemSetChecker listener(gc, humongousObj.GetPtr(), humongousObj->Get<ObjectHeader *>(0));
    gc->AddListener(&listener);

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    youngObj = static_cast<ObjectHeader *>(humongousObj->Get<ObjectHeader *>(0));
    // Check GC has moved the young obj
    ASSERT_NE(prevYoungAddr, ToUintPtr(youngObj));
    // Check the young object is accessible
    ASSERT_NE(nullptr, youngObj->ClassAddr<Class>());
}

TEST_F(G1GCTest, TestCollectTenured)
{
    Runtime *runtime = Runtime::GetCurrent();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> hs(thread);

    VMHandle<coretypes::Array> humongous;
    VMHandle<coretypes::Array> nonmovable;
    ObjectHeader *obj;
    uintptr_t objAddr;

    humongous =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(GetHumongousArrayLength(ClassRoot::ARRAY_STRING),
                                                                       ClassRoot::ARRAY_STRING, false));
    nonmovable = VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(1, ClassRoot::ARRAY_STRING, true));
    obj = ObjectAllocator::AllocObjectInYoung();
    humongous->Set(0, obj);
    nonmovable->Set(0, obj);
    objAddr = ToUintPtr(obj);

    RemSetChecker listener1(gc, humongous.GetPtr(), obj);
    RemSetChecker listener2(gc, nonmovable.GetPtr(), obj);
    gc->AddListener(&listener1);
    gc->AddListener(&listener2);
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    // Check the obj was propagated to tenured
    obj = humongous->Get<ObjectHeader *>(0);
    ASSERT_NE(objAddr, ToUintPtr(obj));
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_OLD));

    objAddr = ToUintPtr(obj);
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task1(GCTaskCause::EXPLICIT_CAUSE);  // run full GC to collect all regions
        task1.Run(*gc);
    }

    // Check the tenured obj was propagated to another tenured region
    obj = humongous->Get<ObjectHeader *>(0);
    ASSERT_NE(objAddr, ToUintPtr(obj));
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_OLD));

    // Check the object is accessible
    ASSERT_NE(nullptr, obj->ClassAddr<Class>());
}

// Test that we don't have a remset entry from the humongous space after we reclaim the humongous object
TEST_F(G1GCTest, CheckRemsetToHumongousAfterReclaimHumongousObject)
{
    LanguageContext ctx = Runtime::GetCurrent()->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    ClassLinker *classLinker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scopeForYoungObj(thread);

    // 1MB array
    static constexpr size_t HUMONGOUS_ARRAY_LENGTH = 262144LU;
    static constexpr size_t YOUNG_ARRAY_LENGTH = ((DEFAULT_REGION_SIZE - Region::HeadSize()) / 4U) - 16U;
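    // YOUNG_ARRAY_LENGTH is sized so the young array (plus the region header) fits
    // into a single region, assuming 4-byte array elements.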

    auto *gc = Runtime::GetCurrent()->GetPandaVM()->GetGC();
    auto regionPred = []([[maybe_unused]] Region *r) { return true; };

    Class *klass = classLinker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);

    auto *youngArr = coretypes::Array::Create(klass, YOUNG_ARRAY_LENGTH);
    ASSERT_NE(youngArr, nullptr);
    ASSERT_NE(ObjectToRegion(youngArr), nullptr);

    VMHandle<coretypes::Array> youngObjPtr(thread, youngArr);
    GCTask task(GCTaskCause::EXPLICIT_CAUSE);
    {
        [[maybe_unused]] HandleScope<ObjectHeader *> scopeForHumongousObj(thread);

        auto *humongousObj = coretypes::Array::Create(klass, HUMONGOUS_ARRAY_LENGTH);
        ASSERT_NE(humongousObj, nullptr);
        // Add the humongous object to our remset
        humongousObj->Set(0, youngObjPtr.GetPtr());

        ASSERT_EQ(gc->GetType(), GCType::G1_GC);
        {
            VMHandle<coretypes::Array> humongousObjPtr(thread, humongousObj);
            {
                ScopedNativeCodeThread sn(thread);
                task.Run(*gc);
            }

            auto *arrayRegion = ObjectToRegion(youngObjPtr.GetPtr());
            PandaVector<Region *> regions;
            arrayRegion->GetRemSet()->Iterate(
                regionPred, [&regions](Region *r, [[maybe_unused]] const MemRange &range) { regions.push_back(r); });
            ASSERT_EQ(1U, regions.size());  // we have a reference from only 1 humongous region
            ASSERT_TRUE(regions[0]->HasFlag(IS_LARGE_OBJECT));
            ASSERT_EQ(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT,
                      PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(regions[0]));
        }
    }
    /*
     * The humongous object is dead now.
     * We need one fake GC because the humongous object was marked concurrently in the first GC
     * before its handle scope was removed, so we need to unmark it.
     */
    {
        ScopedNativeCodeThread sn(thread);
        task.Run(*gc);
        task.Run(*gc);  // humongous object should be reclaimed
    }

    auto *arrayRegion = ObjectToRegion(youngObjPtr.GetPtr());
    PandaVector<Region *> regions;
    arrayRegion->GetRemSet()->Iterate(
        regionPred, [&regions](Region *r, [[maybe_unused]] const MemRange &range) { regions.push_back(r); });
    ASSERT_EQ(0U, regions.size());  // we have no references from the humongous space
}

class NewObjectsListener : public GCListener {
public:
    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK) {
            return;
        }
        MTManagedThread *thread = MTManagedThread::GetCurrent();
        ScopedManagedCodeThread s(thread);

        // Allocate a large enough object to make the allocator create a separate region
        // NOLINTNEXTLINE(readability-magic-numbers)
        size_t nonmovableLen = 9 * DEFAULT_REGION_SIZE / 10;
        ObjectHeader *dummy = ObjectAllocator::AllocArray(nonmovableLen, ClassRoot::ARRAY_U8, true);
        Region *dummyRegion = ObjectToRegion(dummy);
        EXPECT_TRUE(dummyRegion->HasFlag(RegionFlag::IS_NONMOVABLE));
        nonmovable_ =
            VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(nonmovableLen, ClassRoot::ARRAY_U8, true));
        Region *nonmovableRegion = ObjectToRegion(nonmovable_.GetPtr());
        EXPECT_TRUE(nonmovableRegion->HasFlag(RegionFlag::IS_NONMOVABLE));
        EXPECT_NE(nonmovableRegion, dummyRegion);
        nonmovableMarkBitmapAddr_ = ToUintPtr(nonmovableRegion->GetMarkBitmap());

        size_t humongousLen = G1GCTest::GetHumongousArrayLength(ClassRoot::ARRAY_U8);
        humongous_ =
            VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(humongousLen, ClassRoot::ARRAY_U8, false));
        Region *humongousRegion = ObjectToRegion(humongous_.GetPtr());
        humongousMarkBitmapAddr_ = ToUintPtr(humongousRegion->GetMarkBitmap());
    }

    ObjectHeader *GetNonMovable()
    {
        ASSERT(nonmovable_.GetPtr() != nullptr);
        return nonmovable_.GetPtr();
    }

    uintptr_t GetNonMovableMarkBitmapAddr()
    {
        return nonmovableMarkBitmapAddr_;
    }

    ObjectHeader *GetHumongous()
    {
        return humongous_.GetPtr();
    }

    uintptr_t GetHumongousMarkBitmapAddr()
    {
        return humongousMarkBitmapAddr_;
    }

private:
    VMHandle<ObjectHeader> nonmovable_;
    uintptr_t nonmovableMarkBitmapAddr_ {};
    VMHandle<ObjectHeader> humongous_;
    uintptr_t humongousMarkBitmapAddr_ {};
};

// Test that new objects created during concurrent marking are alive
TEST_F(G1GCTest, TestNewObjectsSATB)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    NewObjectsListener listener;
    gc->AddListener(&listener);

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);  // threshold cause should trigger concurrent marking
        task.Run(*gc);
    }
    // nullptr means we could not allocate an object, or the concurrent phase wasn't
    // triggered, or the listener wasn't called.
    ASSERT_NE(nullptr, listener.GetNonMovable());
    ASSERT_NE(nullptr, listener.GetHumongous());

    // Check the objects are alive
    Region *nonmovableRegion = ObjectToRegion(listener.GetNonMovable());
    ASSERT_NE(nullptr, nonmovableRegion->GetLiveBitmap());
    ASSERT_TRUE(nonmovableRegion->GetLiveBitmap()->Test(listener.GetNonMovable()));
    ASSERT_FALSE(listener.GetNonMovable()->IsMarkedForGC());  // mark should be done using the mark bitmap
    Region *humongousRegion = ObjectToRegion(listener.GetHumongous());
    ASSERT_NE(nullptr, humongousRegion->GetLiveBitmap());
    ASSERT_TRUE(humongousRegion->GetLiveBitmap()->Test(listener.GetHumongous()));
    ASSERT_FALSE(listener.GetHumongous()->IsMarkedForGC());  // mark should be done using the mark bitmap
}

class CollectionSetChecker : public GCListener {
public:
    explicit CollectionSetChecker(ObjectAllocatorG1<> *allocator) : allocator_(allocator) {}

    void SetExpectedRegions(const std::initializer_list<Region *> &expectedRegions)
    {
        expectedRegions_ = expectedRegions;
    }

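    // By the time young marking starts, the collection set has already been selected,
    // so the regions in it carry the IS_COLLECTION_SET flag.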
    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase == GCPhase::GC_PHASE_MARK_YOUNG) {
            EXPECT_EQ(expectedRegions_, GetCollectionSet());
            expectedRegions_.clear();
        }
    }

private:
    PandaSet<Region *> GetCollectionSet()
    {
        PandaSet<Region *> collectionSet;
        for (Region *region : allocator_->GetAllRegions()) {
            if (region->HasFlag(RegionFlag::IS_COLLECTION_SET)) {
                collectionSet.insert(region);
            }
        }
        return collectionSet;
    }

private:
    ObjectAllocatorG1<> *allocator_;
    PandaSet<Region *> expectedRegions_;
};

TEST_F(G1GCTest, TestGetCollectibleRegionsHasAllYoungRegions)
{
    // Each object will occupy more than half of a region,
    // so expect the allocator to allocate a separate young region for each object.
    size_t youngLen = DEFAULT_REGION_SIZE / 2 + sizeof(coretypes::Array);

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ObjectAllocatorG1<> *allocator = GetAllocator();
    MTManagedThread *thread = MTManagedThread::GetCurrent();

    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    {
        ScopedManagedCodeThread s(thread);
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> young1;
        VMHandle<ObjectHeader> young2;
        VMHandle<ObjectHeader> young3;

        young1 = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(youngLen, ClassRoot::ARRAY_U8, false));
        young2 = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(youngLen, ClassRoot::ARRAY_U8, false));
        young3 = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(youngLen, ClassRoot::ARRAY_U8, false));

        Region *yregion1 = ObjectToRegion(young1.GetPtr());
        Region *yregion2 = ObjectToRegion(young2.GetPtr());
        Region *yregion3 = ObjectToRegion(young3.GetPtr());
        // Check all 3 objects are in different regions
        ASSERT_NE(yregion1, yregion2);
        ASSERT_NE(yregion2, yregion3);
        ASSERT_NE(yregion1, yregion3);
        checker.SetExpectedRegions({yregion1, yregion2, yregion3});
    }
    GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
    task.Run(*gc);
}

TEST_F(G1GCTest, TestGetCollectibleRegionsHasAllRegionsInCaseOfFull)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ObjectAllocatorG1<> *allocator = GetAllocator();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<ObjectHeader> young;
    VMHandle<ObjectHeader> tenured;
    VMHandle<ObjectHeader> humongous;
    tenured = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocObjectInYoung());

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young to tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    young = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocObjectInYoung());
    humongous = VMHandle<ObjectHeader>(
        thread, ObjectAllocator::AllocArray(GetHumongousArrayLength(ClassRoot::ARRAY_U8), ClassRoot::ARRAY_U8, false));

    Region *yregion = ObjectToRegion(young.GetPtr());
    [[maybe_unused]] Region *tregion = ObjectToRegion(tenured.GetPtr());
    [[maybe_unused]] Region *hregion = ObjectToRegion(humongous.GetPtr());

    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    // Even though it's a full GC, currently we split it into two parts: the first one is a young-only collection,
    // and the tenured collection part doesn't use GC_PHASE_MARK_YOUNG
    checker.SetExpectedRegions({yregion});
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task1(GCTaskCause::EXPLICIT_CAUSE);
        task1.Run(*gc);
    }
}

TEST_F(G1GCTest, TestMixedCollections)
{
    uint32_t garbageRate = Runtime::GetOptions().GetG1RegionGarbageRateThreshold();
    // Each object will occupy a large part of a region,
    // so expect the allocator to allocate a separate young region for each object.
    static constexpr size_t ARRAY_SIZE = 4;
    // NOLINTNEXTLINE(readability-magic-numbers)
    size_t bigLen = garbageRate * DEFAULT_REGION_SIZE / 100 + sizeof(coretypes::String);
    // NOLINTNEXTLINE(readability-magic-numbers)
    size_t bigLen1 = (garbageRate + 1) * DEFAULT_REGION_SIZE / 100 + sizeof(coretypes::String);
    // NOLINTNEXTLINE(readability-magic-numbers)
    size_t bigLen2 = (garbageRate + 2) * DEFAULT_REGION_SIZE / 100 + sizeof(coretypes::String);
    size_t smallLen = DEFAULT_REGION_SIZE / 2 + sizeof(coretypes::String);
    std::array<size_t, ARRAY_SIZE> lengthsArray {bigLen, bigLen1, bigLen2, smallLen};
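    // Once the big strings die, the regions holding the bigLen1 and bigLen2 strings
    // carry the most garbage, so they should be picked first for mixed collection.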
    size_t miniObjLen = Runtime::GetOptions().GetInitTlabSize() + 1;  // to allocate not in a TLAB

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ObjectAllocatorG1<> *allocator = GetAllocator();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> smallObjectHolder;
    VMHandle<coretypes::Array> bigObjectHolder;
    VMHandle<ObjectHeader> young;

    // Allocate objects of different sizes.
    // Allocate a mini object after each of them to prevent clearing after the concurrent phase.
    // Mixed regions should be chosen according to the largest garbage.
    bigObjectHolder =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(4U, ClassRoot::ARRAY_STRING, false));
    smallObjectHolder =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(4U, ClassRoot::ARRAY_STRING, false));
    for (size_t i = 0; i < ARRAY_SIZE; i++) {
        bigObjectHolder->Set(i, ObjectAllocator::AllocString(lengthsArray[i]));
        smallObjectHolder->Set(i, ObjectAllocator::AllocString(miniObjLen));
        Region *firstRegion = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(i));
        Region *secondRegion = ObjectToRegion(smallObjectHolder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(firstRegion->HasFlag(RegionFlag::IS_EDEN));
        ASSERT_TRUE(secondRegion->HasFlag(RegionFlag::IS_EDEN));
        ASSERT_TRUE(firstRegion == secondRegion);
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young objects -> tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    // GC doesn't include the current tenured region in the collection set.
    // Now we don't know which tenured region is current.
    // So propagate one big young object to tenured to make its region the current one.
    VMHandle<ObjectHeader> current;
    current = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(smallLen, ClassRoot::ARRAY_U8, false));

    // Propagate the 'current' object -> tenured and prepare for mixed GC
    // Release the big strings to make them garbage
    Region *region0 = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(0));
    Region *region1 = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(1));
    Region *region2 = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(2));
    Region *region3 = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(3));
    for (size_t i = 0; i < ARRAY_SIZE; i++) {
        bigObjectHolder->Set(i, static_cast<ObjectHeader *>(nullptr));
    }
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task1(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task1.Run(*gc);
    }

    // Now the region with 'current' is the current one and it will not be included in the collection set.

    young = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocObjectInYoung());

    Region *yregion = ObjectToRegion(young.GetPtr());
    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    checker.SetExpectedRegions({region1, region2, yregion});
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task2(GCTaskCause::YOUNG_GC_CAUSE);  // should run mixed GC
        task2.Run(*gc);
    }

    // Run GC one more time because we still have garbage regions.
    // Check we collect them.
    young = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocObjectInYoung());
    yregion = ObjectToRegion(young.GetPtr());
    checker.SetExpectedRegions({region0, yregion, region3});
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task3(GCTaskCause::YOUNG_GC_CAUSE);  // should run mixed GC
        task3.Run(*gc);
    }
}

TEST_F(G1GCTest, TestHandlePendingCards)
{
    auto thread = MTManagedThread::GetCurrent();
    auto runtime = Runtime::GetCurrent();
    auto ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    auto arrayClass = runtime->GetClassLinker()->GetExtension(ctx)->GetClassRoot(ClassRoot::ARRAY_STRING);
    auto gc = runtime->GetPandaVM()->GetGC();
    size_t elemSize = arrayClass->GetComponentSize();
    size_t arraySize = DEFAULT_REGION_SIZE / 2;
    // NOLINTNEXTLINE(clang-analyzer-core.DivideZero)
    size_t arrayLength = arraySize / elemSize + 1;
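    // Each array takes slightly more than half a region, so every allocation below
    // should land in its own young region.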
    ScopedManagedCodeThread s(thread);
    HandleScope<ObjectHeader *> scope(thread);

    constexpr size_t REGION_NUM = 16;
    std::vector<VMHandle<coretypes::Array>> arrays;

    for (size_t i = 0; i < REGION_NUM; i++) {
        arrays.emplace_back(thread, ObjectAllocator::AllocArray(arrayLength, ClassRoot::ARRAY_STRING, false));
    }

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    for (auto &array : arrays) {
        ASSERT_TRUE(ObjectToRegion(array.GetPtr())->HasFlag(IS_OLD));
    }

    std::vector<VMHandle<coretypes::String>> strings;
    std::vector<void *> stringOrigPtrs;

    for (auto &array : arrays) {
        auto str = ObjectAllocator::AllocString(StringLengthFitIntoRegion(1));
        strings.emplace_back(thread, str);
        stringOrigPtrs.push_back(str);
        array->Set(0, str);  // create a dirty card
    }

    // With high probability the update_remset_worker cannot process all dirty cards before GC,
    // so GC drains them from the update_remset_worker and handles them separately
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    for (size_t i = 0; i < REGION_NUM; i++) {
        auto &array = arrays[i];
        auto &str = strings[i];
        auto strOrigPtr = stringOrigPtrs[i];
        ASSERT_NE(strOrigPtr, str.GetPtr());                     // string was moved
        ASSERT_EQ(array->Get<ObjectHeader *>(0), str.GetPtr());  // refs were correctly updated
    }

    // Dirty cards corresponding to dirty_regions_objects should be re-enqueued
    ProcessDirtyCards(static_cast<G1GC<PandaAssemblyLanguageConfig> *>(gc));
    for (size_t i = 0; i < REGION_NUM; i++) {
        auto &array = arrays[i];
        auto &str = strings[i];
        bool found = false;
        ObjectToRegion(str.GetPtr())->GetRemSet()->IterateOverObjects([&found, &array](ObjectHeader *obj) {
            if (obj == array.GetPtr()) {
                found = true;
            }
        });
        ASSERT_TRUE(found);
    }
}

class G1GCPromotionTest : public G1GCTest {
public:
    G1GCPromotionTest() : G1GCTest(CreateOptions()) {}

    static RuntimeOptions CreateOptions()
    {
        RuntimeOptions options = CreateDefaultOptions();
        // NOLINTNEXTLINE(readability-magic-numbers)
        options.SetG1PromotionRegionAliveRate(PROMOTE_RATE);
        return options;
    }

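    // Young regions whose alive bytes exceed this percentage are promoted wholesale
    // (objects keep their addresses) instead of being compacted into tenured regions.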
    static constexpr size_t PROMOTE_RATE = 50;
};

TEST_F(G1GCPromotionTest, TestCorrectPromotionYoungRegion)
{
    // We will create a humongous object with links to two young regions
    // and check the promotion workflow
    static constexpr size_t HUMONGOUS_STRING_LEN = G1GCPromotionTest::GetHumongousStringLength();
    // Consume more than 50% of the region size
    static constexpr size_t FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT =
        DEFAULT_REGION_SIZE / sizeof(coretypes::String) * 2U / 3U + 1;
    // Consume less than 50% of the region size
    static constexpr size_t SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT = 1;
    ASSERT(FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT <= HUMONGOUS_STRING_LEN);
    ASSERT((FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT * sizeof(coretypes::String) * 100U / DEFAULT_REGION_SIZE) >
           G1GCPromotionTest::PROMOTE_RATE);
    ASSERT((SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT * sizeof(coretypes::String) * 100U / DEFAULT_REGION_SIZE) <
           G1GCPromotionTest::PROMOTE_RATE);

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    // Run Full GC to compact all existing young regions:
    GCTask task0(GCTaskCause::EXPLICIT_CAUSE);
    task0.Run(*gc);

    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> firstHolder;
    VMHandle<coretypes::Array> secondHolder;
    VMHandle<ObjectHeader> young;
    std::array<ObjectHeader *, FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT> firstRegionObjectLinks {};
    std::array<ObjectHeader *, SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT> secondRegionObjectLinks {};
    // Check promotion for the young region:

    firstHolder = VMHandle<coretypes::Array>(
        thread, ObjectAllocator::AllocArray(HUMONGOUS_STRING_LEN, ClassRoot::ARRAY_STRING, false));
    Region *firstRegion = ObjectToRegion(ObjectAllocator::AllocObjectInYoung());
    ASSERT_TRUE(firstRegion->HasFlag(RegionFlag::IS_EDEN));
    for (size_t i = 0; i < FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        firstRegionObjectLinks[i] = ObjectAllocator::AllocObjectInYoung();
        ASSERT_TRUE(firstRegionObjectLinks[i] != nullptr);
        firstHolder->Set(i, firstRegionObjectLinks[i]);
        ASSERT_TRUE(ObjectToRegion(firstRegionObjectLinks[i]) == firstRegion);
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Promote young objects in one region -> tenured
        GCTask task1(GCTaskCause::YOUNG_GC_CAUSE);
        task1.Run(*gc);
    }
    // Check that we didn't change the links to young objects from the first region:
    for (size_t i = 0; i < FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        ASSERT_EQ(firstRegionObjectLinks[i], firstHolder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(ObjectToRegion(firstHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(firstHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_PROMOTED));
    }

    secondHolder = VMHandle<coretypes::Array>(
        thread, ObjectAllocator::AllocArray(HUMONGOUS_STRING_LEN, ClassRoot::ARRAY_STRING, false));
    Region *secondRegion = ObjectToRegion(ObjectAllocator::AllocObjectInYoung());
    ASSERT_TRUE(secondRegion->HasFlag(RegionFlag::IS_EDEN));
    for (size_t i = 0; i < SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        secondRegionObjectLinks[i] = ObjectAllocator::AllocObjectInYoung();
        ASSERT_TRUE(secondRegionObjectLinks[i] != nullptr);
        secondHolder->Set(i, secondRegionObjectLinks[i]);
        ASSERT_TRUE(ObjectToRegion(secondRegionObjectLinks[i]) == secondRegion);
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Compact young objects in one region -> tenured
        GCTask task2(GCTaskCause::YOUNG_GC_CAUSE);
        task2.Run(*gc);
    }
    // Check that we changed the links to young objects from the second region:
    for (size_t i = 0; i < SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        ASSERT_NE(secondRegionObjectLinks[i], secondHolder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(ObjectToRegion(secondHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(secondHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_PROMOTED));
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Run Full GC to compact all tenured regions:
        GCTask task3(GCTaskCause::EXPLICIT_CAUSE);
        task3.Run(*gc);
    }
    // Now we should have updated links in the humongous object to the first region's objects:
    for (size_t i = 0; i < FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        ASSERT_NE(firstRegionObjectLinks[i], firstHolder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(ObjectToRegion(firstHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(firstHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_PROMOTED));
    }
}

class PromotionRemSetChecker : public GCListener {
public:
    PromotionRemSetChecker(VMHandle<coretypes::Array> *array, VMHandle<coretypes::String> *string)
        : array_(array), string_(string)
    {
    }

    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK_YOUNG) {
            return;
        }
        // Before marking young, all remsets must be up to date
        CheckRemSets();
    }

    bool CheckRemSets()
    {
        Region *refRegion = ObjectToRegion(string_->GetPtr());
        found_ = false;
        refRegion->GetRemSet()->IterateOverObjects([this](ObjectHeader *obj) {
            if (obj == array_->GetPtr()) {
                found_ = true;
            }
        });
        return found_;
    }

    bool IsFound() const
    {
        return found_;
    }

private:
    VMHandle<coretypes::Array> *array_;
    VMHandle<coretypes::String> *string_;
    bool found_ = false;
};

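// Test that remsets referencing a promoted region stay valid: after promotion the
// array must be recorded in the remset of the tenured region holding the string.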
TEST_F(G1GCPromotionTest, TestPromotedRegionHasValidRemSets)
{
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    GC *gc = runtime->GetPandaVM()->GetGC();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::String> string(thread, ObjectAllocator::AllocString(1));
    ASSERT_TRUE(ObjectToRegion(string.GetPtr())->IsYoung());
    {
        ScopedNativeCodeThread sn(thread);
        // Move the string to tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    ASSERT_TRUE(ObjectToRegion(string.GetPtr())->HasFlag(IS_OLD));

    // Allocate an array which occupies more than half a region.
    // This array will be promoted.
    auto *arrayClass = runtime->GetClassLinker()->GetExtension(ctx)->GetClassRoot(ClassRoot::ARRAY_STRING);
    size_t elemSize = arrayClass->GetComponentSize();
    size_t arraySize = DEFAULT_REGION_SIZE / 2;
    size_t arrayLength = arraySize / elemSize + 1;
    VMHandle<coretypes::Array> array(thread, ObjectAllocator::AllocArray(arrayLength, ClassRoot::ARRAY_STRING, false));
    ASSERT_FALSE(array->IsForwarded());
    Region *arrayRegion = ObjectToRegion(array.GetPtr());
    ASSERT_TRUE(arrayRegion->IsYoung());
    array->Set(0, string.GetPtr());

    PromotionRemSetChecker listener(&array, &string);
    gc->AddListener(&listener);
    {
        ScopedNativeCodeThread sn(thread);
        // Promote the array's region to tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
        ASSERT_FALSE(listener.IsFound());
    }
    // Check the array was promoted.
    ASSERT_TRUE(arrayRegion == ObjectToRegion(array.GetPtr()));

    // The remset is not fully updated during mixed collection
    ProcessDirtyCards(static_cast<G1GC<PandaAssemblyLanguageConfig> *>(gc));
    // Check remsets
    ASSERT_TRUE(listener.CheckRemSets());
}

class InterruptGCListener : public GCListener {
public:
    explicit InterruptGCListener(VMHandle<coretypes::Array> *array) : array_(array) {}

    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK) {
            return;
        }
        GC *gc = Runtime::GetCurrent()->GetPandaVM()->GetGC();
        {
            ScopedManagedCodeThread s(ManagedThread::GetCurrent());
            // Allocate an object to add it into the SATB buffer
            ObjectAllocator::AllocObjectInYoung();
        }
        // Set the interrupt flag
        gc->OnWaitForIdleFail();
    }

    void GCPhaseFinished(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK) {
            return;
        }
        Region *region = ObjectToRegion((*array_)->Get<ObjectHeader *>(0));
        // Check the object array[0] is not marked
        EXPECT_FALSE(region->GetMarkBitmap()->Test((*array_)->Get<ObjectHeader *>(0)));
        // Check GC hasn't calculated live bytes for the region
        EXPECT_EQ(0, region->GetLiveBytes());
        // Check GC has cleared the SATB buffer
        MTManagedThread *thread = MTManagedThread::GetCurrent();
        EXPECT_NE(nullptr, thread->GetPreBuff());
        EXPECT_EQ(0, thread->GetPreBuff()->size());
    }

private:
    VMHandle<coretypes::Array> *array_;
};

TEST_F(G1GCTest, TestInterruptConcurrentMarking)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> array;

    array = VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(1, ClassRoot::ARRAY_STRING, false));
    array->Set(0, ObjectAllocator::AllocString(1));

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young objects -> tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);

        // Clear live bytes to check that concurrent marking will not calculate them
        Region *region = ObjectToRegion(array->Get<ObjectHeader *>(0));
        ASSERT_TRUE(region != nullptr);
        region->SetLiveBytes(0);

        InterruptGCListener listener(&array);
        gc->AddListener(&listener);
        // Trigger concurrent marking
        GCTask task1(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task1.Run(*gc);
    }
}

class NullRefListener : public GCListener {
public:
    explicit NullRefListener(VMHandle<coretypes::Array> *array) : array_(array) {}

    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK) {
            return;
        }
        (*array_)->Set(0, static_cast<ObjectHeader *>(nullptr));
    }

private:
    VMHandle<coretypes::Array> *array_;
};

TEST_F(G1GCTest, TestGarbageBytesCalculation)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> array;

    // Allocate objects of different sizes.
    // Mixed regions should be chosen according to the largest garbage.
    // Allocate an array of length 2, because the array's size must be 8-byte aligned
    array = VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(2, ClassRoot::ARRAY_STRING, false));
    ASSERT_TRUE(ObjectToRegion(array.GetPtr())->HasFlag(RegionFlag::IS_EDEN));
    // The same for the strings: the instance size must be 8-byte aligned.
    array->Set(0, ObjectAllocator::AllocString(8U));
    array->Set(1, ObjectAllocator::AllocString(8U));
    ASSERT_TRUE(ObjectToRegion(array->Get<ObjectHeader *>(0))->HasFlag(RegionFlag::IS_EDEN));

    size_t arraySize = GetObjectSize(array.GetPtr());
    size_t strSize = GetObjectSize(array->Get<ObjectHeader *>(0));
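    // After the collections below, live bytes should equal the array plus the one
    // surviving string; the string nulled out by NullRefListener becomes garbage.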

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young objects -> tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    // Check the array and the string are in the same tenured region
    ASSERT_EQ(ObjectToRegion(array.GetPtr()), ObjectToRegion(array->Get<ObjectHeader *>(0)));
    ASSERT_TRUE(ObjectToRegion(array.GetPtr())->HasFlag(RegionFlag::IS_OLD));

    ObjectAllocator::AllocObjectInYoung();
    array->Set(1, static_cast<ObjectHeader *>(nullptr));

    NullRefListener listener(&array);
    gc->AddListener(&listener);
    {
        ScopedNativeCodeThread sn(thread);
        // Prepare for mixed GC, start concurrent marking and calculate garbage for regions
        GCTask task2(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task2.Run(*gc);
    }

    Region *region = ObjectToRegion(array.GetPtr());
    ASSERT_EQ(arraySize + strSize, region->GetLiveBytes());
    ASSERT_EQ(strSize, region->GetGarbageBytes());
}

TEST_F(G1GCTest, NonMovableClearingDuringConcurrentPhaseTest)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    auto objAllocator = Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator();
    ClassLinker *classLinker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    // NOLINTBEGIN(readability-magic-numbers)
    size_t arrayLength = GetHumongousArrayLength(ClassRoot::ARRAY_STRING) - 50;
    coretypes::Array *firstNonMovableObj = nullptr;
    coretypes::Array *secondNonMovableObj = nullptr;
    uintptr_t prevYoungAddr = 0;

    Class *klass = classLinker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);
    firstNonMovableObj = coretypes::Array::Create(klass, arrayLength, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    secondNonMovableObj = coretypes::Array::Create(klass, arrayLength, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    ASSERT_EQ(true, ObjectToRegion(firstNonMovableObj)->HasFlag(RegionFlag::IS_NONMOVABLE));
    ASSERT_EQ(true, ObjectToRegion(secondNonMovableObj)->HasFlag(RegionFlag::IS_NONMOVABLE));
    coretypes::String *youngObj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM());
    firstNonMovableObj->Set(0, youngObj);
    prevYoungAddr = ToUintPtr(youngObj);

    VMHandle<coretypes::Array> secondNonMovableObjPtr(thread, secondNonMovableObj);

    {
        [[maybe_unused]] HandleScope<ObjectHeader *> firstScope(thread);
        VMHandle<coretypes::Array> firstNonMovableObjPtr(thread, firstNonMovableObj);
        {
            ScopedNativeCodeThread sn(thread);
            GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
            task.Run(*gc);
        }

        auto youngObj2 = static_cast<coretypes::String *>(firstNonMovableObjPtr->Get<ObjectHeader *>(0));
        // Check GC has moved the young obj
        ASSERT_NE(prevYoungAddr, ToUintPtr(youngObj2));
        // Check the young object is accessible
        ASSERT_EQ(0, youngObj2->GetLength());
    }

    // Check that all objects are alive
    ASSERT_EQ(true, objAllocator->ContainObject(firstNonMovableObj));
    ASSERT_EQ(true, objAllocator->ContainObject(secondNonMovableObj));
    ASSERT_EQ(true, objAllocator->IsLive(firstNonMovableObj));
    ASSERT_EQ(true, objAllocator->IsLive(secondNonMovableObj));
    // Check that the first object is accessible
    bool foundFirstObject = false;
    objAllocator->IterateOverObjects([&firstNonMovableObj, &foundFirstObject](ObjectHeader *object) {
        if (firstNonMovableObj == object) {
            foundFirstObject = true;
        }
    });
    ASSERT_EQ(true, foundFirstObject);

    // Now the first non-movable object is unreachable, so try to collect it:
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task.Run(*gc);
    }

    // Check that the second object is still alive
    ASSERT_EQ(true, objAllocator->ContainObject(secondNonMovableObj));
    ASSERT_EQ(true, objAllocator->IsLive(secondNonMovableObj));
    // Check that the first object is dead
    objAllocator->IterateOverObjects(
        [&firstNonMovableObj](ObjectHeader *object) { ASSERT_NE(firstNonMovableObj, object); });
}

TEST_F(G1GCTest, HumongousClearingDuringConcurrentPhaseTest)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    auto objAllocator = Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator();
    ClassLinker *classLinker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    size_t arrayLength = GetHumongousArrayLength(ClassRoot::ARRAY_STRING);
    coretypes::Array *firstHumongousObj = nullptr;
    coretypes::Array *secondHumongousObj = nullptr;
    uintptr_t prevYoungAddr = 0;

    Class *klass = classLinker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);
    firstHumongousObj = coretypes::Array::Create(klass, arrayLength, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    secondHumongousObj = coretypes::Array::Create(klass, arrayLength, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    ASSERT_EQ(true, ObjectToRegion(firstHumongousObj)->HasFlag(RegionFlag::IS_LARGE_OBJECT));
    ASSERT_EQ(true, ObjectToRegion(secondHumongousObj)->HasFlag(RegionFlag::IS_LARGE_OBJECT));
    coretypes::String *youngObj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM());
    firstHumongousObj->Set(0, youngObj);
    prevYoungAddr = ToUintPtr(youngObj);

    VMHandle<coretypes::Array> secondHumongousObjPtr(thread, secondHumongousObj);

    {
        HandleScope<ObjectHeader *> firstScope(thread);
        VMHandle<coretypes::Array> firstHumongousObjPtr(thread, firstHumongousObj);
        {
            ScopedNativeCodeThread sn(thread);
            GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
            task.Run(*gc);
        }

        auto youngObj2 = static_cast<coretypes::String *>(firstHumongousObjPtr->Get<ObjectHeader *>(0));
        // Check GC has moved the young obj
        ASSERT_NE(prevYoungAddr, ToUintPtr(youngObj2));
        // Check the young object is accessible
        ASSERT_EQ(0, youngObj2->GetLength());
    }

    // Check that all objects are alive
    ASSERT_EQ(true, objAllocator->ContainObject(firstHumongousObj));
    ASSERT_EQ(true, objAllocator->ContainObject(secondHumongousObj));
    ASSERT_EQ(true, objAllocator->IsLive(firstHumongousObj));
    ASSERT_EQ(true, objAllocator->IsLive(secondHumongousObj));
    // Check that the first object is accessible
    bool foundFirstObject = false;
    objAllocator->IterateOverObjects([&firstHumongousObj, &foundFirstObject](ObjectHeader *object) {
        if (firstHumongousObj == object) {
            foundFirstObject = true;
        }
    });
    ASSERT_EQ(true, foundFirstObject);

    {
        ScopedNativeCodeThread sn(thread);
        // Now the first humongous object is unreachable, so try to collect it:
        GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task.Run(*gc);
    }

    // Check that the second object is still alive
    ASSERT_EQ(true, objAllocator->ContainObject(secondHumongousObj));
    ASSERT_EQ(true, objAllocator->IsLive(secondHumongousObj));
    // Check that the first object is dead
    objAllocator->IterateOverObjects(
        [&firstHumongousObj](ObjectHeader *object) { ASSERT_NE(firstHumongousObj, object); });
}

class G1FullGCTest : public G1GCTest {
public:
    explicit G1FullGCTest(uint32_t fullGcRegionFragmentationRate = 0)
        : G1GCTest(CreateOptions(fullGcRegionFragmentationRate))
    {
    }

    static RuntimeOptions CreateOptions(uint32_t fullGcRegionFragmentationRate)
    {
        RuntimeOptions options = CreateDefaultOptions();
        options.SetInitYoungSpaceSize(YOUNG_SIZE);
        options.SetYoungSpaceSize(YOUNG_SIZE);
        options.SetHeapSizeLimit(HEAP_SIZE);
        options.SetG1FullGcRegionFragmentationRate(fullGcRegionFragmentationRate);
        return options;
    }

    static constexpr size_t NumYoungRegions()
    {
        return YOUNG_SIZE / DEFAULT_REGION_SIZE;
    }

    static constexpr size_t NumRegions()
    {
        // Region count without the region reserved for Full GC
        return HEAP_SIZE / DEFAULT_REGION_SIZE - 1U;
    }

    size_t RefArrayLengthFitIntoRegion(size_t numRegions)
    {
        Runtime *runtime = Runtime::GetCurrent();
        LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
        auto *klass = runtime->GetClassLinker()->GetExtension(ctx)->GetClassRoot(ClassRoot::ARRAY_STRING);
        size_t elemSize = klass->GetComponentSize();
        // NOLINTNEXTLINE(clang-analyzer-core.DivideZero)
        return (numRegions * DEFAULT_REGION_SIZE - sizeof(coretypes::Array) - Region::HeadSize()) / elemSize;
    }

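    // Fills numRegions regions: each allocated string is sized to exactly fill one
    // region, and each is rooted in the holder array starting at startIndex.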
    void FillHeap(size_t numRegions, VMHandle<coretypes::Array> &holder, size_t startIndex)
    {
        constexpr size_t STRING_LENGTH = StringLengthFitIntoRegion(1);
        EXPECT_LE(numRegions, holder->GetLength());
        for (size_t i = 0; i < numRegions; ++i) {
            ObjectHeader *obj = ObjectAllocator::AllocString(STRING_LENGTH);
            EXPECT_NE(nullptr, obj);
            holder->Set(startIndex + i, obj);
        }
    }

    static constexpr size_t YOUNG_SIZE = 1_MB;
    static constexpr size_t HEAP_SIZE = 4_MB;
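    // Presumably one non-movable region is already occupied by runtime-internal
    // objects; the tests below subtract it when budgeting free regions.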
1309 static constexpr size_t NUM_NONMOVABLE_REGIONS_FOR_RUNTIME = 1;
1310 };
1311
TEST_F(G1FullGCTest,TestFullGCCollectsNonRegularObjects)1312 TEST_F(G1FullGCTest, TestFullGCCollectsNonRegularObjects)
1313 {
1314 Runtime *runtime = Runtime::GetCurrent();
1315 GC *gc = runtime->GetPandaVM()->GetGC();
1316 ManagedThread *thread = ManagedThread::GetCurrent();
1317 ScopedManagedCodeThread s(thread);
1318 ObjectHeader *humongousObj = ObjectAllocator::AllocString(GetHumongousStringLength());
1319 ObjectHeader *nonmovableObj = AllocNonMovableObject();
1320
1321 {
1322 ScopedNativeCodeThread sn(thread);
1323 GCTask task(GCTaskCause::EXPLICIT_CAUSE);
1324 task.Run(*gc);
1325 }
1326
1327 PandaVector<Region *> nonregularRegions = GetAllocator()->GetNonRegularRegions();
1328 for (Region *region : nonregularRegions) {
1329 if (region->HasFlag(IS_LARGE_OBJECT)) {
1330 ASSERT_NE(humongousObj, region->GetLargeObject());
1331 } else if (region->HasFlag(IS_NONMOVABLE)) {
1332 if (region->Begin() <= ToUintPtr(nonmovableObj) && ToUintPtr(nonmovableObj) < region->End()) {
1333 ASSERT_FALSE(region->GetLiveBitmap()->Test(nonmovableObj));
1334 }
1335 } else {
1336 FAIL() << "Unknown region type";
1337 }
1338 }
1339 }
1340
TEST_F(G1FullGCTest, TestFullGCFreeHumongousBeforeTenuredCollection)
{
    constexpr size_t NUM_REGIONS_FOR_HUMONGOUS = 4;

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    coretypes::String *humongousObj =
        ObjectAllocator::AllocString(StringLengthFitIntoRegion(NUM_REGIONS_FOR_HUMONGOUS));
    holder->Set(0, humongousObj);
    size_t numFreeRegions =
        NumRegions() - NumYoungRegions() - NUM_NONMOVABLE_REGIONS_FOR_RUNTIME - NUM_REGIONS_FOR_HUMONGOUS;
    FillHeap(numFreeRegions, holder, 1);  // occupy 4 tenured regions and 3 young regions
    // Move the 3 young regions to the tenured space.
    gc->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
    // Now the tenured space is full. Fill the young space.
    FillHeap(NumYoungRegions(), holder, numFreeRegions + 1);
    // At this point we have filled 4 tenured regions and 4 young regions, and 3 tenured regions are free.
    // We cannot do a young GC because there are not enough free tenured regions to move 4 young regions.
    // Check that we are OOM.
    ASSERT_EQ(nullptr, ObjectAllocator::AllocObjectInYoung());
    // Forget humongousObj
    holder->Set(0, static_cast<ObjectHeader *>(nullptr));
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // We should have 2 free regions, but during allocation we reserve 1 region for full GC,
    // so we can allocate only one region.
    ASSERT_NE(nullptr, ObjectAllocator::AllocObjectInYoung());
}

TEST_F(G1FullGCTest, TestRemSetsAndYoungCardsAfterFailedFullGC)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    size_t numFreeRegions = NumRegions() - NumYoungRegions() - NUM_NONMOVABLE_REGIONS_FOR_RUNTIME + 1U;
    FillHeap(numFreeRegions, holder, 0);  // occupy 8 tenured regions and 3 young regions
    VMHandle<coretypes::Array> youngArray(
        thread, ObjectAllocator::AllocArray(RefArrayLengthFitIntoRegion(1), ClassRoot::ARRAY_STRING, false));
    ASSERT(ObjectToRegion(youngArray.GetPtr())->IsEden());
    youngArray->Set(0, holder->Get<ObjectHeader *>(0));
    uintptr_t tenuredAddrBeforeGc = ToUintPtr(holder->Get<ObjectHeader *>(0));
    // Trigger a full GC by allocating an object in the full young space. The allocation should
    // fail because there is no tenured space to move 4 young regions into.
    ASSERT_EQ(nullptr, ObjectAllocator::AllocObjectInYoung());
    uintptr_t tenuredAddrAfterGc = ToUintPtr(holder->Get<ObjectHeader *>(0));
    // Check that the GC moved tenured regions
    ASSERT_NE(tenuredAddrBeforeGc, tenuredAddrAfterGc);
    // Check that the full GC updates refs in young correctly when it cannot collect young.
    ASSERT_EQ(holder->Get<ObjectHeader *>(0), youngArray->Get<ObjectHeader *>(0));
    // Check remsets: references from young regions are not recorded in remsets (they are
    // tracked via young cards instead, which are checked below).
    Region *youngRegion = ObjectToRegion(youngArray.GetPtr());
    ASSERT_TRUE(youngRegion->IsEden());
    Region *tenuredRegion = ObjectToRegion(holder->Get<ObjectHeader *>(0));
    ASSERT_TRUE(tenuredRegion->HasFlag(IS_OLD));
    bool hasObject = false;
    tenuredRegion->GetRemSet()->IterateOverObjects(
        [&hasObject, &youngArray](ObjectHeader *obj) { hasObject |= obj == youngArray.GetPtr(); });
    ASSERT_FALSE(hasObject);
    // Check young cards: every card covering a young region must still be marked young.
    ASSERT_EQ(NumYoungRegions(), GetAllocator()->GetYoungRegions().size());
    for (Region *region : GetAllocator()->GetYoungRegions()) {
        uintptr_t begin = ToUintPtr(region);
        uintptr_t end = region->End();
        while (begin < end) {
            ASSERT_TRUE(cardTable->GetCardPtr(begin)->IsYoung());
            begin += CardTable::GetCardSize();
        }
    }
}

TEST_F(G1FullGCTest, TestFullGCGenericFlow)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    size_t numFreeRegions = NumRegions() - NumYoungRegions() - NUM_NONMOVABLE_REGIONS_FOR_RUNTIME + 1U;
    FillHeap(numFreeRegions, holder, 0);  // occupy 8 tenured regions and 3 young regions
    VMHandle<coretypes::Array> youngArray(
        thread, ObjectAllocator::AllocArray(RefArrayLengthFitIntoRegion(1), ClassRoot::ARRAY_STRING, false));
    ASSERT(ObjectToRegion(youngArray.GetPtr())->IsEden());
    youngArray->Set(0, holder->Get<ObjectHeader *>(0));
    // Check that we are OOM.
    ASSERT_EQ(nullptr, ObjectAllocator::AllocObjectInYoung());
    uintptr_t tenuredAddrBeforeGc = ToUintPtr(holder->Get<ObjectHeader *>(0));
    // Forget two tenured regions
    holder->Set(1U, static_cast<ObjectHeader *>(nullptr));
    holder->Set(2U, static_cast<ObjectHeader *>(nullptr));
    // Now there should be enough space in tenured to move young
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    ASSERT_NE(nullptr, ObjectAllocator::AllocObjectInYoung());
    uintptr_t tenuredAddrAfterGc = ToUintPtr(holder->Get<ObjectHeader *>(0));
    // Check that the GC moved tenured regions
    ASSERT_NE(tenuredAddrBeforeGc, tenuredAddrAfterGc);
    // Check that the full GC updates refs in young correctly when it cannot collect young.
    ASSERT_EQ(holder->Get<ObjectHeader *>(0), youngArray->Get<ObjectHeader *>(0));
}

TEST_F(G1FullGCTest, TestFullGCResetTenuredRegions)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    // Fill almost all regions (3 tenured regions will remain free)
    size_t numFreeRegions = NumRegions() - NUM_NONMOVABLE_REGIONS_FOR_RUNTIME;
    size_t numFilledRegions = RoundDown(numFreeRegions, NumYoungRegions());
    FillHeap(numFilledRegions, holder, 0);
    // Forget all objects
    for (size_t i = 0; i < numFilledRegions; ++i) {
        holder->Set(i, static_cast<ObjectHeader *>(nullptr));
    }
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Fill almost all regions again (3 tenured regions will remain free).
    // All allocations should succeed because the full GC should have reset the old tenured regions.
    FillHeap(numFilledRegions, holder, 0);
}

template <uint32_t REGION_FRAGMENTATION_RATE>
class G1FullGCWithRegionFragmentationRate : public G1FullGCTest {
public:
    G1FullGCWithRegionFragmentationRate() : G1FullGCTest(REGION_FRAGMENTATION_RATE) {}
};

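// Presumably G1FullGCTest's constructor (defined earlier in this file) forwards the rate to
// the corresponding runtime option. The two specializations below pin down the semantics: a
// rate of 100 means a tenured region is never fragmented enough to be re-compacted by a full
// GC, while a rate of 0 means every tenured region is always re-compacted.
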
class FullGcRegionFragmentationRateOptionNever : public G1FullGCWithRegionFragmentationRate<100U> {};

TEST_F(FullGcRegionFragmentationRateOptionNever, TestG1FullGcRegionFragmentationRateOptionNever)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    // Fill one region
    FillHeap(1, holder, 0);
    // Save a ref to the young object
    auto object = holder->Get<ObjectHeader *>(0);
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Check that the GC moved this young object
    ASSERT_NE(holder->Get<ObjectHeader *>(0), object);
    // Save a ref to the now-tenured object
    object = holder->Get<ObjectHeader *>(0);
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Check that the GC did not move this object
    ASSERT_EQ(holder->Get<ObjectHeader *>(0), object);
}

class FullGcRegionFragmentationRateOptionAlways : public G1FullGCWithRegionFragmentationRate<0> {};

TEST_F(FullGcRegionFragmentationRateOptionAlways, TestG1FullGcRegionFragmentationRateOptionAlways)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    // Fill one region
    FillHeap(1, holder, 0);
    // Save a ref to the young object
    auto object = holder->Get<ObjectHeader *>(0);
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Check that the GC moved this young object
    ASSERT_NE(holder->Get<ObjectHeader *>(0), object);
    // Save a ref to the now-tenured object
    object = holder->Get<ObjectHeader *>(0);
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Check that the GC moved this object again
    ASSERT_NE(holder->Get<ObjectHeader *>(0), object);
}

class G1FullGCOOMTest : public G1GCTest {
public:
    G1FullGCOOMTest() : G1GCTest(CreateOOMOptions())
    {
        thread_ = MTManagedThread::GetCurrent();
        ASSERT(thread_ != nullptr);
        thread_->ManagedCodeBegin();
    }

    NO_COPY_SEMANTIC(G1FullGCOOMTest);
    NO_MOVE_SEMANTIC(G1FullGCOOMTest);

    static RuntimeOptions CreateOOMOptions()
    {
        RuntimeOptions options;
        options.SetShouldLoadBootPandaFiles(false);
        options.SetCompilerEnableJit(false);
        options.SetShouldInitializeIntrinsics(false);
        // GC options
        constexpr size_t HEAP_SIZE_LIMIT_TEST = 16_MB;
        options.SetRunGcInPlace(true);
        options.SetGcType("g1-gc");
        options.SetHeapSizeLimit(HEAP_SIZE_LIMIT_TEST);
        options.SetGcTriggerType("debug-never");
        options.SetG1NumberOfTenuredRegionsAtMixedCollection(0);
        return options;
    }

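    // Note on the options above: G1NumberOfTenuredRegionsAtMixedCollection(0) keeps mixed
    // collections from reclaiming tenured regions, so (presumably) the only way to recover
    // tenured space in these tests is a full GC, which is exactly the path they exercise.
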
    ~G1FullGCOOMTest() override
    {
        thread_->ManagedCodeEnd();
    }

protected:
    MTManagedThread *thread_;  // NOLINT(misc-non-private-member-variables-in-classes)
};

TEST_F(G1FullGCOOMTest, AllocateBy1Region)
{
    constexpr size_t OBJECT_SIZE = AlignUp(static_cast<size_t>(DEFAULT_REGION_SIZE * 0.8F), DEFAULT_ALIGNMENT_IN_BYTES);
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
        auto *g1Allocator =
            static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
        // Fill the tenured space with garbage
        do {
            VMHandle<ObjectHeader> handle(thread_, ObjectAllocator::AllocString(OBJECT_SIZE));
            ASSERT_NE(handle.GetPtr(), nullptr) << "Allocation must succeed in a non-full heap";
            // Move the new object to tenured
            Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
        } while (g1Allocator->HaveTenuredSize(2U));
        ASSERT_TRUE(g1Allocator->HaveTenuredSize(1));
        // Allocate one young region
        VMHandle<ObjectHeader> handle1(thread_, ObjectAllocator::AllocString(OBJECT_SIZE));
        ASSERT_NE(handle1.GetPtr(), nullptr) << "Allocation must succeed: the heap is full of garbage";
        // Try to move the lone young region to the last tenured region
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
        // Completely fill the young space
        while (g1Allocator->GetHeapSpace()->GetCurrentFreeYoungSize() > 0) {
            auto *youngObj = ObjectAllocator::AllocString(OBJECT_SIZE);
            ASSERT_NE(youngObj, nullptr) << "Must allocate in free young space";
        }
    }
    ASSERT_NE(ObjectAllocator::AllocString(OBJECT_SIZE), nullptr)
        << "Allocation must succeed in a non-full heap";
}

TEST_F(G1FullGCOOMTest, PinUnpinObject)
{
    constexpr size_t OBJECT_SIZE = AlignUp(static_cast<size_t>(DEFAULT_REGION_SIZE * 0.8F), DEFAULT_ALIGNMENT_IN_BYTES);
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
        // Fill the tenured space with garbage
        do {
            VMHandle<ObjectHeader> handle(thread_, ObjectAllocator::AllocString(OBJECT_SIZE));
            ASSERT_NE(handle.GetPtr(), nullptr) << "Allocation must succeed in a non-full heap";
            // Move the new object to tenured
            Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
        } while (g1Allocator->HaveTenuredSize(2U));
        ASSERT_TRUE(g1Allocator->HaveTenuredSize(1));
        // Allocate one young region
        VMHandle<ObjectHeader> handle1(thread_, ObjectAllocator::AllocString(OBJECT_SIZE));
        ASSERT_NE(handle1.GetPtr(), nullptr) << "Allocation must succeed in young";
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsYoung());
        // Pin the object in its young region
        g1Allocator->PinObject(handle1.GetPtr());
        // Try to move the young region to the last tenured region
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
        // Just allocate one object in young
        auto *youngObj = ObjectAllocator::AllocString(OBJECT_SIZE);
        ASSERT_NE(youngObj, nullptr) << "Must allocate in free young space";
        // Run a full GC
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::OOM_CAUSE));
        // Check the "pinned" region
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasPinnedObjects());
        // Unpin the object
        g1Allocator->UnpinObject(handle1.GetPtr());
        // Check the "unpinned" region
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(handle1.GetPtr())->HasPinnedObjects());
    }
    // Run one more full GC after unpinning
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::OOM_CAUSE));
    g1Allocator->IterateOverObjects([](ObjectHeader *obj) {
        ASSERT_FALSE(ObjectToRegion(obj)->HasPinnedObjects()) << "The lone pinned object was unpinned before this GC";
    });
}

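// The same pin/unpin round-trip is factored into a standalone helper below so each heap space
// type can be exercised. Note that it creates and destroys its own Runtime, which is why it is
// driven from plain TEST() cases rather than the TEST_F() fixtures above.
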
static void PinUnpinTest(SpaceType requestedSpaceType, size_t objectSize = 1_KB)
{
    ASSERT_TRUE(IsHeapSpace(requestedSpaceType));
    Runtime::Create(G1FullGCOOMTest::CreateOOMOptions());
    auto *thread = MTManagedThread::GetCurrent();
    ASSERT_NE(thread, nullptr);
    thread->ManagedCodeBegin();
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread);
        constexpr size_t OBJ_ELEMENT_SIZE = 64;
        auto *addressBeforeGc =
            ObjectAllocator::AllocArray(objectSize / OBJ_ELEMENT_SIZE, ClassRoot::ARRAY_I64,
                                        requestedSpaceType == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
        ASSERT_NE(addressBeforeGc, nullptr);
        VMHandle<ObjectHeader> handle(thread, addressBeforeGc);
        SpaceType objSpaceType =
            PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(static_cast<void *>(handle.GetPtr()));
        ASSERT_EQ(objSpaceType, requestedSpaceType);
        g1Allocator->PinObject(handle.GetPtr());
        // Run GC - it tries to move objects
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
        ASSERT_EQ(addressBeforeGc, handle.GetPtr()) << "Pinned object must not be moved";
        g1Allocator->UnpinObject(handle.GetPtr());
        ASSERT_FALSE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    }
    thread->ManagedCodeEnd();
    Runtime::Destroy();
}

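// The three heap space types are covered below; the humongous case passes a 4_MB payload so
// the allocation is routed to a humongous region under the 16_MB OOM options.
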
TEST(G1GCPinningTest, PinUnpinRegularObjectTest)
{
    PinUnpinTest(SpaceType::SPACE_TYPE_OBJECT);
}

TEST(G1GCPinningTest, PinUnpinHumongousObjectTest)
{
    constexpr size_t HUMONGOUS_OBJECT_FOR_PINNING_SIZE = 4_MB;
    PinUnpinTest(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT, HUMONGOUS_OBJECT_FOR_PINNING_SIZE);
}

TEST(G1GCPinningTest, PinUnpinNonMovableObjectTest)
{
    PinUnpinTest(SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
}

class G1GCPromotePinnedRegionTest : public G1GCTest {
public:
    G1GCPromotePinnedRegionTest() : G1GCTest(CreateOptions())
    {
        thread_ = MTManagedThread::GetCurrent();
        ASSERT(thread_ != nullptr);
        thread_->ManagedCodeBegin();
    }

    NO_COPY_SEMANTIC(G1GCPromotePinnedRegionTest);
    NO_MOVE_SEMANTIC(G1GCPromotePinnedRegionTest);

    static RuntimeOptions CreateOptions()
    {
        RuntimeOptions options;
        options.SetShouldLoadBootPandaFiles(false);
        options.SetCompilerEnableJit(false);
        options.SetShouldInitializeIntrinsics(false);
        // GC options
        constexpr size_t HEAP_SIZE_LIMIT_TEST = 16_MB;
        options.SetRunGcInPlace(true);
        options.SetGcType("g1-gc");
        options.SetHeapSizeLimit(HEAP_SIZE_LIMIT_TEST);
        options.SetGcTriggerType("debug-never");
        options.SetG1NumberOfTenuredRegionsAtMixedCollection(0);
        return options;
    }

    ~G1GCPromotePinnedRegionTest() override
    {
        thread_->ManagedCodeEnd();
    }

protected:
    MTManagedThread *thread_;  // NOLINT(misc-non-private-member-variables-in-classes)
};

TEST_F(G1GCPromotePinnedRegionTest, CompactingRegularStringObjectAndPromoteToMixedTLABRegionAndUnpin)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
        constexpr size_t OBJECT_SIZE = 1_KB;
        auto *addressBeforeGc = ObjectAllocator::AllocString(OBJECT_SIZE);
        ASSERT_NE(addressBeforeGc, nullptr);
        VMHandle<ObjectHeader> handle1(thread_, addressBeforeGc);
        g1Allocator->PinObject(handle1.GetPtr());
        // Run GC - it promotes the pinned region
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsMixedTLAB());
        ASSERT_EQ(addressBeforeGc, handle1.GetPtr()) << "Pinned object must not be moved";
        g1Allocator->UnpinObject(handle1.GetPtr());
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    }
}

TEST_F(G1GCPromotePinnedRegionTest, PromoteTLABRegionToMixedTLABAndTestIsInAllocRangeMethod)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    constexpr size_t OBJECT_SIZE = 1_KB;
    auto *addressBeforeGc = ObjectAllocator::AllocString(OBJECT_SIZE);
    auto *addressBeforeGc2 = ObjectAllocator::AllocString(OBJECT_SIZE);
    ASSERT_NE(addressBeforeGc, nullptr);
    ASSERT_NE(addressBeforeGc2, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, addressBeforeGc);
    VMHandle<ObjectHeader> handle2(thread_, addressBeforeGc2);
    g1Allocator->PinObject(handle1.GetPtr());
    g1Allocator->PinObject(handle2.GetPtr());
    // Run GC - it promotes the pinned region
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsMixedTLAB());
    ASSERT_EQ(addressBeforeGc, handle1.GetPtr()) << "Pinned object must not be moved";
    ASSERT_TRUE(ObjectToRegion(handle2.GetPtr())->IsMixedTLAB());
    ASSERT_EQ(addressBeforeGc2, handle2.GetPtr()) << "Pinned object must not be moved";
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsInAllocRange(handle1.GetPtr()));
    auto *pinnedObj = ObjectAllocator::AllocString(OBJECT_SIZE, true);  // pinned allocation
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsInAllocRange(pinnedObj));
    g1Allocator->UnpinObject(handle1.GetPtr());
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
}

class G1AllocatePinnedObjectTest : public G1GCPromotePinnedRegionTest {};

TEST_F(G1AllocatePinnedObjectTest, AllocatePinnedRegularArrayAndCreatedNewPinnedRegion)
{
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    constexpr size_t OBJ_ELEMENT_SIZE = 64;
    size_t objectSize = 1_KB;
    auto *arrayObj = ObjectAllocator::AllocArray(objectSize / OBJ_ELEMENT_SIZE, ClassRoot::ARRAY_I64, false, true);
    ASSERT_NE(arrayObj, nullptr);
    VMHandle<ObjectHeader> handle(thread_, arrayObj);
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasFlag(RegionFlag::IS_OLD));
}

TEST_F(G1AllocatePinnedObjectTest, AllocatePinnedRegularStringAndCreatedNewPinnedRegion)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    size_t objectSize = 128_KB;
    auto *strObj = ObjectAllocator::AllocString(objectSize, true);
    ASSERT_NE(strObj, nullptr);
    VMHandle<ObjectHeader> handle(thread_, strObj);
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    g1Allocator->UnpinObject(handle.GetPtr());
    // After unpinning, a new pinned allocation must not land in this region.
    size_t objectSize2 = 32_KB;
    size_t sizeRegionBefore = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    auto *strObj2 = ObjectAllocator::AllocString(objectSize2, true);
    size_t sizeRegionAfter = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    ASSERT_NE(strObj2, nullptr);
    ASSERT_EQ(sizeRegionBefore, sizeRegionAfter);
}

TEST_F(G1AllocatePinnedObjectTest, AllocatePinnedRegularStringToNewPinnedRegion)
{
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    size_t objectSize = 128_KB;
    auto *strObj1 = ObjectAllocator::AllocString(objectSize, true);
    ASSERT_NE(strObj1, nullptr);
    VMHandle<ObjectHeader> handle(thread_, strObj1);
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    size_t sizeRegionBefore = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    // Create a new pinned object (too large to share the first pinned region)
    size_t objectSize2 = 196_KB;
    auto *strObj2 = ObjectAllocator::AllocString(objectSize2, true);
    ASSERT_NE(strObj2, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, strObj2);
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    size_t sizeRegionAfter = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    // The second allocation must have gone to a new pinned region.
    ASSERT_EQ(sizeRegionBefore, sizeRegionAfter);
    ASSERT_NE(ObjectToRegion(handle.GetPtr()), ObjectToRegion(handle1.GetPtr()));
}

TEST_F(G1AllocatePinnedObjectTest, AllocatePinnedRegularStringToExistPinnedRegion)
{
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    size_t objectSize = 32_KB;
    auto *strObj1 = ObjectAllocator::AllocString(objectSize, true);
    ASSERT_NE(strObj1, nullptr);
    VMHandle<ObjectHeader> handle(thread_, strObj1);
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    size_t sizeRegionBefore = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    // Create a new pinned object small enough to share the existing pinned region
    size_t objectSize2 = 32_KB;
    auto *strObj2 = ObjectAllocator::AllocString(objectSize2, true);
    ASSERT_NE(strObj2, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, strObj2);
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    size_t sizeRegionAfter = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    // The first region's allocated bytes grew, i.e. the second object landed in it.
    ASSERT_NE(sizeRegionBefore, sizeRegionAfter);
}

TEST_F(G1AllocatePinnedObjectTest, AllocateRegularStringToExistPinnedRegion)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    constexpr size_t OBJECT_SIZE =
        AlignUp(static_cast<size_t>(DEFAULT_REGION_SIZE * 0.08F), DEFAULT_ALIGNMENT_IN_BYTES);
    auto *addressBeforeGc = ObjectAllocator::AllocString(OBJECT_SIZE);
    ASSERT_NE(addressBeforeGc, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, addressBeforeGc);
    g1Allocator->PinObject(handle1.GetPtr());
    size_t sizeRegionBefore = ObjectToRegion(handle1.GetPtr())->GetAllocatedBytes();
    // Run GC - it promotes the pinned region
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));

    ASSERT_EQ(addressBeforeGc, handle1.GetPtr()) << "Pinned object must not be moved";
    auto *pinnedObj = ObjectAllocator::AllocString(OBJECT_SIZE, true);  // pinned allocation
    VMHandle<ObjectHeader> handle3(thread_, pinnedObj);
    size_t sizeRegionAfter = ObjectToRegion(handle1.GetPtr())->GetAllocatedBytes();
    ASSERT_EQ(ObjectToRegion(handle3.GetPtr()), ObjectToRegion(handle1.GetPtr()));
    ASSERT_TRUE(ObjectToRegion(handle3.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle3.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    ASSERT_NE(sizeRegionBefore, sizeRegionAfter);
}

TEST_F(G1AllocatePinnedObjectTest, AllocateRegularArray)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    constexpr size_t OBJECT_SIZE =
        AlignUp(static_cast<size_t>(DEFAULT_REGION_SIZE * 0.08F), DEFAULT_ALIGNMENT_IN_BYTES);
    auto *addressBeforeGc = ObjectAllocator::AllocString(OBJECT_SIZE);
    ASSERT_NE(addressBeforeGc, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, addressBeforeGc);
    g1Allocator->PinObject(handle1.GetPtr());
    size_t sizeRegionBefore = ObjectToRegion(handle1.GetPtr())->GetAllocatedBytes();
    // Run GC - it promotes the pinned region
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));

    ASSERT_EQ(addressBeforeGc, handle1.GetPtr()) << "Pinned object must not be moved";
    constexpr size_t OBJ_ELEMENT_SIZE = 64;
    size_t objectSize = 1_KB;
    auto *arrayObj = ObjectAllocator::AllocArray(objectSize / OBJ_ELEMENT_SIZE, ClassRoot::ARRAY_I64, false, true);
    ASSERT_NE(arrayObj, nullptr);
    VMHandle<ObjectHeader> handle3(thread_, arrayObj);
    size_t sizeRegionAfter = ObjectToRegion(handle1.GetPtr())->GetAllocatedBytes();
    ASSERT_EQ(ObjectToRegion(handle3.GetPtr()), ObjectToRegion(handle1.GetPtr()));
    ASSERT_TRUE(ObjectToRegion(handle3.GetPtr())->HasPinnedObjects());
    ASSERT_NE(sizeRegionBefore, sizeRegionAfter);
}

static void AllocatePinnedObjectTest(SpaceType requestedSpaceType, size_t objectSize = 1_KB)
{
    ASSERT_TRUE(IsHeapSpace(requestedSpaceType));
    Runtime::Create(G1AllocatePinnedObjectTest::CreateOptions());
    auto *thread = MTManagedThread::GetCurrent();
    ASSERT_NE(thread, nullptr);
    thread->ManagedCodeBegin();
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread);
        constexpr size_t OBJ_ELEMENT_SIZE = 64;
        auto *addressBeforeGc =
            ObjectAllocator::AllocArray(objectSize / OBJ_ELEMENT_SIZE, ClassRoot::ARRAY_I64,
                                        requestedSpaceType == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT, true);
        ASSERT_NE(addressBeforeGc, nullptr);
        VMHandle<ObjectHeader> handle(thread, addressBeforeGc);
        SpaceType objSpaceType =
            PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(static_cast<void *>(handle.GetPtr()));
        ASSERT_EQ(objSpaceType, requestedSpaceType);
        ASSERT_EQ(addressBeforeGc, handle.GetPtr());
        if (requestedSpaceType == SpaceType::SPACE_TYPE_OBJECT) {
            ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
        }
        g1Allocator->UnpinObject(handle.GetPtr());
        ASSERT_FALSE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    }
    thread->ManagedCodeEnd();
    Runtime::Destroy();
}

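// Unlike PinUnpinTest, the object here is allocated pinned up front (the trailing `true`
// argument), and only regular-object regions are expected to report HasPinnedObjects();
// presumably humongous and non-movable objects never move, so pinning them is a no-op.
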
TEST(G1AllocateDifferentSpaceTypePinnedObjectTest, AllocatePinnedRegularObjectTest)
{
    AllocatePinnedObjectTest(SpaceType::SPACE_TYPE_OBJECT);
}

TEST(G1AllocateDifferentSpaceTypePinnedObjectTest, AllocatePinnedHumongousObjectTest)
{
    constexpr size_t HUMONGOUS_OBJECT_FOR_PINNING_SIZE = 4_MB;
    AllocatePinnedObjectTest(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT, HUMONGOUS_OBJECT_FOR_PINNING_SIZE);
}

TEST(G1AllocateDifferentSpaceTypePinnedObjectTest, AllocatePinnedNonMovableObjectTest)
{
    AllocatePinnedObjectTest(SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
}

// NOLINTEND(readability-magic-numbers)

}  // namespace ark::mem