/**
 * Copyright (c) 2024-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <array>

#include "runtime/include/object_header.h"
#include "runtime/mem/tlab.h"
#include "runtime/include/runtime.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/class_linker.h"
#include "runtime/include/thread_scopes.h"
#include "runtime/mem/vm_handle.h"
#include "runtime/handle_scope-inl.h"
#include "runtime/include/coretypes/array.h"
#include "runtime/include/coretypes/string.h"
#include "runtime/mem/gc/card_table.h"
#include "runtime/mem/gc/g1/g1-allocator.h"
#include "runtime/mem/rem_set-inl.h"
#include "runtime/mem/region_space.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/gc/g1/g1-gc.h"

#include "test_utils.h"

namespace ark::mem {

class G1GCTest : public testing::Test {
public:
    explicit G1GCTest() : G1GCTest(CreateDefaultOptions()) {}

    explicit G1GCTest(const RuntimeOptions &options)
    {
        Runtime::Create(options);
    }

    ~G1GCTest() override
    {
        Runtime::Destroy();
    }

    NO_COPY_SEMANTIC(G1GCTest);
    NO_MOVE_SEMANTIC(G1GCTest);

    static RuntimeOptions CreateDefaultOptions()
    {
        RuntimeOptions options;
        options.SetLoadRuntimes({"core"});
        options.SetGcType("g1-gc");
        options.SetRunGcInPlace(true);
        options.SetCompilerEnableJit(false);
        options.SetGcWorkersCount(0);
        options.SetAdaptiveTlabSize(false);
        // NOLINTNEXTLINE(readability-magic-numbers)
        options.SetG1PromotionRegionAliveRate(100U);
        options.SetGcTriggerType("debug-never");
        options.SetShouldLoadBootPandaFiles(false);
        options.SetShouldInitializeIntrinsics(false);
        options.SetExplicitConcurrentGcEnabled(false);
        options.SetG1NumberOfTenuredRegionsAtMixedCollection(2U);
        return options;
    }

    static constexpr size_t GetHumongousStringLength()
    {
        // Total string size will be DEFAULT_REGION_SIZE + sizeof(String).
        // It is enough to make it humongous.
        return DEFAULT_REGION_SIZE;
    }

    static constexpr size_t StringLengthFitIntoRegion(size_t numRegions)
    {
        return numRegions * DEFAULT_REGION_SIZE - sizeof(coretypes::String) - Region::HeadSize();
    }

    static size_t GetHumongousArrayLength(ClassRoot classRoot)
    {
        Runtime *runtime = Runtime::GetCurrent();
        LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
        auto *arrayClass = runtime->GetClassLinker()->GetExtension(ctx)->GetClassRoot(classRoot);
        EXPECT_TRUE(arrayClass->IsArrayClass());
        if (!arrayClass->IsArrayClass()) {
            return 0;
        }
        // Total array size will be DEFAULT_REGION_SIZE + elemSize + sizeof(Array) bytes.
        // It is enough to make it humongous.
        size_t elemSize = arrayClass->GetComponentSize();
        ASSERT(elemSize != 0);
        return DEFAULT_REGION_SIZE / elemSize + 1;
    }

    ObjectAllocatorG1<> *GetAllocator()
    {
        Runtime *runtime = Runtime::GetCurrent();
        GC *gc = runtime->GetPandaVM()->GetGC();
        return static_cast<ObjectAllocatorG1<> *>(gc->GetObjectAllocator());
    }

    void ProcessDirtyCards(G1GC<PandaAssemblyLanguageConfig> *gc)
    {
        gc->EndConcurrentScopeRoutine();
        gc->ProcessDirtyCards();
        gc->StartConcurrentScopeRoutine();
    }
};

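// Listener that checks remembered sets after GC phases: the remset of the region
// containing the referenced object must record the referencing object (or, while
// the remset is not yet fully updated during mixed collection, the reference must
// at least be tracked as a dirty object).
//
// Typical usage (sketch, matching the tests below):
//   RemSetChecker listener(gc, referencingObj, referencedObj);
//   gc->AddListener(&listener);
//   ... run a GCTask; the checks fire from the GC phase callbacks ...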
class RemSetChecker : public GCListener {
public:
    explicit RemSetChecker(GC *gc, ObjectHeader *obj, ObjectHeader *ref)
        : gc_(static_cast<G1GC<PandaAssemblyLanguageConfig> *>(gc)),
          obj_(MTManagedThread::GetCurrent(), obj),
          ref_(MTManagedThread::GetCurrent(), ref)
    {
    }

    void GCPhaseStarted([[maybe_unused]] GCPhase phase) override {}

    void GCPhaseFinished(GCPhase phase) override
    {
        if (phase == GCPhase::GC_PHASE_MARK_YOUNG) {
            // We check after the phase, not at its start, because refs from the remset
            // are collected at the marking stage
            Check();
        }
        if (phase == GCPhase::GC_PHASE_COLLECT_YOUNG_AND_MOVE) {
            // The remset is not fully updated during mixed collection
            gc_->ProcessDirtyCards();
            Check();
        }
    }

private:
    void Check()
    {
        RemSet<> *remset = ObjectToRegion(ref_.GetPtr())->GetRemSet();
        ASSERT_NE(nullptr, remset);
        bool hasObject = false;
        ObjectHeader *object = obj_.GetPtr();
        remset->IterateOverObjects([object, &hasObject](ObjectHeader *obj) { hasObject |= object == obj; });
        // The remset is not fully updated during mixed collection, so also check the set of dirty objects
        ASSERT_TRUE(hasObject || gc_->HasRefFromRemset(object));
    }

private:
    G1GC<PandaAssemblyLanguageConfig> *gc_;
    VMHandle<ObjectHeader> obj_;
    VMHandle<ObjectHeader> ref_;
};

TEST_F(G1GCTest, TestAddrToRegion)
{
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    size_t humongousLen = GetHumongousArrayLength(ClassRoot::ARRAY_U8);
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<ObjectHeader> young(thread, ObjectAllocator::AllocArray(0, ClassRoot::ARRAY_U8, false));
    ASSERT_NE(nullptr, young.GetPtr());
    VMHandle<ObjectHeader> nonmovable(thread, ObjectAllocator::AllocArray(0, ClassRoot::ARRAY_U8, true));
    ASSERT_NE(nullptr, nonmovable.GetPtr());
    VMHandle<ObjectHeader> humongous(thread, ObjectAllocator::AllocArray(humongousLen, ClassRoot::ARRAY_U8, false));
    ASSERT_NE(nullptr, humongous.GetPtr());

    Region *youngRegion = ObjectToRegion(young.GetPtr());
    ASSERT_NE(nullptr, youngRegion);
    ASSERT_EQ(youngRegion, AddrToRegion(young.GetPtr()));
    bool hasYoungObj = false;
    youngRegion->IterateOverObjects(
        [&hasYoungObj, &young](ObjectHeader *obj) { hasYoungObj |= obj == young.GetPtr(); });
    ASSERT_TRUE(hasYoungObj);

    Region *nonmovableRegion = ObjectToRegion(nonmovable.GetPtr());
    ASSERT_NE(nullptr, nonmovableRegion);
    ASSERT_EQ(nonmovableRegion, AddrToRegion(nonmovable.GetPtr()));
    ASSERT_TRUE(nonmovableRegion->GetLiveBitmap()->Test(nonmovable.GetPtr()));

    Region *humongousRegion = ObjectToRegion(humongous.GetPtr());
    ASSERT_NE(nullptr, humongousRegion);
    ASSERT_EQ(humongousRegion, AddrToRegion(humongous.GetPtr()));
    ASSERT_EQ(humongousRegion, AddrToRegion(ToVoidPtr(ToUintPtr(humongous.GetPtr()) + DEFAULT_REGION_SIZE)));
    bool hasHumongousObj = false;
    humongousRegion->IterateOverObjects(
        [&hasHumongousObj, &humongous](ObjectHeader *obj) { hasHumongousObj |= obj == humongous.GetPtr(); });
    ASSERT_TRUE(hasHumongousObj);
}

TEST_F(G1GCTest, TestAllocHumongousArray)
{
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    ObjectHeader *obj =
        ObjectAllocator::AllocArray(GetHumongousArrayLength(ClassRoot::ARRAY_U8), ClassRoot::ARRAY_U8, false);
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_LARGE_OBJECT));
}

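// Check that a reference from a non-movable array to a young object is recorded in the
// remset and that the reference is updated after the young GC moves the object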
TEST_F(G1GCTest, NonMovable2YoungRef)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    ClassLinker *classLinker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    static constexpr size_t ARRAY_LENGTH = 100;
    coretypes::Array *nonMovableObj = nullptr;
    uintptr_t prevYoungAddr = 0;
    Class *klass = classLinker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);
    nonMovableObj = coretypes::Array::Create(klass, ARRAY_LENGTH, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    coretypes::String *youngObj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM());
    nonMovableObj->Set(0, youngObj);
    prevYoungAddr = ToUintPtr(youngObj);
    VMHandle<coretypes::Array> nonMovableObjPtr(thread, nonMovableObj);

    // Trigger GC
    RemSetChecker listener(gc, nonMovableObj, nonMovableObj->Get<ObjectHeader *>(0));
    gc->AddListener(&listener);

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    auto youngObj2 = static_cast<coretypes::String *>(nonMovableObjPtr->Get<ObjectHeader *>(0));
    // Check GC has moved the young obj
    ASSERT_NE(prevYoungAddr, ToUintPtr(youngObj2));
    // Check the young object is accessible
    ASSERT_EQ(0, youngObj2->GetLength());
}

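// The same scenario as NonMovable2YoungRef, but the referencing array lives in a humongous region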
TEST_F(G1GCTest, Humongous2YoungRef)
{
    Runtime *runtime = Runtime::GetCurrent();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    uintptr_t prevYoungAddr = 0;
    size_t arrayLength = GetHumongousArrayLength(ClassRoot::ARRAY_STRING);
    VMHandle<coretypes::Array> humongousObj(thread,
                                            ObjectAllocator::AllocArray(arrayLength, ClassRoot::ARRAY_STRING, false));
    ObjectHeader *youngObj = ObjectAllocator::AllocObjectInYoung();
    humongousObj->Set(0, youngObj);
    prevYoungAddr = ToUintPtr(youngObj);

    // Trigger GC
    RemSetChecker listener(gc, humongousObj.GetPtr(), humongousObj->Get<ObjectHeader *>(0));
    gc->AddListener(&listener);

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    youngObj = static_cast<ObjectHeader *>(humongousObj->Get<ObjectHeader *>(0));
    // Check GC has moved the young obj
    ASSERT_NE(prevYoungAddr, ToUintPtr(youngObj));
    // Check the young object is accessible
    ASSERT_NE(nullptr, youngObj->ClassAddr<Class>());
}

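// Check that a young object referenced from humongous and non-movable arrays is first
// propagated to tenured by a young GC and then moved again by a full GC, with the
// remsets staying correct at every step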
TEST_F(G1GCTest, TestCollectTenured)
{
    Runtime *runtime = Runtime::GetCurrent();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> hs(thread);

    VMHandle<coretypes::Array> humongous;
    VMHandle<coretypes::Array> nonmovable;
    ObjectHeader *obj;
    uintptr_t objAddr;

    humongous =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(GetHumongousArrayLength(ClassRoot::ARRAY_STRING),
                                                                       ClassRoot::ARRAY_STRING, false));
    nonmovable = VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(1, ClassRoot::ARRAY_STRING, true));
    obj = ObjectAllocator::AllocObjectInYoung();
    humongous->Set(0, obj);
    nonmovable->Set(0, obj);
    objAddr = ToUintPtr(obj);

    RemSetChecker listener1(gc, humongous.GetPtr(), obj);
    RemSetChecker listener2(gc, nonmovable.GetPtr(), obj);
    gc->AddListener(&listener1);
    gc->AddListener(&listener2);
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    // Check the obj was propagated to tenured
    obj = humongous->Get<ObjectHeader *>(0);
    ASSERT_NE(objAddr, ToUintPtr(obj));
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_OLD));

    objAddr = ToUintPtr(obj);
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task1(GCTaskCause::EXPLICIT_CAUSE);  // run full GC to collect all regions
        task1.Run(*gc);
    }

    // Check the tenured obj was propagated to another tenured region
    obj = humongous->Get<ObjectHeader *>(0);
    ASSERT_NE(objAddr, ToUintPtr(obj));
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_OLD));

    // Check the object is accessible
    ASSERT_NE(nullptr, obj->ClassAddr<Class>());
}

// Test that we don't have a remset from the humongous space after we reclaim the humongous object
TEST_F(G1GCTest, CheckRemsetToHumongousAfterReclaimHumongousObject)
{
    LanguageContext ctx = Runtime::GetCurrent()->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    ClassLinker *classLinker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scopeForYoungObj(thread);

    // 1MB array
    static constexpr size_t HUMONGOUS_ARRAY_LENGTH = 262144LU;
    static constexpr size_t YOUNG_ARRAY_LENGTH = ((DEFAULT_REGION_SIZE - Region::HeadSize()) / 4U) - 16U;

    auto *gc = Runtime::GetCurrent()->GetPandaVM()->GetGC();
    auto regionPred = []([[maybe_unused]] Region *r) { return true; };

    Class *klass = classLinker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);

    auto *youngArr = coretypes::Array::Create(klass, YOUNG_ARRAY_LENGTH);
    ASSERT_NE(youngArr, nullptr);
    ASSERT_NE(ObjectToRegion(youngArr), nullptr);

    VMHandle<coretypes::Array> youngObjPtr(thread, youngArr);
    GCTask task(GCTaskCause::EXPLICIT_CAUSE);
    {
        [[maybe_unused]] HandleScope<ObjectHeader *> scopeForHumongousObj(thread);

        auto *humongousObj = coretypes::Array::Create(klass, HUMONGOUS_ARRAY_LENGTH);
        ASSERT_NE(humongousObj, nullptr);
        // Create a reference from the humongous object, so the humongous region
        // appears in the young array's remset
        humongousObj->Set(0, youngObjPtr.GetPtr());

        ASSERT_EQ(gc->GetType(), GCType::G1_GC);
        {
            VMHandle<coretypes::Array> humongousObjPtr(thread, humongousObj);
            {
                ScopedNativeCodeThread sn(thread);
                task.Run(*gc);
            }

            auto *arrayRegion = ObjectToRegion(youngObjPtr.GetPtr());
            PandaVector<Region *> regions;
            arrayRegion->GetRemSet()->Iterate(
                regionPred, [&regions](Region *r, [[maybe_unused]] const MemRange &range) { regions.push_back(r); });
            ASSERT_EQ(1U, regions.size());  // we have a reference from only 1 humongous space
            ASSERT_TRUE(regions[0]->HasFlag(IS_LARGE_OBJECT));
            ASSERT_EQ(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT,
                      PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(regions[0]));
        }
    }
    /*
     * The humongous object is dead now.
     * One extra GC is needed first because the humongous object was marked during the concurrent
     * phase of the first GC (before its handle went out of scope), so it must be unmarked.
     */
    {
        ScopedNativeCodeThread sn(thread);
        task.Run(*gc);
        task.Run(*gc);  // the humongous object should be reclaimed
    }

    auto *arrayRegion = ObjectToRegion(youngObjPtr.GetPtr());
    PandaVector<Region *> regions;
    arrayRegion->GetRemSet()->Iterate(
        regionPred, [&regions](Region *r, [[maybe_unused]] const MemRange &range) { regions.push_back(r); });
    ASSERT_EQ(0U, regions.size());  // we have no references from the humongous space
}

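// Allocates a non-movable and a humongous object when concurrent marking starts and
// remembers the mark bitmap addresses of their regions, so the test below can verify
// that objects created during concurrent marking are treated as alive (SATB)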
class NewObjectsListener : public GCListener {
public:
    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK) {
            return;
        }
        MTManagedThread *thread = MTManagedThread::GetCurrent();
        ScopedManagedCodeThread s(thread);

        // Allocate a rather large object to make the allocator create a separate region
        // NOLINTNEXTLINE(readability-magic-numbers)
        size_t nonmovableLen = 9 * DEFAULT_REGION_SIZE / 10;
        ObjectHeader *dummy = ObjectAllocator::AllocArray(nonmovableLen, ClassRoot::ARRAY_U8, true);
        Region *dummyRegion = ObjectToRegion(dummy);
        EXPECT_TRUE(dummyRegion->HasFlag(RegionFlag::IS_NONMOVABLE));
        nonmovable_ =
            VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(nonmovableLen, ClassRoot::ARRAY_U8, true));
        Region *nonmovableRegion = ObjectToRegion(nonmovable_.GetPtr());
        EXPECT_TRUE(nonmovableRegion->HasFlag(RegionFlag::IS_NONMOVABLE));
        EXPECT_NE(nonmovableRegion, dummyRegion);
        nonmovableMarkBitmapAddr_ = ToUintPtr(nonmovableRegion->GetMarkBitmap());

        size_t humongousLen = G1GCTest::GetHumongousArrayLength(ClassRoot::ARRAY_U8);
        humongous_ =
            VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(humongousLen, ClassRoot::ARRAY_U8, false));
        Region *humongousRegion = ObjectToRegion(humongous_.GetPtr());
        humongousMarkBitmapAddr_ = ToUintPtr(humongousRegion->GetMarkBitmap());
    }

    ObjectHeader *GetNonMovable()
    {
        ASSERT(nonmovable_.GetPtr() != nullptr);
        return nonmovable_.GetPtr();
    }

    uintptr_t GetNonMovableMarkBitmapAddr()
    {
        return nonmovableMarkBitmapAddr_;
    }

    ObjectHeader *GetHumongous()
    {
        return humongous_.GetPtr();
    }

    uintptr_t GetHumongousMarkBitmapAddr()
    {
        return humongousMarkBitmapAddr_;
    }

private:
    VMHandle<ObjectHeader> nonmovable_;
    uintptr_t nonmovableMarkBitmapAddr_ {};
    VMHandle<ObjectHeader> humongous_;
    uintptr_t humongousMarkBitmapAddr_ {};
};

// Test that new objects created during concurrent marking stay alive
TEST_F(G1GCTest, TestNewObjectsSATB)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    NewObjectsListener listener;
    gc->AddListener(&listener);

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);  // the threshold cause should trigger concurrent marking
        task.Run(*gc);
    }
    // nullptr means we could not allocate an object, the concurrent phase wasn't triggered,
    // or the listener wasn't called.
    ASSERT_NE(nullptr, listener.GetNonMovable());
    ASSERT_NE(nullptr, listener.GetHumongous());

    // Check the objects are alive
    Region *nonmovableRegion = ObjectToRegion(listener.GetNonMovable());
    ASSERT_NE(nullptr, nonmovableRegion->GetLiveBitmap());
    ASSERT_TRUE(nonmovableRegion->GetLiveBitmap()->Test(listener.GetNonMovable()));
    ASSERT_FALSE(listener.GetNonMovable()->IsMarkedForGC());  // marking should be done via the mark bitmap
    Region *humongousRegion = ObjectToRegion(listener.GetHumongous());
    ASSERT_NE(nullptr, humongousRegion->GetLiveBitmap());
    ASSERT_TRUE(humongousRegion->GetLiveBitmap()->Test(listener.GetHumongous()));
    ASSERT_FALSE(listener.GetHumongous()->IsMarkedForGC());  // marking should be done via the mark bitmap
}

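// Compares the set of regions flagged IS_COLLECTION_SET at the start of young marking
// against the regions the test expects to be collected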
class CollectionSetChecker : public GCListener {
public:
    explicit CollectionSetChecker(ObjectAllocatorG1<> *allocator) : allocator_(allocator) {}

    void SetExpectedRegions(const std::initializer_list<Region *> &expectedRegions)
    {
        expectedRegions_ = expectedRegions;
    }

    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase == GCPhase::GC_PHASE_MARK_YOUNG) {
            EXPECT_EQ(expectedRegions_, GetCollectionSet());
            expectedRegions_.clear();
        }
    }

private:
    PandaSet<Region *> GetCollectionSet()
    {
        PandaSet<Region *> collectionSet;
        for (Region *region : allocator_->GetAllRegions()) {
            if (region->HasFlag(RegionFlag::IS_COLLECTION_SET)) {
                collectionSet.insert(region);
            }
        }
        return collectionSet;
    }

private:
    ObjectAllocatorG1<> *allocator_;
    PandaSet<Region *> expectedRegions_;
};

TEST_F(G1GCTest, TestGetCollectibleRegionsHasAllYoungRegions)
{
    // Each object will occupy more than half of a region,
    // so expect the allocator to allocate a separate young region for each object.
    size_t youngLen = DEFAULT_REGION_SIZE / 2 + sizeof(coretypes::Array);

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ObjectAllocatorG1<> *allocator = GetAllocator();
    MTManagedThread *thread = MTManagedThread::GetCurrent();

    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    {
        ScopedManagedCodeThread s(thread);
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> young1;
        VMHandle<ObjectHeader> young2;
        VMHandle<ObjectHeader> young3;

        young1 = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(youngLen, ClassRoot::ARRAY_U8, false));
        young2 = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(youngLen, ClassRoot::ARRAY_U8, false));
        young3 = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(youngLen, ClassRoot::ARRAY_U8, false));

        Region *yregion1 = ObjectToRegion(young1.GetPtr());
        Region *yregion2 = ObjectToRegion(young2.GetPtr());
        Region *yregion3 = ObjectToRegion(young3.GetPtr());
        // Check all 3 objects are in different regions
        ASSERT_NE(yregion1, yregion2);
        ASSERT_NE(yregion2, yregion3);
        ASSERT_NE(yregion1, yregion3);
        checker.SetExpectedRegions({yregion1, yregion2, yregion3});
    }
    GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
    task.Run(*gc);
}

TEST_F(G1GCTest, TestGetCollectibleRegionsHasAllRegionsInCaseOfFull)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ObjectAllocatorG1<> *allocator = GetAllocator();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<ObjectHeader> young;
    VMHandle<ObjectHeader> tenured;
    VMHandle<ObjectHeader> humongous;
    tenured = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocObjectInYoung());

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young to tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    young = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocObjectInYoung());
    humongous = VMHandle<ObjectHeader>(
        thread, ObjectAllocator::AllocArray(GetHumongousArrayLength(ClassRoot::ARRAY_U8), ClassRoot::ARRAY_U8, false));

    Region *yregion = ObjectToRegion(young.GetPtr());
    [[maybe_unused]] Region *tregion = ObjectToRegion(tenured.GetPtr());
    [[maybe_unused]] Region *hregion = ObjectToRegion(humongous.GetPtr());

    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    // Even though this is a full GC, it is currently split into two parts: the first is a
    // young-only collection, and the tenured collection part doesn't use GC_PHASE_MARK_YOUNG.
    checker.SetExpectedRegions({yregion});
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task1(GCTaskCause::EXPLICIT_CAUSE);
        task1.Run(*gc);
    }
}

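// Mixed GC should pick the tenured regions with the most garbage. The strings below are
// sized relative to G1RegionGarbageRateThreshold so that releasing them leaves regions
// with known garbage rates, and (with G1NumberOfTenuredRegionsAtMixedCollection set to 2
// in the default options above) the four garbage regions are expected to be collected
// across two mixed collections.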
TEST_F(G1GCTest, TestMixedCollections)
{
    uint32_t garbageRate = Runtime::GetOptions().GetG1RegionGarbageRateThreshold();
    // The object will occupy more than half of a region,
    // so expect the allocator to allocate a separate young region for each object.
    static constexpr size_t ARRAY_SIZE = 4;
    // NOLINTNEXTLINE(readability-magic-numbers)
    size_t bigLen = garbageRate * DEFAULT_REGION_SIZE / 100 + sizeof(coretypes::String);
    // NOLINTNEXTLINE(readability-magic-numbers)
    size_t bigLen1 = (garbageRate + 1) * DEFAULT_REGION_SIZE / 100 + sizeof(coretypes::String);
    // NOLINTNEXTLINE(readability-magic-numbers)
    size_t bigLen2 = (garbageRate + 2) * DEFAULT_REGION_SIZE / 100 + sizeof(coretypes::String);
    size_t smallLen = DEFAULT_REGION_SIZE / 2 + sizeof(coretypes::String);
    std::array<size_t, ARRAY_SIZE> lengthsArray {bigLen, bigLen1, bigLen2, smallLen};
    size_t miniObjLen = Runtime::GetOptions().GetInitTlabSize() + 1;  // big enough to not be allocated in a TLAB

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ObjectAllocatorG1<> *allocator = GetAllocator();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> smallObjectHolder;
    VMHandle<coretypes::Array> bigObjectHolder;
    VMHandle<ObjectHeader> young;

    // Allocate objects of different sizes, with a mini object after each of them
    // to prevent the region from being cleared after the concurrent phase.
    // Mixed regions should be chosen according to the largest garbage.
    bigObjectHolder =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(4U, ClassRoot::ARRAY_STRING, false));
    smallObjectHolder =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(4U, ClassRoot::ARRAY_STRING, false));
    for (size_t i = 0; i < ARRAY_SIZE; i++) {
        bigObjectHolder->Set(i, ObjectAllocator::AllocString(lengthsArray[i]));
        smallObjectHolder->Set(i, ObjectAllocator::AllocString(miniObjLen));
        Region *firstRegion = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(i));
        Region *secondRegion = ObjectToRegion(smallObjectHolder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(firstRegion->HasFlag(RegionFlag::IS_EDEN));
        ASSERT_TRUE(secondRegion->HasFlag(RegionFlag::IS_EDEN));
        ASSERT_TRUE(firstRegion == secondRegion);
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young objects -> tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    // GC doesn't include the current tenured region in the collection set.
    // At this point we don't know which tenured region is current,
    // so propagate one big young object to tenured to make its region the current one.
    VMHandle<ObjectHeader> current;
    current = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocArray(smallLen, ClassRoot::ARRAY_U8, false));

    // Propagate the 'current' object -> tenured and prepare for mixed GC.
    // Release the 'big' and 'small' strings to make them garbage.
    Region *region0 = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(0));
    Region *region1 = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(1));
    Region *region2 = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(2));
    Region *region3 = ObjectToRegion(bigObjectHolder->Get<ObjectHeader *>(3));
    for (size_t i = 0; i < ARRAY_SIZE; i++) {
        bigObjectHolder->Set(i, static_cast<ObjectHeader *>(nullptr));
    }
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task1(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task1.Run(*gc);
    }

    // Now the region with 'current' is the current one, and it will not be included in the collection set.

    young = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocObjectInYoung());

    Region *yregion = ObjectToRegion(young.GetPtr());
    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    checker.SetExpectedRegions({region1, region2, yregion});
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task2(GCTaskCause::YOUNG_GC_CAUSE);  // should run mixed GC
        task2.Run(*gc);
    }

    // Run GC one more time because we still have garbage regions.
    // Check we collect them.
    young = VMHandle<ObjectHeader>(thread, ObjectAllocator::AllocObjectInYoung());
    yregion = ObjectToRegion(young.GetPtr());
    checker.SetExpectedRegions({region0, yregion, region3});
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task3(GCTaskCause::YOUNG_GC_CAUSE);  // should run mixed GC
        task3.Run(*gc);
    }
}

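// Dirty cards created right before a GC may not have been processed by the
// update_remset_worker yet; the GC is expected to drain and handle such pending
// cards itself so that references are still updated correctly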
TEST_F(G1GCTest, TestHandlePendingCards)
{
    auto thread = MTManagedThread::GetCurrent();
    auto runtime = Runtime::GetCurrent();
    auto ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    auto arrayClass = runtime->GetClassLinker()->GetExtension(ctx)->GetClassRoot(ClassRoot::ARRAY_STRING);
    auto gc = runtime->GetPandaVM()->GetGC();
    size_t elemSize = arrayClass->GetComponentSize();
    size_t arraySize = DEFAULT_REGION_SIZE / 2;
    // NOLINTNEXTLINE(clang-analyzer-core.DivideZero)
    size_t arrayLength = arraySize / elemSize + 1;
    ScopedManagedCodeThread s(thread);
    HandleScope<ObjectHeader *> scope(thread);

    constexpr size_t REGION_NUM = 16;
    std::vector<VMHandle<coretypes::Array>> arrays;

    for (size_t i = 0; i < REGION_NUM; i++) {
        arrays.emplace_back(thread, ObjectAllocator::AllocArray(arrayLength, ClassRoot::ARRAY_STRING, false));
    }

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    for (auto &array : arrays) {
        ASSERT_TRUE(ObjectToRegion(array.GetPtr())->HasFlag(IS_OLD));
    }

    std::vector<VMHandle<coretypes::String>> strings;
    std::vector<void *> stringOrigPtrs;

    for (auto &array : arrays) {
        auto str = ObjectAllocator::AllocString(StringLengthFitIntoRegion(1));
        strings.emplace_back(thread, str);
        stringOrigPtrs.push_back(str);
        array->Set(0, str);  // create a dirty card
    }

    // With high probability the update_remset_worker cannot process all dirty cards before the GC,
    // so the GC drains them from the update_remset_worker and handles them separately
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }

    for (size_t i = 0; i < REGION_NUM; i++) {
        auto &array = arrays[i];
        auto &str = strings[i];
        auto strOrigPtr = stringOrigPtrs[i];
        ASSERT_NE(strOrigPtr, str.GetPtr());                     // the string was moved
        ASSERT_EQ(array->Get<ObjectHeader *>(0), str.GetPtr());  // refs were correctly updated
    }

    // Dirty cards corresponding to dirty_regions_objects should be re-enqueued
    ProcessDirtyCards(static_cast<G1GC<PandaAssemblyLanguageConfig> *>(gc));
    for (size_t i = 0; i < REGION_NUM; i++) {
        auto &array = arrays[i];
        auto &str = strings[i];
        bool found = false;
        ObjectToRegion(str.GetPtr())->GetRemSet()->IterateOverObjects([&found, &array](ObjectHeader *obj) {
            if (obj == array.GetPtr()) {
                found = true;
            }
        });
        ASSERT_TRUE(found);
    }
}

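// Fixture with G1PromotionRegionAliveRate lowered to 50%: presumably a young region
// whose alive bytes exceed this rate is promoted as a whole (its objects keep their
// addresses) instead of being compacted into a tenured region, which is what the
// promotion tests below rely on.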
class G1GCPromotionTest : public G1GCTest {
public:
    G1GCPromotionTest() : G1GCTest(CreateOptions()) {}

    static RuntimeOptions CreateOptions()
    {
        RuntimeOptions options = CreateDefaultOptions();
        // NOLINTNEXTLINE(readability-magic-numbers)
        options.SetG1PromotionRegionAliveRate(PROMOTE_RATE);
        return options;
    }

    static constexpr size_t PROMOTE_RATE = 50;
};

TEST_F(G1GCPromotionTest, TestCorrectPromotionYoungRegion)
{
    // We will create a humongous object with links to two young regions
    // and check the promotion workflow
    static constexpr size_t HUMONGOUS_STRING_LEN = G1GCPromotionTest::GetHumongousStringLength();
    // Consumes more than 50% of the region size
    static constexpr size_t FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT =
        DEFAULT_REGION_SIZE / sizeof(coretypes::String) * 2U / 3U + 1;
    // Consumes less than 50% of the region size
    static constexpr size_t SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT = 1;
    ASSERT(FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT <= HUMONGOUS_STRING_LEN);
    ASSERT((FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT * sizeof(coretypes::String) * 100U / DEFAULT_REGION_SIZE) >
           G1GCPromotionTest::PROMOTE_RATE);
    ASSERT((SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT * sizeof(coretypes::String) * 100U / DEFAULT_REGION_SIZE) <
           G1GCPromotionTest::PROMOTE_RATE);

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    // Run full GC to compact all existing young regions:
    GCTask task0(GCTaskCause::EXPLICIT_CAUSE);
    task0.Run(*gc);

    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> firstHolder;
    VMHandle<coretypes::Array> secondHolder;
    VMHandle<ObjectHeader> young;
    std::array<ObjectHeader *, FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT> firstRegionObjectLinks {};
    std::array<ObjectHeader *, SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT> secondRegionObjectLinks {};
    // Check promotion for a young region:

    firstHolder = VMHandle<coretypes::Array>(
        thread, ObjectAllocator::AllocArray(HUMONGOUS_STRING_LEN, ClassRoot::ARRAY_STRING, false));
    Region *firstRegion = ObjectToRegion(ObjectAllocator::AllocObjectInYoung());
    ASSERT_TRUE(firstRegion->HasFlag(RegionFlag::IS_EDEN));
    for (size_t i = 0; i < FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        firstRegionObjectLinks[i] = ObjectAllocator::AllocObjectInYoung();
        ASSERT_TRUE(firstRegionObjectLinks[i] != nullptr);
        firstHolder->Set(i, firstRegionObjectLinks[i]);
        ASSERT_TRUE(ObjectToRegion(firstRegionObjectLinks[i]) == firstRegion);
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Promote young objects in one region -> tenured
        GCTask task1(GCTaskCause::YOUNG_GC_CAUSE);
        task1.Run(*gc);
    }
    // Check that we didn't change the links to young objects from the first region:
    for (size_t i = 0; i < FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        ASSERT_EQ(firstRegionObjectLinks[i], firstHolder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(ObjectToRegion(firstHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(firstHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_PROMOTED));
    }

    secondHolder = VMHandle<coretypes::Array>(
        thread, ObjectAllocator::AllocArray(HUMONGOUS_STRING_LEN, ClassRoot::ARRAY_STRING, false));
    Region *secondRegion = ObjectToRegion(ObjectAllocator::AllocObjectInYoung());
    ASSERT_TRUE(secondRegion->HasFlag(RegionFlag::IS_EDEN));
    for (size_t i = 0; i < SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        secondRegionObjectLinks[i] = ObjectAllocator::AllocObjectInYoung();
        ASSERT_TRUE(secondRegionObjectLinks[i] != nullptr);
        secondHolder->Set(i, secondRegionObjectLinks[i]);
        ASSERT_TRUE(ObjectToRegion(secondRegionObjectLinks[i]) == secondRegion);
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Compact young objects in one region -> tenured
        GCTask task2(GCTaskCause::YOUNG_GC_CAUSE);
        task2.Run(*gc);
    }
    // Check that we changed the links to young objects from the second region:
    for (size_t i = 0; i < SECOND_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        ASSERT_NE(secondRegionObjectLinks[i], secondHolder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(ObjectToRegion(secondHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(secondHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_PROMOTED));
    }

    {
        ScopedNativeCodeThread sn(thread);
        // Run full GC to compact all tenured regions:
        GCTask task3(GCTaskCause::EXPLICIT_CAUSE);
        task3.Run(*gc);
    }
    // Now we should have updated links in the humongous object to the first region's objects:
    for (size_t i = 0; i < FIRST_YOUNG_REGION_ALIVE_OBJECTS_COUNT; i++) {
        ASSERT_NE(firstRegionObjectLinks[i], firstHolder->Get<ObjectHeader *>(i));
        ASSERT_TRUE(ObjectToRegion(firstHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(firstHolder->Get<ObjectHeader *>(i))->HasFlag(RegionFlag::IS_PROMOTED));
    }
}

TEST_F(G1GCPromotionTest, TestFullCollectionSetPromotionObjects)
{
    // The object will occupy more than half of a region,
    // so expect the allocator to allocate a separate young region for each object.
    // NOLINTNEXTLINE(readability-identifier-naming)
    static constexpr size_t youngLength = DEFAULT_REGION_SIZE / 2 + sizeof(coretypes::Array);

    ObjectAllocatorG1<> *allocator = GetAllocator();
    Runtime *runtime = Runtime::GetCurrent();
    // Setting the FastGC flag to true means that G1 GC should
    // only promote all young regions, without marking
    GC *gc = runtime->GetPandaVM()->GetGC();
    gc->SetFastGCFlag(true);
    ASSERT_TRUE(gc->GetFastGCFlag());

    // Run full GC to compact all existing young regions:
    GCTask task0(GCTaskCause::OOM_CAUSE);
    task0.Run(*gc);

    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    CollectionSetChecker checker(allocator);
    gc->AddListener(&checker);
    {
        // Allocate arrays in young regions without saving any references to them
        coretypes::Array *young1 = ObjectAllocator::AllocArray(youngLength, ClassRoot::ARRAY_U8, false);
        coretypes::Array *young2 = ObjectAllocator::AllocArray(youngLength, ClassRoot::ARRAY_U8, false);
        coretypes::Array *young3 = ObjectAllocator::AllocArray(youngLength, ClassRoot::ARRAY_U8, false);
        Region *yregion1 = ObjectToRegion(young1);
        Region *yregion2 = ObjectToRegion(young2);
        Region *yregion3 = ObjectToRegion(young3);
        ASSERT_TRUE(yregion1->HasFlag(RegionFlag::IS_EDEN));
        ASSERT_TRUE(yregion2->HasFlag(RegionFlag::IS_EDEN));
        ASSERT_TRUE(yregion3->HasFlag(RegionFlag::IS_EDEN));
        // Check all 3 objects are in different regions
        ASSERT_NE(yregion1, yregion2);
        ASSERT_NE(yregion2, yregion3);
        ASSERT_NE(yregion1, yregion3);
        checker.SetExpectedRegions({yregion1, yregion2, yregion3});
    }
    auto regions = allocator->GetAllRegions();
    size_t aliveBytesSum = 0;
    for (auto *region : regions) {
        aliveBytesSum += region->GetAllocatedBytes();
    }
    {
        ScopedNativeCodeThread sn(thread);
        // Promote the arrays' regions to tenured
        GCTask task(GCTaskCause::MIXED);
        task.Run(*gc);
    }
    // Even though there are no references to the arrays, they must not be deleted
    regions = allocator->GetAllRegions();
    size_t aliveBytesSumToCheck = 0;
    for (auto *region : regions) {
        aliveBytesSumToCheck += region->GetAllocatedBytes();
    }
    ASSERT_EQ(aliveBytesSum, aliveBytesSumToCheck);
}

TEST_F(G1GCPromotionTest, TestFullCollectionSetPromotionBitmaps)
{
    // Let's check the live bitmaps after the collection
    // NOLINTNEXTLINE(readability-identifier-naming)
    static constexpr size_t youngLength = DEFAULT_REGION_SIZE / 2 + sizeof(coretypes::Array);
    ObjectAllocatorG1<> *allocator = GetAllocator();
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    gc->SetFastGCFlag(true);
    ASSERT_TRUE(gc->GetFastGCFlag());

    GCTask task0(GCTaskCause::OOM_CAUSE);
    task0.Run(*gc);

    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> firstHolder =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(youngLength, ClassRoot::ARRAY_U8, false));
    VMHandle<coretypes::Array> secondHolder =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(youngLength, ClassRoot::ARRAY_U8, false));
    VMHandle<coretypes::Array> thirdHolder =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(youngLength, ClassRoot::ARRAY_U8, false));
    CollectionSetChecker listener(allocator);
    gc->AddListener(&listener);
    {
        Region *yregion1 = ObjectToRegion(firstHolder.GetPtr());
        Region *yregion2 = ObjectToRegion(secondHolder.GetPtr());
        Region *yregion3 = ObjectToRegion(thirdHolder.GetPtr());
        ASSERT_TRUE(yregion1->HasFlag(RegionFlag::IS_EDEN));
        ASSERT_TRUE(yregion2->HasFlag(RegionFlag::IS_EDEN));
        ASSERT_TRUE(yregion3->HasFlag(RegionFlag::IS_EDEN));
        // Check all 3 objects are in different regions
        ASSERT_NE(yregion1, yregion2);
        ASSERT_NE(yregion2, yregion3);
        ASSERT_NE(yregion1, yregion3);
        listener.SetExpectedRegions({yregion1, yregion2, yregion3});
    }
    {
        ScopedNativeCodeThread sn(thread);
        // Promote the arrays' regions to tenured
        GCTask task(GCTaskCause::MIXED);
        task.Run(*gc);
    }
    Region *region1 = ObjectToRegion(firstHolder.GetPtr());
    Region *region2 = ObjectToRegion(secondHolder.GetPtr());
    Region *region3 = ObjectToRegion(thirdHolder.GetPtr());
    ASSERT_TRUE(region1->HasFlag(RegionFlag::IS_OLD));
    ASSERT_TRUE(region2->HasFlag(RegionFlag::IS_OLD));
    ASSERT_TRUE(region3->HasFlag(RegionFlag::IS_OLD));
    ASSERT_TRUE(region1->GetLiveBitmap()->Test(firstHolder.GetPtr()));
    ASSERT_TRUE(region2->GetLiveBitmap()->Test(secondHolder.GetPtr()));
    ASSERT_TRUE(region3->GetLiveBitmap()->Test(thirdHolder.GetPtr()));
}

TEST_F(G1GCPromotionTest, TestFullCollectionSetPromotionReferences)
{
    // Check that references are updated correctly after the collection
    // NOLINTNEXTLINE(readability-identifier-naming)
    static constexpr size_t youngLength = DEFAULT_REGION_SIZE / 2 + sizeof(coretypes::Array);
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    gc->SetFastGCFlag(true);
    ASSERT_TRUE(gc->GetFastGCFlag());

    GCTask task0(GCTaskCause::OOM_CAUSE);
    task0.Run(*gc);

    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    auto *obj = ObjectAllocator::AllocObjectInYoung();
    VMHandle<coretypes::Array> youngHolder =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(youngLength, ClassRoot::ARRAY_U8, false));
    VMHandle<coretypes::Array> hugeHolder =
        VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(GetHumongousArrayLength(ClassRoot::ARRAY_STRING),
                                                                       ClassRoot::ARRAY_STRING, false));
    youngHolder->Set(0, obj);
    hugeHolder->Set(0, obj);
    RemSetChecker listener(gc, hugeHolder.GetPtr(), obj);
    gc->AddListener(&listener);
    {
        Region *yregion = ObjectToRegion(youngHolder.GetPtr());
        ASSERT_TRUE(yregion->HasFlag(RegionFlag::IS_EDEN));

        ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_EDEN));
        Region *hregion = ObjectToRegion(hugeHolder.GetPtr());
        ASSERT_TRUE(hregion->HasFlag(RegionFlag::IS_OLD));
        ASSERT_TRUE(hregion->HasFlag(RegionFlag::IS_LARGE_OBJECT));
    }
    {
        ScopedNativeCodeThread sn(thread);
        // Promote the array's region to tenured
        GCTask task(GCTaskCause::MIXED);
        task.Run(*gc);
    }
    Region *region1 = ObjectToRegion(youngHolder.GetPtr());
    ASSERT_TRUE(region1->HasFlag(RegionFlag::IS_OLD));
    ASSERT_TRUE(region1->GetLiveBitmap()->Test(youngHolder.GetPtr()));

    // Check update of references from objects which were moved during garbage collection
    obj = youngHolder->Get<ObjectHeader *>(0);
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_OLD));
    ASSERT_FALSE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_PROMOTED));
    // Check that the object is accessible
    ASSERT_NE(obj->ClassAddr<Class>(), nullptr);

    // Check update of references from objects which are not part of the collection set
    obj = hugeHolder->Get<ObjectHeader *>(0);
    ASSERT_TRUE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_OLD));
    ASSERT_FALSE(ObjectToRegion(obj)->HasFlag(RegionFlag::IS_PROMOTED));
    // Check that the object is accessible
    ASSERT_NE(obj->ClassAddr<Class>(), nullptr);
}

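// Checks, at the start of young marking, whether the remset of the string's region
// records the referencing array, i.e. whether remsets stay valid for promoted regions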
class PromotionRemSetChecker : public GCListener {
public:
    PromotionRemSetChecker(VMHandle<coretypes::Array> *array, VMHandle<coretypes::String> *string)
        : array_(array), string_(string)
    {
    }

    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK_YOUNG) {
            return;
        }
        // Before marking young, all remsets must be up to date
        CheckRemSets();
    }

    bool CheckRemSets()
    {
        Region *refRegion = ObjectToRegion(string_->GetPtr());
        found_ = false;
        refRegion->GetRemSet()->IterateOverObjects([this](ObjectHeader *obj) {
            if (obj == array_->GetPtr()) {
                found_ = true;
            }
        });
        return found_;
    }

    bool IsFound() const
    {
        return found_;
    }

private:
    VMHandle<coretypes::Array> *array_;
    VMHandle<coretypes::String> *string_;
    bool found_ = false;
};

TEST_F(G1GCPromotionTest, TestPromotedRegionHasValidRemSets)
{
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    GC *gc = runtime->GetPandaVM()->GetGC();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::String> string(thread, ObjectAllocator::AllocString(1));
    ASSERT_TRUE(ObjectToRegion(string.GetPtr())->IsYoung());
    {
        ScopedNativeCodeThread sn(thread);
        // Move the string to tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    ASSERT_TRUE(ObjectToRegion(string.GetPtr())->HasFlag(IS_OLD));

    // Allocate an array which occupies more than half a region.
    // This array will be promoted.
    auto *arrayClass = runtime->GetClassLinker()->GetExtension(ctx)->GetClassRoot(ClassRoot::ARRAY_STRING);
    size_t elemSize = arrayClass->GetComponentSize();
    size_t arraySize = DEFAULT_REGION_SIZE / 2;
    size_t arrayLength = arraySize / elemSize + 1;
    VMHandle<coretypes::Array> array(thread, ObjectAllocator::AllocArray(arrayLength, ClassRoot::ARRAY_STRING, false));
    ASSERT_FALSE(array->IsForwarded());
    Region *arrayRegion = ObjectToRegion(array.GetPtr());
    ASSERT_TRUE(arrayRegion->IsYoung());
    array->Set(0, string.GetPtr());

    PromotionRemSetChecker listener(&array, &string);
    gc->AddListener(&listener);
    {
        ScopedNativeCodeThread sn(thread);
        // Promote the array's region to tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
        ASSERT_FALSE(listener.IsFound());
    }
    // Check the array was promoted.
    ASSERT_TRUE(arrayRegion == ObjectToRegion(array.GetPtr()));

    // The remset is not fully updated during mixed collection
    ProcessDirtyCards(static_cast<G1GC<PandaAssemblyLanguageConfig> *>(gc));
    // Check remsets
    ASSERT_TRUE(listener.CheckRemSets());
}

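// Interrupts concurrent marking from inside the mark phase (via OnWaitForIdleFail) and
// then checks that the aborted marking left no marks, no live-bytes data and an empty
// SATB buffer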
class InterruptGCListener : public GCListener {
public:
    explicit InterruptGCListener(VMHandle<coretypes::Array> *array) : array_(array) {}

    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK) {
            return;
        }
        GC *gc = Runtime::GetCurrent()->GetPandaVM()->GetGC();
        {
            ScopedManagedCodeThread s(ManagedThread::GetCurrent());
            // Allocate an object to add it into the SATB buffer
            ObjectAllocator::AllocObjectInYoung();
        }
        // Set the interrupt flag
        gc->OnWaitForIdleFail();
    }

    void GCPhaseFinished(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK) {
            return;
        }
        Region *region = ObjectToRegion((*array_)->Get<ObjectHeader *>(0));
        // Check the object array[0] is not marked
        EXPECT_FALSE(region->GetMarkBitmap()->Test((*array_)->Get<ObjectHeader *>(0)));
        // Check GC hasn't calculated live bytes for the region
        EXPECT_EQ(0, region->GetLiveBytes());
        // Check GC has cleared the SATB buffer
        MTManagedThread *thread = MTManagedThread::GetCurrent();
        EXPECT_NE(nullptr, thread->GetPreBuff());
        EXPECT_EQ(0, thread->GetPreBuff()->size());
    }

private:
    VMHandle<coretypes::Array> *array_;
};

TEST_F(G1GCTest, TestInterruptConcurrentMarking)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> array;

    array = VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(1, ClassRoot::ARRAY_STRING, false));
    array->Set(0, ObjectAllocator::AllocString(1));

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young objects -> tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);

        // Clear live bytes to check that concurrent marking will not calculate them
        Region *region = ObjectToRegion(array->Get<ObjectHeader *>(0));
        ASSERT_TRUE(region != nullptr);
        region->SetLiveBytes(0);

        InterruptGCListener listener(&array);
        gc->AddListener(&listener);
        // Trigger concurrent marking
        GCTask task1(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task1.Run(*gc);
    }
}

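// Clears array[0] at the start of marking so the referenced string becomes garbage
// while its region is being processed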
class NullRefListener : public GCListener {
public:
    explicit NullRefListener(VMHandle<coretypes::Array> *array) : array_(array) {}

    void GCPhaseStarted(GCPhase phase) override
    {
        if (phase != GCPhase::GC_PHASE_MARK) {
            return;
        }
        (*array_)->Set(0, static_cast<ObjectHeader *>(nullptr));
    }

private:
    VMHandle<coretypes::Array> *array_;
};

TEST_F(G1GCTest, TestGarbageBytesCalculation)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);

    VMHandle<coretypes::Array> array;

    // Allocate objects of different sizes.
    // Mixed regions should be chosen according to the largest garbage.
    // Allocate an array of length 2, because the array's size must be 8-byte aligned
    array = VMHandle<coretypes::Array>(thread, ObjectAllocator::AllocArray(2, ClassRoot::ARRAY_STRING, false));
    ASSERT_TRUE(ObjectToRegion(array.GetPtr())->HasFlag(RegionFlag::IS_EDEN));
    // The same for the strings: the instance size must be 8-byte aligned.
    array->Set(0, ObjectAllocator::AllocString(8U));
    array->Set(1, ObjectAllocator::AllocString(8U));
    ASSERT_TRUE(ObjectToRegion(array->Get<ObjectHeader *>(0))->HasFlag(RegionFlag::IS_EDEN));

    size_t arraySize = GetObjectSize(array.GetPtr());
    size_t strSize = GetObjectSize(array->Get<ObjectHeader *>(0));

    {
        ScopedNativeCodeThread sn(thread);
        // Propagate young objects -> tenured
        GCTask task(GCTaskCause::YOUNG_GC_CAUSE);
        task.Run(*gc);
    }
    // Check the array and the string are in the same tenured region
    ASSERT_EQ(ObjectToRegion(array.GetPtr()), ObjectToRegion(array->Get<ObjectHeader *>(0)));
    ASSERT_TRUE(ObjectToRegion(array.GetPtr())->HasFlag(RegionFlag::IS_OLD));

    ObjectAllocator::AllocObjectInYoung();
    array->Set(1, static_cast<ObjectHeader *>(nullptr));

    NullRefListener listener(&array);
    gc->AddListener(&listener);
    {
        ScopedNativeCodeThread sn(thread);
        // Prepare for mixed GC: start concurrent marking and calculate garbage for regions
        GCTask task2(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task2.Run(*gc);
    }

    Region *region = ObjectToRegion(array.GetPtr());
    ASSERT_EQ(arraySize + strSize, region->GetLiveBytes());
    ASSERT_EQ(strSize, region->GetGarbageBytes());
}

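// Check that a non-movable object becomes reclaimable by the concurrent GC once its
// handle goes out of scope, while a second, identical non-movable object stays alive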
TEST_F(G1GCTest, NonMovableClearingDuringConcurrentPhaseTest)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    auto objAllocator = Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator();
    ClassLinker *classLinker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    // NOLINTBEGIN(readability-magic-numbers)
    size_t arrayLength = GetHumongousArrayLength(ClassRoot::ARRAY_STRING) - 50;
    coretypes::Array *firstNonMovableObj = nullptr;
    coretypes::Array *secondNonMovableObj = nullptr;
    uintptr_t prevYoungAddr = 0;

    Class *klass = classLinker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);
    firstNonMovableObj = coretypes::Array::Create(klass, arrayLength, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    secondNonMovableObj = coretypes::Array::Create(klass, arrayLength, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    ASSERT_EQ(true, ObjectToRegion(firstNonMovableObj)->HasFlag(RegionFlag::IS_NONMOVABLE));
    ASSERT_EQ(true, ObjectToRegion(secondNonMovableObj)->HasFlag(RegionFlag::IS_NONMOVABLE));
    coretypes::String *youngObj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM());
    firstNonMovableObj->Set(0, youngObj);
    prevYoungAddr = ToUintPtr(youngObj);

    VMHandle<coretypes::Array> secondNonMovableObjPtr(thread, secondNonMovableObj);

    {
        [[maybe_unused]] HandleScope<ObjectHeader *> firstScope(thread);
        VMHandle<coretypes::Array> firstNonMovableObjPtr(thread, firstNonMovableObj);
        {
            ScopedNativeCodeThread sn(thread);
            GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
            task.Run(*gc);
        }

        auto youngObj2 = static_cast<coretypes::String *>(firstNonMovableObjPtr->Get<ObjectHeader *>(0));
        // Check GC has moved the young obj
        ASSERT_NE(prevYoungAddr, ToUintPtr(youngObj2));
        // Check the young object is accessible
        ASSERT_EQ(0, youngObj2->GetLength());
    }

    // Check that all objects are alive
    ASSERT_EQ(true, objAllocator->ContainObject(firstNonMovableObj));
    ASSERT_EQ(true, objAllocator->ContainObject(secondNonMovableObj));
    ASSERT_EQ(true, objAllocator->IsLive(firstNonMovableObj));
    ASSERT_EQ(true, objAllocator->IsLive(secondNonMovableObj));
    // Check that the first object is accessible
    bool foundFirstObject = false;
    objAllocator->IterateOverObjects([&firstNonMovableObj, &foundFirstObject](ObjectHeader *object) {
        if (firstNonMovableObj == object) {
            foundFirstObject = true;
        }
    });
    ASSERT_EQ(true, foundFirstObject);

    // Now try to remove the first non-movable object:
    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task.Run(*gc);
    }

    // Check that the second object is still alive
    ASSERT_EQ(true, objAllocator->ContainObject(secondNonMovableObj));
    ASSERT_EQ(true, objAllocator->IsLive(secondNonMovableObj));
    // Check that the first object is dead
    objAllocator->IterateOverObjects(
        [&firstNonMovableObj](ObjectHeader *object) { ASSERT_NE(firstNonMovableObj, object); });
}

TEST_F(G1GCTest, HumongousClearingDuringConcurrentPhaseTest)
{
    Runtime *runtime = Runtime::GetCurrent();
    LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
    auto objAllocator = Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator();
    ClassLinker *classLinker = Runtime::GetCurrent()->GetClassLinker();
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();

    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    size_t arrayLength = GetHumongousArrayLength(ClassRoot::ARRAY_STRING);
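    // Here the full humongous length is used, so each array below occupies its own
    // humongous (large-object) region.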
    coretypes::Array *firstHumongousObj = nullptr;
    coretypes::Array *secondHumongousObj = nullptr;
    uintptr_t prevYoungAddr = 0;

    Class *klass = classLinker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY)
                       ->GetClass(ctx.GetStringArrayClassDescriptor());
    ASSERT_NE(klass, nullptr);
    firstHumongousObj = coretypes::Array::Create(klass, arrayLength, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    secondHumongousObj = coretypes::Array::Create(klass, arrayLength, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
    ASSERT_TRUE(ObjectToRegion(firstHumongousObj)->HasFlag(RegionFlag::IS_LARGE_OBJECT));
    ASSERT_TRUE(ObjectToRegion(secondHumongousObj)->HasFlag(RegionFlag::IS_LARGE_OBJECT));
    coretypes::String *youngObj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM());
    firstHumongousObj->Set(0, youngObj);
    prevYoungAddr = ToUintPtr(youngObj);

    VMHandle<coretypes::Array> secondHumongousObjPtr(thread, secondHumongousObj);

    {
        [[maybe_unused]] HandleScope<ObjectHeader *> firstScope(thread);
        VMHandle<coretypes::Array> firstHumongousObjPtr(thread, firstHumongousObj);
        {
            ScopedNativeCodeThread sn(thread);
            GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
            task.Run(*gc);
        }

        auto youngObj2 = static_cast<coretypes::String *>(firstHumongousObjPtr->Get<ObjectHeader *>(0));
        // Check GC has moved the young obj
        ASSERT_NE(prevYoungAddr, ToUintPtr(youngObj2));
        // Check young object is accessible
        ASSERT_EQ(0, youngObj2->GetLength());
    }

    // Check that all objects are alive
    ASSERT_TRUE(objAllocator->ContainObject(firstHumongousObj));
    ASSERT_TRUE(objAllocator->ContainObject(secondHumongousObj));
    ASSERT_TRUE(objAllocator->IsLive(firstHumongousObj));
    ASSERT_TRUE(objAllocator->IsLive(secondHumongousObj));
    // Check that the first object is accessible
    bool foundFirstObject = false;
    objAllocator->IterateOverObjects([&firstHumongousObj, &foundFirstObject](ObjectHeader *object) {
        if (firstHumongousObj == object) {
            foundFirstObject = true;
        }
    });
    ASSERT_TRUE(foundFirstObject);

    {
        ScopedNativeCodeThread sn(thread);
        // Now try to collect the first humongous object:
        GCTask task(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
        task.Run(*gc);
    }

    // Check that the second object is still alive
    ASSERT_TRUE(objAllocator->ContainObject(secondHumongousObj));
    ASSERT_TRUE(objAllocator->IsLive(secondHumongousObj));
    // Check that the first object is dead
    objAllocator->IterateOverObjects(
        [&firstHumongousObj](ObjectHeader *object) { ASSERT_NE(firstHumongousObj, object); });
}

class G1FullGCTest : public G1GCTest {
public:
    explicit G1FullGCTest(uint32_t fullGcRegionFragmentationRate = 0)
        : G1GCTest(CreateOptions(fullGcRegionFragmentationRate))
    {
    }

    static RuntimeOptions CreateOptions(uint32_t fullGcRegionFragmentationRate)
    {
        RuntimeOptions options = CreateDefaultOptions();
        options.SetInitYoungSpaceSize(YOUNG_SIZE);
        options.SetYoungSpaceSize(YOUNG_SIZE);
        options.SetHeapSizeLimit(HEAP_SIZE);
        options.SetG1FullGcRegionFragmentationRate(fullGcRegionFragmentationRate);
        return options;
    }

    static constexpr size_t NumYoungRegions()
    {
        return YOUNG_SIZE / DEFAULT_REGION_SIZE;
    }

    static constexpr size_t NumRegions()
    {
        // Region count without the region reserved for Full GC
        return HEAP_SIZE / DEFAULT_REGION_SIZE - 1U;
    }

    size_t RefArrayLengthFitIntoRegion(size_t numRegions)
    {
        Runtime *runtime = Runtime::GetCurrent();
        LanguageContext ctx = runtime->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY);
        auto *klass = runtime->GetClassLinker()->GetExtension(ctx)->GetClassRoot(ClassRoot::ARRAY_STRING);
        size_t elemSize = klass->GetComponentSize();
        // NOLINTNEXTLINE(clang-analyzer-core.DivideZero)
        return (numRegions * DEFAULT_REGION_SIZE - sizeof(coretypes::Array) - Region::HeadSize()) / elemSize;
    }

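    // Allocates numRegions strings, each sized to fill a whole region, and roots them
    // in holder starting at startIndex, so every allocation occupies a fresh region.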
    void FillHeap(size_t numRegions, VMHandle<coretypes::Array> &holder, size_t startIndex)
    {
        constexpr size_t STRING_LENGTH = StringLengthFitIntoRegion(1);
        EXPECT_LE(numRegions, holder->GetLength());
        for (size_t i = 0; i < numRegions; ++i) {
            ObjectHeader *obj = ObjectAllocator::AllocString(STRING_LENGTH);
            EXPECT_NE(nullptr, obj);
            holder->Set(startIndex + i, obj);
        }
    }

    static constexpr size_t YOUNG_SIZE = 1_MB;
    static constexpr size_t HEAP_SIZE = 4_MB;
    static constexpr size_t NUM_NONMOVABLE_REGIONS_FOR_RUNTIME = 1;
};
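// Note: with YOUNG_SIZE = 1_MB and HEAP_SIZE = 4_MB, and assuming the default 256 KB
// region size, this gives 4 young regions and 15 usable regions overall (one extra
// region is reserved for Full GC); the region counts in the comments below match that.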

TEST_F(G1FullGCTest, TestFullGCCollectsNonRegularObjects)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    ObjectHeader *humongousObj = ObjectAllocator::AllocString(GetHumongousStringLength());
    ObjectHeader *nonmovableObj = AllocNonMovableObject();
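    // Neither object is referenced from a handle, so the explicit GC below is expected
    // to collect both of them.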

    {
        ScopedNativeCodeThread sn(thread);
        GCTask task(GCTaskCause::EXPLICIT_CAUSE);
        task.Run(*gc);
    }

    PandaVector<Region *> nonregularRegions = GetAllocator()->GetNonRegularRegions();
    for (Region *region : nonregularRegions) {
        if (region->HasFlag(IS_LARGE_OBJECT)) {
            ASSERT_NE(humongousObj, region->GetLargeObject());
        } else if (region->HasFlag(IS_NONMOVABLE)) {
            if (region->Begin() <= ToUintPtr(nonmovableObj) && ToUintPtr(nonmovableObj) < region->End()) {
                ASSERT_FALSE(region->GetLiveBitmap()->Test(nonmovableObj));
            }
        } else {
            FAIL() << "Unknown region type";
        }
    }
}

TEST_F(G1FullGCTest, TestFullGCFreeHumongousBeforeTenuredCollection)
{
    constexpr size_t NUM_REGIONS_FOR_HUMONGOUS = 4;

    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    coretypes::String *humongousObj =
        ObjectAllocator::AllocString(StringLengthFitIntoRegion(NUM_REGIONS_FOR_HUMONGOUS));
    holder->Set(0, humongousObj);
    size_t numFreeRegions =
        NumRegions() - NumYoungRegions() - NUM_NONMOVABLE_REGIONS_FOR_RUNTIME - NUM_REGIONS_FOR_HUMONGOUS;
    FillHeap(numFreeRegions, holder, 1);  // occupy 4 tenured regions and 3 young regions
    // Move 3 young regions to tenured space.
    gc->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
    // Now tenured space is full; fill young space.
    FillHeap(NumYoungRegions(), holder, numFreeRegions + 1);
    // At this point 4 tenured regions and 4 young regions are filled, and 3 tenured regions remain free.
    // We cannot do young GC because there are not enough free regions in tenured to move 4 young regions.
    // Check we are OOM
    ASSERT_EQ(nullptr, ObjectAllocator::AllocObjectInYoung());
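    // The humongous string still occupies NUM_REGIONS_FOR_HUMONGOUS regions; releasing
    // the only reference to it below is what lets the Full GC make room again.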
    // Forget humongousObj
    holder->Set(0, static_cast<ObjectHeader *>(nullptr));
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // We should have 2 free regions, but during allocation we reserve 1 region for full GC.
    // So we can allocate only one region.
    ASSERT_NE(nullptr, ObjectAllocator::AllocObjectInYoung());
}

TEST_F(G1FullGCTest, TestRemSetsAndYoungCardsAfterFailedFullGC)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    size_t numFreeRegions = NumRegions() - NumYoungRegions() - NUM_NONMOVABLE_REGIONS_FOR_RUNTIME + 1U;
    FillHeap(numFreeRegions, holder, 0);  // occupy 8 tenured regions and 3 young regions
    VMHandle<coretypes::Array> youngArray(
        thread, ObjectAllocator::AllocArray(RefArrayLengthFitIntoRegion(1), ClassRoot::ARRAY_STRING, false));
    ASSERT(ObjectToRegion(youngArray.GetPtr())->IsEden());
    youngArray->Set(0, holder->Get<ObjectHeader *>(0));
    uintptr_t tenuredAddrBeforeGc = ToUintPtr(holder->Get<ObjectHeader *>(0));
    // Trigger FullGC by allocating an object while young is full. The allocation should fail because
    // there is no tenured space to move 4 young regions into.
    ASSERT_EQ(nullptr, ObjectAllocator::AllocObjectInYoung());
    uintptr_t tenuredAddrAfterGc = ToUintPtr(holder->Get<ObjectHeader *>(0));
    // Check GC moved tenured regions
    ASSERT_NE(tenuredAddrBeforeGc, tenuredAddrAfterGc);
    // Check FullGC updates refs in young correctly in case it cannot collect young.
    ASSERT_EQ(holder->Get<ObjectHeader *>(0), youngArray->Get<ObjectHeader *>(0));
    // Check remsets.
    Region *youngRegion = ObjectToRegion(youngArray.GetPtr());
    ASSERT_TRUE(youngRegion->IsEden());
    Region *tenuredRegion = ObjectToRegion(holder->Get<ObjectHeader *>(0));
    ASSERT_TRUE(tenuredRegion->HasFlag(IS_OLD));
    bool hasObject = false;
    tenuredRegion->GetRemSet()->IterateOverObjects(
        [&hasObject, &youngArray](ObjectHeader *obj) { hasObject |= obj == youngArray.GetPtr(); });
    ASSERT_FALSE(hasObject);
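    // References from young regions are not expected in the tenured region's RemSet;
    // they are covered by the young cards verified next.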
    // Check young cards
    ASSERT_EQ(NumYoungRegions(), GetAllocator()->GetYoungRegions().size());
    for (Region *region : GetAllocator()->GetYoungRegions()) {
        uintptr_t begin = ToUintPtr(region);
        uintptr_t end = region->End();
        while (begin < end) {
            ASSERT_TRUE(cardTable->GetCardPtr(begin)->IsYoung());
            begin += CardTable::GetCardSize();
        }
    }
}

TEST_F(G1FullGCTest, TestFullGCGenericFlow)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    size_t numFreeRegions = NumRegions() - NumYoungRegions() - NUM_NONMOVABLE_REGIONS_FOR_RUNTIME + 1U;
    FillHeap(numFreeRegions, holder, 0);  // occupy 8 tenured regions and 3 young regions
    VMHandle<coretypes::Array> youngArray(
        thread, ObjectAllocator::AllocArray(RefArrayLengthFitIntoRegion(1), ClassRoot::ARRAY_STRING, false));
    ASSERT(ObjectToRegion(youngArray.GetPtr())->IsEden());
    youngArray->Set(0, holder->Get<ObjectHeader *>(0));
    // Check we are OOM
    ASSERT_EQ(nullptr, ObjectAllocator::AllocObjectInYoung());
    uintptr_t tenuredAddrBeforeGc = ToUintPtr(holder->Get<ObjectHeader *>(0));
    // Forget two tenured regions
    holder->Set(1U, static_cast<ObjectHeader *>(nullptr));
    holder->Set(2U, static_cast<ObjectHeader *>(nullptr));
    // Now there should be enough space in tenured to move young
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    ASSERT_NE(nullptr, ObjectAllocator::AllocObjectInYoung());
    uintptr_t tenuredAddrAfterGc = ToUintPtr(holder->Get<ObjectHeader *>(0));
    // Check GC moved tenured regions
    ASSERT_NE(tenuredAddrBeforeGc, tenuredAddrAfterGc);
    // Check FullGC updated the reference stored in youngArray correctly.
    ASSERT_EQ(holder->Get<ObjectHeader *>(0), youngArray->Get<ObjectHeader *>(0));
}

TEST_F(G1FullGCTest, TestFullGCResetTenuredRegions)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    // Fill almost all regions (3 tenured regions will be free)
    size_t numFreeRegions = NumRegions() - NUM_NONMOVABLE_REGIONS_FOR_RUNTIME;
    size_t numFilledRegions = RoundDown(numFreeRegions, NumYoungRegions());
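    // numFilledRegions is a multiple of NumYoungRegions(), so the heap is filled in
    // whole young-generation batches.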
    FillHeap(numFilledRegions, holder, 0);
    // Forget all objects
    for (size_t i = 0; i < numFilledRegions; ++i) {
        holder->Set(i, static_cast<ObjectHeader *>(nullptr));
    }
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Fill almost all regions again (3 tenured regions will be free).
    // We should be able to allocate all objects because FullGC should reset old tenured regions.
    FillHeap(numFilledRegions, holder, 0);
}

template <uint32_t REGION_FRAGMENTATION_RATE>
class G1FullGCWithRegionFragmentationRate : public G1FullGCTest {
public:
    G1FullGCWithRegionFragmentationRate() : G1FullGCTest(REGION_FRAGMENTATION_RATE) {}
};

class FullGcRegionFragmentationRateOptionNever : public G1FullGCWithRegionFragmentationRate<100U> {};
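// A rate of 100 means a tenured region is never treated as fragmented, so Full GC leaves
// it in place; a rate of 0 (the "Always" fixture below) compacts it on every Full GC.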

TEST_F(FullGcRegionFragmentationRateOptionNever, TestG1FullGcRegionFragmentationRateOptionNever)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    // Fill one region
    FillHeap(1, holder, 0);
    // Save ref to young object
    auto object = holder->Get<ObjectHeader *>(0);
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Check that we moved this young object
    ASSERT_NE(holder->Get<ObjectHeader *>(0), object);
    // Save ref to tenured object
    object = holder->Get<ObjectHeader *>(0);
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Check that we don't move this object
    ASSERT_EQ(holder->Get<ObjectHeader *>(0), object);
}

class FullGcRegionFragmentationRateOptionAlways : public G1FullGCWithRegionFragmentationRate<0> {};

TEST_F(FullGcRegionFragmentationRateOptionAlways, TestG1FullGcRegionFragmentationRateOptionAlways)
{
    Runtime *runtime = Runtime::GetCurrent();
    GC *gc = runtime->GetPandaVM()->GetGC();
    CardTable *cardTable = gc->GetCardTable();
    ASSERT_NE(nullptr, cardTable);
    ManagedThread *thread = ManagedThread::GetCurrent();
    ScopedManagedCodeThread s(thread);
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<coretypes::Array> holder(thread, ObjectAllocator::AllocArray(NumRegions(), ClassRoot::ARRAY_STRING, true));
    // Fill one region
    FillHeap(1, holder, 0);
    // Save ref to young object
    auto object = holder->Get<ObjectHeader *>(0);
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Check that we moved this young object
    ASSERT_NE(holder->Get<ObjectHeader *>(0), object);
    // Save ref to tenured object
    object = holder->Get<ObjectHeader *>(0);
    gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    // Check that we moved this object
    ASSERT_NE(holder->Get<ObjectHeader *>(0), object);
}

class G1FullGCOOMTest : public G1GCTest {
public:
    G1FullGCOOMTest() : G1GCTest(CreateOOMOptions())
    {
        thread_ = MTManagedThread::GetCurrent();
        ASSERT(thread_ != nullptr);
        thread_->ManagedCodeBegin();
    }

    NO_COPY_SEMANTIC(G1FullGCOOMTest);
    NO_MOVE_SEMANTIC(G1FullGCOOMTest);

    static RuntimeOptions CreateOOMOptions()
    {
        RuntimeOptions options;
        options.SetShouldLoadBootPandaFiles(false);
        options.SetCompilerEnableJit(false);
        options.SetShouldInitializeIntrinsics(false);
        // GC options
        constexpr size_t HEAP_SIZE_LIMIT_TEST = 16_MB;
        options.SetRunGcInPlace(true);
        options.SetGcType("g1-gc");
        options.SetHeapSizeLimit(HEAP_SIZE_LIMIT_TEST);
        options.SetGcTriggerType("debug-never");
        options.SetG1NumberOfTenuredRegionsAtMixedCollection(0);
        return options;
    }

    ~G1FullGCOOMTest() override
    {
        thread_->ManagedCodeEnd();
    }

protected:
    MTManagedThread *thread_;  // NOLINT(misc-non-private-member-variables-in-classes)
};

TEST_F(G1FullGCOOMTest, AllocateBy1Region)
{
    constexpr size_t OBJECT_SIZE = AlignUp(static_cast<size_t>(DEFAULT_REGION_SIZE * 0.8F), DEFAULT_ALIGNMENT_IN_BYTES);
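    // Each string takes ~80% of a region, so every allocation below occupies its own region.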
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
        auto *g1Allocator =
            static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
        // Fill tenured space with garbage
        do {
            VMHandle<ObjectHeader> handle(thread_, ObjectAllocator::AllocString(OBJECT_SIZE));
            ASSERT_NE(handle.GetPtr(), nullptr) << "Object must be allocated correctly in a non-full heap";
            // Move the new object to tenured
            Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
        } while (g1Allocator->HaveTenuredSize(2U));
        ASSERT_TRUE(g1Allocator->HaveTenuredSize(1));
        // Allocate one young region
        VMHandle<ObjectHeader> handle1(thread_, ObjectAllocator::AllocString(OBJECT_SIZE));
        ASSERT_NE(handle1.GetPtr(), nullptr) << "Object must be allocated correctly: the heap is full of garbage";
        // Try to move the single young region to the last tenured region
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
        // Fully fill young space
        while (g1Allocator->GetHeapSpace()->GetCurrentFreeYoungSize() > 0) {
            auto *youngObj = ObjectAllocator::AllocString(OBJECT_SIZE);
            ASSERT_NE(youngObj, nullptr) << "Must allocate in free young space";
        }
    }
    ASSERT_NE(ObjectAllocator::AllocString(OBJECT_SIZE), nullptr)
        << "Object must be allocated correctly in a non-full heap";
}

TEST_F(G1FullGCOOMTest, PinUnpinObject)
{
    constexpr size_t OBJECT_SIZE = AlignUp(static_cast<size_t>(DEFAULT_REGION_SIZE * 0.8F), DEFAULT_ALIGNMENT_IN_BYTES);
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
        // Fill tenured space with garbage
        do {
            VMHandle<ObjectHeader> handle(thread_, ObjectAllocator::AllocString(OBJECT_SIZE));
            ASSERT_NE(handle.GetPtr(), nullptr) << "Object must be allocated correctly in a non-full heap";
            // Move the new object to tenured
            Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
        } while (g1Allocator->HaveTenuredSize(2U));
        ASSERT_TRUE(g1Allocator->HaveTenuredSize(1));
        // Allocate one young region
        VMHandle<ObjectHeader> handle1(thread_, ObjectAllocator::AllocString(OBJECT_SIZE));
        ASSERT_NE(handle1.GetPtr(), nullptr) << "Object must be allocated correctly in young";
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsYoung());
        // Pin the object in the young region
        g1Allocator->PinObject(handle1.GetPtr());
        // Try to move the young region to the last tenured region
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE));
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
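        // The region holding the pinned object is promoted to tenured in place rather
        // than evacuated, so the object keeps its address.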
        // Just allocate one object in young
        auto *youngObj = ObjectAllocator::AllocString(OBJECT_SIZE);
        ASSERT_NE(youngObj, nullptr) << "Must allocate in free young space";
        // Run Full GC
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::OOM_CAUSE));
        // Check the "pinned" region
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasPinnedObjects());
        // Unpin the object
        g1Allocator->UnpinObject(handle1.GetPtr());
        // Check the "unpinned" region
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
        ASSERT_FALSE(ObjectToRegion(handle1.GetPtr())->HasPinnedObjects());
    }
    // Run FullGC once more after unpinning
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::OOM_CAUSE));
    g1Allocator->IterateOverObjects([](ObjectHeader *obj) {
        ASSERT_FALSE(ObjectToRegion(obj)->HasPinnedObjects()) << "The lone pinned object was unpinned before GC";
    });
}

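// Common pin/unpin scenario: create a runtime, allocate an array in the requested space,
// pin it, run an explicit GC, check the object did not move, then unpin it.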
static void PinUnpinTest(SpaceType requestedSpaceType, size_t objectSize = 1_KB)
{
    ASSERT_TRUE(IsHeapSpace(requestedSpaceType));
    Runtime::Create(G1FullGCOOMTest::CreateOOMOptions());
    auto *thread = MTManagedThread::GetCurrent();
    ASSERT_NE(thread, nullptr);
    thread->ManagedCodeBegin();
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread);
        constexpr size_t OBJ_ELEMENT_SIZE = 64;
        auto *addressBeforeGc =
            ObjectAllocator::AllocArray(objectSize / OBJ_ELEMENT_SIZE, ClassRoot::ARRAY_I64,
                                        requestedSpaceType == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
        ASSERT_NE(addressBeforeGc, nullptr);
        VMHandle<ObjectHeader> handle(thread, addressBeforeGc);
        SpaceType objSpaceType =
            PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(static_cast<void *>(handle.GetPtr()));
        ASSERT_EQ(objSpaceType, requestedSpaceType);
        g1Allocator->PinObject(handle.GetPtr());
        // Run GC - try to move objects
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
        ASSERT_EQ(addressBeforeGc, handle.GetPtr()) << "Pinned object must not be moved";
        g1Allocator->UnpinObject(handle.GetPtr());
        ASSERT_FALSE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    }
    thread->ManagedCodeEnd();
    Runtime::Destroy();
}

TEST(G1GCPinningTest, PinUnpinRegularObjectTest)
{
    PinUnpinTest(SpaceType::SPACE_TYPE_OBJECT);
}

TEST(G1GCPinningTest, PinUnpinHumongousObjectTest)
{
    constexpr size_t HUMONGOUS_OBJECT_FOR_PINNING_SIZE = 4_MB;
    PinUnpinTest(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT, HUMONGOUS_OBJECT_FOR_PINNING_SIZE);
}

TEST(G1GCPinningTest, PinUnpinNonMovableObjectTest)
{
    PinUnpinTest(SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
}

class G1GCPromotePinnedRegionTest : public G1GCTest {
public:
    G1GCPromotePinnedRegionTest() : G1GCTest(CreateOptions())
    {
        thread_ = MTManagedThread::GetCurrent();
        ASSERT(thread_ != nullptr);
        thread_->ManagedCodeBegin();
    }

    NO_COPY_SEMANTIC(G1GCPromotePinnedRegionTest);
    NO_MOVE_SEMANTIC(G1GCPromotePinnedRegionTest);

    static RuntimeOptions CreateOptions()
    {
        RuntimeOptions options;
        options.SetShouldLoadBootPandaFiles(false);
        options.SetCompilerEnableJit(false);
        options.SetShouldInitializeIntrinsics(false);
        // GC options
        constexpr size_t HEAP_SIZE_LIMIT_TEST = 16_MB;
        options.SetRunGcInPlace(true);
        options.SetGcType("g1-gc");
        options.SetHeapSizeLimit(HEAP_SIZE_LIMIT_TEST);
        options.SetGcTriggerType("debug-never");
        options.SetG1NumberOfTenuredRegionsAtMixedCollection(0);
        return options;
    }

    ~G1GCPromotePinnedRegionTest() override
    {
        thread_->ManagedCodeEnd();
    }

protected:
    MTManagedThread *thread_;  // NOLINT(misc-non-private-member-variables-in-classes)
};

TEST_F(G1GCPromotePinnedRegionTest, CompactingRegularStringObjectAndPromoteToMixedTLABRegionAndUnpin)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
        constexpr size_t OBJECT_SIZE = 1_KB;
        auto *addressBeforeGc = ObjectAllocator::AllocString(OBJECT_SIZE);
        ASSERT_NE(addressBeforeGc, nullptr);
        VMHandle<ObjectHeader> handle1(thread_, addressBeforeGc);
        g1Allocator->PinObject(handle1.GetPtr());
        // Run GC - promote the pinned region
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
        ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsMixedTLAB());
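        // The promoted region becomes a "mixed TLAB": the pinned object stays in place
        // while the region apparently remains open for further allocation (see the
        // IsInAllocRange test below).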
        ASSERT_EQ(addressBeforeGc, handle1.GetPtr()) << "Pinned object must not be moved";
        g1Allocator->UnpinObject(handle1.GetPtr());
        Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    }
}

TEST_F(G1GCPromotePinnedRegionTest, PromoteTLABRegionToMixedTLABAndTestIsInAllocRangeMethod)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    constexpr size_t OBJECT_SIZE = 1_KB;
    auto *addressBeforeGc = ObjectAllocator::AllocString(OBJECT_SIZE);
    auto *addressBeforeGc2 = ObjectAllocator::AllocString(OBJECT_SIZE);
    ASSERT_NE(addressBeforeGc, nullptr);
    ASSERT_NE(addressBeforeGc2, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, addressBeforeGc);
    VMHandle<ObjectHeader> handle2(thread_, addressBeforeGc2);
    g1Allocator->PinObject(handle1.GetPtr());
    g1Allocator->PinObject(handle2.GetPtr());
    // Run GC - promote the pinned region
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsMixedTLAB());
    ASSERT_EQ(addressBeforeGc, handle1.GetPtr()) << "Pinned object must not be moved";
    ASSERT_TRUE(ObjectToRegion(handle2.GetPtr())->IsMixedTLAB());
    ASSERT_EQ(addressBeforeGc2, handle2.GetPtr()) << "Pinned object must not be moved";
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsInAllocRange(handle1.GetPtr()));
    auto *pinnedObj = ObjectAllocator::AllocString(OBJECT_SIZE, true);  // Pinned allocation
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->IsInAllocRange(pinnedObj));
    g1Allocator->UnpinObject(handle1.GetPtr());
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));
}

class G1AllocatePinnedObjectTest : public G1GCPromotePinnedRegionTest {};

TEST_F(G1AllocatePinnedObjectTest, AllocatePinnedRegularArrayAndCreatedNewPinnedRegion)
{
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    constexpr size_t OBJ_ELEMENT_SIZE = 64;
    size_t objectSize = 1_KB;
    auto *arrayObj = ObjectAllocator::AllocArray(objectSize / OBJ_ELEMENT_SIZE, ClassRoot::ARRAY_I64, false, true);
    ASSERT_NE(arrayObj, nullptr);
    VMHandle<ObjectHeader> handle(thread_, arrayObj);
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasFlag(RegionFlag::IS_OLD));
}

TEST_F(G1AllocatePinnedObjectTest, AllocatePinnedRegularStringAndCreatedNewPinnedRegion)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    size_t objectSize = 128_KB;
    auto *strObj1 = ObjectAllocator::AllocString(objectSize, true);
    ASSERT_NE(strObj1, nullptr);
    VMHandle<ObjectHeader> handle(thread_, strObj1);
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    g1Allocator->UnpinObject(handle.GetPtr());
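    // After unpinning, the region should no longer accept pinned allocations, so the
    // next pinned string goes elsewhere and this region's allocated bytes stay unchanged.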
    size_t objectSize2 = 32_KB;
    size_t sizeRegionBefore = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    auto *strObj2 = ObjectAllocator::AllocString(objectSize2, true);
    size_t sizeRegionAfter = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    ASSERT_NE(strObj2, nullptr);
    ASSERT_EQ(sizeRegionBefore, sizeRegionAfter);
}

TEST_F(G1AllocatePinnedObjectTest, AllocatePinnedRegularStringToNewPinnedRegion)
{
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    size_t objectSize = 128_KB;
    auto *strObj1 = ObjectAllocator::AllocString(objectSize, true);
    ASSERT_NE(strObj1, nullptr);
    VMHandle<ObjectHeader> handle(thread_, strObj1);
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    size_t sizeRegionBefore = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    // Create a new pinned object
    size_t objectSize2 = 196_KB;
    auto *strObj2 = ObjectAllocator::AllocString(objectSize2, true);
    ASSERT_NE(strObj2, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, strObj2);
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    size_t sizeRegionAfter = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    ASSERT_EQ(sizeRegionBefore, sizeRegionAfter);
    ASSERT_NE(ObjectToRegion(handle.GetPtr()), ObjectToRegion(handle1.GetPtr()));
}

TEST_F(G1AllocatePinnedObjectTest, AllocatePinnedRegularStringToExistPinnedRegion)
{
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    size_t objectSize = 32_KB;
    auto *strObj1 = ObjectAllocator::AllocString(objectSize, true);
    ASSERT_NE(strObj1, nullptr);
    VMHandle<ObjectHeader> handle(thread_, strObj1);
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    size_t sizeRegionBefore = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    // Create a new pinned object
    size_t objectSize2 = 32_KB;
    auto *strObj2 = ObjectAllocator::AllocString(objectSize2, true);
    ASSERT_NE(strObj2, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, strObj2);
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle1.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    size_t sizeRegionAfter = ObjectToRegion(handle.GetPtr())->GetAllocatedBytes();
    ASSERT_NE(sizeRegionBefore, sizeRegionAfter);
}

TEST_F(G1AllocatePinnedObjectTest, AllocateRegularStringToExistPinnedRegion)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    constexpr size_t OBJECT_SIZE =
        AlignUp(static_cast<size_t>(DEFAULT_REGION_SIZE * 0.08F), DEFAULT_ALIGNMENT_IN_BYTES);
    auto *addressBeforeGc = ObjectAllocator::AllocString(OBJECT_SIZE);
    ASSERT_NE(addressBeforeGc, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, addressBeforeGc);
    g1Allocator->PinObject(handle1.GetPtr());
    size_t sizeRegionBefore = ObjectToRegion(handle1.GetPtr())->GetAllocatedBytes();
    // Run GC - promote the pinned region
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));

    ASSERT_EQ(addressBeforeGc, handle1.GetPtr()) << "Pinned object must not be moved";
    auto *pinnedObj = ObjectAllocator::AllocString(OBJECT_SIZE, true);  // Pinned allocation
    VMHandle<ObjectHeader> handle3(thread_, pinnedObj);
    size_t sizeRegionAfter = ObjectToRegion(handle1.GetPtr())->GetAllocatedBytes();
    ASSERT_EQ(ObjectToRegion(handle3.GetPtr()), ObjectToRegion(handle1.GetPtr()));
    ASSERT_TRUE(ObjectToRegion(handle3.GetPtr())->HasPinnedObjects());
    ASSERT_TRUE(ObjectToRegion(handle3.GetPtr())->HasFlag(RegionFlag::IS_OLD));
    ASSERT_NE(sizeRegionBefore, sizeRegionAfter);
}

TEST_F(G1AllocatePinnedObjectTest, AllocateRegularArray)
{
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread_);
    constexpr size_t OBJECT_SIZE =
        AlignUp(static_cast<size_t>(DEFAULT_REGION_SIZE * 0.08F), DEFAULT_ALIGNMENT_IN_BYTES);
    auto *addressBeforeGc = ObjectAllocator::AllocString(OBJECT_SIZE);
    ASSERT_NE(addressBeforeGc, nullptr);
    VMHandle<ObjectHeader> handle1(thread_, addressBeforeGc);
    g1Allocator->PinObject(handle1.GetPtr());
    size_t sizeRegionBefore = ObjectToRegion(handle1.GetPtr())->GetAllocatedBytes();
    // Run GC - promote the pinned region
    Runtime::GetCurrent()->GetPandaVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE));

    ASSERT_EQ(addressBeforeGc, handle1.GetPtr()) << "Pinned object must not be moved";
    constexpr size_t OBJ_ELEMENT_SIZE = 64;
    size_t objectSize = 1_KB;
    auto *arrayObj = ObjectAllocator::AllocArray(objectSize / OBJ_ELEMENT_SIZE, ClassRoot::ARRAY_I64, false, true);
    ASSERT_NE(arrayObj, nullptr);
    VMHandle<ObjectHeader> handle3(thread_, arrayObj);
    size_t sizeRegionAfter = ObjectToRegion(handle1.GetPtr())->GetAllocatedBytes();
    ASSERT_EQ(ObjectToRegion(handle3.GetPtr()), ObjectToRegion(handle1.GetPtr()));
    ASSERT_TRUE(ObjectToRegion(handle3.GetPtr())->HasPinnedObjects());
    ASSERT_NE(sizeRegionBefore, sizeRegionAfter);
}

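// Same flow as PinUnpinTest above, but the object is allocated with the pinned flag up
// front: verify it lands in the requested space at a stable address, then unpin it.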
static void AllocatePinnedObjectTest(SpaceType requestedSpaceType, size_t objectSize = 1_KB)
{
    ASSERT_TRUE(IsHeapSpace(requestedSpaceType));
    Runtime::Create(G1AllocatePinnedObjectTest::CreateOptions());
    auto *thread = MTManagedThread::GetCurrent();
    ASSERT_NE(thread, nullptr);
    thread->ManagedCodeBegin();
    auto *g1Allocator =
        static_cast<ObjectAllocatorG1<> *>(Runtime::GetCurrent()->GetPandaVM()->GetGC()->GetObjectAllocator());
    {
        [[maybe_unused]] HandleScope<ark::ObjectHeader *> scope(thread);
        constexpr size_t OBJ_ELEMENT_SIZE = 64;
        auto *addressBeforeGc =
            ObjectAllocator::AllocArray(objectSize / OBJ_ELEMENT_SIZE, ClassRoot::ARRAY_I64,
                                        requestedSpaceType == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT, true);
        ASSERT_NE(addressBeforeGc, nullptr);
        VMHandle<ObjectHeader> handle(thread, addressBeforeGc);
        SpaceType objSpaceType =
            PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(static_cast<void *>(handle.GetPtr()));
        ASSERT_EQ(objSpaceType, requestedSpaceType);
        ASSERT_EQ(addressBeforeGc, handle.GetPtr());
        if (requestedSpaceType == SpaceType::SPACE_TYPE_OBJECT) {
            ASSERT_TRUE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
        }
        g1Allocator->UnpinObject(handle.GetPtr());
        ASSERT_FALSE(ObjectToRegion(handle.GetPtr())->HasPinnedObjects());
    }
    thread->ManagedCodeEnd();
    Runtime::Destroy();
}

TEST(G1AllocateDifferentSpaceTypePinnedObjectTest, AllocatePinnedRegularObjectTest)
{
    AllocatePinnedObjectTest(SpaceType::SPACE_TYPE_OBJECT);
}

TEST(G1AllocateDifferentSpaceTypePinnedObjectTest, AllocatePinnedHumongousObjectTest)
{
    constexpr size_t HUMONGOUS_OBJECT_FOR_PINNING_SIZE = 4_MB;
    AllocatePinnedObjectTest(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT, HUMONGOUS_OBJECT_FOR_PINNING_SIZE);
}

TEST(G1AllocateDifferentSpaceTypePinnedObjectTest, AllocatePinnedNonMovableObjectTest)
{
    AllocatePinnedObjectTest(SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT);
}

// NOLINTEND(readability-magic-numbers)

} // namespace ark::mem