/**
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/mem/gc/gen-gc/gen-gc.h"
#include "runtime/mem/gc/gc_root-inl.h"
#include "runtime/mem/object_helpers-inl.h"
#include "runtime/include/panda_vm.h"
#include "runtime/mem/gc/card_table-inl.h"
#include "runtime/timing.h"
#include "runtime/mem/pygote_space_allocator-inl.h"
#include "runtime/mem/gc/static/gc_marker_static-inl.h"
#include "runtime/mem/gc/dynamic/gc_marker_dynamic-inl.h"
#include "runtime/mem/gc/generational-gc-base-inl.h"

namespace ark::mem {

template <class LanguageConfig>
GenGC<LanguageConfig>::GenGC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings)
    : GenerationalGC<LanguageConfig>(objectAllocator, settings),
      marker_(this),
      isExplicitConcurrentGcEnabled_(settings.IsExplicitConcurrentGcEnabled())
{
    this->SetType(GCType::GEN_GC);
    this->SetTLABsSupported();
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::InitializeImpl()
{
    // GC stores the PandaVM instance, so we get the allocator from the PandaVM.
    InternalAllocatorPtr allocator = this->GetInternalAllocator();
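    // The card table covers the whole object space; the generational barrier set uses it to record
    // tenured -> young stores, so young GC only has to scan dirty cards instead of the whole tenured space.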
    this->CreateCardTable(allocator, PoolManager::GetMmapMemPool()->GetMinObjectAddress(),
                          PoolManager::GetMmapMemPool()->GetTotalObjectSize());
    auto barrierSet = allocator->New<GCGenBarrierSet>(allocator, this->GetCardTable(), CardTable::GetCardBits(),
                                                      CardTable::GetCardDirtyValue());
    ASSERT(barrierSet != nullptr);
    this->SetGCBarrierSet(barrierSet);
    LOG_DEBUG_GC << "GenGC initialized";
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::ShouldRunTenuredGC(const GCTask &task)
{
    return task.reason == GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE;
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::ShouldRunFullGC(const GCTask &task, bool haveEnoughSpaceForYoung) const
{
    return !haveEnoughSpaceForYoung || task.reason == GCTaskCause::OOM_CAUSE || this->IsExplicitFull(task) ||
           this->IsOnPygoteFork() || task.reason == GCTaskCause::STARTUP_COMPLETE_CAUSE;
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::RunPhasesImpl(GCTask &task)
{
    LOG(DEBUG, GC) << "GenGC start";
    uint64_t footprintBefore = this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
    LOG_DEBUG_GC << "Footprint before GC: " << footprintBefore;
    if (this->IsLogDetailedGcInfoEnabled()) {
        this->footprintList_.clear();
        this->footprintList_.push_back({"Footprint before GC", footprintBefore});
    }
    uint64_t youngTotalTime = 0;
    {
        ScopedTiming t("Generational GC", *this->GetTiming());
        this->memStats_.Reset();
        // We trigger a full GC at the first pygote fork
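        // A full collection is also forced when there is not enough space to move the young space, on OOM,
        // on an explicit full-GC request, and on startup completion (see ShouldRunFullGC); otherwise we
        // collect the young space first and optionally run a tenured collection afterwards.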
        if (ShouldRunFullGC(task, HaveEnoughSpaceToMove())) {
            GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats());
            marker_.BindBitmaps(true);  // clear pygote live bitmaps, they will be rebuilt
            this->GetObjectGenAllocator()->InvalidateSpaceData();
            this->GetObjectGenAllocator()->UpdateSpaceData();
            RunFullGC(task);
        } else {
            {
                GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats());
                time::Timer timer(&youngTotalTime, true);
                marker_.BindBitmaps(false);
                this->GetObjectGenAllocator()->InvalidateSpaceData();
                this->GetObjectGenAllocator()->UpdateSpaceData();
                LOG_DEBUG_GC << "Young range: " << this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
                RunYoungGC(task);
                if (youngTotalTime > 0) {
                    this->GetStats()->AddTimeValue(youngTotalTime, TimeTypeStats::YOUNG_TOTAL_TIME);
                }
                uint64_t footprintYoung = this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
                LOG_DEBUG_GC << "Footprint after young: " << footprintYoung;
                if (this->IsLogDetailedGcInfoEnabled()) {
                    this->footprintList_.push_back({"Footprint after young", footprintYoung});
                }
            }
            if (ShouldRunTenuredGC(task)) {
                marker_.BindBitmaps(true);  // clear pygote live bitmaps, they will be rebuilt
                this->GetObjectGenAllocator()->InvalidateSpaceData();
                this->GetObjectGenAllocator()->UpdateSpaceData();
                RunTenuredGC(task);
            }
        }
    }
    uint64_t footprintAfter = this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
    LOG_DEBUG_GC << "Footprint after GC: " << footprintAfter;
    if (this->IsLogDetailedGcInfoEnabled()) {
        this->footprintList_.push_back({"Footprint after GC", footprintAfter});
    }
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::PreStartupImp()
{
    GenerationalGC<LanguageConfig>::DisableTenuredGC();
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::InitGCBits(ark::ObjectHeader *objHeader)
{
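    // An object allocated outside the young space while the concurrent sweep is running must be
    // born marked, otherwise the sweeper could reclaim it as dead.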
    if (UNLIKELY(this->GetGCPhase() == GCPhase::GC_PHASE_SWEEP) &&
        (!this->GetObjectAllocator()->IsObjectInYoungSpace(objHeader))) {
        objHeader->SetMarkedForGC();
        // unmark if we are already out of the sweep phase, otherwise the object may be missed in sweep
        if (UNLIKELY(this->GetGCPhase() != GCPhase::GC_PHASE_SWEEP)) {
            objHeader->SetUnMarkedForGC();
        }
    } else {
        objHeader->SetUnMarkedForGC();
    }
    LOG_DEBUG_GC << "Init gc bits for object: " << std::hex << objHeader << " bit: " << objHeader->IsMarkedForGC()
                 << ", is marked = " << IsMarked(objHeader);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::InitGCBitsForAllocationInTLAB(ark::ObjectHeader *objHeader)
{
    // The compiler allocates objects in TLABs only in the young space,
    // therefore we set them unmarked for GC here.
    objHeader->SetUnMarkedForGC();
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::RunYoungGC(GCTask &task)
{
    GCScope<TRACE_TIMING> scopedTrace(__FUNCTION__, this);
    LOG_DEBUG_GC << "GenGC RunYoungGC start";
    uint64_t youngPauseTime;
    {
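        // The young collection runs entirely on pause: mark from roots and dirty cards, evacuate
        // the survivors into the tenured space, then clear the card table - the young space is
        // empty afterwards, so the old tenured -> young cards are no longer needed.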
        NoAtomicGCMarkerScope scope(&this->marker_);
        time::Timer timer(&youngPauseTime, true);
        // NOLINTNEXTLINE(performance-unnecessary-value-param)
        MarkYoung(task);
        CollectYoungAndMove();
        this->GetCardTable()->ClearAll();
    }
    if (youngPauseTime > 0) {
        this->GetStats()->AddTimeValue(youngPauseTime, TimeTypeStats::YOUNG_PAUSED_TIME);
    }
    LOG_DEBUG_GC << "GenGC RunYoungGC end";
    task.collectionType = GCCollectionType::YOUNG;
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkYoung(const GCTask &task)
{
    GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_MARK_YOUNG);

    // Iterate over roots and mark the young-space objects reachable from them
    GCMarkingStackType objectsStack(this);
    ASSERT(this->GetObjectAllocator()->GetYoungSpaceMemRanges().size() == 1);
    auto youngMr = this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
    GCRootVisitor gcMarkYoung = [&objectsStack, &youngMr, this](const GCRoot &gcRoot) {
        // Skip non-young roots
        auto rootObjectPtr = gcRoot.GetObjectHeader();
        ASSERT(rootObjectPtr != nullptr);
        if (!youngMr.IsAddressInRange(ToUintPtr(rootObjectPtr))) {
            LOG_DEBUG_GC << "Skip root for young mark: " << std::hex << rootObjectPtr;
            return;
        }
        LOG(DEBUG, GC) << "root " << GetDebugInfoAboutObject(rootObjectPtr);
        if (this->MarkObjectIfNotMarked(rootObjectPtr)) {
            objectsStack.PushToStack(gcRoot.GetType(), rootObjectPtr);
            this->MarkYoungStack(&objectsStack);
        }
    };
    {
        GCScope<TRACE_TIMING> markingYoungRootsTrace("Marking roots young", this);
        this->VisitRoots(gcMarkYoung,
                         VisitGCRootFlags::ACCESS_ROOT_NONE | VisitGCRootFlags::ACCESS_ROOT_AOT_STRINGS_ONLY_YOUNG);
    }
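    // Young objects may also be reachable only from tenured objects; such tenured -> young references
    // were recorded by the write barrier as dirty cards, so visiting card table roots below completes
    // the young root set.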
    {
        ScopedTiming visitCardTableRootsTiming("VisitCardTableRoots", *this->GetTiming());
        LOG_DEBUG_GC << "START Marking tenured -> young roots";
        MemRangeChecker tenuredRangeChecker = [&youngMr](MemRange &memRange) -> bool {
            return !youngMr.IsIntersect(memRange);
        };
        ObjectChecker tenuredRangeYoungObjectChecker = [&youngMr](const ObjectHeader *objectHeader) -> bool {
            return youngMr.IsAddressInRange(ToUintPtr(objectHeader));
        };

        ObjectChecker fromObjectChecker = []([[maybe_unused]] const ObjectHeader *objectHeader) -> bool {
            return true;
        };

        this->VisitCardTableRoots(this->GetCardTable(), gcMarkYoung, tenuredRangeChecker,
                                  tenuredRangeYoungObjectChecker, fromObjectChecker,
                                  CardTableProcessedFlag::VISIT_MARKED | CardTableProcessedFlag::VISIT_PROCESSED);
    }
    // the reference processor in VisitCardTableRoots can add new objects to the stack
    this->MarkYoungStack(&objectsStack);
    LOG_DEBUG_GC << "END Marking tenured -> young roots";
    auto refClearPred = [this]([[maybe_unused]] const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
    this->GetPandaVm()->HandleReferences(task, refClearPred);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkYoungStack(GCMarkingStackType *stack)
{
    trace::ScopedTrace scopedTrace(__FUNCTION__);
    ASSERT(stack != nullptr);
    auto allocator = this->GetObjectAllocator();
    auto &youngRanges = allocator->GetYoungSpaceMemRanges();
    auto refPred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
    while (!stack->Empty()) {
        auto *object = this->PopObjectFromStack(stack);
        ValidateObject(nullptr, object);
        auto *cls = object->template ClassAddr<BaseClass>();
        LOG_DEBUG_GC << "current object " << GetDebugInfoAboutObject(object);

        bool inRange = false;
        for (const auto &r : youngRanges) {
            if (r.IsAddressInRange(ToUintPtr(object))) {
                inRange = true;
                break;
            }
        }
        if (inRange) {
            marker_.MarkInstance(stack, object, cls, refPred);
        }
    }
}

template <class LanguageConfig>
HeapVerifierIntoGC<LanguageConfig> GenGC<LanguageConfig>::CollectVerificationInfo(const MemRange &youngMemRange)
{
    HeapVerifierIntoGC<LanguageConfig> youngVerifier(this->GetPandaVm()->GetHeapManager());
    if (this->GetSettings()->IntoGCHeapVerification() && !this->IsFullGC()) {
        ScopedTiming collectVerificationTiming("CollectVerificationInfo", *this->GetTiming());
        youngVerifier.CollectVerificationInfo(PandaVector<MemRange>(1U, youngMemRange));
    }
    return youngVerifier;
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::VerifyCollectAndMove(HeapVerifierIntoGC<LanguageConfig> &&youngVerifier)
{
    if (this->GetSettings()->IntoGCHeapVerification() && !this->IsFullGC()) {
        ScopedTiming verificationTiming("Verification", *this->GetTiming());
        size_t failsCount = youngVerifier.VerifyAll();
        if (this->GetSettings()->FailOnHeapVerification() && failsCount > 0) {
            LOG(FATAL, GC) << "Heap was corrupted during GC, HeapVerifier found " << failsCount << " corruptions";
        }
    }
}

// NOLINTNEXTLINE(readability-function-size)
template <class LanguageConfig>
void GenGC<LanguageConfig>::CollectYoungAndMove()
{
    GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_COLLECT_YOUNG_AND_MOVE);
    LOG_DEBUG_GC << "== GenGC CollectYoungAndMove start ==";
    // NOTE(dtrubenkov): add an assert that we are in STW
    PandaVector<ObjectHeader *> movedObjects;
    size_t prevMovedSize = this->GetPandaVm()->GetMemStats()->GetLastYoungObjectsMovedBytes();
    constexpr size_t MINIMAL_PREALLOC_MOVE_OBJ = 32U;
    // Adaptively preallocate the movedObjects buffer to avoid useless reallocations
    movedObjects.reserve(std::max(MINIMAL_PREALLOC_MOVE_OBJ, prevMovedSize / GetMinimalObjectSize()));
    size_t youngMoveSize = 0;
    size_t youngMoveCount = 0;
    size_t youngDeleteSize = 0;
    size_t youngDeleteCount = 0;

    auto *objectAllocator = this->GetObjectGenAllocator();
    ASSERT(this->GetObjectAllocator()->GetYoungSpaceMemRanges().size() == 1);
    auto youngMemRange = this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
    HeapVerifierIntoGC<LanguageConfig> youngVerifier = CollectVerificationInfo(youngMemRange);

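    // For every young object: a marked (live) object is copied into the tenured space and a forwarding
    // address is stored in its old header; an unmarked object is dead and only its statistics are recorded.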
    std::function<void(ObjectHeader * objectHeader)> moveVisitor(
        [this, &objectAllocator, &movedObjects, &youngMoveSize, &youngMoveCount, &youngDeleteSize,
         &youngDeleteCount](ObjectHeader *objectHeader) -> void {
            size_t size = GetObjectSize(objectHeader);
            ASSERT(size <= Runtime::GetOptions().GetMaxTlabSize());
            // Use the aligned size here, because MemStats must be updated consistently.
            size_t alignedSize = GetAlignedObjectSize(size);
            if (objectHeader->IsMarkedForGC<false>()) {
                auto dst = reinterpret_cast<ObjectHeader *>(objectAllocator->AllocateTenuredWithoutLocks(size));
                ASSERT(dst != nullptr);
                memcpy_s(dst, size, objectHeader, size);
                youngMoveSize += alignedSize;
                youngMoveCount++;
                LOG_DEBUG_OBJECT_EVENTS << "MOVE object " << objectHeader << " -> " << dst << ", size = " << size;
                movedObjects.push_back(dst);
                // set the destination copy unmarked
                UnMarkObject(dst);
                this->SetForwardAddress(objectHeader, dst);
            } else {
                LOG_DEBUG_OBJECT_EVENTS << "DELETE OBJECT young: " << objectHeader;
                ++youngDeleteCount;
                youngDeleteSize += alignedSize;
            }
            // We will record all objects in MemStats as SPACE_TYPE_OBJECT, so check it
            ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(objectHeader) == SpaceType::SPACE_TYPE_OBJECT);
        });
    {
        ScopedTiming moveTiming("MoveAndSweep", *this->GetTiming());
        objectAllocator->IterateOverYoungObjects(moveVisitor);
    }
    this->memStats_.RecordYoungStats(youngMoveSize, youngMoveCount, youngDeleteSize, youngDeleteCount);
    UpdateRefsToMovedObjects(&movedObjects);
    this->VerifyCollectAndMove(std::move(youngVerifier));
    SweepYoungVmRefs();
    // Remove the young space
    objectAllocator->ResetYoungAllocator();

    this->UpdateMemStats(this->GetPandaVm()->GetMemStats()->GetFootprintHeap(), false);

    LOG_DEBUG_GC << "== GenGC CollectYoungAndMove end ==";
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::UpdateRefsToMovedObjects(PandaVector<ObjectHeader *> *movedObjects)
{
    GCScope<TRACE_TIMING> scope("UpdateRefsToMovedObjects", this);

    auto objAllocator = this->GetObjectAllocator();
    // Update references ex-young -> young
    LOG_DEBUG_GC << "process moved objects cnt = " << std::dec << movedObjects->size();
    LOG_DEBUG_GC << "=== Update exyoung -> young references. START. ===";
    for (auto obj : *movedObjects) {
        ObjectHelpers<LanguageConfig::LANG_TYPE>::UpdateRefsToMovedObjects(obj);
    }

    LOG_DEBUG_GC << "=== Update exyoung -> young references. END. ===";
    // Update references tenured -> young
    LOG_DEBUG_GC << "=== Update tenured -> young references. START. ===";
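    // Only marked cards outside the young space can hold references to the moved objects,
    // so it is enough to re-visit the objects covered by those cards.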
    auto youngSpace = objAllocator->GetYoungSpaceMemRanges().at(0);
    auto updateRefsInObject(
        [](ObjectHeader *obj) { ObjectHelpers<LanguageConfig::LANG_TYPE>::UpdateRefsToMovedObjects(obj); });
    this->GetCardTable()->VisitMarked(
        [&updateRefsInObject, &objAllocator, &youngSpace](const MemRange &memRange) {
            if (!youngSpace.Contains(memRange)) {
                objAllocator->IterateOverObjectsInRange(memRange, updateRefsInObject);
            }
        },
        CardTableProcessedFlag::VISIT_MARKED | CardTableProcessedFlag::VISIT_PROCESSED);
    LOG_DEBUG_GC << "=== Update tenured -> young references. END. ===";
    this->CommonUpdateRefsToMovedObjects();
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::RunTenuredGC(GCTask &task)
{
    GCScope<TRACE_TIMING> scope(__FUNCTION__, this);
    LOG_DEBUG_GC << "GC tenured start";
    GCMarkingStackType objectsStack(this);
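    // Tenured GC phase sequence: initial root mark on pause, concurrent mark, remark of the roots
    // recorded during the concurrent phase (again on pause), then a mostly concurrent sweep.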
    {
        GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats(), nullptr, PauseTypeStats::COMMON_PAUSE);
        {
            ScopedTiming unMarkTiming("UnMark", *this->GetTiming());
            // Unmark everything, because young-space marking does not filter out tenured objects
            // NOTE(dtrubenk): remove this
            this->GetObjectAllocator()->IterateOverObjects([this](ObjectHeader *obj) { this->marker_.UnMark(obj); });
        }
        InitialMark(&objectsStack);
    }
    this->ConcurrentMark(&objectsStack);
    // NOLINTNEXTLINE(performance-unnecessary-value-param)
    ReMark(&objectsStack, task);

    ASSERT(objectsStack.Empty());
    {
        ScopedTiming unMarkYoungTiming("UnMarkYoung", *this->GetTiming());
        this->GetObjectAllocator()->IterateOverYoungObjects([this](ObjectHeader *obj) { this->marker_.UnMark(obj); });
    }
    Sweep<true>();
    LOG_DEBUG_GC << "GC tenured end";
    task.collectionType = GCCollectionType::TENURED;
}

// Full GC runs on pause
template <class LanguageConfig>
void GenGC<LanguageConfig>::RunFullGC(GCTask &task)
{
    GCScope<TRACE_TIMING> fullGcScope(__FUNCTION__, this);
    LOG_DEBUG_GC << "Full GC start";
    this->SetFullGC(true);
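    // The whole heap is marked and swept while mutators are stopped; if the tenured space has enough
    // room, the already-marked young objects are then evacuated by a regular young collection.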
    {
        ScopedTiming unMarkTiming("UnMark", *this->GetTiming());
        this->GetObjectAllocator()->IterateOverObjects([this](ObjectHeader *obj) { this->marker_.UnMark(obj); });
    }
    FullMark(task);
    Sweep<false>();
    // Young GC
    if (LIKELY(HaveEnoughSpaceToMove())) {
        // We have already marked the objects above, so just collect and move
        CollectYoungAndMove();
        this->GetCardTable()->ClearAll();
    }
    this->SetFullGC(false);
    LOG_DEBUG_GC << "Full GC end";
    task.collectionType = GCCollectionType::FULL;
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkRoots(GCMarkingStackType *objectsStack, CardTableVisitFlag visitCardTableRoots,
                                      const ReferenceCheckPredicateT &refPred, VisitGCRootFlags flags)
{
    trace::ScopedTrace scopedTrace(__FUNCTION__);
    GCRootVisitor gcMarkRoots = [this, &objectsStack, &refPred](const GCRoot &gcRoot) {
        ObjectHeader *rootObject = gcRoot.GetObjectHeader();
        ObjectHeader *fromObject = gcRoot.GetFromObjectHeader();
        LOG_DEBUG_GC << "Handle root " << GetDebugInfoAboutObject(rootObject);
        if (UNLIKELY(fromObject != nullptr) &&
            this->IsReference(fromObject->NotAtomicClassAddr<BaseClass>(), fromObject, refPred)) {
            LOG_DEBUG_GC << "Add reference: " << GetDebugInfoAboutObject(fromObject) << " to stack";
            marker_.Mark(fromObject);
            this->ProcessReference(objectsStack, fromObject->NotAtomicClassAddr<BaseClass>(), fromObject,
                                   GC::EmptyReferenceProcessPredicate);
        } else {
            // We should always add this object to the stack: it may have been marked in InitialMark
            // while some of its fields were written during ConcurrentMark, so we need to iterate over
            // all of its fields again; MarkObjectIfNotMarked cannot be used here
            marker_.Mark(rootObject);
            objectsStack->PushToStack(gcRoot.GetType(), rootObject);
        }
    };
    this->VisitRoots(gcMarkRoots, flags);
    if (visitCardTableRoots == CardTableVisitFlag::VISIT_ENABLED) {
        auto allocator = this->GetObjectAllocator();
        ASSERT(allocator->GetYoungSpaceMemRanges().size() == 1);
        MemRange youngMr = allocator->GetYoungSpaceMemRanges().at(0);
        MemRangeChecker youngRangeChecker = []([[maybe_unused]] MemRange &memRange) -> bool { return true; };
        ObjectChecker youngRangeTenuredObjectChecker = [&youngMr](const ObjectHeader *objectHeader) -> bool {
            return !youngMr.IsAddressInRange(ToUintPtr(objectHeader));
        };
        ObjectChecker fromObjectChecker = [&youngMr, this](const ObjectHeader *objectHeader) -> bool {
            // Don't visit objects which are in the tenured space and not marked.
            return youngMr.IsAddressInRange(ToUintPtr(objectHeader)) || IsMarked(objectHeader);
        };
        this->VisitCardTableRoots(this->GetCardTable(), gcMarkRoots, youngRangeChecker, youngRangeTenuredObjectChecker,
                                  fromObjectChecker, CardTableProcessedFlag::VISIT_MARKED);
    }
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::InitialMark(GCMarkingStackType *objectsStack)
{
    GCScope<TRACE_TIMING_PHASE> gcScope(__FUNCTION__, this, GCPhase::GC_PHASE_INITIAL_MARK);
    {
        NoAtomicGCMarkerScope scope(&this->marker_);
        auto refPred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
        MarkRoots(objectsStack, CardTableVisitFlag::VISIT_DISABLED, refPred,
                  VisitGCRootFlags::ACCESS_ROOT_NONE | VisitGCRootFlags::START_RECORDING_NEW_ROOT);
    }
}

template <class LanguageConfig>
NO_THREAD_SAFETY_ANALYSIS void GenGC<LanguageConfig>::ConcurrentMark(GCMarkingStackType *objectsStack)
{
    GCScope<TRACE_TIMING_PHASE> scopedFunc(__FUNCTION__, this, GCPhase::GC_PHASE_MARK);
    ConcurrentScope concurrentScope(this);
    auto *objectAllocator = this->GetObjectAllocator();
    this->MarkImpl(
        &marker_, objectsStack, CardTableVisitFlag::VISIT_ENABLED,
        // Process 'weak' references as regular objects during the concurrent phase to avoid
        // concurrent access to the referent
        []([[maybe_unused]] const ObjectHeader *obj) { return false; },
        // non-young mem range checker
        [objectAllocator](MemRange &memRange) { return !objectAllocator->IsIntersectedWithYoung(memRange); });
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::ReMark(GCMarkingStackType *objectsStack, const GCTask &task)
{
    GCScope<TRACE_TIMING_PHASE> gcScope(__FUNCTION__, this, GCPhase::GC_PHASE_REMARK);
    GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats(), nullptr, PauseTypeStats::REMARK_PAUSE);

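    // Remark rescans, on pause, the roots recorded between START_RECORDING_NEW_ROOT and
    // END_RECORDING_NEW_ROOT and drains the marking stack, so mutator writes made during
    // concurrent marking cannot hide live objects.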
    // NOTE(dtrubenkov): consider iterative concurrent marking of the card table
    {
        NoAtomicGCMarkerScope scope(&this->marker_);
        auto refPred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
        MarkRoots(objectsStack, CardTableVisitFlag::VISIT_ENABLED, refPred,
                  VisitGCRootFlags::ACCESS_ROOT_ONLY_NEW | VisitGCRootFlags::END_RECORDING_NEW_ROOT);
        this->MarkStack(&marker_, objectsStack, GC::EmptyMarkPreprocess, refPred);
        {
            ScopedTiming t1("VisitInternalStringTable", *this->GetTiming());
            this->GetPandaVm()->VisitStringTable(
                [this, &objectsStack](ObjectHeader *str) {
                    if (this->MarkObjectIfNotMarked(str)) {
                        ASSERT(str != nullptr);
                        objectsStack->PushToStack(RootType::STRING_TABLE, str);
                    }
                },
                VisitGCRootFlags::ACCESS_ROOT_ONLY_NEW | VisitGCRootFlags::END_RECORDING_NEW_ROOT);
            this->MarkStack(&marker_, objectsStack, GC::EmptyMarkPreprocess, refPred);
        }
        // NOLINTNEXTLINE(performance-unnecessary-value-param)
        this->GetPandaVm()->HandleReferences(task, GC::EmptyReferenceProcessPredicate);
    }
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::FullMark(const GCTask &task)
{
    GCScope<TRACE_TIMING_PHASE> fullMarkScope(__FUNCTION__, this, GCPhase::GC_PHASE_MARK);
    NoAtomicGCMarkerScope markerScope(&this->marker_);

    GCMarkingStackType objectsStack(this);
    VisitGCRootFlags flags = VisitGCRootFlags::ACCESS_ROOT_ALL;
    auto refPred = GC::EmptyReferenceProcessPredicate;
    // Mark all reachable objects
    MarkRoots(&objectsStack, CardTableVisitFlag::VISIT_DISABLED, refPred, flags);
    this->GetPandaVm()->VisitStringTable(
        [this, &objectsStack](ObjectHeader *str) {
            if (this->MarkObjectIfNotMarked(str)) {
                ASSERT(str != nullptr);
                objectsStack.PushToStack(RootType::STRING_TABLE, str);
            }
        },
        flags);
    this->MarkStack(&marker_, &objectsStack, GC::EmptyMarkPreprocess, refPred);
    auto refClearPred = []([[maybe_unused]] const ObjectHeader *obj) { return true; };
    // NOLINTNEXTLINE(performance-unnecessary-value-param)
    this->GetPandaVm()->HandleReferences(task, refClearPred);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkReferences(GCMarkingStackType *references, GCPhase gcPhase)
{
    trace::ScopedTrace scopedTrace(__FUNCTION__);
    LOG_DEBUG_GC << "Start marking " << references->Size() << " references";
    auto refPred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
    if (gcPhase == GCPhase::GC_PHASE_MARK_YOUNG) {
        this->MarkYoungStack(references);
    } else if (gcPhase == GCPhase::GC_PHASE_INITIAL_MARK || gcPhase == GCPhase::GC_PHASE_MARK ||
               gcPhase == GCPhase::GC_PHASE_REMARK) {
        this->MarkStack(&marker_, references, GC::EmptyMarkPreprocess, refPred);
    } else {
        UNREACHABLE();
    }
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkObject(ObjectHeader *object)
{
    marker_.Mark(object);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::UnMarkObject(ObjectHeader *objectHeader)
{
    LOG_DEBUG_GC << "Set unmark for GC " << GetDebugInfoAboutObject(objectHeader);
    this->marker_.UnMark(objectHeader);
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::IsMarked(const ObjectHeader *object) const
{
    return this->marker_.IsMarked(object);
}

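// When CONCURRENT is true, mutators are resumed (via ConcurrentScope) right after GC_PHASE_SWEEP is set
// and VM references are swept; collecting dead tenured objects then runs concurrently with the application.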
// NO_THREAD_SAFETY_ANALYSIS because clang thread safety analysis cannot reason about ConcurrentScope
template <class LanguageConfig>
template <bool CONCURRENT>
NO_THREAD_SAFETY_ANALYSIS void GenGC<LanguageConfig>::Sweep()
{
    GCScope<TRACE_TIMING> gcScope(__FUNCTION__, this);
    ConcurrentScope concurrentScope(this, false);
    size_t freedObjectSize = 0U;
    size_t freedObjectCount = 0U;

    // NB! This block cannot be moved out of the braces: we must make sure GC_PHASE_SWEEP is cleared
    {
        GCScopedPhase scopedPhase(this, GCPhase::GC_PHASE_SWEEP);
        // NOTE(dtrubenkov): make concurrent
        ASSERT(this->GetObjectAllocator()->GetYoungSpaceMemRanges().size() == 1);
        // new strings may be created in the young space during tenured GC, we shouldn't collect them
        auto youngMemRange = this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
        this->GetPandaVm()->SweepVmRefs([this, &youngMemRange](ObjectHeader *object) {
            if (youngMemRange.IsAddressInRange(ToUintPtr(object))) {
                return ObjectStatus::ALIVE_OBJECT;
            }
            return this->marker_.MarkChecker(object);
        });
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CONCURRENT) {
            concurrentScope.Start();  // enable concurrency after GC_PHASE_SWEEP has been set
        }

        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (CONCURRENT && LanguageConfig::MT_MODE != MT_MODE_SINGLE) {
            // Run monitor deflation again to avoid reclaiming an object before its monitor is deflated.
            auto youngMr = this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
            this->GetPandaVm()->GetMonitorPool()->DeflateMonitorsWithCallBack([&youngMr, this](Monitor *monitor) {
                ObjectHeader *objectHeader = monitor->GetObject();
                return (!IsMarked(objectHeader)) && (!youngMr.IsAddressInRange(ToUintPtr(objectHeader)));
            });
        }

        this->GetObjectAllocator()->Collect(
            [this, &freedObjectSize, &freedObjectCount](ObjectHeader *object) {
                auto status = this->marker_.MarkChecker(object);
                if (status == ObjectStatus::DEAD_OBJECT) {
                    LOG_DEBUG_OBJECT_EVENTS << "DELETE OBJECT tenured: " << object;
                    freedObjectSize += GetAlignedObjectSize(GetObjectSize(object));
                    freedObjectCount++;
                }
                return status;
            },
            GCCollectMode::GC_ALL);
        this->GetObjectAllocator()->VisitAndRemoveFreePools([this](void *mem, size_t size) {
            this->GetCardTable()->ClearCardRange(ToUintPtr(mem), ToUintPtr(mem) + size);
            PoolManager::GetMmapMemPool()->FreePool(mem, size);
        });
    }

    this->memStats_.RecordSizeFreedTenured(freedObjectSize);
    this->memStats_.RecordCountFreedTenured(freedObjectCount);

    // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
    if constexpr (CONCURRENT) {
        // During the concurrent sweep phase newly created objects may be marked in InitGCBits,
        // so we wait until that is done; then we can safely unmark objects concurrently with the mutator.
        ASSERT(this->GetGCPhase() != GCPhase::GC_PHASE_SWEEP);  // Make sure we are out of the sweep scope
        this->GetObjectAllocator()->IterateOverTenuredObjects([this](ObjectHeader *obj) { this->marker_.UnMark(obj); });
    }
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::SweepYoungVmRefs()
{
    GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_SWEEP);
    // Sweep the string table here to avoid dangling references
    ASSERT(this->GetObjectAllocator()->GetYoungSpaceMemRanges().size() == 1);
    // Survivors have already been moved out of the young space, so any reference still pointing
    // into it is dead
    auto youngMemRange = this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
    this->GetPandaVm()->SweepVmRefs([&youngMemRange](ObjectHeader *objectHeader) {
        if (youngMemRange.IsAddressInRange(ToUintPtr(objectHeader))) {
            return ObjectStatus::DEAD_OBJECT;
        }
        return ObjectStatus::ALIVE_OBJECT;
    });
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::InGCSweepRange(const ObjectHeader *obj) const
{
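    // The sweep range is the generation being collected: the young space during young GC,
    // the tenured space during tenured GC, and the whole heap during full GC.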
    bool inYoungSpace = this->GetObjectAllocator()->IsObjectInYoungSpace(obj);
    auto phase = this->GetGCPhase();
    // Young GC is running and the object is in the young space
    if (phase == GCPhase::GC_PHASE_MARK_YOUNG && inYoungSpace) {
        return true;
    }

    // Tenured GC is running and the object is in the tenured space
    if (phase != GCPhase::GC_PHASE_MARK_YOUNG && !inYoungSpace) {
        return true;
    }

    return this->IsFullGC();
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::IsPostponeGCSupported() const
{
    // Gen GC doesn't support GC postponing because
    // we have to move young-space objects
    return false;
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::HaveEnoughSpaceToMove() const
{
    // Hack for pools: there are two types of pools in the tenured space, so in bad cases objects can be
    // moved to different spaces, and moving 4M of objects from the bump allocator to another allocator
    // may need more than 4M of space in that allocator - hence we need 3 empty pools.
    // NOTE(xucheng) : remove the checker when we can do partial young collection.
    // The minimum number of pools that guarantees we can move all objects out of the young space.
    constexpr size_t POOLS_NUM = 3;
    return this->GetObjectAllocator()->HaveEnoughPoolsInObjectSpace(POOLS_NUM);
}

TEMPLATE_CLASS_LANGUAGE_CONFIG(GenGC);

}  // namespace ark::mem
706