/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <memory>

#include "libpandabase/os/cpu_affinity.h"
#include "libpandabase/os/mem.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/utils/time.h"
#include "runtime/assert_gc_scope.h"
#include "runtime/include/class.h"
#include "runtime/include/coretypes/dyn_objects.h"
#include "runtime/include/locks.h"
#include "runtime/include/runtime.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/stack_walker-inl.h"
#include "runtime/mem/gc/epsilon/epsilon.h"
#include "runtime/mem/gc/epsilon-g1/epsilon-g1.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/gc/gc_root-inl.h"
#include "runtime/mem/gc/g1/g1-gc.h"
#include "runtime/mem/gc/gen-gc/gen-gc.h"
#include "runtime/mem/gc/stw-gc/stw-gc.h"
#include "runtime/mem/gc/workers/gc_workers_task_queue.h"
#include "runtime/mem/gc/workers/gc_workers_thread_pool.h"
#include "runtime/mem/pygote_space_allocator-inl.h"
#include "runtime/mem/heap_manager.h"
#include "runtime/mem/gc/reference-processor/reference_processor.h"
#include "runtime/mem/gc/gc-hung/gc_hung.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/object_accessor-inl.h"
#include "runtime/include/coretypes/class.h"
#include "runtime/thread_manager.h"
#include "runtime/mem/gc/gc_adaptive_stack_inl.h"

namespace ark::mem {
using TaggedValue = coretypes::TaggedValue;
using TaggedType = coretypes::TaggedType;
using DynClass = coretypes::DynClass;

GC::GC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings)
    : gcSettings_(settings),
      objectAllocator_(objectAllocator),
      internalAllocator_(InternalAllocator<>::GetInternalAllocatorFromRuntime())
{
    if (gcSettings_.UseTaskManagerForGC()) {
        // Create gc task queue for task manager
        auto *tm = taskmanager::TaskScheduler::GetTaskScheduler();
        gcWorkersTaskQueue_ = tm->CreateAndRegisterTaskQueue<decltype(internalAllocator_->Adapter())>(
            taskmanager::TaskType::GC, taskmanager::VMType::STATIC_VM, GC_TASK_QUEUE_PRIORITY);
        ASSERT(gcWorkersTaskQueue_ != nullptr);
    }
}

GC::~GC()
{
    InternalAllocatorPtr allocator = GetInternalAllocator();
    if (gcWorker_ != nullptr) {
        allocator->Delete(gcWorker_);
    }
    if (gcListenerManager_ != nullptr) {
        allocator->Delete(gcListenerManager_);
    }
    if (gcBarrierSet_ != nullptr) {
        allocator->Delete(gcBarrierSet_);
    }
    if (clearedReferences_ != nullptr) {
        allocator->Delete(clearedReferences_);
    }
    if (clearedReferencesLock_ != nullptr) {
        allocator->Delete(clearedReferencesLock_);
    }
    if (workersTaskPool_ != nullptr) {
        allocator->Delete(workersTaskPool_);
    }
    if (gcWorkersTaskQueue_ != nullptr) {
        taskmanager::TaskScheduler::GetTaskScheduler()->UnregisterAndDestroyTaskQueue<decltype(allocator->Adapter())>(
            gcWorkersTaskQueue_);
    }
}

Logger::Buffer GC::GetLogPrefix() const
{
    const char *phase = GCScopedPhase::GetPhaseAbbr(GetGCPhase());
    // Atomic with acquire order reason: data race with gc_counter_
    size_t counter = gcCounter_.load(std::memory_order_acquire);

    Logger::Buffer buffer;
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
    buffer.Printf("[%zu, %s]: ", counter, phase);

    return buffer;
}

GCType GC::GetType()
{
    return gcType_;
}

void GC::SetPandaVM(PandaVM *vm)
{
    vm_ = vm;
    referenceProcessor_ = vm->GetReferenceProcessor();
}

NativeGcTriggerType GC::GetNativeGcTriggerType()
{
    return gcSettings_.GetNativeGcTriggerType();
}

size_t GC::SimpleNativeAllocationGcWatermark()
{
    return GetPandaVm()->GetOptions().GetMaxFree();
}

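// Spin until the GC phase can be switched from IDLE to RUNNING.
// The safepoint is released between attempts so that the currently running collection can finish.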
NO_THREAD_SAFETY_ANALYSIS void GC::WaitForIdleGC()
{
    while (!CASGCPhase(GCPhase::GC_PHASE_IDLE, GCPhase::GC_PHASE_RUNNING)) {
        GetPandaVm()->GetRendezvous()->SafepointEnd();
        // Interrupt the running GC if possible
        OnWaitForIdleFail();
        // NOTE(dtrubenkov): resolve it more properly
        constexpr uint64_t WAIT_FINISHED = 100;
        // Use NativeSleep for all threads, as this thread shouldn't hold Mutator lock here
        os::thread::NativeSleepUS(std::chrono::microseconds(WAIT_FINISHED));
        GetPandaVm()->GetRendezvous()->SafepointBegin();
    }
}

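// Schedules a GC for native allocations once the registered native bytes exceed the watermark
// (only for the SIMPLE_STRATEGY trigger).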
inline void GC::TriggerGCForNative()
{
    auto nativeGcTriggerType = GetNativeGcTriggerType();
    ASSERT_PRINT((nativeGcTriggerType == NativeGcTriggerType::NO_NATIVE_GC_TRIGGER) ||
                     (nativeGcTriggerType == NativeGcTriggerType::SIMPLE_STRATEGY),
                 "Unknown Native GC Trigger type");
    switch (nativeGcTriggerType) {
        case NativeGcTriggerType::NO_NATIVE_GC_TRIGGER:
            break;
        case NativeGcTriggerType::SIMPLE_STRATEGY:
            // Atomic with relaxed order reason: data race with native_bytes_registered_ with no synchronization or
            // ordering constraints imposed on other reads or writes
            if (nativeBytesRegistered_.load(std::memory_order_relaxed) > SimpleNativeAllocationGcWatermark()) {
                auto task = MakePandaUnique<GCTask>(GCTaskCause::NATIVE_ALLOC_CAUSE, time::GetCurrentTimeInNanos());
                AddGCTask(false, std::move(task));
                ManagedThread::GetCurrent()->SafepointPoll();
            }
            break;
        default:
            LOG(FATAL, GC) << "Unknown Native GC Trigger type";
            break;
    }
}

void GC::Initialize(PandaVM *vm)
{
    trace::ScopedTrace scopedTrace(__PRETTY_FUNCTION__);
    // GC saves the PandaVM instance, so we get the allocator from the PandaVM.
    auto allocator = GetInternalAllocator();
    gcListenerManager_ = allocator->template New<GCListenerManager>();
    clearedReferencesLock_ = allocator->New<os::memory::Mutex>();
    os::memory::LockHolder holder(*clearedReferencesLock_);
    clearedReferences_ = allocator->New<PandaVector<ark::mem::Reference *>>(allocator->Adapter());
    this->SetPandaVM(vm);
    InitializeImpl();
    gcWorker_ = allocator->New<GCWorker>(this);
}

void GC::CreateWorkersTaskPool()
{
    ASSERT(workersTaskPool_ == nullptr);
    if (this->IsWorkerThreadsExist()) {
        auto allocator = GetInternalAllocator();
        GCWorkersTaskPool *gcTaskPool = nullptr;
        if (this->GetSettings()->UseThreadPoolForGC()) {
            // Use internal gc thread pool
            gcTaskPool = allocator->New<GCWorkersThreadPool>(this, this->GetSettings()->GCWorkersCount());
        } else {
            // Use common TaskManager
            ASSERT(this->GetSettings()->UseTaskManagerForGC());
            gcTaskPool = allocator->New<GCWorkersTaskQueue>(this);
        }
        ASSERT(gcTaskPool != nullptr);
        workersTaskPool_ = gcTaskPool;
    }
}

void GC::DestroyWorkersTaskPool()
{
    if (workersTaskPool_ == nullptr) {
        return;
    }
    workersTaskPool_->WaitUntilTasksEnd();
    auto allocator = this->GetInternalAllocator();
    allocator->Delete(workersTaskPool_);
    workersTaskPool_ = nullptr;
}

void GC::StartGC()
{
    CreateWorker();
}

void GC::StopGC()
{
    DestroyWorker();
    DestroyWorkersTaskPool();
}

void GC::SetupCpuAffinity()
{
    if (!gcSettings_.ManageGcThreadsAffinity()) {
        return;
    }
    // Try to get CPU affinity for the GC Thread
    if (UNLIKELY(!os::CpuAffinityManager::GetCurrentThreadAffinity(affinityBeforeGc_))) {
        affinityBeforeGc_.Clear();
        return;
    }
    // Try to use best + middle cores to prevent issues when the best core is used by another thread
    // and GC has to wait for it to finish.
    if (!os::CpuAffinityManager::SetAffinityForCurrentThread(os::CpuPower::BEST | os::CpuPower::MIDDLE)) {
        affinityBeforeGc_.Clear();
    }
    // Some GCs don't use GC Workers
    if (workersTaskPool_ != nullptr && this->GetSettings()->UseThreadPoolForGC()) {
        static_cast<GCWorkersThreadPool *>(workersTaskPool_)->SetAffinityForGCWorkers();
    }
}

void GC::SetupCpuAffinityAfterConcurrent()
{
    if (!gcSettings_.ManageGcThreadsAffinity()) {
        return;
    }
    os::CpuAffinityManager::SetAffinityForCurrentThread(os::CpuPower::BEST | os::CpuPower::MIDDLE);
    // Some GCs don't use GC Workers
    if (workersTaskPool_ != nullptr && this->GetSettings()->UseThreadPoolForGC()) {
        static_cast<GCWorkersThreadPool *>(workersTaskPool_)->SetAffinityForGCWorkers();
    }
}

void GC::ResetCpuAffinity(bool beforeConcurrent)
{
    if (!gcSettings_.ManageGcThreadsAffinity()) {
        return;
    }
    if (!affinityBeforeGc_.IsEmpty()) {
        // Set GC Threads on weak CPUs before concurrent if needed
        if (beforeConcurrent && gcSettings_.UseWeakCpuForGcConcurrent()) {
            os::CpuAffinityManager::SetAffinityForCurrentThread(os::CpuPower::WEAK);
        } else {  // else set on saved affinity
            os::CpuAffinityManager::SetAffinityForCurrentThread(affinityBeforeGc_);
        }
    }
    // Some GCs don't use GC Workers
    if (workersTaskPool_ != nullptr && this->GetSettings()->UseThreadPoolForGC()) {
        static_cast<GCWorkersThreadPool *>(workersTaskPool_)->UnsetAffinityForGCWorkers();
    }
}

void GC::SetupCpuAffinityBeforeConcurrent()
{
    ResetCpuAffinity(true);
}

void GC::RestoreCpuAffinity()
{
    ResetCpuAffinity(false);
}

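// Returns true if no collection ran while this task was waiting (the counter is unchanged),
// or if the pending task's cause compares greater than the cause of the last collection.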
bool GC::NeedRunGCAfterWaiting(size_t counterBeforeWaiting, const GCTask &task) const
{
    // Atomic with acquire order reason: data race with gc_counter_ with dependencies on reads after the load which
    // should become visible
    auto newCounter = gcCounter_.load(std::memory_order_acquire);
    ASSERT(newCounter >= counterBeforeWaiting);
    // Atomic with acquire order reason: data race with last_cause_ with dependencies on reads after the load which
    // should become visible
    return (newCounter == counterBeforeWaiting || lastCause_.load(std::memory_order_acquire) < task.reason);
}

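// Common preparation before running the GC phases: claim the RUNNING phase, set CPU affinity,
// reset timing, record the cause and, if configured, verify and dump the heap.
// Returns false if the collection is no longer needed after waiting for the idle phase.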
bool GC::GCPhasesPreparation(const GCTask &task)
{
    // Atomic with acquire order reason: data race with gc_counter_ with dependencies on reads after the load which
    // should become visible
    auto oldCounter = gcCounter_.load(std::memory_order_acquire);
    WaitForIdleGC();
    if (!this->NeedRunGCAfterWaiting(oldCounter, task)) {
        SetGCPhase(GCPhase::GC_PHASE_IDLE);
        return false;
    }
    this->SetupCpuAffinity();
    this->GetTiming()->Reset();  // Clear records.
    // Atomic with release order reason: data race with last_cause_ with dependencies on writes before the store which
    // should become visible
    lastCause_.store(task.reason, std::memory_order_release);
    if (gcSettings_.PreGCHeapVerification()) {
        trace::ScopedTrace preHeapVerifierTrace("PreGCHeapVeriFier");
        size_t failCount = VerifyHeap();
        if (gcSettings_.FailOnHeapVerification() && failCount > 0) {
            LOG(FATAL, GC) << "Heap corrupted before GC, HeapVerifier found " << failCount << " corruptions";
        }
    }
    // Atomic with acq_rel order reason: data race with gc_counter_ with dependencies on reads after the load and on
    // writes before the store
    gcCounter_.fetch_add(1, std::memory_order_acq_rel);
    if (gcSettings_.IsDumpHeap()) {
        PandaOStringStream os;
        os << "Heap dump before GC" << std::endl;
        GetPandaVm()->DumpHeap(&os);
        std::cerr << os.str() << std::endl;
    }
    return true;
}

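// Common finalization after the GC phases: log statistics, optionally dump and verify the heap,
// restore CPU affinity and return the collector to the IDLE phase.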
void GC::GCPhasesFinish(const GCTask &task)
{
    ASSERT(task.collectionType != GCCollectionType::NONE);
    LOG(INFO, GC) << "[" << gcCounter_ << "] [" << task.collectionType << " (" << task.reason << ")] "
                  << GetPandaVm()->GetGCStats()->GetStatistics();

    if (gcSettings_.IsDumpHeap()) {
        PandaOStringStream os;
        os << "Heap dump after GC" << std::endl;
        GetPandaVm()->DumpHeap(&os);
        std::cerr << os.str() << std::endl;
    }

    if (gcSettings_.PostGCHeapVerification()) {
        trace::ScopedTrace postHeapVerifierTrace("PostGCHeapVeriFier");
        size_t failCount = VerifyHeap();
        if (gcSettings_.FailOnHeapVerification() && failCount > 0) {
            LOG(FATAL, GC) << "Heap corrupted after GC, HeapVerifier found " << failCount << " corruptions";
        }
    }
    this->RestoreCpuAffinity();

    SetGCPhase(GCPhase::GC_PHASE_IDLE);
}

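// Main GC driver: runs preparation, then the collector-specific RunPhasesImpl under statistics and
// hung-detection scopes, releases unused internal allocator pools and finishes the phases.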
// NOLINTNEXTLINE(performance-unnecessary-value-param)
void GC::RunPhases(GCTask &task)
{
    DCHECK_ALLOW_GARBAGE_COLLECTION;
    trace::ScopedTrace scopedTrace(__FUNCTION__);
    bool needRunGCAfterWaiting = GCPhasesPreparation(task);
    if (!needRunGCAfterWaiting) {
        return;
    }
    size_t bytesInHeapBeforeGc = GetPandaVm()->GetMemStats()->GetFootprintHeap();
    LOG_DEBUG_GC << "Bytes in heap before GC " << std::dec << bytesInHeapBeforeGc;
    {
        GCScopedStats scopedStats(GetPandaVm()->GetGCStats(), gcType_ == GCType::STW_GC ? GetStats() : nullptr);
        ScopedGcHung scopedHung(&task);
        GetPandaVm()->GetGCStats()->ResetLastPause();

        FireGCStarted(task, bytesInHeapBeforeGc);
        PreRunPhasesImpl();
        clearSoftReferencesEnabled_ = task.reason == GCTaskCause::OOM_CAUSE || IsExplicitFull(task);
        // NOLINTNEXTLINE(performance-unnecessary-value-param)
        RunPhasesImpl(task);
        // Clear Internal allocator unused pools (must do it on pause to avoid race conditions):
        // - Clear global part:
        InternalAllocator<>::GetInternalAllocatorFromRuntime()->VisitAndRemoveFreePools(
            [](void *mem, [[maybe_unused]] size_t size) { PoolManager::GetMmapMemPool()->FreePool(mem, size); });
        // - Clear local part:
        ClearLocalInternalAllocatorPools();

        size_t bytesInHeapAfterGc = GetPandaVm()->GetMemStats()->GetFootprintHeap();
        // There is a case when bytes_in_heap_after_gc > 0 and bytes_in_heap_before_gc == 0,
        // because TLABs are registered during GC
        if (bytesInHeapAfterGc > 0 && bytesInHeapBeforeGc > 0) {
            GetStats()->AddReclaimRatioValue(1 - static_cast<double>(bytesInHeapAfterGc) / bytesInHeapBeforeGc);
        }
        LOG_DEBUG_GC << "Bytes in heap after GC " << std::dec << bytesInHeapAfterGc;
        FireGCFinished(task, bytesInHeapBeforeGc, bytesInHeapAfterGc);
    }
    GCPhasesFinish(task);
}

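// Factory: creates a concrete collector for the requested GCType using the runtime internal allocator.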
template <class LanguageConfig>
GC *CreateGC(GCType gcType, ObjectAllocatorBase *objectAllocator, const GCSettings &settings)
{
    GC *ret = nullptr;
    InternalAllocatorPtr allocator {InternalAllocator<>::GetInternalAllocatorFromRuntime()};

    switch (gcType) {
        case GCType::EPSILON_GC:
            ret = allocator->New<EpsilonGC<LanguageConfig>>(objectAllocator, settings);
            break;
        case GCType::EPSILON_G1_GC:
            ret = allocator->New<EpsilonG1GC<LanguageConfig>>(objectAllocator, settings);
            break;
        case GCType::STW_GC:
            ret = allocator->New<StwGC<LanguageConfig>>(objectAllocator, settings);
            break;
        case GCType::GEN_GC:
            ret = allocator->New<GenGC<LanguageConfig>>(objectAllocator, settings);
            break;
        case GCType::G1_GC:
            ret = allocator->New<G1GC<LanguageConfig>>(objectAllocator, settings);
            break;
        default:
            LOG(FATAL, GC) << "Unknown GC type";
            break;
    }
    return ret;
}

bool GC::CheckGCCause(GCTaskCause cause) const
{
    // Cross reference cause is only suitable for XGC
    if (cause == GCTaskCause::CROSSREF_CAUSE) {
        return false;
    }
    return cause != GCTaskCause::INVALID_CAUSE;
}

bool GC::IsMarkedEx(const ObjectHeader *object) const
{
    return IsMarked(object);
}

bool GC::MarkObjectIfNotMarked(ObjectHeader *objectHeader)
{
    ASSERT(objectHeader != nullptr);
    if (IsMarked(objectHeader)) {
        return false;
    }
    MarkObject(objectHeader);
    return true;
}

void GC::ProcessReference(GCMarkingStackType *objectsStack, const BaseClass *cls, const ObjectHeader *ref,
                          const ReferenceProcessPredicateT &pred)
{
    ASSERT(referenceProcessor_ != nullptr);
    referenceProcessor_->HandleReference(this, objectsStack, cls, ref, pred);
}

void GC::ProcessReferenceForSinglePassCompaction(const BaseClass *cls, const ObjectHeader *ref,
                                                 const ReferenceProcessorT &processor)
{
    ASSERT(referenceProcessor_ != nullptr);
    referenceProcessor_->HandleReference(this, cls, ref, processor);
}

void GC::AddReference(ObjectHeader *fromObj, ObjectHeader *object)
{
    ASSERT(IsMarked(object));
    GCMarkingStackType references(this);
    // NOTE(alovkov): support stack with workers here & put all refs in stack and only then process altogether for once
    ASSERT(!references.IsWorkersTaskSupported());
    references.PushToStack(fromObj, object);
    MarkReferences(&references, phase_);
    if (gcType_ != GCType::EPSILON_GC) {
        ASSERT(references.Empty());
    }
}

// NOLINTNEXTLINE(performance-unnecessary-value-param)
void GC::ProcessReferences(GCPhase gcPhase, const GCTask &task, const ReferenceClearPredicateT &pred)
{
    trace::ScopedTrace scopedTrace(__FUNCTION__);
    LOG(DEBUG, REF_PROC) << "Start processing cleared references";
    ASSERT(referenceProcessor_ != nullptr);
    bool clearSoftReferences = task.reason == GCTaskCause::OOM_CAUSE || IsExplicitFull(task);
    referenceProcessor_->ProcessReferences(false, clearSoftReferences, gcPhase, pred);
    Reference *processedRef = referenceProcessor_->CollectClearedReferences();
    if (processedRef != nullptr) {
        os::memory::LockHolder holder(*clearedReferencesLock_);
        // NOTE(alovkov): get rid of cleared_references_ and just enqueue refs here?
        clearedReferences_->push_back(processedRef);
    }
}

void GC::ProcessReferences(const mem::GC::ReferenceClearPredicateT &pred)
{
    ASSERT(!this->IsFullGC());
    trace::ScopedTrace scopedTrace(__FUNCTION__);
    LOG(DEBUG, REF_PROC) << "Start processing cleared references";
    ASSERT(referenceProcessor_ != nullptr);
    referenceProcessor_->ProcessReferencesAfterCompaction(pred);
    Reference *processedRef = referenceProcessor_->CollectClearedReferences();
    if (processedRef != nullptr) {
        os::memory::LockHolder holder(*clearedReferencesLock_);
        clearedReferences_->push_back(processedRef);
    }
}

void GC::EvacuateStartingWith([[maybe_unused]] void *ref)
{
    ASSERT_PRINT(false, "Should be implemented by subclasses");
}

bool GC::IsClearSoftReferencesEnabled() const
{
    return clearSoftReferencesEnabled_;
}

void GC::SetGCPhase(GCPhase gcPhase)
{
    phase_ = gcPhase;
}

size_t GC::GetCounter() const
{
    return gcCounter_;
}

void GC::PostponeGCStart()
{
    ASSERT(IsPostponeGCSupported());
    isPostponeEnabled_ = true;
}

void GC::PostponeGCEnd()
{
    ASSERT(IsPostponeGCSupported());
    ASSERT(IsPostponeEnabled());
    isPostponeEnabled_ = false;
}

bool GC::IsPostponeEnabled() const
{
    return isPostponeEnabled_;
}

void GC::DestroyWorker()
{
    // Atomic with seq_cst order reason: data race with gc_running_ with requirement for sequentially consistent order
    // where threads observe all modifications in the same order
    gcRunning_.store(false, std::memory_order_seq_cst);
    gcWorker_->FinalizeAndDestroyWorker();
}

void GC::CreateWorker()
{
    // Atomic with seq_cst order reason: data race with gc_running_ with requirement for sequentially consistent order
    // where threads observe all modifications in the same order
    gcRunning_.store(true, std::memory_order_seq_cst);
    ASSERT(gcWorker_ != nullptr);
    gcWorker_->CreateAndStartWorker();
}

void GC::DisableWorkerThreads()
{
    gcSettings_.SetGCWorkersCount(0);
    gcSettings_.SetParallelMarkingEnabled(false);
    gcSettings_.SetParallelCompactingEnabled(false);
    gcSettings_.SetParallelRefUpdatingEnabled(false);
}

void GC::EnableWorkerThreads()
{
    const RuntimeOptions &options = Runtime::GetOptions();
    gcSettings_.SetGCWorkersCount(options.GetGcWorkersCount());
    gcSettings_.SetParallelMarkingEnabled(options.IsGcParallelMarkingEnabled() && (options.GetGcWorkersCount() != 0));
    gcSettings_.SetParallelCompactingEnabled(options.IsGcParallelCompactingEnabled() &&
                                             (options.GetGcWorkersCount() != 0));
    gcSettings_.SetParallelRefUpdatingEnabled(options.IsGcParallelRefUpdatingEnabled() &&
                                              (options.GetGcWorkersCount() != 0));
}

void GC::PreZygoteFork()
{
    DestroyWorker();
    if (gcSettings_.UseTaskManagerForGC()) {
        ASSERT(gcWorkersTaskQueue_ != nullptr);
        ASSERT(gcWorkersTaskQueue_->IsEmpty());
    }
}

void GC::PostZygoteFork()
{
    CreateWorker();
}

class GC::PostForkGCTask : public GCTask {
public:
    PostForkGCTask(GCTaskCause gcReason, uint64_t gcTargetTime) : GCTask(gcReason, gcTargetTime) {}

    void Run(mem::GC &gc) override
    {
        LOG(DEBUG, GC) << "Running PostForkGCTask";
        gc.GetPandaVm()->GetGCTrigger()->RestoreMinTargetFootprint();
        gc.PostForkCallback();
        GCTask::Run(gc);
    }

    ~PostForkGCTask() override = default;

    NO_COPY_SEMANTIC(PostForkGCTask);
    NO_MOVE_SEMANTIC(PostForkGCTask);
};

void GC::PreStartup()
{
    // Add a delayed GCTask.
    if ((!Runtime::GetCurrent()->IsZygote()) && (!gcSettings_.RunGCInPlace())) {
        // Divide by 2 to temporarily set the target footprint to a high value and disable GC during app startup.
        GetPandaVm()->GetGCTrigger()->SetMinTargetFootprint(Runtime::GetOptions().GetHeapSizeLimit() / 2);
        PreStartupImp();
        constexpr uint64_t DISABLE_GC_DURATION_NS = 2000 * 1000 * 1000;
        auto task = MakePandaUnique<PostForkGCTask>(GCTaskCause::STARTUP_COMPLETE_CAUSE,
                                                    time::GetCurrentTimeInNanos() + DISABLE_GC_DURATION_NS);
        AddGCTask(true, std::move(task));
        LOG(DEBUG, GC) << "Add PostForkGCTask";
    }
}

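// Adds a GC task: in RunGCInPlace mode the collection is executed synchronously on the caller
// (via WaitForGC / WaitForGCInManaged); otherwise the task is handed to the GC worker.
// Threshold-triggered tasks are gated by canAddGcTask_ so they are not queued repeatedly.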
// NOLINTNEXTLINE(performance-unnecessary-value-param)
bool GC::AddGCTask(bool isManaged, PandaUniquePtr<GCTask> task)
{
    bool triggeredByThreshold = (task->reason == GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE);
    if (gcSettings_.RunGCInPlace()) {
        auto *gcTask = task.get();
        if (IsGCRunning()) {
            if (isManaged) {
                return WaitForGCInManaged(*gcTask);
            }
            return WaitForGC(*gcTask);
        }
    } else {
        if (triggeredByThreshold) {
            bool expect = true;
            if (canAddGcTask_.compare_exchange_strong(expect, false, std::memory_order_seq_cst)) {
                return gcWorker_->AddTask(std::move(task));
            }
        } else {
            return gcWorker_->AddTask(std::move(task));
        }
    }
    return false;
}

bool GC::IsReference(const BaseClass *cls, const ObjectHeader *ref, const ReferenceCheckPredicateT &pred)
{
    ASSERT(referenceProcessor_ != nullptr);
    return referenceProcessor_->IsReference(cls, ref, pred);
}

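// Drains the collected cleared references and hands them to the reference processor for enqueueing.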
void GC::EnqueueReferences()
{
    while (true) {
        ark::mem::Reference *ref = nullptr;
        {
            os::memory::LockHolder holder(*clearedReferencesLock_);
            if (clearedReferences_->empty()) {
                break;
            }
            ref = clearedReferences_->back();
            clearedReferences_->pop_back();
        }
        ASSERT(ref != nullptr);
        ASSERT(referenceProcessor_ != nullptr);
        referenceProcessor_->ScheduleForEnqueue(ref);
    }
}

bool GC::IsFullGC() const
{
    // Atomic with relaxed order reason: data race with is_full_gc_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    return isFullGc_.load(std::memory_order_relaxed);
}

void GC::SetFullGC(bool value)
{
    // Atomic with relaxed order reason: data race with is_full_gc_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    isFullGc_.store(value, std::memory_order_relaxed);
}

void GC::NotifyNativeAllocations()
{
    // Atomic with relaxed order reason: data race with native_objects_notified_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    nativeObjectsNotified_.fetch_add(NOTIFY_NATIVE_INTERVAL, std::memory_order_relaxed);
    TriggerGCForNative();
}

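// Accounts bytes allocated by native code. The counter saturates at SIZE_MAX on overflow and
// the call may trigger a native-allocation GC.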
void GC::RegisterNativeAllocation(size_t bytes)
{
    ASSERT_NATIVE_CODE();
    size_t allocated;
    do {
        // Atomic with relaxed order reason: data race with native_bytes_registered_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        allocated = nativeBytesRegistered_.load(std::memory_order_relaxed);
    } while (!nativeBytesRegistered_.compare_exchange_weak(allocated, allocated + bytes));
    if (allocated > std::numeric_limits<size_t>::max() - bytes) {
        // Atomic with relaxed order reason: data race with native_bytes_registered_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        nativeBytesRegistered_.store(std::numeric_limits<size_t>::max(), std::memory_order_relaxed);
    }
    TriggerGCForNative();
}

void GC::RegisterNativeFree(size_t bytes)
{
    size_t allocated;
    size_t newFreedBytes;
    do {
        // Atomic with relaxed order reason: data race with native_bytes_registered_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        allocated = nativeBytesRegistered_.load(std::memory_order_relaxed);
        newFreedBytes = std::min(allocated, bytes);
    } while (!nativeBytesRegistered_.compare_exchange_weak(allocated, allocated - newFreedBytes));
}

size_t GC::GetNativeBytesFromMallinfoAndRegister() const
{
    size_t mallinfoBytes = ark::os::mem::GetNativeBytesFromMallinfo();
    // Atomic with relaxed order reason: data race with native_bytes_registered_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    size_t allBytes = mallinfoBytes + nativeBytesRegistered_.load(std::memory_order_relaxed);
    return allBytes;
}

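// Runs the collection in the calling thread under a safepoint (mutators are suspended),
// then performs post-GC housekeeping: notifications, reference enqueueing and finalizers.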
bool GC::WaitForGC(GCTask task)
{
    // NOTE(maksenov): Notify only about pauses (#4681)
    Runtime::GetCurrent()->GetNotificationManager()->GarbageCollectorStartEvent();
    // Atomic with acquire order reason: data race with gc_counter_ with dependencies on reads after the load which
    // should become visible
    auto oldCounter = this->gcCounter_.load(std::memory_order_acquire);
    Timing suspendThreadsTiming;
    {
        ScopedTiming t("SuspendThreads", suspendThreadsTiming);
        this->GetPandaVm()->GetRendezvous()->SafepointBegin();
    }
    if (!this->NeedRunGCAfterWaiting(oldCounter, task)) {
        this->GetPandaVm()->GetRendezvous()->SafepointEnd();
        return false;
    }

    // Create a copy of the constant GCTask to be able to change its value
    this->RunPhases(task);

    if (UNLIKELY(this->IsLogDetailedGcInfoEnabled())) {
        PrintDetailedLog();
    }

    this->GetPandaVm()->GetRendezvous()->SafepointEnd();
    Runtime::GetCurrent()->GetNotificationManager()->GarbageCollectorFinishEvent();
    this->GetPandaVm()->HandleGCFinished();
    this->GetPandaVm()->HandleEnqueueReferences();
    this->GetPandaVm()->ProcessReferenceFinalizers();
    return true;
}

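// Variant of WaitForGC for managed threads: the mutator lock is released for the duration of the
// collection so the safepoint can be reached, and re-acquired as a read lock afterwards.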
bool GC::WaitForGCInManaged(const GCTask &task)
{
    Thread *baseThread = Thread::GetCurrent();
    if (ManagedThread::ThreadIsManagedThread(baseThread)) {
        ManagedThread *thread = ManagedThread::CastFromThread(baseThread);
        ASSERT(thread->GetMutatorLock()->HasLock());
        [[maybe_unused]] bool isDaemon = MTManagedThread::ThreadIsMTManagedThread(baseThread) &&
                                         MTManagedThread::CastFromThread(baseThread)->IsDaemon();
        ASSERT(!isDaemon || thread->GetStatus() == ThreadStatus::RUNNING);
        vm_->GetMutatorLock()->Unlock();
        thread->PrintSuspensionStackIfNeeded();
        WaitForGC(task);
        vm_->GetMutatorLock()->ReadLock();
        ASSERT(vm_->GetMutatorLock()->HasLock());
        this->GetPandaVm()->HandleGCRoutineInMutator();
        return true;
    }
    return false;
}

void GC::StartConcurrentScopeRoutine() const {}

void GC::EndConcurrentScopeRoutine() const {}

void GC::PrintDetailedLog()
{
    for (auto &footprint : this->footprintList_) {
        LOG(INFO, GC) << footprint.first << " : " << footprint.second;
    }
    LOG(INFO, GC) << this->GetTiming()->Dump();
}

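// RAII scope for the concurrent part of a collection: Start() ends the safepoint so mutators run
// concurrently with the GC; the destructor re-enters the safepoint and restores GC thread affinity.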
ConcurrentScope::ConcurrentScope(GC *gc, bool autoStart)
{
    LOG(DEBUG, GC) << "Start ConcurrentScope";
    gc_ = gc;
    if (autoStart) {
        Start();
    }
}

ConcurrentScope::~ConcurrentScope()
{
    LOG(DEBUG, GC) << "Stop ConcurrentScope";
    if (started_ && gc_->IsConcurrencyAllowed()) {
        gc_->GetPandaVm()->GetRendezvous()->SafepointBegin();
        gc_->SetupCpuAffinityAfterConcurrent();
        gc_->EndConcurrentScopeRoutine();
    }
}

NO_THREAD_SAFETY_ANALYSIS void ConcurrentScope::Start()
{
    if (!started_ && gc_->IsConcurrencyAllowed()) {
        gc_->StartConcurrentScopeRoutine();
        gc_->SetupCpuAffinityBeforeConcurrent();
        gc_->GetPandaVm()->GetRendezvous()->SafepointEnd();
        started_ = true;
    }
}

void GC::WaitForGCOnPygoteFork(const GCTask &task)
{
    // do nothing if no pygote space
    auto pygoteSpaceAllocator = objectAllocator_->GetPygoteSpaceAllocator();
    if (pygoteSpaceAllocator == nullptr) {
        return;
    }

    // do nothing if not at first pygote fork
    if (pygoteSpaceAllocator->GetState() != PygoteSpaceState::STATE_PYGOTE_INIT) {
        return;
    }

    LOG(DEBUG, GC) << "== GC WaitForGCOnPygoteFork Start ==";

    // do we need a lock?
    // it looks like all other threads have been stopped before the pygote fork

    // 0. indicate that we're rebuilding pygote space
    pygoteSpaceAllocator->SetState(PygoteSpaceState::STATE_PYGOTE_FORKING);

    // 1. trigger gc
    WaitForGC(task);

    // 2. move other space to pygote space
    MoveObjectsToPygoteSpace();

    // 3. indicate that we are done
    pygoteSpaceAllocator->SetState(PygoteSpaceState::STATE_PYGOTE_FORKED);

    // 4. disable pygote for allocation
    objectAllocator_->DisablePygoteAlloc();

    LOG(DEBUG, GC) << "== GC WaitForGCOnPygoteFork End ==";
}

bool GC::IsOnPygoteFork() const
{
    auto pygoteSpaceAllocator = objectAllocator_->GetPygoteSpaceAllocator();
    return pygoteSpaceAllocator != nullptr &&
           pygoteSpaceAllocator->GetState() == PygoteSpaceState::STATE_PYGOTE_FORKING;
}

void GC::MoveObjectsToPygoteSpace()
{
    trace::ScopedTrace scopedTrace(__FUNCTION__);
    LOG(DEBUG, GC) << "MoveObjectsToPygoteSpace: start";

    size_t allSizeMove = 0;
    size_t movedObjectsNum = 0;
    size_t bytesInHeapBeforeMove = GetPandaVm()->GetMemStats()->GetFootprintHeap();
    auto pygoteSpaceAllocator = objectAllocator_->GetPygoteSpaceAllocator();
    ObjectVisitor moveVisitor([this, &pygoteSpaceAllocator, &movedObjectsNum, &allSizeMove](ObjectHeader *src) -> void {
        size_t size = GetObjectSize(src);
        auto dst = reinterpret_cast<ObjectHeader *>(pygoteSpaceAllocator->Alloc(size));
        ASSERT(dst != nullptr);
        memcpy_s(dst, size, src, size);
        allSizeMove += size;
        movedObjectsNum++;
        SetForwardAddress(src, dst);
        LOG_DEBUG_GC << "object MOVED from " << std::hex << src << " to " << dst << ", size = " << std::dec << size;
    });

    // move all small movable objects to pygote space
    objectAllocator_->IterateRegularSizeObjects(moveVisitor);

    LOG(DEBUG, GC) << "MoveObjectsToPygoteSpace: move_num = " << movedObjectsNum << ", move_size = " << allSizeMove;

    if (allSizeMove > 0) {
        GetStats()->AddMemoryValue(allSizeMove, MemoryTypeStats::MOVED_BYTES);
        GetStats()->AddObjectsValue(movedObjectsNum, ObjectTypeStats::MOVED_OBJECTS);
    }
    if (bytesInHeapBeforeMove > 0) {
        GetStats()->AddCopiedRatioValue(static_cast<double>(allSizeMove) / bytesInHeapBeforeMove);
    }

    // Update because we moved objects from object_allocator -> pygote space
    UpdateRefsToMovedObjectsInPygoteSpace();
    CommonUpdateRefsToMovedObjects();

    // Clear the moved objects in old space
    objectAllocator_->FreeObjectsMovedToPygoteSpace();

    LOG(DEBUG, GC) << "MoveObjectsToPygoteSpace: finish";
}

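// Installs the forwarding address of 'dst' into the mark word of 'src' via a CAS loop; for a moved
// HClass the managed-object reference inside 'dst' is fixed up first.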
void GC::SetForwardAddress(ObjectHeader *src, ObjectHeader *dst)
{
    auto baseCls = src->ClassAddr<BaseClass>();
    if (baseCls->IsDynamicClass()) {
        auto cls = static_cast<HClass *>(baseCls);
        // Note: during the moving phase 'src' is forwarded to 'dst'. If 'src' is a DynClass,
        //       the 'manage-object' inside 'dst' won't be updated to 'dst' because 'dst' is not in GC-status.
        //       To fix it, we update the 'manage-object' here rather than in the updating phase.
        if (cls->IsHClass()) {
            size_t offset = ObjectHeader::ObjectHeaderSize() + HClass::GetManagedObjectOffset();
            dst->SetFieldObject<false, false, true>(GetPandaVm()->GetAssociatedThread(), offset, dst);
        }
    }

    // Set fwd address in src
    bool updateRes = false;
    do {
        MarkWord markWord = src->AtomicGetMark();
        MarkWord fwdMarkWord =
            markWord.DecodeFromForwardingAddress(static_cast<MarkWord::MarkWordSize>(ToUintPtr(dst)));
        updateRes = src->AtomicSetMark<false>(markWord, fwdMarkWord);
    } while (!updateRes);
}

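// Walks all frames of the thread and rewrites virtual-register references that point to
// forwarded (moved) objects with their new addresses.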
void GC::UpdateRefsInVRegs(ManagedThread *thread)
{
    LOG_DEBUG_GC << "Update frames for thread: " << thread->GetId();
    for (auto pframe = StackWalker::Create(thread); pframe.HasFrame(); pframe.NextFrame()) {
        LOG_DEBUG_GC << "Frame for method " << pframe.GetMethod()->GetFullName();
        auto iterator = [&pframe, this](auto &regInfo, auto &vreg) {
            ObjectHeader *objectHeader = vreg.GetReference();
            if (objectHeader == nullptr) {
                return true;
            }
            MarkWord markWord = objectHeader->AtomicGetMark();
            if (markWord.GetState() != MarkWord::ObjectState::STATE_GC) {
                return true;
            }
            MarkWord::MarkWordSize addr = markWord.GetForwardingAddress();
            LOG_DEBUG_GC << "Update vreg, vreg old val = " << std::hex << objectHeader << ", new val = 0x" << addr;
            LOG_IF(regInfo.IsAccumulator(), DEBUG, GC) << "^ acc reg";
            if (!pframe.IsCFrame() && regInfo.IsAccumulator()) {
                LOG_DEBUG_GC << "^ acc updated";
                vreg.SetReference(reinterpret_cast<ObjectHeader *>(addr));
            } else {
                pframe.template SetVRegValue<std::is_same_v<decltype(vreg), interpreter::DynamicVRegisterRef &>>(
                    regInfo, reinterpret_cast<ObjectHeader *>(addr));
            }
            return true;
        };
        pframe.IterateObjectsWithInfo(iterator);
    }
}

const ObjectHeader *GC::PopObjectFromStack(GCMarkingStackType *objectsStack)
{
    auto *object = objectsStack->PopFromStack();
    ASSERT(object != nullptr);
    return object;
}

bool GC::IsGenerational() const
{
    return IsGenerationalGCType(gcType_);
}

void GC::GCListenerManager::AddListener(GCListener *listener)
{
    os::memory::LockHolder lh(listenerLock_);
    newListeners_.insert(listener);
}

void GC::GCListenerManager::RemoveListener(GCListener *listener)
{
    os::memory::LockHolder lh(listenerLock_);
    listenersForRemove_.insert(listener);
}

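// Applies the deferred listener additions and removals; called at GC start so that the set of
// listeners does not change in the middle of a collection.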
void GC::GCListenerManager::NormalizeListenersOnStartGC()
{
    os::memory::LockHolder lh(listenerLock_);
    for (auto *listenerForRemove : listenersForRemove_) {
        if (newListeners_.find(listenerForRemove) != newListeners_.end()) {
            newListeners_.erase(listenerForRemove);
        }
        auto it = currentListeners_.find(listenerForRemove);
        if (it != currentListeners_.end()) {
            LOG(DEBUG, GC) << "Remove listener for GC: " << listenerForRemove;
            currentListeners_.erase(it);
        }
    }
    listenersForRemove_.clear();
    for (auto *newListener : newListeners_) {
        LOG(DEBUG, GC) << "Add new listener for GC: " << newListener;
        currentListeners_.insert(newListener);
    }
    newListeners_.clear();
}

void GC::FireGCStarted(const GCTask &task, size_t bytesInHeapBeforeGc)
{
    gcListenerManager_->NormalizeListenersOnStartGC();
    gcListenerManager_->IterateOverListeners(
        [&](GCListener *listener) { listener->GCStarted(task, bytesInHeapBeforeGc); });
}

void GC::FireGCFinished(const GCTask &task, size_t bytesInHeapBeforeGc, size_t bytesInHeapAfterGc)
{
    gcListenerManager_->IterateOverListeners(
        [&](GCListener *listener) { listener->GCFinished(task, bytesInHeapBeforeGc, bytesInHeapAfterGc); });
}

void GC::FireGCPhaseStarted(GCPhase phase)
{
    gcListenerManager_->IterateOverListeners([phase](GCListener *listener) { listener->GCPhaseStarted(phase); });
}

void GC::FireGCPhaseFinished(GCPhase phase)
{
    gcListenerManager_->IterateOverListeners([phase](GCListener *listener) { listener->GCPhaseFinished(phase); });
}

void GC::OnWaitForIdleFail() {}

TEMPLATE_GC_CREATE_GC();

}  // namespace ark::mem