/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_GC_GC_H
#define PANDA_RUNTIME_MEM_GC_GC_H

#include <atomic>
#include <map>
#include <string_view>
#include <vector>

#include "libpandabase/os/cpu_affinity.h"
#include "libpandabase/os/mutex.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/taskmanager/task_queue.h"
#include "libpandabase/trace/trace.h"
#include "libpandabase/utils/expected.h"
#include "runtime/include/gc_task.h"
#include "runtime/include/object_header.h"
#include "runtime/include/language_config.h"
#include "runtime/include/locks.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/mem/panda_string.h"
#include "runtime/mem/allocator_adapter.h"
#include "runtime/mem/gc/gc_settings.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "runtime/mem/gc/gc_phase.h"
#include "runtime/mem/gc/gc_root.h"
#include "runtime/mem/gc/gc_adaptive_marking_stack.h"
#include "runtime/mem/gc/gc_scope.h"
#include "runtime/mem/gc/gc_scoped_phase.h"
#include "runtime/mem/gc/gc_stats.h"
#include "runtime/mem/gc/gc_types.h"
#include "runtime/mem/refstorage/reference.h"
#include "runtime/mem/gc/bitmap.h"
#include "runtime/mem/gc/workers/gc_worker.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/timing.h"
#include "runtime/mem/region_allocator.h"

namespace ark {
class BaseClass;
class HClass;
class PandaVM;
class Timing;
namespace mem {
class G1GCTest;
class GlobalObjectStorage;
class ReferenceProcessor;
template <MTModeT MT_MODE>
class ObjectAllocatorG1;
namespace test {
class MemStatsGenGCTest;
class ReferenceStorageTest;
class RemSetTest;
}  // namespace test
namespace ecmascript {
class EcmaReferenceProcessor;
}  // namespace ecmascript
}  // namespace mem
}  // namespace ark

namespace ark::coretypes {
class Array;
class DynClass;
}  // namespace ark::coretypes

namespace ark::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_GC LOG(DEBUG, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_INFO_GC LOG(INFO, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_OBJECT_EVENTS LOG(DEBUG, MM_OBJECT_EVENTS)

// forward declarations:
class GCListener;
class GCScopePhase;
class GCScopedPhase;
class GCQueueInterface;
class GCDynamicObjectHelpers;
class GCWorkersTaskPool;
class GCWorkersTask;

enum class GCError { GC_ERROR_NO_ROOTS, GC_ERROR_NO_FRAMES, GC_ERROR_LAST = GC_ERROR_NO_FRAMES };

enum ClassRootsVisitFlag : bool {
    ENABLED = true,
    DISABLED = false,
};

enum CardTableVisitFlag : bool {
    VISIT_ENABLED = true,
    VISIT_DISABLED = false,
};

enum BuffersKeepingFlag : bool {
    KEEP = true,
    DELETE = false,
};

class GCListener {
public:
    GCListener() = default;
    NO_COPY_SEMANTIC(GCListener);
    DEFAULT_MOVE_SEMANTIC(GCListener);
    virtual ~GCListener() = default;
    virtual void GCStarted([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSize) {}
    virtual void GCFinished([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSizeBeforeGc,
                            [[maybe_unused]] size_t heapSize)
    {
    }
    virtual void GCPhaseStarted([[maybe_unused]] GCPhase phase) {}
    virtual void GCPhaseFinished([[maybe_unused]] GCPhase phase) {}
};
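
// Example (illustrative sketch, not part of the original header): a minimal
// listener that reports heap sizes; register it via GC::AddListener() and
// detach it via GC::RemoveListener(). The LogHeapDeltaListener name is
// hypothetical.
//
//   class LogHeapDeltaListener : public GCListener {
//   public:
//       void GCFinished([[maybe_unused]] const GCTask &task, size_t heapSizeBeforeGc, size_t heapSize) override
//       {
//           LOG(INFO, GC) << "heap: " << heapSizeBeforeGc << " -> " << heapSize;
//       }
//   };
//
//   // usage: LogHeapDeltaListener listener; gc->AddListener(&listener);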

class GCExtensionData;

using UpdateRefInObject = std::function<void(ObjectHeader *)>;

// base class for all GCs
class GC {
public:
    using MarkPreprocess = std::function<void(const ObjectHeader *, BaseClass *)>;
    using ReferenceCheckPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceClearPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceProcessPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceProcessorT = std::function<void(void *)>;

    static constexpr bool EmptyReferenceProcessPredicate([[maybe_unused]] const ObjectHeader *ref)
    {
        return true;
    }

    static constexpr void EmptyMarkPreprocess([[maybe_unused]] const ObjectHeader *ref,
                                              [[maybe_unused]] BaseClass *baseKlass)
    {
    }
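
    // Illustrative note (not part of the original header): the no-op helpers
    // above serve as defaults wherever a reference predicate or mark
    // preprocess hook is expected; e.g., a sketch for a caller that wants
    // every discovered reference cleared unconditionally:
    //
    //   gc->ProcessReferences(GCPhase::GC_PHASE_MARK, task, GC::EmptyReferenceProcessPredicate);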

    explicit GC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings);
    NO_COPY_SEMANTIC(GC);
    NO_MOVE_SEMANTIC(GC);
    virtual ~GC() = 0;

    GCType GetType();

    /// @brief Initialize GC
    void Initialize(PandaVM *vm);

    /**
     * @brief Starts GC after initialization
     * Creates worker thread, sets gc_running_ to true
     */
    virtual void StartGC();

    /**
     * @brief Stops GC for runtime destruction
     * Joins GC thread, clears queue
     */
    virtual void StopGC();

    /**
     * Should be used to wait while the GC works exclusively.
     * Note: for non-MT STW GC this can be used to run the GC.
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason.
     */
    virtual bool WaitForGC(GCTask task);

    /**
     * Should be used to wait while the GC is executed in a managed scope
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason.
     */
    bool WaitForGCInManaged(const GCTask &task) NO_THREAD_SAFETY_ANALYSIS;

    /// Should only be used at the first pygote fork
    void WaitForGCOnPygoteFork(const GCTask &task);

    bool IsOnPygoteFork() const;

    /**
     * Initialize GC bits on object creation.
     * Required only for GCs with switched bits
     */
    virtual void InitGCBits(ark::ObjectHeader *objHeader) = 0;

    /// Initialize GC bits on object creation for the TLAB allocation.
    virtual void InitGCBitsForAllocationInTLAB(ark::ObjectHeader *objHeader) = 0;

    bool IsTLABsSupported() const
    {
        return tlabsSupported_;
    }

    /// @return true if GC supports object pinning (will not move pinned objects), false otherwise
    virtual bool IsPinningSupported() const = 0;

    /// @return true if the cause is suitable for the GC, false otherwise
    virtual bool CheckGCCause(GCTaskCause cause) const;

    /**
     * Trigger GC.
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason. The task may also be discarded for other reasons.
     */
    virtual bool Trigger(PandaUniquePtr<GCTask> task) = 0;

    virtual bool IsFullGC() const;

    /// Return true if the GC has generations, false otherwise
    bool IsGenerational() const;

    PandaString DumpStatistics()
    {
        return instanceStats_.GetDump(gcType_);
    }

    void AddListener(GCListener *listener)
    {
        ASSERT(gcListenerManager_ != nullptr);
        gcListenerManager_->AddListener(listener);
    }

    void RemoveListener(GCListener *listener)
    {
        ASSERT(gcListenerManager_ != nullptr);
        gcListenerManager_->RemoveListener(listener);
    }

    GCBarrierSet *GetBarrierSet()
    {
        ASSERT(gcBarrierSet_ != nullptr);
        return gcBarrierSet_;
    }

    GCWorkersTaskPool *GetWorkersTaskPool() const
    {
        ASSERT(workersTaskPool_ != nullptr);
        return workersTaskPool_;
    }

    // Additional NativeGC
    void NotifyNativeAllocations();

    void RegisterNativeAllocation(size_t bytes);

    void RegisterNativeFree(size_t bytes);

    int32_t GetNotifyNativeInterval()
    {
        return NOTIFY_NATIVE_INTERVAL;
    }

    // Call CheckGCForNative immediately for every NOTIFY_NATIVE_INTERVAL allocations
    static constexpr int32_t NOTIFY_NATIVE_INTERVAL = 32;

    // Call CheckGCForNative immediately if the size exceeds the following
    static constexpr size_t CHECK_IMMEDIATELY_THRESHOLD = 300000;
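
    // Illustrative sketch (an assumption about intent, not the actual
    // implementation): the two constants above bound how often native
    // allocations re-check the GC trigger, roughly:
    //
    //   void RegisterNativeAllocationSketch(size_t bytes)  // hypothetical helper
    //   {
    //       if (bytes > CHECK_IMMEDIATELY_THRESHOLD) {
    //           TriggerGCForNative();  // very large allocation: check right away
    //       } else if (++nativeObjectsNotified_ % NOTIFY_NATIVE_INTERVAL == 0) {
    //           TriggerGCForNative();  // otherwise check every NOTIFY_NATIVE_INTERVAL-th notification
    //       }
    //   }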

    inline bool IsLogDetailedGcInfoEnabled() const
    {
        return gcSettings_.LogDetailedGCInfoEnabled();
    }

    inline bool IsLogDetailedGcCompactionInfoEnabled() const
    {
        return gcSettings_.LogDetailedGCCompactionInfoEnabled();
    }

    inline GCPhase GetGCPhase() const
    {
        return phase_;
    }

    inline GCTaskCause GetLastGCCause() const
    {
        // Atomic with acquire order reason: data race with other threads which can update the variable
        return lastCause_.load(std::memory_order_acquire);
    }

    inline bool IsGCRunning()
    {
        // Atomic with seq_cst order reason: data race with gc_running_ with requirement for sequentially consistent
        // order where threads observe all modifications in the same order
        return gcRunning_.load(std::memory_order_seq_cst);
    }

    void PreStartup();

    InternalAllocatorPtr GetInternalAllocator() const
    {
        return internalAllocator_;
    }

    /**
     * Enqueue all references in ReferenceQueue. Should be done after GC to avoid deadlock (lock in
     * ReferenceQueue.class)
     */
    void EnqueueReferences();

    /// Process all references which GC found in marking phase.
    void ProcessReferences(GCPhase gcPhase, const GCTask &task, const ReferenceClearPredicateT &pred);

    /// Process all references which were found during evacuation
    void ProcessReferences(const mem::GC::ReferenceClearPredicateT &pred);

    virtual void EvacuateStartingWith(void *ref);

    size_t GetNativeBytesRegistered()
    {
        // Atomic with relaxed order reason: data race with native_bytes_registered_ with no synchronization or
        // ordering constraints imposed on other reads or writes
        return nativeBytesRegistered_.load(std::memory_order_relaxed);
    }

    virtual void SetPandaVM(PandaVM *vm);

    PandaVM *GetPandaVm() const
    {
        return vm_;
    }

    taskmanager::TaskQueueInterface *GetWorkersTaskQueue() const
    {
        return gcWorkersTaskQueue_;
    }

    virtual void PreZygoteFork();

    virtual void PostZygoteFork();

    /**
     * Processes the thread's remaining pre- and post-barrier buffer entries on its termination.
     *
     * @param keep_buffers specifies whether to clear (=BuffersKeepingFlag::KEEP) or deallocate
     * (=BuffersKeepingFlag::DELETE) pre and post barrier buffers upon OnThreadTerminate() completion
     */
    virtual void OnThreadTerminate([[maybe_unused]] ManagedThread *thread,
                                   [[maybe_unused]] mem::BuffersKeepingFlag keepBuffers)
    {
    }
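
    // Illustrative usage (not from the original header): a concrete GC is
    // expected to flush the terminating thread's barrier buffers and then
    // either keep or free them according to the flag:
    //
    //   gc->OnThreadTerminate(thread, mem::BuffersKeepingFlag::KEEP);    // clear buffers, thread object lives on
    //   gc->OnThreadTerminate(thread, mem::BuffersKeepingFlag::DELETE);  // deallocate buffers on real termination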

    /// Performs the actions that are required upon thread creation (if any)
    virtual void OnThreadCreate([[maybe_unused]] ManagedThread *thread) {}

    void SetCanAddGCTask(bool canAddTask)
    {
        // Atomic with relaxed order reason: data race with can_add_gc_task_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        canAddGcTask_.store(canAddTask, std::memory_order_relaxed);
    }

    GCExtensionData *GetExtensionData() const
    {
        return extensionData_;
    }

    void SetExtensionData(GCExtensionData *data)
    {
        extensionData_ = data;
    }

    virtual void PostForkCallback() {}

    /// Check if the object addr is in the GC sweep range
    virtual bool InGCSweepRange([[maybe_unused]] const ObjectHeader *obj) const
    {
        return true;
    }

    virtual CardTable *GetCardTable() const
    {
        return nullptr;
    }

    /// Called from GCWorker thread to assign thread-specific data
    virtual bool InitWorker(void **workerData)
    {
        *workerData = nullptr;
        return true;
    }

    /// Called from GCWorker thread to destroy thread-specific data
    virtual void DestroyWorker([[maybe_unused]] void *workerData) {}

    /// Process a task sent to the GC workers thread.
    virtual void WorkerTaskProcessing([[maybe_unused]] GCWorkersTask *task, [[maybe_unused]] void *workerData)
    {
        LOG(FATAL, GC) << "Unimplemented method";
    }

    virtual bool IsMutatorAllowed()
    {
        return false;
    }

    /// Return true if ref is an instance of Reference or its ancestor, false otherwise
    bool IsReference(const BaseClass *cls, const ObjectHeader *ref, const ReferenceCheckPredicateT &pred);

    void ProcessReference(GCMarkingStackType *objectsStack, const BaseClass *cls, const ObjectHeader *ref,
                          const ReferenceProcessPredicateT &pred);
    void ProcessReferenceForSinglePassCompaction(const BaseClass *cls, const ObjectHeader *ref,
                                                 const ReferenceProcessorT &processor);

    ALWAYS_INLINE ObjectAllocatorBase *GetObjectAllocator() const
    {
        return objectAllocator_;
    }

    // called if we fail to change state from idle to running
    virtual void OnWaitForIdleFail();

    virtual void PendingGC() {}

    /**
     * Check if the object is marked for GC (alive)
     * @param object
     * @return true if the object is marked for GC
     */
    virtual bool IsMarked(const ObjectHeader *object) const = 0;

    /**
     * Check if the object is marked for GC (alive)
     * It is similar to the IsMarked method but can contain additional GC-specific logic.
     * If the caller is not aware of the GC mode, it should use this method instead of IsMarked.
     * @param object
     * @return true if the object is marked for GC
     */
    virtual bool IsMarkedEx(const ObjectHeader *object) const;

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param object_header
     * @return true if the object's old state is not marked
     */
    virtual bool MarkObjectIfNotMarked(ObjectHeader *objectHeader);

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param object_header
     */
    virtual void MarkObject(ObjectHeader *objectHeader) = 0;

    /**
     * Add reference for later processing in marking phase
     * @param object - object from which we start to mark
     */
    void AddReference(ObjectHeader *fromObject, ObjectHeader *object);

    void SetGCPhase(GCPhase gcPhase);

    size_t GetCounter() const;

    virtual void PostponeGCStart();

    virtual void PostponeGCEnd();

    virtual bool IsPostponeGCSupported() const = 0;

    bool IsPostponeEnabled() const;

    virtual void ComputeNewSize()
    {
        GetObjectAllocator()->GetHeapSpace()->ComputeNewSize();
    }

    /// @return GC-specific settings based on runtime options and GC type
    const GCSettings *GetSettings() const
    {
        return &gcSettings_;
    }

    bool IsClearSoftReferencesEnabled() const;

protected:
    /// @brief Runs all phases
    void RunPhases(GCTask &task);

    /**
     * Add task to GC Queue to be run by a GC worker (or run in place)
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason. The task may also be discarded for other reasons (for example, the task is invalid).
     */
    bool AddGCTask(bool isManaged, PandaUniquePtr<GCTask> task);

    virtual void InitializeImpl() = 0;
    virtual void PreRunPhasesImpl() = 0;
    virtual void RunPhasesImpl(GCTask &task) = 0;
    virtual void PreStartupImp() {}

    inline bool IsTracingEnabled() const
    {
        return gcSettings_.IsGcEnableTracing();
    }

    inline void BeginTracePoint(const PandaString &tracePointName) const
    {
        if (IsTracingEnabled()) {
            trace::BeginTracePoint(tracePointName.c_str());
        }
    }

    inline void EndTracePoint() const
    {
        if (IsTracingEnabled()) {
            trace::EndTracePoint();
        }
    }
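
    // Illustrative usage (not from the original header): derived GCs can
    // bracket a phase with these helpers; both are no-ops unless tracing is
    // enabled in the GC settings:
    //
    //   BeginTracePoint("GC::Mark");
    //   // ... marking work ...
    //   EndTracePoint();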

    virtual void VisitRoots(const GCRootVisitor &gcRootVisitor, VisitGCRootFlags flags) = 0;
    virtual void VisitClassRoots(const GCRootVisitor &gcRootVisitor) = 0;
    virtual void VisitCardTableRoots(CardTable *cardTable, const GCRootVisitor &gcRootVisitor,
                                     const MemRangeChecker &rangeChecker, const ObjectChecker &rangeObjectChecker,
                                     const ObjectChecker &fromObjectChecker, uint32_t processedFlag) = 0;

    inline bool CASGCPhase(GCPhase expected, GCPhase set)
    {
        return phase_.compare_exchange_strong(expected, set);
    }

    GCInstanceStats *GetStats()
    {
        return &instanceStats_;
    }

    inline void SetType(GCType gcType)
    {
        gcType_ = gcType;
    }

    inline void SetTLABsSupported()
    {
        tlabsSupported_ = true;
    }

    void SetGCBarrierSet(GCBarrierSet *barrierSet)
    {
        ASSERT(gcBarrierSet_ == nullptr);
        gcBarrierSet_ = barrierSet;
    }

    /**
     * @brief Create GC workers task pool which runs some GC phases in parallel
     * This pool can be based on an internal thread pool or TaskManager workers
     */
    void CreateWorkersTaskPool();

    /// @brief Destroy GC workers task pool if it was created
    void DestroyWorkersTaskPool();

    /// Mark all references which were added by the AddReference method
    virtual void MarkReferences(GCMarkingStackType *references, GCPhase gcPhase) = 0;

    virtual void UpdateRefsToMovedObjectsInPygoteSpace() = 0;
    /// Update all refs to moved objects
    virtual void CommonUpdateRefsToMovedObjects() = 0;

    virtual void UpdateVmRefs() = 0;

    virtual void UpdateGlobalObjectStorage() = 0;

    virtual void UpdateClassLinkerContextRoots() = 0;

    void UpdateRefsInVRegs(ManagedThread *thread);

    const ObjectHeader *PopObjectFromStack(GCMarkingStackType *objectsStack);

    Timing *GetTiming()
    {
        return &timing_;
    }

    template <GCScopeType GC_SCOPE_TYPE>
    friend class GCScope;

    void SetForwardAddress(ObjectHeader *src, ObjectHeader *dst);

    // vector here because we can add some references on young-gc and get new refs on old-gc
    // it's possible if we make 2 GCs for one safepoint
    // max length of this vector is 2
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    PandaVector<ark::mem::Reference *> *clearedReferences_ GUARDED_BY(clearedReferencesLock_) {nullptr};

    os::memory::Mutex *clearedReferencesLock_ {nullptr};  // NOLINT(misc-non-private-member-variables-in-classes)

    std::atomic<size_t> gcCounter_ {0};  // NOLINT(misc-non-private-member-variables-in-classes)
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    std::atomic<GCTaskCause> lastCause_ {GCTaskCause::INVALID_CAUSE};

    bool IsExplicitFull(const ark::GCTask &task) const
    {
        return (task.reason == GCTaskCause::EXPLICIT_CAUSE) && !gcSettings_.IsExplicitConcurrentGcEnabled();
    }

    const ReferenceProcessor *GetReferenceProcessor() const
    {
        return referenceProcessor_;
    }

    bool IsWorkerThreadsExist() const
    {
        return gcSettings_.GCWorkersCount() != 0;
    }

    void EnableWorkerThreads();
    void DisableWorkerThreads();

    /// @return true if GC can work in concurrent mode
    bool IsConcurrencyAllowed() const
    {
        return gcSettings_.IsConcurrencyEnabled();
    }

    Logger::Buffer GetLogPrefix() const;

    void FireGCStarted(const GCTask &task, size_t bytesInHeapBeforeGc);
    void FireGCFinished(const GCTask &task, size_t bytesInHeapBeforeGc, size_t bytesInHeapAfterGc);
    void FireGCPhaseStarted(GCPhase phase);
    void FireGCPhaseFinished(GCPhase phase);

    void SetFullGC(bool value);

    /// Set GC threads on best and middle cores before GC
    void SetupCpuAffinity();

    /// Set GC threads on best and middle cores after concurrent phase
    void SetupCpuAffinityAfterConcurrent();

    /// Set GC threads on saved or weak cores before concurrent phase
    void SetupCpuAffinityBeforeConcurrent();

    /// Restore GC threads after GC on saved cores
    void RestoreCpuAffinity();

    virtual void StartConcurrentScopeRoutine() const;
    virtual void EndConcurrentScopeRoutine() const;

    virtual void PrintDetailedLog();

    Timing timing_;  // NOLINT(misc-non-private-member-variables-in-classes)

    PandaVector<std::pair<PandaString, uint64_t>>
        footprintList_;  // NOLINT(misc-non-private-member-variables-in-classes)

    virtual size_t VerifyHeap() = 0;

private:
    /// Reset GC threads on saved or weak cores
    void ResetCpuAffinity(bool beforeConcurrent);

    /**
     * Check whether to run GC after waiting for mutator threads. GC tasks can arrive from several mutator threads,
     * so sometimes there is no need to run GC multiple times. Also, some GCs run in place, but at the same time GC
     * can run in the GC thread, and an "in-place" GC waits for the idle state before running, so it is necessary to
     * check whether to run such a GC after waiting for the threads
     * @see WaitForIdleGC
     *
     * @param counter_before_waiting value of gc counter before waiting for mutator threads
     * @param task current GC task
     *
     * @return true if GC needs to run with the current task after waiting for mutator threads, false otherwise
     */
    bool NeedRunGCAfterWaiting(size_t counterBeforeWaiting, const GCTask &task) const;

    /**
     * @brief Perform several setups before the phases launch
     * @return true if a GC run is still needed
     */
    bool GCPhasesPreparation(const GCTask &task);

    /// @brief Collect logs and heap dumps and launch PostGCHeapVerification after GC phases
    void GCPhasesFinish(const GCTask &task);

    /**
     * @brief Create GC worker if needed and set gc status to running (gc_running_ variable)
     * @see IsGCRunning
     */
    void CreateWorker();

    /**
     * @brief Join and destroy GC worker if needed and set gc status to non-running (gc_running_ variable)
     * @see IsGCRunning
     */
    void DestroyWorker();

    /// Move small objects to pygote space at first pygote fork
    void MoveObjectsToPygoteSpace();

    size_t GetNativeBytesFromMallinfoAndRegister() const;
    virtual void ClearLocalInternalAllocatorPools() = 0;
    virtual void UpdateThreadLocals() = 0;
    NativeGcTriggerType GetNativeGcTriggerType();

    class GCListenerManager {
    public:
        GCListenerManager() = default;
        NO_COPY_SEMANTIC(GCListenerManager);
        NO_MOVE_SEMANTIC(GCListenerManager);
        ~GCListenerManager() = default;

        void AddListener(GCListener *newListener);
        void RemoveListener(GCListener *newListener);

        void NormalizeListenersOnStartGC();

        template <class Visitor>
        void IterateOverListeners(const Visitor &visitor)
        {
            os::memory::LockHolder lh(listenerLock_);
            for (auto *gcListener : currentListeners_) {
                if (gcListener != nullptr) {
                    visitor(gcListener);
                }
            }
        }

    private:
        os::memory::Mutex listenerLock_;
        PandaUnorderedSet<GCListener *> currentListeners_ GUARDED_BY(listenerLock_);
        PandaUnorderedSet<GCListener *> newListeners_ GUARDED_BY(listenerLock_);
        PandaUnorderedSet<GCListener *> listenersForRemove_ GUARDED_BY(listenerLock_);
    };

    volatile std::atomic<GCPhase> phase_ {GCPhase::GC_PHASE_IDLE};
    GCType gcType_ {GCType::INVALID_GC};
    GCSettings gcSettings_;
    GCListenerManager *gcListenerManager_ {nullptr};
    GCBarrierSet *gcBarrierSet_ {nullptr};
    ObjectAllocatorBase *objectAllocator_ {nullptr};
    InternalAllocatorPtr internalAllocator_ {nullptr};
    GCInstanceStats instanceStats_;
    os::CpuSet affinityBeforeGc_ {};

    // Additional NativeGC
    std::atomic<size_t> nativeBytesRegistered_ = 0;
    std::atomic<size_t> nativeObjectsNotified_ = 0;

    ReferenceProcessor *referenceProcessor_ {nullptr};

    // NOTE(ipetrov): choose suitable priority
    static constexpr size_t GC_TASK_QUEUE_PRIORITY = taskmanager::TaskQueueInterface::MAX_PRIORITY;
    taskmanager::TaskQueueInterface *gcWorkersTaskQueue_ = nullptr;

    /* GC worker specific variables */
    GCWorker *gcWorker_ = nullptr;
    std::atomic_bool gcRunning_ = false;
    std::atomic<bool> canAddGcTask_ = true;

    bool tlabsSupported_ = false;

    // Additional data for extensions
    GCExtensionData *extensionData_ {nullptr};

    GCWorkersTaskPool *workersTaskPool_ {nullptr};
    class PostForkGCTask;

    friend class ecmascript::EcmaReferenceProcessor;
    friend class ark::mem::test::MemStatsGenGCTest;
    friend class ark::mem::test::ReferenceStorageTest;
    friend class ark::mem::test::RemSetTest;
    friend class GCScopedPhase;
    friend class GlobalObjectStorage;
    // NOTE(maksenov): Avoid using specific ObjectHelpers class here
    friend class GCDynamicObjectHelpers;
    friend class GCStaticObjectHelpers;
    friend class G1GCTest;
    friend class GCTestLog;

    void TriggerGCForNative();
    size_t SimpleNativeAllocationGcWatermark();
    /// Waits until the current GC task (if any) has been processed
    void WaitForIdleGC() NO_THREAD_SAFETY_ANALYSIS;

    friend class GCScopedPhase;
    friend class ConcurrentScope;

    PandaVM *vm_ {nullptr};
    std::atomic<bool> isFullGc_ {false};
    std::atomic<bool> isPostponeEnabled_ {false};
    bool clearSoftReferencesEnabled_ {false};
};

/**
 * @brief Create GC with @param gc_type
 * @param gc_type - type of GC to create
 * @return pointer to created GC on success, nullptr on failure
 */
template <class LanguageConfig>
GC *CreateGC(GCType gcType, ObjectAllocatorBase *objectAllocator, const GCSettings &settings);
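
// Illustrative usage (not from the original header); the concrete
// LanguageConfig and GCType values below are assumptions:
//
//   GC *gc = CreateGC<PandaAssemblyLanguageConfig>(GCType::GEN_GC, objectAllocator, settings);
//   if (gc == nullptr) {
//       // creation failed
//   }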

/// Enable concurrent mode. Should be used only from STW code.
class ConcurrentScope final {
public:
    explicit ConcurrentScope(GC *gc, bool autoStart = true);
    NO_COPY_SEMANTIC(ConcurrentScope);
    NO_MOVE_SEMANTIC(ConcurrentScope);
    ~ConcurrentScope();
    void Start();

private:
    GC *gc_;
    bool started_ = false;
};
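
// Illustrative usage (not from the original header): an RAII scope created
// from STW code; with autoStart=false the caller chooses when the concurrent
// part begins. DoConcurrentWork() is a hypothetical stand-in.
//
//   {
//       ConcurrentScope concurrentScope(gc);  // concurrency starts here
//       DoConcurrentWork();
//   }  // destructor ends the concurrent scope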
}  // namespace ark::mem

#endif  // PANDA_RUNTIME_MEM_GC_GC_H