/**
 * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_GC_GC_H
#define PANDA_RUNTIME_MEM_GC_GC_H

#include <atomic>
#include <map>
#include <string_view>
#include <vector>

#include "libpandabase/os/cpu_affinity.h"
#include "libpandabase/os/mutex.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/taskmanager/task_queue.h"
#include "libpandabase/trace/trace.h"
#include "libpandabase/utils/expected.h"
#include "runtime/include/gc_task.h"
#include "runtime/include/object_header.h"
#include "runtime/include/language_config.h"
#include "runtime/include/locks.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/mem/panda_string.h"
#include "runtime/mem/allocator_adapter.h"
#include "runtime/mem/gc/gc_settings.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "runtime/mem/gc/gc_phase.h"
#include "runtime/mem/gc/gc_root.h"
#include "runtime/mem/gc/gc_adaptive_marking_stack.h"
#include "runtime/mem/gc/gc_scope.h"
#include "runtime/mem/gc/gc_scoped_phase.h"
#include "runtime/mem/gc/gc_stats.h"
#include "runtime/mem/gc/gc_types.h"
#include "runtime/mem/refstorage/reference.h"
#include "runtime/mem/gc/bitmap.h"
#include "runtime/mem/gc/workers/gc_worker.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/timing.h"
#include "runtime/mem/region_allocator.h"

namespace ark {
class BaseClass;
class HClass;
class PandaVM;
class Timing;
namespace mem {
class G1GCTest;
class GlobalObjectStorage;
class ReferenceProcessor;
template <MTModeT MT_MODE>
class ObjectAllocatorG1;
namespace test {
class MemStatsGenGCTest;
class ReferenceStorageTest;
class RemSetTest;
}  // namespace test
namespace ecmascript {
class EcmaReferenceProcessor;
}  // namespace ecmascript
}  // namespace mem
}  // namespace ark

namespace ark::coretypes {
class Array;
class DynClass;
}  // namespace ark::coretypes

namespace ark::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_GC LOG(DEBUG, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_INFO_GC LOG(INFO, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_OBJECT_EVENTS LOG(DEBUG, MM_OBJECT_EVENTS)

// forward declarations:
class GCListener;
class GCScopePhase;
class GCScopedPhase;
class GCQueueInterface;
class GCDynamicObjectHelpers;
class GCWorkersTaskPool;
class GCWorkersTask;

enum class GCError { GC_ERROR_NO_ROOTS, GC_ERROR_NO_FRAMES, GC_ERROR_LAST = GC_ERROR_NO_FRAMES };

enum ClassRootsVisitFlag : bool {
    ENABLED = true,
    DISABLED = false,
};

enum CardTableVisitFlag : bool {
    VISIT_ENABLED = true,
    VISIT_DISABLED = false,
};

enum BuffersKeepingFlag : bool {
    KEEP = true,
    DELETE = false,
};

class GCListener {
public:
    GCListener() = default;
    NO_COPY_SEMANTIC(GCListener);
    DEFAULT_MOVE_SEMANTIC(GCListener);
    virtual ~GCListener() = default;
    virtual void GCStarted([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSize) {}
    virtual void GCFinished([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSizeBeforeGc,
                            [[maybe_unused]] size_t heapSize)
    {
    }
    virtual void GCPhaseStarted([[maybe_unused]] GCPhase phase) {}
    virtual void GCPhaseFinished([[maybe_unused]] GCPhase phase) {}
};
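
/*
 * Example (illustrative sketch, not part of the runtime API): a GCListener that
 * reports the wall-clock duration of each collection. Assumes <chrono> is
 * available; the listener must stay alive from GC::AddListener until
 * GC::RemoveListener.
 * @code
 * class PauseTimeListener final : public GCListener {  // hypothetical name
 * public:
 *     void GCStarted([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSize) override
 *     {
 *         start_ = std::chrono::steady_clock::now();
 *     }
 *     void GCFinished([[maybe_unused]] const GCTask &task, size_t heapSizeBeforeGc, size_t heapSize) override
 *     {
 *         auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start_);
 *         LOG(INFO, GC) << "GC pause: " << ms.count() << " ms, heap: " << heapSizeBeforeGc << " -> " << heapSize;
 *     }
 *
 * private:
 *     std::chrono::steady_clock::time_point start_;
 * };
 * @endcode
 */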

class GCExtensionData;

using UpdateRefInObject = std::function<void(ObjectHeader *)>;

// base class for all GCs
class GC {
public:
    using MarkPreprocess = std::function<void(const ObjectHeader *, BaseClass *)>;
    using ReferenceCheckPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceClearPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceProcessPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceProcessorT = std::function<void(void *)>;

    static constexpr bool EmptyReferenceProcessPredicate([[maybe_unused]] const ObjectHeader *ref)
    {
        return true;
    }

    static constexpr void EmptyMarkPreprocess([[maybe_unused]] const ObjectHeader *ref,
                                              [[maybe_unused]] BaseClass *baseKlass)
    {
    }

    explicit GC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings);
    NO_COPY_SEMANTIC(GC);
    NO_MOVE_SEMANTIC(GC);
    virtual ~GC() = 0;

    GCType GetType();

    /// @brief Initialize GC
    void Initialize(PandaVM *vm);

    /**
     * @brief Starts GC after initialization
     * Creates worker thread, sets gc_running_ to true
     */
    virtual void StartGC();

    /**
     * @brief Stops GC for runtime destruction
     * Joins GC thread, clears queue
     */
    virtual void StopGC();

    /**
     * Should be used to wait while GC works exclusively.
     * Note: for non-MT STW GC this can also be used to run the GC.
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason.
     */
    virtual bool WaitForGC(GCTask task);

    /**
     * Should be used to wait while GC is executed in managed scope.
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason.
     */
    bool WaitForGCInManaged(const GCTask &task) NO_THREAD_SAFETY_ANALYSIS;

    /// Should only be used at the first pygote fork
    void WaitForGCOnPygoteFork(const GCTask &task);

    bool IsOnPygoteFork() const;

    /**
     * Initialize GC bits on object creation.
     * Required only for GCs with switched bits
     */
    virtual void InitGCBits(ark::ObjectHeader *objHeader) = 0;

    /// Initialize GC bits on object creation for the TLAB allocation.
    virtual void InitGCBitsForAllocationInTLAB(ark::ObjectHeader *objHeader) = 0;

    bool IsTLABsSupported() const
    {
        return tlabsSupported_;
    }

    /// @return true if GC supports object pinning (will not move pinned object), false otherwise
    virtual bool IsPinningSupported() const = 0;

    /// @return true if cause is suitable for the GC, false otherwise
    virtual bool CheckGCCause(GCTaskCause cause) const;

    /**
     * Trigger GC.
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason; it may also be discarded for other reasons.
     */
    virtual bool Trigger(PandaUniquePtr<GCTask> task) = 0;
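
    /*
     * Example (illustrative sketch): requesting an explicit collection via Trigger().
     * Assumes GCTask is constructible from a GCTaskCause; the return value reports
     * whether the task was accepted or discarded.
     * @code
     * bool RequestExplicitGC(GC *gc)
     * {
     *     auto task = MakePandaUnique<GCTask>(GCTaskCause::EXPLICIT_CAUSE);  // cause-only ctor assumed
     *     return gc->Trigger(std::move(task));  // false => discarded (e.g. duplicate reason)
     * }
     * @endcode
     */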

    virtual bool IsFullGC() const;

    /// Return true if the GC is generational, false otherwise
    bool IsGenerational() const;

    PandaString DumpStatistics()
    {
        return instanceStats_.GetDump(gcType_);
    }

    void AddListener(GCListener *listener)
    {
        ASSERT(gcListenerManager_ != nullptr);
        gcListenerManager_->AddListener(listener);
    }

    void RemoveListener(GCListener *listener)
    {
        ASSERT(gcListenerManager_ != nullptr);
        gcListenerManager_->RemoveListener(listener);
    }

    GCBarrierSet *GetBarrierSet()
    {
        ASSERT(gcBarrierSet_ != nullptr);
        return gcBarrierSet_;
    }

    GCWorkersTaskPool *GetWorkersTaskPool() const
    {
        ASSERT(workersTaskPool_ != nullptr);
        return workersTaskPool_;
    }

    // Additional NativeGC
    void NotifyNativeAllocations();

    void RegisterNativeAllocation(size_t bytes);

    void RegisterNativeFree(size_t bytes);

    int32_t GetNotifyNativeInterval()
    {
        return NOTIFY_NATIVE_INTERVAL;
    }

    // Call CheckGCForNative immediately after every NOTIFY_NATIVE_INTERVAL-th notified allocation
    static constexpr int32_t NOTIFY_NATIVE_INTERVAL = 32;

    // Call CheckGCForNative immediately if the allocation size exceeds the following threshold
    static constexpr size_t CHECK_IMMEDIATELY_THRESHOLD = 300000;
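
    /*
     * Example (illustrative sketch; AllocNativeBuffer/FreeNativeBuffer are
     * hypothetical helpers): typical accounting for native memory owned by managed
     * objects. Small allocations only notify the GC, which checks every
     * NOTIFY_NATIVE_INTERVAL-th notification; sizes above
     * CHECK_IMMEDIATELY_THRESHOLD (300000 bytes) are checked immediately.
     * @code
     * void *AllocNativeBuffer(GC *gc, size_t bytes)
     * {
     *     void *mem = malloc(bytes);  // native allocation owned by a managed object
     *     if (mem != nullptr) {
     *         gc->RegisterNativeAllocation(bytes);  // may trigger a GC check
     *     }
     *     return mem;
     * }
     *
     * void FreeNativeBuffer(GC *gc, void *mem, size_t bytes)
     * {
     *     free(mem);
     *     gc->RegisterNativeFree(bytes);  // keep the registered byte count balanced
     * }
     * @endcode
     */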

    inline bool IsLogDetailedGcInfoEnabled() const
    {
        return gcSettings_.LogDetailedGCInfoEnabled();
    }

    inline bool IsLogDetailedGcCompactionInfoEnabled() const
    {
        return gcSettings_.LogDetailedGCCompactionInfoEnabled();
    }

    inline GCPhase GetGCPhase() const
    {
        return phase_;
    }

    inline GCTaskCause GetLastGCCause() const
    {
        // Atomic with acquire order reason: data race with other threads which can update the variable
        return lastCause_.load(std::memory_order_acquire);
    }

    inline bool IsGCRunning()
    {
        // Atomic with seq_cst order reason: data race with gc_running_ with requirement for sequentially consistent
        // order where threads observe all modifications in the same order
        return gcRunning_.load(std::memory_order_seq_cst);
    }

    void PreStartup();

    InternalAllocatorPtr GetInternalAllocator() const
    {
        return internalAllocator_;
    }

    /**
     * Enqueue all references in ReferenceQueue. Should be done after GC to avoid deadlock (lock in
     * ReferenceQueue.class)
     */
    void EnqueueReferences();

    /// Process all references which GC found in marking phase.
    void ProcessReferences(GCPhase gcPhase, const GCTask &task, const ReferenceClearPredicateT &pred);

    /// Process all references which were found during evacuation
    void ProcessReferences(const mem::GC::ReferenceClearPredicateT &pred);

    virtual void EvacuateStartingWith(void *ref);

    size_t GetNativeBytesRegistered()
    {
        // Atomic with relaxed order reason: data race with native_bytes_registered_ with no synchronization or
        // ordering constraints imposed on other reads or writes
        return nativeBytesRegistered_.load(std::memory_order_relaxed);
    }

    virtual void SetPandaVM(PandaVM *vm);

    PandaVM *GetPandaVm() const
    {
        return vm_;
    }

    taskmanager::TaskQueueInterface *GetWorkersTaskQueue() const
    {
        return gcWorkersTaskQueue_;
    }

    virtual void PreZygoteFork();

    virtual void PostZygoteFork();

    /**
     * Processes the thread's remaining pre- and post-barrier buffer entries on its termination.
     *
     * @param keep_buffers specifies whether to clear (=BuffersKeepingFlag::KEEP) or deallocate
     * (=BuffersKeepingFlag::DELETE) pre and post barrier buffers upon OnThreadTerminate() completion
     */
    virtual void OnThreadTerminate([[maybe_unused]] ManagedThread *thread,
                                   [[maybe_unused]] mem::BuffersKeepingFlag keepBuffers)
    {
    }

    /// Performs the actions that are required upon thread creation (if any)
    virtual void OnThreadCreate([[maybe_unused]] ManagedThread *thread) {}

    void SetCanAddGCTask(bool canAddTask)
    {
        // Atomic with relaxed order reason: data race with can_add_gc_task_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        canAddGcTask_.store(canAddTask, std::memory_order_relaxed);
    }

    GCExtensionData *GetExtensionData() const
    {
        return extensionData_;
    }

    virtual void SetExtensionData(GCExtensionData *data)
    {
        extensionData_ = data;
    }

    virtual void PostForkCallback([[maybe_unused]] size_t restoreLimit) {}

    /// Check if the object addr is in the GC sweep range
    virtual bool InGCSweepRange([[maybe_unused]] const ObjectHeader *obj) const
    {
        return true;
    }

    virtual CardTable *GetCardTable() const
    {
        return nullptr;
    }

    /// Called from GCWorker thread to assign thread specific data
    virtual bool InitWorker(void **workerData)
    {
        *workerData = nullptr;
        return true;
    }

    /// Called from GCWorker thread to destroy thread specific data
    virtual void DestroyWorker([[maybe_unused]] void *workerData) {}

    /// Process a task sent to GC workers thread.
    virtual void WorkerTaskProcessing([[maybe_unused]] GCWorkersTask *task, [[maybe_unused]] void *workerData)
    {
        LOG(FATAL, GC) << "Unimplemented method";
    }

    virtual bool IsMutatorAllowed()
    {
        return false;
    }

    /// Return true if ref is an instance of Reference or its ancestor, false otherwise
    bool IsReference(const BaseClass *cls, const ObjectHeader *ref, const ReferenceCheckPredicateT &pred);

    void ProcessReference(GCMarkingStackType *objectsStack, const BaseClass *cls, const ObjectHeader *ref,
                          const ReferenceProcessPredicateT &pred);
    void ProcessReferenceForSinglePassCompaction(const BaseClass *cls, const ObjectHeader *ref,
                                                 const ReferenceProcessorT &processor);

    ALWAYS_INLINE ObjectAllocatorBase *GetObjectAllocator() const
    {
        return objectAllocator_;
    }

    // called if we fail to change state from idle to running
    virtual void OnWaitForIdleFail();

    virtual void PendingGC() {}

    /**
     * Check if the object is marked for GC (alive)
     * @param object
     * @return true if object is marked for GC
     */
    virtual bool IsMarked(const ObjectHeader *object) const = 0;

    /**
     * Check if the object is marked for GC (alive).
     * It is similar to the IsMarked method but can contain additional GC specific logic.
     * If the caller is not aware of the GC mode, it should use this method instead of IsMarked.
     * @param object
     * @return true if object is marked for GC
     */
    virtual bool IsMarkedEx(const ObjectHeader *object) const;

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param object_header
     * @return true if the object's old state is not marked
     */
    virtual bool MarkObjectIfNotMarked(ObjectHeader *objectHeader);

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param object_header
     */
    virtual void MarkObject(ObjectHeader *objectHeader) = 0;

    /**
     * Add reference for later processing in marking phase
     * @param object - object from which we start to mark
     */
    void AddReference(ObjectHeader *fromObject, ObjectHeader *object);
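
    /*
     * Example (illustrative sketch of how a concrete GC might drive marking; the
     * stack method names are assumptions, see gc_adaptive_marking_stack.h): objects
     * enter the stack only on the unmarked->marked transition via
     * MarkObjectIfNotMarked(), so each object is scanned exactly once.
     * @code
     * while (!objectsStack->Empty()) {
     *     auto *obj = objectsStack->PopFromStack();
     *     // Scan the fields of obj; for every reachable child object:
     *     //     if (gc->MarkObjectIfNotMarked(child)) {
     *     //         objectsStack->PushToStack(obj, child);  // mark won: scan it later
     *     //     }
     * }
     * @endcode
     */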

    void SetGCPhase(GCPhase gcPhase);

    size_t GetCounter() const;

    virtual void PostponeGCStart();

    virtual void PostponeGCEnd();

    virtual bool IsPostponeGCSupported() const = 0;

    bool IsPostponeEnabled() const;

    virtual void ComputeNewSize()
    {
        GetObjectAllocator()->GetHeapSpace()->ComputeNewSize();
    }

    /// @return GC specific settings based on runtime options and GC type
    const GCSettings *GetSettings() const
    {
        return &gcSettings_;
    }

    bool IsClearSoftReferencesEnabled() const;

    bool GetFastGCFlag() const;
    void SetFastGCFlag(bool fastGC);

protected:
    /// @brief Runs all phases
    void RunPhases(GCTask &task);

    /**
     * Add task to GC Queue to be run by a GC worker (or run in place)
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason; it may also be discarded for other reasons (for example, the task is invalid).
     */
    bool AddGCTask(bool isManaged, PandaUniquePtr<GCTask> task);

    virtual void InitializeImpl() = 0;
    virtual void PreRunPhasesImpl() = 0;
    virtual void RunPhasesImpl(GCTask &task) = 0;
    virtual void PreStartupImp() {}
    virtual size_t AdujustStartupLimit(size_t startupLimit)
    {
        return startupLimit;
    }

    inline bool IsTracingEnabled() const
    {
        return gcSettings_.IsGcEnableTracing();
    }

    inline void BeginTracePoint(const PandaString &tracePointName) const
    {
        if (IsTracingEnabled()) {
            trace::BeginTracePoint(tracePointName.c_str());
        }
    }

    inline void EndTracePoint() const
    {
        if (IsTracingEnabled()) {
            trace::EndTracePoint();
        }
    }

    virtual void VisitRoots(const GCRootVisitor &gcRootVisitor, VisitGCRootFlags flags) = 0;
    virtual void VisitClassRoots(const GCRootVisitor &gcRootVisitor) = 0;
    virtual void VisitCardTableRoots(CardTable *cardTable, const GCRootVisitor &gcRootVisitor,
                                     const MemRangeChecker &rangeChecker, const ObjectChecker &rangeObjectChecker,
                                     const ObjectChecker &fromObjectChecker, uint32_t processedFlag) = 0;

    inline bool CASGCPhase(GCPhase expected, GCPhase set)
    {
        return phase_.compare_exchange_strong(expected, set);
    }
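
    /*
     * Example (illustrative sketch): a GC implementation can use CASGCPhase to win
     * the idle->running transition exactly once when several threads race to start
     * a collection (the phase names here are assumptions from gc_phase.h).
     * @code
     * if (CASGCPhase(GCPhase::GC_PHASE_IDLE, GCPhase::GC_PHASE_RUNNING)) {
     *     // this thread owns the collection cycle
     * } else {
     *     OnWaitForIdleFail();  // another collection is already in progress
     * }
     * @endcode
     */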

    GCInstanceStats *GetStats()
    {
        return &instanceStats_;
    }

    inline void SetType(GCType gcType)
    {
        gcType_ = gcType;
    }

    inline void SetTLABsSupported()
    {
        tlabsSupported_ = true;
    }

    void SetGCBarrierSet(GCBarrierSet *barrierSet)
    {
        ASSERT(gcBarrierSet_ == nullptr);
        gcBarrierSet_ = barrierSet;
    }

    /**
     * @brief Create GC workers task pool which runs some gc phases in parallel
     * This pool can be based on internal thread pool or TaskManager workers
     */
    void CreateWorkersTaskPool();

    /// @brief Destroy GC workers task pool if it was created
    void DestroyWorkersTaskPool();

    /// Mark all references which we added by the AddReference method
    virtual void MarkReferences(GCMarkingStackType *references, GCPhase gcPhase) = 0;

    virtual void UpdateRefsToMovedObjectsInPygoteSpace() = 0;
    /// Update all refs to moved objects
    virtual void CommonUpdateRefsToMovedObjects() = 0;

    const ObjectHeader *PopObjectFromStack(GCMarkingStackType *objectsStack);

    Timing *GetTiming()
    {
        return &timing_;
    }

    template <GCScopeType GC_SCOPE_TYPE>
    friend class GCScope;

    void SetForwardAddress(ObjectHeader *src, ObjectHeader *dst);

    // vector here because we can add some references on young-gc and get new refs on old-gc
    // it's possible if we make 2 GCs for one safepoint
    // max length of this vector - is 2
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    PandaVector<ark::mem::Reference *> *clearedReferences_ GUARDED_BY(clearedReferencesLock_) {nullptr};

    os::memory::Mutex *clearedReferencesLock_ {nullptr};  // NOLINT(misc-non-private-member-variables-in-classes)

    std::atomic<size_t> gcCounter_ {0};  // NOLINT(misc-non-private-member-variables-in-classes)
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    std::atomic<GCTaskCause> lastCause_ {GCTaskCause::INVALID_CAUSE};

    bool IsExplicitFull(const ark::GCTask &task) const
    {
        return (task.reason == GCTaskCause::EXPLICIT_CAUSE) && !gcSettings_.IsExplicitConcurrentGcEnabled();
    }

    const ReferenceProcessor *GetReferenceProcessor() const
    {
        return referenceProcessor_;
    }

    bool IsWorkerThreadsExist() const
    {
        return gcSettings_.GCWorkersCount() != 0;
    }

    void EnableWorkerThreads();
    void DisableWorkerThreads();

    /// @return true if GC can work in concurrent mode
    bool IsConcurrencyAllowed() const
    {
        return gcSettings_.IsConcurrencyEnabled();
    }

    Logger::Buffer GetLogPrefix() const;

    void FireGCStarted(const GCTask &task, size_t bytesInHeapBeforeGc);
    void FireGCFinished(const GCTask &task, size_t bytesInHeapBeforeGc, size_t bytesInHeapAfterGc);
    void FireGCPhaseStarted(GCPhase phase);
    void FireGCPhaseFinished(GCPhase phase);

    void SetFullGC(bool value);

    /// Set GC threads on best and middle cores before GC
    void SetupCpuAffinity();

    /// Set GC threads on best and middle cores after concurrent phase
    void SetupCpuAffinityAfterConcurrent();

    /// Set GC threads on saved or weak cores before concurrent phase
    void SetupCpuAffinityBeforeConcurrent();

    /// Restore GC threads to saved cores after GC
    void RestoreCpuAffinity();

    virtual void StartConcurrentScopeRoutine() const;
    virtual void EndConcurrentScopeRoutine() const;

    virtual void PrintDetailedLog();

    Timing timing_;  // NOLINT(misc-non-private-member-variables-in-classes)

    PandaVector<std::pair<PandaString, uint64_t>>
        footprintList_;  // NOLINT(misc-non-private-member-variables-in-classes)

    virtual size_t VerifyHeap() = 0;

private:
    /// Reset GC threads to saved or weak cores
    void ResetCpuAffinity(bool beforeConcurrent);

    /**
     * Check whether to run GC after waiting for mutator threads. GC tasks can arrive from several mutator
     * threads, so sometimes there is no need to run GC multiple times. Also, some GCs run in place, but at this
     * time a GC can be running in the GC thread, and the "in-place" GC waits for the idle state before running,
     * so we need to check whether to run such a GC after waiting for the threads
     * @see WaitForIdleGC
     *
     * @param counter_before_waiting value of gc counter before waiting for mutator threads
     * @param task current GC task
     *
     * @return true if need to run GC with current task after waiting for mutator threads or false otherwise
     */
    bool NeedRunGCAfterWaiting(size_t counterBeforeWaiting, const GCTask &task) const;

    /**
     * @brief Making several setups before phases launch
     * @return true if GC run is still needed
     */
    bool GCPhasesPreparation(const GCTask &task);

    /// @brief Collect logs and heap dumps and launch PostGCHeapVerification after the GC phases
    void GCPhasesFinish(const GCTask &task);

    /**
     * @brief Create GC worker if needed and set gc status to running (gc_running_ variable)
     * @see IsGCRunning
     */
    void CreateWorker();

    /**
     * @brief Join and destroy GC worker if needed and set gc status to non-running (gc_running_ variable)
     * @see IsGCRunning
     */
    void DestroyWorker();

    /// Move small objects to pygote space at the first pygote fork
    void MoveObjectsToPygoteSpace();

    size_t GetNativeBytesFromMallinfoAndRegister() const;
    virtual void ClearLocalInternalAllocatorPools() = 0;
    NativeGcTriggerType GetNativeGcTriggerType();

    class GCListenerManager {
    public:
        GCListenerManager() = default;
        NO_COPY_SEMANTIC(GCListenerManager);
        NO_MOVE_SEMANTIC(GCListenerManager);
        ~GCListenerManager() = default;

        PANDA_PUBLIC_API void AddListener(GCListener *newListener);
        PANDA_PUBLIC_API void RemoveListener(GCListener *newListener);

        void NormalizeListenersOnStartGC();

        template <class Visitor>
        void IterateOverListeners(const Visitor &visitor)
        {
            os::memory::LockHolder lh(listenerLock_);
            for (auto *gcListener : currentListeners_) {
                if (gcListener != nullptr) {
                    visitor(gcListener);
                }
            }
        }

    private:
        os::memory::Mutex listenerLock_;
        PandaList<GCListener *> currentListeners_ GUARDED_BY(listenerLock_);
        PandaList<GCListener *> newListeners_ GUARDED_BY(listenerLock_);
        PandaList<GCListener *> listenersForRemove_ GUARDED_BY(listenerLock_);
    };

    volatile std::atomic<GCPhase> phase_ {GCPhase::GC_PHASE_IDLE};
    GCType gcType_ {GCType::INVALID_GC};
    GCSettings gcSettings_;
    GCListenerManager *gcListenerManager_ {nullptr};
    GCBarrierSet *gcBarrierSet_ {nullptr};
    ObjectAllocatorBase *objectAllocator_ {nullptr};
    InternalAllocatorPtr internalAllocator_ {nullptr};
    GCInstanceStats instanceStats_;
    os::CpuSet affinityBeforeGc_ {};

    // Additional NativeGC
    std::atomic<size_t> nativeBytesRegistered_ = 0;
    std::atomic<size_t> nativeObjectsNotified_ = 0;

    ReferenceProcessor *referenceProcessor_ {nullptr};

    // NOTE(ipetrov): choose suitable priority
    static constexpr size_t GC_TASK_QUEUE_PRIORITY = taskmanager::MAX_QUEUE_PRIORITY;
    taskmanager::TaskQueueInterface *gcWorkersTaskQueue_ = nullptr;

    /* GC worker specific variables */
    GCWorker *gcWorker_ = nullptr;
    std::atomic_bool gcRunning_ = false;
    std::atomic<bool> canAddGcTask_ = true;

    bool tlabsSupported_ = false;

    // Additional data for extensions
    GCExtensionData *extensionData_ {nullptr};

    GCWorkersTaskPool *workersTaskPool_ {nullptr};
    class PostForkGCTask;

    friend class ecmascript::EcmaReferenceProcessor;
    friend class ark::mem::test::MemStatsGenGCTest;
    friend class ark::mem::test::ReferenceStorageTest;
    friend class ark::mem::test::RemSetTest;
    friend class GCScopedPhase;
    friend class GlobalObjectStorage;
    // NOTE(maksenov): Avoid using specific ObjectHelpers class here
    friend class GCDynamicObjectHelpers;
    friend class GCStaticObjectHelpers;
    friend class G1GCTest;
    friend class GCTestLog;

    void TriggerGCForNative();
    size_t SimpleNativeAllocationGcWatermark();
    /// Waits until the current GC task (if any) is processed
    void WaitForIdleGC() NO_THREAD_SAFETY_ANALYSIS;

    friend class GCScopedPhase;
    friend class ConcurrentScope;

    PandaVM *vm_ {nullptr};
    std::atomic<bool> isFullGc_ {false};
    std::atomic<bool> isPostponeEnabled_ {false};
    std::atomic<bool> fastGC_ {false};
    bool clearSoftReferencesEnabled_ {false};
};

/**
 * @brief Create GC with @param gc_type
 * @param gc_type - type of GC to create
 * @return pointer to created GC on success, nullptr on failure
 */
template <class LanguageConfig>
GC *CreateGC(GCType gcType, ObjectAllocatorBase *objectAllocator, const GCSettings &settings);
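
/*
 * Example (illustrative sketch): creating and starting a GC instance for a concrete
 * language configuration. The settings construction is elided; error handling and
 * the exact configuration type depend on the embedding VM.
 * @code
 * GCSettings settings = ...;  // built from runtime options (details omitted)
 * GC *gc = CreateGC<PandaAssemblyLanguageConfig>(GCType::GEN_GC, objectAllocator, settings);
 * if (gc != nullptr) {
 *     gc->Initialize(vm);  // bind to the VM
 *     gc->StartGC();       // spawn the GC worker; gc_running_ becomes true
 * }
 * @endcode
 */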

/// Enable concurrent mode. Should be used only from STW code.
class ConcurrentScope final {
public:
    explicit ConcurrentScope(GC *gc, bool autoStart = true);
    NO_COPY_SEMANTIC(ConcurrentScope);
    NO_MOVE_SEMANTIC(ConcurrentScope);
    ~ConcurrentScope();
    void Start();

private:
    GC *gc_;
    bool started_ = false;
};
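
/*
 * Example (illustrative usage): allowing mutators to run during a concurrent phase
 * of an otherwise stop-the-world collection. With autoStart == true concurrency
 * starts in the constructor and ends in the destructor.
 * @code
 * {
 *     ConcurrentScope concurrentScope(this);
 *     RunConcurrentMarking(task);  // hypothetical phase; mutators may run here
 * }  // scope ends: execution is exclusive (STW) again
 * @endcode
 */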
}  // namespace ark::mem

#endif  // PANDA_RUNTIME_MEM_GC_GC_H