/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_GC_GC_H
#define PANDA_RUNTIME_MEM_GC_GC_H

#include <atomic>
#include <map>
#include <string_view>
#include <vector>

#include "libpandabase/os/cpu_affinity.h"
#include "libpandabase/os/mutex.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/taskmanager/task_queue.h"
#include "libpandabase/trace/trace.h"
#include "libpandabase/utils/expected.h"
#include "runtime/include/gc_task.h"
#include "runtime/include/object_header.h"
#include "runtime/include/language_config.h"
#include "runtime/include/locks.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/mem/panda_string.h"
#include "runtime/mem/allocator_adapter.h"
#include "runtime/mem/gc/gc_settings.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "runtime/mem/gc/gc_phase.h"
#include "runtime/mem/gc/gc_root.h"
#include "runtime/mem/gc/gc_adaptive_stack.h"
#include "runtime/mem/gc/gc_scope.h"
#include "runtime/mem/gc/gc_scoped_phase.h"
#include "runtime/mem/gc/gc_stats.h"
#include "runtime/mem/gc/gc_types.h"
#include "runtime/mem/refstorage/reference.h"
#include "runtime/mem/gc/bitmap.h"
#include "runtime/mem/gc/workers/gc_worker.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/timing.h"
#include "runtime/mem/region_allocator.h"

namespace panda {
class BaseClass;
class HClass;
class PandaVM;
class Timing;
namespace mem {
class G1GCTest;
class GlobalObjectStorage;
class ReferenceProcessor;
template <MTModeT MT_MODE>
class ObjectAllocatorG1;
namespace test {
class MemStatsGenGCTest;
class ReferenceStorageTest;
class RemSetTest;
}  // namespace test
namespace ecmascript {
class EcmaReferenceProcessor;
}  // namespace ecmascript
}  // namespace mem
}  // namespace panda

namespace panda::coretypes {
class Array;
class DynClass;
}  // namespace panda::coretypes

namespace panda::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_GC LOG(DEBUG, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_INFO_GC LOG(INFO, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_OBJECT_EVENTS LOG(DEBUG, MM_OBJECT_EVENTS)

// forward declarations:
class GCListener;
class GCScopePhase;
class GCScopedPhase;
class GCQueueInterface;
class GCDynamicObjectHelpers;
class GCWorkersTaskPool;
class GCWorkersTask;

enum class GCError { GC_ERROR_NO_ROOTS, GC_ERROR_NO_FRAMES, GC_ERROR_LAST = GC_ERROR_NO_FRAMES };

enum ClassRootsVisitFlag : bool {
    ENABLED = true,
    DISABLED = false,
};

enum CardTableVisitFlag : bool {
    VISIT_ENABLED = true,
    VISIT_DISABLED = false,
};

enum BuffersKeepingFlag : bool {
    KEEP = true,
    DELETE = false,
};

class GCListener {
public:
    GCListener() = default;
    NO_COPY_SEMANTIC(GCListener);
    DEFAULT_MOVE_SEMANTIC(GCListener);
    virtual ~GCListener() = default;
    virtual void GCStarted([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSize) {}
    virtual void GCFinished([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSizeBeforeGc,
                            [[maybe_unused]] size_t heapSize)
    {
    }
    virtual void GCPhaseStarted([[maybe_unused]] GCPhase phase) {}
    virtual void GCPhaseFinished([[maybe_unused]] GCPhase phase) {}
};
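
// A minimal illustrative GCListener implementation (a sketch, not part of the runtime):
// it assumes only the virtual hooks declared above; the class name and log messages are
// hypothetical.
//
//   class GCPauseLogger : public GCListener {
//   public:
//       void GCStarted([[maybe_unused]] const GCTask &task, size_t heapSize) override
//       {
//           LOG(INFO, GC) << "GC started, heap size = " << heapSize;
//       }
//       void GCFinished([[maybe_unused]] const GCTask &task, size_t heapSizeBeforeGc, size_t heapSize) override
//       {
//           LOG(INFO, GC) << "GC finished, heap " << heapSizeBeforeGc << " -> " << heapSize;
//       }
//   };
//
// An instance would be registered via GC::AddListener() and must outlive its registration,
// since the listener manager stores raw pointers.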

class GCExtensionData;

using UpdateRefInObject = std::function<void(ObjectHeader *)>;

// base class for all GCs
class GC {
public:
    using MarkPreprocess = std::function<void(const ObjectHeader *, BaseClass *)>;
    using ReferenceCheckPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceClearPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceProcessPredicateT = std::function<bool(const ObjectHeader *)>;

    static constexpr bool EmptyReferenceProcessPredicate([[maybe_unused]] const ObjectHeader *ref)
    {
        return true;
    }

    static constexpr void EmptyMarkPreprocess([[maybe_unused]] const ObjectHeader *ref,
                                              [[maybe_unused]] BaseClass *baseKlass)
    {
    }

    explicit GC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings);
    NO_COPY_SEMANTIC(GC);
    NO_MOVE_SEMANTIC(GC);
    virtual ~GC() = 0;

    GCType GetType();

    /// @brief Initialize GC
    void Initialize(PandaVM *vm);

    /**
     * @brief Starts GC after initialization
     * Creates the worker thread and sets gcRunning_ to true
     */
    virtual void StartGC();

    /**
     * @brief Stops GC for runtime destruction
     * Joins the GC thread and clears the queue
     */
    virtual void StopGC();

    /**
     * Should be used to wait while the GC works exclusively.
     * Note: for non-MT STW GC this can be used to run the GC.
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason.
     */
    virtual bool WaitForGC(GCTask task);

    /**
     * Should be used to wait for the GC when it must be executed in a managed scope
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason.
     */
    bool WaitForGCInManaged(const GCTask &task) NO_THREAD_SAFETY_ANALYSIS;
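
    // Illustrative usage sketch (hypothetical caller code, not part of this header):
    // synchronously requesting an explicit GC from a managed thread; assumes the GCTask and
    // GCTaskCause types from runtime/include/gc_task.h included above.
    //
    //   GCTask task(GCTaskCause::EXPLICIT_CAUSE);
    //   if (!gc->WaitForGCInManaged(task)) {
    //       // the task was discarded, e.g. a GC with the same cause is already running
    //   }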

    /// Should only be used at the first pygote fork
    void WaitForGCOnPygoteFork(const GCTask &task);

    bool IsOnPygoteFork() const;

    /**
     * Initialize GC bits on object creation.
     * Required only for GCs with switched bits
     */
    virtual void InitGCBits(panda::ObjectHeader *objHeader) = 0;

    /// Initialize GC bits on object creation for the TLAB allocation.
    virtual void InitGCBitsForAllocationInTLAB(panda::ObjectHeader *objHeader) = 0;

    bool IsTLABsSupported() const
    {
        return tlabsSupported_;
    }

    /// @return true if GC supports object pinning (will not move pinned object), false otherwise
    virtual bool IsPinningSupported() const = 0;

    /// @return true if cause is suitable for the GC, false otherwise
    virtual bool CheckGCCause(GCTaskCause cause) const;

    /**
     * Trigger GC.
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason. The task may also be discarded for other reasons.
     */
    virtual bool Trigger(PandaUniquePtr<GCTask> task) = 0;

    virtual bool IsFullGC() const;

    /// Return true if the GC has generations, false otherwise
    bool IsGenerational() const;

    PandaString DumpStatistics()
    {
        return instanceStats_.GetDump(gcType_);
    }

    void AddListener(GCListener *listener)
    {
        ASSERT(gcListenerManager_ != nullptr);
        gcListenerManager_->AddListener(listener);
    }

    void RemoveListener(GCListener *listener)
    {
        ASSERT(gcListenerManager_ != nullptr);
        gcListenerManager_->RemoveListener(listener);
    }

    GCBarrierSet *GetBarrierSet()
    {
        ASSERT(gcBarrierSet_ != nullptr);
        return gcBarrierSet_;
    }

    GCWorkersTaskPool *GetWorkersTaskPool() const
    {
        ASSERT(workersTaskPool_ != nullptr);
        return workersTaskPool_;
    }

    // Additional NativeGC
    void NotifyNativeAllocations();

    void RegisterNativeAllocation(size_t bytes);

    void RegisterNativeFree(size_t bytes);

    int32_t GetNotifyNativeInterval()
    {
        return NOTIFY_NATIVE_INTERVAL;
    }

    // CheckGCForNative is called after every NOTIFY_NATIVE_INTERVAL native allocations
    static constexpr int32_t NOTIFY_NATIVE_INTERVAL = 32;

    // CheckGCForNative is called immediately if the registered size exceeds the following threshold
    static constexpr size_t CHECK_IMMEDIATELY_THRESHOLD = 300000;
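
    // Illustrative usage sketch (hypothetical native-binding code, not part of this header):
    // off-heap allocations are reported so the GC can react to native memory pressure.
    //
    //   void *buf = malloc(size);
    //   gc->RegisterNativeAllocation(size);  // may trigger a GC once the thresholds above are crossed
    //   ...
    //   free(buf);
    //   gc->RegisterNativeFree(size);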

    inline bool IsLogDetailedGcInfoEnabled() const
    {
        return gcSettings_.LogDetailedGCInfoEnabled();
    }

    inline bool IsLogDetailedGcCompactionInfoEnabled() const
    {
        return gcSettings_.LogDetailedGCCompactionInfoEnabled();
    }

    inline GCPhase GetGCPhase() const
    {
        return phase_;
    }

    inline GCTaskCause GetLastGCCause() const
    {
        // Atomic with acquire order reason: data race with other threads which can update the variable
        return lastCause_.load(std::memory_order_acquire);
    }

    inline bool IsGCRunning()
    {
        // Atomic with seq_cst order reason: data race on gcRunning_ with a requirement for sequentially consistent
        // order where threads observe all modifications in the same order
        return gcRunning_.load(std::memory_order_seq_cst);
    }

    void PreStartup();

    InternalAllocatorPtr GetInternalAllocator() const
    {
        return internalAllocator_;
    }

    /**
     * Enqueue all references in ReferenceQueue. Should be done after GC to avoid deadlock (lock in
     * ReferenceQueue.class)
     */
    void EnqueueReferences();

    /// Process all references which GC found in marking phase.
    void ProcessReferences(GCPhase gcPhase, const GCTask &task, const ReferenceClearPredicateT &pred);

    size_t GetNativeBytesRegistered()
    {
        // Atomic with relaxed order reason: data race on nativeBytesRegistered_ with no synchronization or
        // ordering constraints imposed on other reads or writes
        return nativeBytesRegistered_.load(std::memory_order_relaxed);
    }

    virtual void SetPandaVM(PandaVM *vm);

    PandaVM *GetPandaVm() const
    {
        return vm_;
    }

    taskmanager::TaskQueueInterface *GetWorkersTaskQueue() const
    {
        return gcWorkersTaskQueue_;
    }

    virtual void PreZygoteFork();

    virtual void PostZygoteFork();

    /**
     * Processes the thread's remaining pre- and post-barrier buffer entries on its termination.
     *
     * @param keepBuffers specifies whether to clear (=BuffersKeepingFlag::KEEP) or deallocate
     * (=BuffersKeepingFlag::DELETE) pre- and post-barrier buffers upon OnThreadTerminate() completion
     */
    virtual void OnThreadTerminate([[maybe_unused]] ManagedThread *thread,
                                   [[maybe_unused]] mem::BuffersKeepingFlag keepBuffers)
    {
    }

    /// Performs the actions that are required upon thread creation (if any)
    virtual void OnThreadCreate([[maybe_unused]] ManagedThread *thread) {}

    void SetCanAddGCTask(bool canAddTask)
    {
        // Atomic with relaxed order reason: data race on canAddGcTask_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        canAddGcTask_.store(canAddTask, std::memory_order_relaxed);
    }

    GCExtensionData *GetExtensionData() const
    {
        return extensionData_;
    }

    void SetExtensionData(GCExtensionData *data)
    {
        extensionData_ = data;
    }

    virtual void PostForkCallback() {}

    /// Check if the object addr is in the GC sweep range
    virtual bool InGCSweepRange([[maybe_unused]] const ObjectHeader *obj) const
    {
        return true;
    }

    virtual CardTable *GetCardTable() const
    {
        return nullptr;
    }

    /// Called from a GCWorker thread to assign thread-specific data
    virtual bool InitWorker(void **workerData)
    {
        *workerData = nullptr;
        return true;
    }

    /// Called from a GCWorker thread to destroy thread-specific data
    virtual void DestroyWorker([[maybe_unused]] void *workerData) {}

    /// Process a task sent to the GC workers thread.
    virtual void WorkerTaskProcessing([[maybe_unused]] GCWorkersTask *task, [[maybe_unused]] void *workerData)
    {
        LOG(FATAL, GC) << "Unimplemented method";
    }

    virtual bool IsMutatorAllowed()
    {
        return false;
    }

    /// Return true if ref is an instance of Reference or its ancestor, false otherwise
    bool IsReference(const BaseClass *cls, const ObjectHeader *ref, const ReferenceCheckPredicateT &pred);

    void ProcessReference(GCMarkingStackType *objectsStack, const BaseClass *cls, const ObjectHeader *ref,
                          const ReferenceProcessPredicateT &pred);

    ALWAYS_INLINE ObjectAllocatorBase *GetObjectAllocator() const
    {
        return objectAllocator_;
    }

    // called if we fail to change state from idle to running
    virtual void OnWaitForIdleFail();

    virtual void PendingGC() {}

    /**
     * Check if the object is marked for GC (alive)
     * @param object
     * @return true if the object is marked for GC
     */
    virtual bool IsMarked(const ObjectHeader *object) const = 0;

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param objectHeader
     * @return true if the object's old state is not marked
     */
    virtual bool MarkObjectIfNotMarked(ObjectHeader *objectHeader);

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param objectHeader
     */
    virtual void MarkObject(ObjectHeader *objectHeader) = 0;

    /**
     * Add a reference for later processing in the marking phase
     * @param object - object from which we start to mark
     */
    void AddReference(ObjectHeader *fromObject, ObjectHeader *object);
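
    // Illustrative sketch (hypothetical subclass code, not part of this header): a typical
    // marking step checks the mark bit first and only pushes newly marked objects; it assumes
    // the GCMarkingStackType provides a PushToStack(from, object) operation:
    //
    //   if (MarkObjectIfNotMarked(object)) {
    //       objectsStack->PushToStack(fromObject, object);
    //   }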

    inline void SetGCPhase(GCPhase gcPhase)
    {
        phase_ = gcPhase;
    }

    size_t GetCounter() const
    {
        return gcCounter_;
    }

    virtual void PostponeGCStart()
    {
        ASSERT(IsPostponeGCSupported());
        isPostponeEnabled_ = true;
    }

    virtual void PostponeGCEnd()
    {
        ASSERT(IsPostponeGCSupported());
        ASSERT(IsPostponeEnabled());
        isPostponeEnabled_ = false;
    }
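
    // Illustrative usage sketch (hypothetical caller code, not part of this header): postponing
    // brackets a latency-critical section, and every PostponeGCStart() must be paired with
    // PostponeGCEnd():
    //
    //   gc->PostponeGCStart();
    //   RunLatencyCriticalSection();  // hypothetical work during which the GC is delayed
    //   gc->PostponeGCEnd();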

    virtual bool IsPostponeGCSupported() const = 0;

    bool IsPostponeEnabled()
    {
        return isPostponeEnabled_;
    }

    virtual void ComputeNewSize()
    {
        GetObjectAllocator()->GetHeapSpace()->ComputeNewSize();
    }

    /// @return GC specific settings based on runtime options and GC type
    const GCSettings *GetSettings() const
    {
        return &gcSettings_;
    }

protected:
    /// @brief Runs all phases
    void RunPhases(GCTask &task);

    /**
     * Add a task to the GC queue to be run by a GC worker (or run in place)
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason. The task may also be discarded for other reasons (for example, the task is invalid).
     */
    bool AddGCTask(bool isManaged, PandaUniquePtr<GCTask> task);

    virtual void InitializeImpl() = 0;
    virtual void PreRunPhasesImpl() = 0;
    virtual void RunPhasesImpl(GCTask &task) = 0;
    virtual void PreStartupImp() {}

    inline bool IsTracingEnabled() const
    {
        return gcSettings_.IsGcEnableTracing();
    }

    inline void BeginTracePoint(const PandaString &tracePointName) const
    {
        if (IsTracingEnabled()) {
            trace::BeginTracePoint(tracePointName.c_str());
        }
    }

    inline void EndTracePoint() const
    {
        if (IsTracingEnabled()) {
            trace::EndTracePoint();
        }
    }

    virtual void VisitRoots(const GCRootVisitor &gcRootVisitor, VisitGCRootFlags flags) = 0;
    virtual void VisitClassRoots(const GCRootVisitor &gcRootVisitor) = 0;
    virtual void VisitCardTableRoots(CardTable *cardTable, const GCRootVisitor &gcRootVisitor,
                                     const MemRangeChecker &rangeChecker, const ObjectChecker &rangeObjectChecker,
                                     const ObjectChecker &fromObjectChecker, uint32_t processedFlag) = 0;

    inline bool CASGCPhase(GCPhase expected, GCPhase set)
    {
        return phase_.compare_exchange_strong(expected, set);
    }

    GCInstanceStats *GetStats()
    {
        return &instanceStats_;
    }

    inline void SetType(GCType gcType)
    {
        gcType_ = gcType;
    }

    inline void SetTLABsSupported()
    {
        tlabsSupported_ = true;
    }

    void SetGCBarrierSet(GCBarrierSet *barrierSet)
    {
        ASSERT(gcBarrierSet_ == nullptr);
        gcBarrierSet_ = barrierSet;
    }

    /**
     * @brief Create a GC workers task pool which runs some GC phases in parallel
     * This pool can be based on an internal thread pool or on TaskManager workers
     */
    void CreateWorkersTaskPool();

    /// @brief Destroy the GC workers task pool if it was created
    void DestroyWorkersTaskPool();

    /// Mark all references which we added by the AddReference method
    virtual void MarkReferences(GCMarkingStackType *references, GCPhase gcPhase) = 0;

    virtual void UpdateRefsToMovedObjectsInPygoteSpace() = 0;
    /// Update all refs to moved objects
    virtual void CommonUpdateRefsToMovedObjects() = 0;

    virtual void UpdateVmRefs() = 0;

    virtual void UpdateGlobalObjectStorage() = 0;

    virtual void UpdateClassLinkerContextRoots() = 0;

    void UpdateRefsInVRegs(ManagedThread *thread);

    const ObjectHeader *PopObjectFromStack(GCMarkingStackType *objectsStack);

    Timing *GetTiming()
    {
        return &timing_;
    }

    template <GCScopeType GC_SCOPE_TYPE>
    friend class GCScope;

    void SetForwardAddress(ObjectHeader *src, ObjectHeader *dst);

    // A vector is used here because we can add references during a young GC and get new refs during an old GC;
    // this is possible if we run two GCs within one safepoint.
    // The max length of this vector is 2.
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    PandaVector<panda::mem::Reference *> *clearedReferences_ GUARDED_BY(clearedReferencesLock_) {nullptr};

    os::memory::Mutex *clearedReferencesLock_ {nullptr};  // NOLINT(misc-non-private-member-variables-in-classes)

    std::atomic<size_t> gcCounter_ {0};  // NOLINT(misc-non-private-member-variables-in-classes)
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    std::atomic<GCTaskCause> lastCause_ {GCTaskCause::INVALID_CAUSE};

    bool IsExplicitFull(const panda::GCTask &task) const
    {
        return (task.reason == GCTaskCause::EXPLICIT_CAUSE) && !gcSettings_.IsExplicitConcurrentGcEnabled();
    }

    const ReferenceProcessor *GetReferenceProcessor() const
    {
        return referenceProcessor_;
    }

    bool IsWorkerThreadsExist() const
    {
        return gcSettings_.GCWorkersCount() != 0;
    }

    void EnableWorkerThreads();
    void DisableWorkerThreads();

    /// @return true if GC can work in concurrent mode
    bool IsConcurrencyAllowed() const
    {
        return gcSettings_.IsConcurrencyEnabled();
    }

    Logger::Buffer GetLogPrefix() const;

    void FireGCStarted(const GCTask &task, size_t bytesInHeapBeforeGc);
    void FireGCFinished(const GCTask &task, size_t bytesInHeapBeforeGc, size_t bytesInHeapAfterGc);
    void FireGCPhaseStarted(GCPhase phase);
    void FireGCPhaseFinished(GCPhase phase);

    void SetFullGC(bool value);

    /// Set GC threads on best and middle cores before GC
    void SetupCpuAffinity();

    /// Set GC threads on best and middle cores after the concurrent phase
    void SetupCpuAffinityAfterConcurrent();

    /// Set GC threads on saved or weak cores before the concurrent phase
    void SetupCpuAffinityBeforeConcurrent();

    /// Restore GC threads to the saved cores after GC
    void RestoreCpuAffinity();

    virtual void StartConcurrentScopeRoutine() const;
    virtual void EndConcurrentScopeRoutine() const;

    virtual void PrintDetailedLog();

    Timing timing_;  // NOLINT(misc-non-private-member-variables-in-classes)

    PandaVector<std::pair<PandaString, uint64_t>>
        footprintList_;  // NOLINT(misc-non-private-member-variables-in-classes)

    virtual size_t VerifyHeap() = 0;

private:
    /// Reset GC threads to saved or weak cores
    void ResetCpuAffinity(bool beforeConcurrent);

    /**
     * Check whether the GC should run after waiting for mutator threads. GC tasks can arrive from several mutator
     * threads, so sometimes there is no need to run the GC multiple times. Also, some GCs run in place, but in the
     * meantime a GC can be running in the GC thread, and an "in-place" GC waits for the idle state before running,
     * so we need to check whether to run such a GC after waiting for the threads
     * @see WaitForIdleGC
     *
     * @param counterBeforeWaiting value of the gc counter before waiting for mutator threads
     * @param task current GC task
     *
     * @return true if the GC needs to run with the current task after waiting for mutator threads, false otherwise
     */
    bool NeedRunGCAfterWaiting(size_t counterBeforeWaiting, const GCTask &task) const;

    /**
     * @brief Create a GC worker if needed and set the GC status to running (gcRunning_ variable)
     * @see IsGCRunning
     */
    void CreateWorker();

    /**
     * @brief Join and destroy the GC worker if needed and set the GC status to non-running (gcRunning_ variable)
     * @see IsGCRunning
     */
    void DestroyWorker();

    /// Move small objects to pygote space at the first pygote fork
    void MoveObjectsToPygoteSpace();

    size_t GetNativeBytesFromMallinfoAndRegister() const;
    virtual void ClearLocalInternalAllocatorPools() = 0;
    virtual void UpdateThreadLocals() = 0;
    NativeGcTriggerType GetNativeGcTriggerType();

    class GCListenerManager {
    public:
        GCListenerManager() = default;
        NO_COPY_SEMANTIC(GCListenerManager);
        NO_MOVE_SEMANTIC(GCListenerManager);
        ~GCListenerManager() = default;

        void AddListener(GCListener *newListener);
        void RemoveListener(GCListener *newListener);

        void NormalizeListenersOnStartGC();

        template <class Visitor>
        void IterateOverListeners(const Visitor &visitor)
        {
            os::memory::LockHolder lh(listenerLock_);
            for (auto *gcListener : currentListeners_) {
                if (gcListener != nullptr) {
                    visitor(gcListener);
                }
            }
        }

    private:
        os::memory::Mutex listenerLock_;
        PandaUnorderedSet<GCListener *> currentListeners_ GUARDED_BY(listenerLock_);
        PandaUnorderedSet<GCListener *> newListeners_ GUARDED_BY(listenerLock_);
        PandaUnorderedSet<GCListener *> listenersForRemove_ GUARDED_BY(listenerLock_);
    };

    volatile std::atomic<GCPhase> phase_ {GCPhase::GC_PHASE_IDLE};
    GCType gcType_ {GCType::INVALID_GC};
    GCSettings gcSettings_;
    GCListenerManager *gcListenerManager_ {nullptr};
    GCBarrierSet *gcBarrierSet_ {nullptr};
    ObjectAllocatorBase *objectAllocator_ {nullptr};
    InternalAllocatorPtr internalAllocator_ {nullptr};
    GCInstanceStats instanceStats_;
    os::CpuSet affinityBeforeGc_ {};

    // Additional NativeGC
    std::atomic<size_t> nativeBytesRegistered_ = 0;
    std::atomic<size_t> nativeObjectsNotified_ = 0;

    ReferenceProcessor *referenceProcessor_ {nullptr};

    // NOTE(ipetrov): choose suitable priority
    static constexpr size_t GC_TASK_QUEUE_PRIORITY = 6U;
    taskmanager::TaskQueueInterface *gcWorkersTaskQueue_ = nullptr;

    /* GC worker specific variables */
    GCWorker *gcWorker_ = nullptr;
    std::atomic_bool gcRunning_ = false;
    std::atomic<bool> canAddGcTask_ = true;

    bool tlabsSupported_ = false;

    // Additional data for extensions
    GCExtensionData *extensionData_ {nullptr};

    GCWorkersTaskPool *workersTaskPool_ {nullptr};
    class PostForkGCTask;

    friend class ecmascript::EcmaReferenceProcessor;
    friend class panda::mem::test::MemStatsGenGCTest;
    friend class panda::mem::test::ReferenceStorageTest;
    friend class panda::mem::test::RemSetTest;
    friend class GCScopedPhase;
    friend class GlobalObjectStorage;
    // NOTE(maksenov): Avoid using specific ObjectHelpers class here
    friend class GCDynamicObjectHelpers;
    friend class GCStaticObjectHelpers;
    friend class G1GCTest;
    friend class GCTestLog;

    void TriggerGCForNative();
    size_t SimpleNativeAllocationGcWatermark();
    /// Waits until the current GC task (if any) has been processed
    void WaitForIdleGC() NO_THREAD_SAFETY_ANALYSIS;

    friend class GCScopedPhase;
    friend class ConcurrentScope;

    PandaVM *vm_ {nullptr};
    std::atomic<bool> isFullGc_ {false};
    std::atomic<bool> isPostponeEnabled_ {false};
};

/**
 * @brief Create a GC of the given type
 * @param gcType - type of the GC to create
 * @return pointer to the created GC on success, nullptr on failure
 */
template <class LanguageConfig>
GC *CreateGC(GCType gcType, ObjectAllocatorBase *objectAllocator, const GCSettings &settings);
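
// Illustrative usage sketch (hypothetical values, not part of this header): a VM would
// typically instantiate and start the collector roughly as follows, where the language
// config and GC type depend on the embedder:
//
//   GC *gc = CreateGC<PandaAssemblyLanguageConfig>(GCType::GEN_GC, objectAllocator, settings);
//   if (gc != nullptr) {
//       gc->Initialize(vm);
//       gc->StartGC();
//   }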

/// Enable concurrent mode. Should be used only from STW code.
class ConcurrentScope final {
public:
    explicit ConcurrentScope(GC *gc, bool autoStart = true);
    NO_COPY_SEMANTIC(ConcurrentScope);
    NO_MOVE_SEMANTIC(ConcurrentScope);
    ~ConcurrentScope();
    void Start();

private:
    GC *gc_;
    bool started_ = false;
};
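
// Illustrative usage sketch (assumed from the RAII shape of the class, not a documented
// contract): within a stop-the-world phase, a scope re-enables concurrency for its lifetime:
//
//   {
//       ConcurrentScope concurrentScope(this);  // starts concurrent mode (autoStart = true)
//       DoConcurrentWork();                     // hypothetical concurrent phase work
//   }  // destructor ends concurrent mode and returns to STW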

}  // namespace panda::mem

#endif  // PANDA_RUNTIME_MEM_GC_GC_H