/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_GC_GC_H
#define PANDA_RUNTIME_MEM_GC_GC_H

#include <atomic>
#include <map>
#include <string_view>
#include <vector>

#include "libpandabase/os/cpu_affinity.h"
#include "libpandabase/os/mutex.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/taskmanager/task_queue.h"
#include "libpandabase/trace/trace.h"
#include "libpandabase/utils/expected.h"
#include "runtime/include/gc_task.h"
#include "runtime/include/object_header.h"
#include "runtime/include/language_config.h"
#include "runtime/include/locks.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/mem/panda_string.h"
#include "runtime/mem/allocator_adapter.h"
#include "runtime/mem/gc/gc_settings.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "runtime/mem/gc/gc_phase.h"
#include "runtime/mem/gc/gc_root.h"
#include "runtime/mem/gc/gc_adaptive_stack.h"
#include "runtime/mem/gc/gc_scope.h"
#include "runtime/mem/gc/gc_scoped_phase.h"
#include "runtime/mem/gc/gc_stats.h"
#include "runtime/mem/gc/gc_types.h"
#include "runtime/mem/refstorage/reference.h"
#include "runtime/mem/gc/bitmap.h"
#include "runtime/mem/gc/workers/gc_worker.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/timing.h"
#include "runtime/mem/region_allocator.h"

namespace ark {
class BaseClass;
class HClass;
class PandaVM;
class Timing;
namespace mem {
class G1GCTest;
class GlobalObjectStorage;
class ReferenceProcessor;
template <MTModeT MT_MODE>
class ObjectAllocatorG1;
namespace test {
class MemStatsGenGCTest;
class ReferenceStorageTest;
class RemSetTest;
}  // namespace test
namespace ecmascript {
class EcmaReferenceProcessor;
}  // namespace ecmascript
}  // namespace mem
}  // namespace ark

namespace ark::coretypes {
class Array;
class DynClass;
}  // namespace ark::coretypes

namespace ark::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_GC LOG(DEBUG, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_INFO_GC LOG(INFO, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_OBJECT_EVENTS LOG(DEBUG, MM_OBJECT_EVENTS)

// forward declarations:
class GCListener;
class GCScopePhase;
class GCScopedPhase;
class GCQueueInterface;
class GCDynamicObjectHelpers;
class GCWorkersTaskPool;
class GCWorkersTask;

enum class GCError { GC_ERROR_NO_ROOTS, GC_ERROR_NO_FRAMES, GC_ERROR_LAST = GC_ERROR_NO_FRAMES };

enum ClassRootsVisitFlag : bool {
    ENABLED = true,
    DISABLED = false,
};

enum CardTableVisitFlag : bool {
    VISIT_ENABLED = true,
    VISIT_DISABLED = false,
};

enum BuffersKeepingFlag : bool {
    KEEP = true,
    DELETE = false,
};

class GCListener {
public:
    GCListener() = default;
    NO_COPY_SEMANTIC(GCListener);
    DEFAULT_MOVE_SEMANTIC(GCListener);
    virtual ~GCListener() = default;
    virtual void GCStarted([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSize) {}
    virtual void GCFinished([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSizeBeforeGc,
                            [[maybe_unused]] size_t heapSize)
    {
    }
    virtual void GCPhaseStarted([[maybe_unused]] GCPhase phase) {}
    virtual void GCPhaseFinished([[maybe_unused]] GCPhase phase) {}
};

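// Illustrative sketch (not part of the runtime): a listener that reports how much
// the heap shrank over a collection. It would be registered via GC::AddListener();
// the class name and log message below are assumptions made for the example.
//
//   class GCPauseLogger final : public GCListener {
//   public:
//       void GCFinished([[maybe_unused]] const GCTask &task, size_t heapSizeBeforeGc, size_t heapSize) override
//       {
//           // heapSizeBeforeGc is the footprint when the GC began, heapSize the one after
//           LOG(INFO, GC) << "GC freed " << (heapSizeBeforeGc - heapSize) << " bytes";
//       }
//   };
//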
class GCExtensionData;

using UpdateRefInObject = std::function<void(ObjectHeader *)>;

// base class for all GCs
class GC {
public:
    using MarkPreprocess = std::function<void(const ObjectHeader *, BaseClass *)>;
    using ReferenceCheckPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceClearPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceProcessPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceProcessorT = std::function<void(void *)>;

    static constexpr bool EmptyReferenceProcessPredicate([[maybe_unused]] const ObjectHeader *ref)
    {
        return true;
    }

    static constexpr void EmptyMarkPreprocess([[maybe_unused]] const ObjectHeader *ref,
                                              [[maybe_unused]] BaseClass *baseKlass)
    {
    }

    explicit GC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings);
    NO_COPY_SEMANTIC(GC);
    NO_MOVE_SEMANTIC(GC);
    virtual ~GC() = 0;

    GCType GetType();

    /// @brief Initialize GC
    void Initialize(PandaVM *vm);

    /**
     * @brief Starts GC after initialization
     * Creates the worker thread and sets gcRunning_ to true
     */
    virtual void StartGC();

    /**
     * @brief Stops GC for runtime destruction
     * Joins the GC thread and clears the queue
     */
    virtual void StopGC();

    /**
     * Should be used to wait for a GC that needs to work exclusively.
     * Note: for non-MT STW GC this can be used to run the GC.
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason.
     */
    virtual bool WaitForGC(GCTask task);

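    // Illustrative sketch (hypothetical caller; the exact GCTask constructor
    // arguments are an assumption, see runtime/include/gc_task.h):
    //   GCTask task(GCTaskCause::EXPLICIT_CAUSE);
    //   if (!gc->WaitForGC(std::move(task))) {
    //       // discarded: a task with the same reason was already being executed
    //   }
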
    /**
     * Should be used to wait for a GC that is executed in the managed scope
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason.
     */
    bool WaitForGCInManaged(const GCTask &task) NO_THREAD_SAFETY_ANALYSIS;

    /// Should only be used at the first pygote fork
    void WaitForGCOnPygoteFork(const GCTask &task);

    bool IsOnPygoteFork() const;

    /**
     * Initialize GC bits on object creation.
     * Required only for GCs with switched bits
     */
    virtual void InitGCBits(ark::ObjectHeader *objHeader) = 0;

    /// Initialize GC bits on object creation for the TLAB allocation.
    virtual void InitGCBitsForAllocationInTLAB(ark::ObjectHeader *objHeader) = 0;

    bool IsTLABsSupported() const
    {
        return tlabsSupported_;
    }

    /// @return true if GC supports object pinning (will not move pinned objects), false otherwise
    virtual bool IsPinningSupported() const = 0;

    /// @return true if the cause is suitable for the GC, false otherwise
    virtual bool CheckGCCause(GCTaskCause cause) const;

    /**
     * Trigger GC.
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason. The task may also be discarded for other reasons.
     */
    virtual bool Trigger(PandaUniquePtr<GCTask> task) = 0;

    virtual bool IsFullGC() const;

    /// @return true if the GC is generational, false otherwise
    bool IsGenerational() const;

    PandaString DumpStatistics()
    {
        return instanceStats_.GetDump(gcType_);
    }

    void AddListener(GCListener *listener)
    {
        ASSERT(gcListenerManager_ != nullptr);
        gcListenerManager_->AddListener(listener);
    }

    void RemoveListener(GCListener *listener)
    {
        ASSERT(gcListenerManager_ != nullptr);
        gcListenerManager_->RemoveListener(listener);
    }

    GCBarrierSet *GetBarrierSet()
    {
        ASSERT(gcBarrierSet_ != nullptr);
        return gcBarrierSet_;
    }

    GCWorkersTaskPool *GetWorkersTaskPool() const
    {
        ASSERT(workersTaskPool_ != nullptr);
        return workersTaskPool_;
    }

    // Additional NativeGC
    void NotifyNativeAllocations();

    void RegisterNativeAllocation(size_t bytes);

    void RegisterNativeFree(size_t bytes);

    int32_t GetNotifyNativeInterval()
    {
        return NOTIFY_NATIVE_INTERVAL;
    }

    // CheckGCForNative is called after every NOTIFY_NATIVE_INTERVAL allocation notifications
    static constexpr int32_t NOTIFY_NATIVE_INTERVAL = 32;

    // CheckGCForNative is called immediately if the allocation size exceeds the following threshold
    static constexpr size_t CHECK_IMMEDIATELY_THRESHOLD = 300000;

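    // Illustrative sketch of the native-allocation accounting driven by the two
    // constants above (hypothetical caller; AllocateNativeBuffer is made up for
    // the example, and where exactly the periodic check fires is a sketch):
    //   void *p = AllocateNativeBuffer(size);  // hypothetical native allocation
    //   gc->RegisterNativeAllocation(size);    // checks immediately when
    //                                          // size >= CHECK_IMMEDIATELY_THRESHOLD
    //   gc->NotifyNativeAllocations();         // every NOTIFY_NATIVE_INTERVAL (32)
    //                                          // notifications trigger CheckGCForNative
    //   ...
    //   gc->RegisterNativeFree(size);          // balance the books on free
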
    inline bool IsLogDetailedGcInfoEnabled() const
    {
        return gcSettings_.LogDetailedGCInfoEnabled();
    }

    inline bool IsLogDetailedGcCompactionInfoEnabled() const
    {
        return gcSettings_.LogDetailedGCCompactionInfoEnabled();
    }

    inline GCPhase GetGCPhase() const
    {
        return phase_;
    }

    inline GCTaskCause GetLastGCCause() const
    {
        // Atomic with acquire order reason: data race with other threads which can update the variable
        return lastCause_.load(std::memory_order_acquire);
    }

    inline bool IsGCRunning()
    {
        // Atomic with seq_cst order reason: data race with gcRunning_ with requirement for sequentially consistent
        // order where threads observe all modifications in the same order
        return gcRunning_.load(std::memory_order_seq_cst);
    }

    void PreStartup();

    InternalAllocatorPtr GetInternalAllocator() const
    {
        return internalAllocator_;
    }

    /**
     * Enqueue all references in ReferenceQueue. Should be done after GC to avoid deadlock (lock in
     * ReferenceQueue.class)
     */
    void EnqueueReferences();

    /// Process all references which the GC found in the marking phase.
    void ProcessReferences(GCPhase gcPhase, const GCTask &task, const ReferenceClearPredicateT &pred);

    /// Process all references which were found during evacuation
    void ProcessReferences(const GCTask &task, const ReferenceClearPredicateT &pred);

    size_t GetNativeBytesRegistered()
    {
        // Atomic with relaxed order reason: data race with nativeBytesRegistered_ with no synchronization or
        // ordering constraints imposed on other reads or writes
        return nativeBytesRegistered_.load(std::memory_order_relaxed);
    }

    virtual void SetPandaVM(PandaVM *vm);

    PandaVM *GetPandaVm() const
    {
        return vm_;
    }

    taskmanager::TaskQueueInterface *GetWorkersTaskQueue() const
    {
        return gcWorkersTaskQueue_;
    }

    virtual void PreZygoteFork();

    virtual void PostZygoteFork();

    /**
     * Processes the thread's remaining pre- and post-barrier buffer entries on its termination.
     *
     * @param keepBuffers specifies whether to clear (=BuffersKeepingFlag::KEEP) or deallocate
     * (=BuffersKeepingFlag::DELETE) pre- and post-barrier buffers upon OnThreadTerminate() completion
     */
    virtual void OnThreadTerminate([[maybe_unused]] ManagedThread *thread,
                                   [[maybe_unused]] mem::BuffersKeepingFlag keepBuffers)
    {
    }

    /// Performs the actions that are required upon thread creation (if any)
    virtual void OnThreadCreate([[maybe_unused]] ManagedThread *thread) {}

    void SetCanAddGCTask(bool canAddTask)
    {
        // Atomic with relaxed order reason: data race with canAddGcTask_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        canAddGcTask_.store(canAddTask, std::memory_order_relaxed);
    }

    GCExtensionData *GetExtensionData() const
    {
        return extensionData_;
    }

    void SetExtensionData(GCExtensionData *data)
    {
        extensionData_ = data;
    }

    virtual void PostForkCallback() {}

    /// Check if the object address is in the GC sweep range
    virtual bool InGCSweepRange([[maybe_unused]] const ObjectHeader *obj) const
    {
        return true;
    }

    virtual CardTable *GetCardTable() const
    {
        return nullptr;
    }

    /// Called from a GCWorker thread to assign thread-specific data
    virtual bool InitWorker(void **workerData)
    {
        *workerData = nullptr;
        return true;
    }

    /// Called from a GCWorker thread to destroy thread-specific data
    virtual void DestroyWorker([[maybe_unused]] void *workerData) {}

    /// Process a task sent to a GC worker thread.
    virtual void WorkerTaskProcessing([[maybe_unused]] GCWorkersTask *task, [[maybe_unused]] void *workerData)
    {
        LOG(FATAL, GC) << "Unimplemented method";
    }

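    // Illustrative sketch of the worker-data lifecycle (hypothetical subclass MyGC
    // and scratch type MyWorkerState; both names are assumptions):
    //   class MyGC : public GC {
    //       bool InitWorker(void **workerData) override
    //       {
    //           *workerData = GetInternalAllocator()->New<MyWorkerState>();
    //           return *workerData != nullptr;
    //       }
    //       void WorkerTaskProcessing(GCWorkersTask *task, void *workerData) override
    //       {
    //           auto *state = static_cast<MyWorkerState *>(workerData);
    //           // ... consume the task using worker-local state ...
    //       }
    //       void DestroyWorker(void *workerData) override
    //       {
    //           GetInternalAllocator()->Delete(static_cast<MyWorkerState *>(workerData));
    //       }
    //   };
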
    virtual bool IsMutatorAllowed()
    {
        return false;
    }

    /// Return true if ref is an instance of Reference or its ancestor, false otherwise
    bool IsReference(const BaseClass *cls, const ObjectHeader *ref, const ReferenceCheckPredicateT &pred);
    bool IsReference(const BaseClass *cls, const ObjectHeader *ref);

    void ProcessReference(GCMarkingStackType *objectsStack, const BaseClass *cls, const ObjectHeader *ref,
                          const ReferenceProcessPredicateT &pred);
    void ProcessReferenceForSinglePassCompaction(const BaseClass *cls, const ObjectHeader *ref,
                                                 const ReferenceProcessorT &processor);

    ALWAYS_INLINE ObjectAllocatorBase *GetObjectAllocator() const
    {
        return objectAllocator_;
    }

    // called if we fail to change state from idle to running
    virtual void OnWaitForIdleFail();

    virtual void PendingGC() {}

    /**
     * Check if the object is marked for GC (alive)
     * @param object
     * @return true if the object is marked for GC
     */
    virtual bool IsMarked(const ObjectHeader *object) const = 0;

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param objectHeader
     * @return true if the object's old state was not marked
     */
    virtual bool MarkObjectIfNotMarked(ObjectHeader *objectHeader);

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param objectHeader
     */
    virtual void MarkObject(ObjectHeader *objectHeader) = 0;

    /**
     * Add a reference for later processing in the marking phase
     * @param object - object from which we start to mark
     */
    void AddReference(ObjectHeader *fromObject, ObjectHeader *object);

    inline void SetGCPhase(GCPhase gcPhase)
    {
        phase_ = gcPhase;
    }

    size_t GetCounter() const
    {
        return gcCounter_;
    }

    virtual void PostponeGCStart()
    {
        ASSERT(IsPostponeGCSupported());
        isPostponeEnabled_ = true;
    }

    virtual void PostponeGCEnd()
    {
        ASSERT(IsPostponeGCSupported());
        ASSERT(IsPostponeEnabled());
        isPostponeEnabled_ = false;
    }

    virtual bool IsPostponeGCSupported() const = 0;

    bool IsPostponeEnabled()
    {
        return isPostponeEnabled_;
    }

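    // Illustrative sketch (hypothetical caller): postponing GC around a
    // latency-critical section, guarded by the support check above.
    //   if (gc->IsPostponeGCSupported()) {
    //       gc->PostponeGCStart();
    //       RunLatencyCriticalSection();  // hypothetical workload
    //       gc->PostponeGCEnd();
    //   }
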
    virtual void ComputeNewSize()
    {
        GetObjectAllocator()->GetHeapSpace()->ComputeNewSize();
    }

    /// @return GC specific settings based on runtime options and GC type
    const GCSettings *GetSettings() const
    {
        return &gcSettings_;
    }

protected:
    /// @brief Runs all phases
    void RunPhases(GCTask &task);

    /**
     * Add a task to the GC queue to be run by a GC worker (or run in place)
     * @return false if the task is discarded. Otherwise true.
     * The task may be discarded if the GC is already executing a task with
     * the same reason. The task may also be discarded for other reasons (for example, the task is invalid).
     */
    bool AddGCTask(bool isManaged, PandaUniquePtr<GCTask> task);

    virtual void InitializeImpl() = 0;
    virtual void PreRunPhasesImpl() = 0;
    virtual void RunPhasesImpl(GCTask &task) = 0;
    virtual void PreStartupImp() {}

    inline bool IsTracingEnabled() const
    {
        return gcSettings_.IsGcEnableTracing();
    }

    inline void BeginTracePoint(const PandaString &tracePointName) const
    {
        if (IsTracingEnabled()) {
            trace::BeginTracePoint(tracePointName.c_str());
        }
    }

    inline void EndTracePoint() const
    {
        if (IsTracingEnabled()) {
            trace::EndTracePoint();
        }
    }

    virtual void VisitRoots(const GCRootVisitor &gcRootVisitor, VisitGCRootFlags flags) = 0;
    virtual void VisitClassRoots(const GCRootVisitor &gcRootVisitor) = 0;
    virtual void VisitCardTableRoots(CardTable *cardTable, const GCRootVisitor &gcRootVisitor,
                                     const MemRangeChecker &rangeChecker, const ObjectChecker &rangeObjectChecker,
                                     const ObjectChecker &fromObjectChecker, uint32_t processedFlag) = 0;

    inline bool CASGCPhase(GCPhase expected, GCPhase set)
    {
        return phase_.compare_exchange_strong(expected, set);
    }

    GCInstanceStats *GetStats()
    {
        return &instanceStats_;
    }

    inline void SetType(GCType gcType)
    {
        gcType_ = gcType;
    }

    inline void SetTLABsSupported()
    {
        tlabsSupported_ = true;
    }

    void SetGCBarrierSet(GCBarrierSet *barrierSet)
    {
        ASSERT(gcBarrierSet_ == nullptr);
        gcBarrierSet_ = barrierSet;
    }

    /**
     * @brief Create GC workers task pool which runs some GC phases in parallel
     * This pool can be based on an internal thread pool or on TaskManager workers
     */
    void CreateWorkersTaskPool();

    /// @brief Destroy GC workers task pool if it was created
    void DestroyWorkersTaskPool();

    /// Mark all references which were added by the AddReference method
    virtual void MarkReferences(GCMarkingStackType *references, GCPhase gcPhase) = 0;

    virtual void UpdateRefsToMovedObjectsInPygoteSpace() = 0;
    /// Update all refs to moved objects
    virtual void CommonUpdateRefsToMovedObjects() = 0;

    virtual void UpdateVmRefs() = 0;

    virtual void UpdateGlobalObjectStorage() = 0;

    virtual void UpdateClassLinkerContextRoots() = 0;

    void UpdateRefsInVRegs(ManagedThread *thread);

    const ObjectHeader *PopObjectFromStack(GCMarkingStackType *objectsStack);

    Timing *GetTiming()
    {
        return &timing_;
    }

    template <GCScopeType GC_SCOPE_TYPE>
    friend class GCScope;

    void SetForwardAddress(ObjectHeader *src, ObjectHeader *dst);

    // A vector because we can add some references during young GC and get new refs during old GC;
    // this is possible if we run two GCs for one safepoint.
    // The max length of this vector is 2.
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    PandaVector<ark::mem::Reference *> *clearedReferences_ GUARDED_BY(clearedReferencesLock_) {nullptr};

    os::memory::Mutex *clearedReferencesLock_ {nullptr};  // NOLINT(misc-non-private-member-variables-in-classes)

    std::atomic<size_t> gcCounter_ {0};  // NOLINT(misc-non-private-member-variables-in-classes)
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    std::atomic<GCTaskCause> lastCause_ {GCTaskCause::INVALID_CAUSE};

    bool IsExplicitFull(const ark::GCTask &task) const
    {
        return (task.reason == GCTaskCause::EXPLICIT_CAUSE) && !gcSettings_.IsExplicitConcurrentGcEnabled();
    }

    const ReferenceProcessor *GetReferenceProcessor() const
    {
        return referenceProcessor_;
    }

    bool IsWorkerThreadsExist() const
    {
        return gcSettings_.GCWorkersCount() != 0;
    }

    void EnableWorkerThreads();
    void DisableWorkerThreads();

    /// @return true if GC can work in concurrent mode
    bool IsConcurrencyAllowed() const
    {
        return gcSettings_.IsConcurrencyEnabled();
    }

    Logger::Buffer GetLogPrefix() const;

    void FireGCStarted(const GCTask &task, size_t bytesInHeapBeforeGc);
    void FireGCFinished(const GCTask &task, size_t bytesInHeapBeforeGc, size_t bytesInHeapAfterGc);
    void FireGCPhaseStarted(GCPhase phase);
    void FireGCPhaseFinished(GCPhase phase);

    void SetFullGC(bool value);

    /// Set GC threads on best and middle cores before GC
    void SetupCpuAffinity();

    /// Set GC threads on best and middle cores after the concurrent phase
    void SetupCpuAffinityAfterConcurrent();

    /// Set GC threads on saved or weak cores before the concurrent phase
    void SetupCpuAffinityBeforeConcurrent();

    /// Restore GC threads to the saved cores after GC
    void RestoreCpuAffinity();

    virtual void StartConcurrentScopeRoutine() const;
    virtual void EndConcurrentScopeRoutine() const;

    virtual void PrintDetailedLog();

    Timing timing_;  // NOLINT(misc-non-private-member-variables-in-classes)

    PandaVector<std::pair<PandaString, uint64_t>>
        footprintList_;  // NOLINT(misc-non-private-member-variables-in-classes)

    virtual size_t VerifyHeap() = 0;

private:
    /// Reset GC threads on saved or weak cores
    void ResetCpuAffinity(bool beforeConcurrent);

    /**
     * Check whether the GC should run after waiting for mutator threads. GC tasks can arrive from several mutator
     * threads, so sometimes there is no need to run the GC multiple times. Also, some GCs run in place, but
     * meanwhile a GC can be running in the GC thread, and an "in-place" GC waits for the idle state before running,
     * so we need to check whether to run such a GC after waiting for the threads
     * @see WaitForIdleGC
     *
     * @param counterBeforeWaiting value of the GC counter before waiting for mutator threads
     * @param task current GC task
     *
     * @return true if the GC needs to run with the current task after waiting for mutator threads, false otherwise
     */
    bool NeedRunGCAfterWaiting(size_t counterBeforeWaiting, const GCTask &task) const;

    /**
     * @brief Performs several setups before the phases launch
     * @return true if the GC run is still needed
     */
    bool GCPhasesPreparation(const GCTask &task);

    /// @brief Collects logs and heap dumps and launches PostGCHeapVerification after the GC phases
    void GCPhasesFinish(const GCTask &task);

    /**
     * @brief Create the GC worker if needed and set the GC status to running (gcRunning_ variable)
     * @see IsGCRunning
     */
    void CreateWorker();

    /**
     * @brief Join and destroy the GC worker if needed and set the GC status to non-running (gcRunning_ variable)
     * @see IsGCRunning
     */
    void DestroyWorker();

    /// Move small objects to pygote space at the first pygote fork
    void MoveObjectsToPygoteSpace();

    size_t GetNativeBytesFromMallinfoAndRegister() const;
    virtual void ClearLocalInternalAllocatorPools() = 0;
    virtual void UpdateThreadLocals() = 0;
    NativeGcTriggerType GetNativeGcTriggerType();

    class GCListenerManager {
    public:
        GCListenerManager() = default;
        NO_COPY_SEMANTIC(GCListenerManager);
        NO_MOVE_SEMANTIC(GCListenerManager);
        ~GCListenerManager() = default;

        void AddListener(GCListener *newListener);
        void RemoveListener(GCListener *newListener);

        void NormalizeListenersOnStartGC();

        template <class Visitor>
        void IterateOverListeners(const Visitor &visitor)
        {
            os::memory::LockHolder lh(listenerLock_);
            for (auto *gcListener : currentListeners_) {
                if (gcListener != nullptr) {
                    visitor(gcListener);
                }
            }
        }

    private:
        os::memory::Mutex listenerLock_;
        PandaUnorderedSet<GCListener *> currentListeners_ GUARDED_BY(listenerLock_);
        PandaUnorderedSet<GCListener *> newListeners_ GUARDED_BY(listenerLock_);
        PandaUnorderedSet<GCListener *> listenersForRemove_ GUARDED_BY(listenerLock_);
    };

    volatile std::atomic<GCPhase> phase_ {GCPhase::GC_PHASE_IDLE};
    GCType gcType_ {GCType::INVALID_GC};
    GCSettings gcSettings_;
    GCListenerManager *gcListenerManager_ {nullptr};
    GCBarrierSet *gcBarrierSet_ {nullptr};
    ObjectAllocatorBase *objectAllocator_ {nullptr};
    InternalAllocatorPtr internalAllocator_ {nullptr};
    GCInstanceStats instanceStats_;
    os::CpuSet affinityBeforeGc_ {};

    // Additional NativeGC
    std::atomic<size_t> nativeBytesRegistered_ = 0;
    std::atomic<size_t> nativeObjectsNotified_ = 0;

    ReferenceProcessor *referenceProcessor_ {nullptr};

    // NOTE(ipetrov): choose suitable priority
    static constexpr size_t GC_TASK_QUEUE_PRIORITY = taskmanager::TaskQueueInterface::MAX_PRIORITY;
    taskmanager::TaskQueueInterface *gcWorkersTaskQueue_ = nullptr;

    /* GC worker specific variables */
    GCWorker *gcWorker_ = nullptr;
    std::atomic_bool gcRunning_ = false;
    std::atomic<bool> canAddGcTask_ = true;

    bool tlabsSupported_ = false;

    // Additional data for extensions
    GCExtensionData *extensionData_ {nullptr};

    GCWorkersTaskPool *workersTaskPool_ {nullptr};
    class PostForkGCTask;

    friend class ecmascript::EcmaReferenceProcessor;
    friend class ark::mem::test::MemStatsGenGCTest;
    friend class ark::mem::test::ReferenceStorageTest;
    friend class ark::mem::test::RemSetTest;
    friend class GCScopedPhase;
    friend class GlobalObjectStorage;
    // NOTE(maksenov): Avoid using specific ObjectHelpers class here
    friend class GCDynamicObjectHelpers;
    friend class GCStaticObjectHelpers;
    friend class G1GCTest;
    friend class GCTestLog;

    void TriggerGCForNative();
    size_t SimpleNativeAllocationGcWatermark();
    /// Waits until the current GC task (if any) has been processed
    void WaitForIdleGC() NO_THREAD_SAFETY_ANALYSIS;

    friend class ConcurrentScope;

    PandaVM *vm_ {nullptr};
    std::atomic<bool> isFullGc_ {false};
    std::atomic<bool> isPostponeEnabled_ {false};
};

/**
 * @brief Create GC with the given type
 * @param gcType - type of GC to create
 * @return pointer to the created GC on success, nullptr on failure
 */
template <class LanguageConfig>
GC *CreateGC(GCType gcType, ObjectAllocatorBase *objectAllocator, const GCSettings &settings);

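// Illustrative sketch (hypothetical caller; the LanguageConfig instantiation and
// the concrete GC type are assumptions made for the example):
//   GC *gc = CreateGC<PandaCoreLanguageConfig>(GCType::GEN_GC, objectAllocator, settings);
//   if (gc != nullptr) {
//       gc->Initialize(vm);
//       gc->StartGC();
//   }
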
/// Enable concurrent mode. Should be used only from STW code.
class ConcurrentScope final {
public:
    explicit ConcurrentScope(GC *gc, bool autoStart = true);
    NO_COPY_SEMANTIC(ConcurrentScope);
    NO_MOVE_SEMANTIC(ConcurrentScope);
    ~ConcurrentScope();
    void Start();

private:
    GC *gc_;
    bool started_ = false;
};

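// Illustrative sketch (hypothetical GC-internal code): letting mutators run during
// a concurrent phase inside otherwise stop-the-world collection code.
//   {
//       ConcurrentScope concurrentScope(this);  // autoStart begins concurrent mode
//       RunConcurrentMarking();                 // hypothetical concurrent work
//   }  // destructor ends concurrent mode and returns to STW
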
}  // namespace ark::mem

#endif  // PANDA_RUNTIME_MEM_GC_GC_H