/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_GC_GC_H
#define PANDA_RUNTIME_MEM_GC_GC_H

#include <algorithm>   // std::find (used by RemoveListener)
#include <atomic>
#include <functional>  // std::function
#include <map>
#include <string_view>
#include <thread>      // std::thread (worker_)
#include <vector>

#include "libpandabase/os/mutex.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/trace/trace.h"
#include "libpandabase/utils/expected.h"
#include "runtime/include/gc_task.h"
#include "runtime/include/object_header.h"
#include "runtime/include/language_config.h"
#include "runtime/include/locks.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/mem/panda_string.h"
#include "runtime/mem/allocator_adapter.h"
#include "runtime/mem/gc/gc_settings.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "runtime/mem/gc/gc_phase.h"
#include "runtime/mem/gc/gc_root.h"
#include "runtime/mem/gc/gc_adaptive_stack.h"
#include "runtime/mem/gc/gc_scope.h"
#include "runtime/mem/gc/gc_scoped_phase.h"
#include "runtime/mem/gc/gc_stats.h"
#include "runtime/mem/gc/gc_types.h"
#include "runtime/mem/refstorage/reference.h"
#include "runtime/mem/gc/bitmap.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/timing.h"
#include "runtime/mem/region_allocator.h"

namespace panda {
class BaseClass;
class HClass;
class PandaVM;
class Timing;
namespace mem {
class G1GCTest;
class GlobalObjectStorage;
class ReferenceProcessor;
namespace test {
class MemStatsGenGCTest;
class ReferenceStorageTest;
class RemSetTest;
}  // namespace test
namespace ecmascript {
class EcmaReferenceProcessor;
}  // namespace ecmascript
}  // namespace mem
}  // namespace panda

namespace panda::coretypes {
class Array;
class DynClass;
}  // namespace panda::coretypes

namespace panda::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_GC LOG(DEBUG, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_INFO_GC LOG(INFO, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_OBJECT_EVENTS LOG(DEBUG, MM_OBJECT_EVENTS)

// Forward declarations:
class GCListener;
class GCScopePhase;
class HybridObjectAllocator;
class GCScopedPhase;
class GCQueueInterface;
class GCDynamicObjectHelpers;
class GCWorkersThreadPool;
class GCWorkersTask;

enum class GCError { GC_ERROR_NO_ROOTS, GC_ERROR_NO_FRAMES, GC_ERROR_LAST = GC_ERROR_NO_FRAMES };

enum ClassRootsVisitFlag : bool {
    ENABLED = true,
    DISABLED = false,
};

enum CardTableVisitFlag : bool {
    VISIT_ENABLED = true,
    VISIT_DISABLED = false,
};

class GCListener {
public:
    GCListener() = default;
    NO_COPY_SEMANTIC(GCListener);
    DEFAULT_MOVE_SEMANTIC(GCListener);
    virtual ~GCListener() = default;
    virtual void GCStarted([[maybe_unused]] size_t heap_size) {}
    virtual void GCFinished([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heap_size_before_gc,
                            [[maybe_unused]] size_t heap_size)
    {
    }
    virtual void GCPhaseStarted([[maybe_unused]] GCPhase phase) {}
    virtual void GCPhaseFinished([[maybe_unused]] GCPhase phase) {}
};

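// Example (illustrative sketch, not part of the runtime): a listener that
// records how much the heap shrank during each collection. `HeapDeltaListener`
// is a hypothetical name; register an instance via GC::AddListener().
class HeapDeltaListener : public GCListener {
public:
    void GCFinished([[maybe_unused]] const GCTask &task, size_t heap_size_before_gc, size_t heap_size) override
    {
        // The heap can also grow during a collection, so guard the subtraction
        last_freed_bytes_ = (heap_size_before_gc > heap_size) ? (heap_size_before_gc - heap_size) : 0;
    }

    size_t GetLastFreedBytes() const
    {
        return last_freed_bytes_;
    }

private:
    size_t last_freed_bytes_ {0};
};
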
class GCExtensionData;

using UpdateRefInObject = std::function<void(ObjectHeader *)>;

// Base class for all GCs
class GC {
public:
    using MarkPredicate = std::function<bool(const ObjectHeader *)>;
    using ReferenceCheckPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceClearPredicateT = std::function<bool(const ObjectHeader *)>;
    using ReferenceProcessPredicateT = std::function<bool(const ObjectHeader *)>;

    static bool EmptyReferenceProcessPredicate([[maybe_unused]] const ObjectHeader *ref)
    {
        return true;
    }

    explicit GC(ObjectAllocatorBase *object_allocator, const GCSettings &settings);
    NO_COPY_SEMANTIC(GC);
    NO_MOVE_SEMANTIC(GC);
    virtual ~GC() = 0;

    GCType GetType();

    /**
     * \brief Initialize GC
     */
    void Initialize(PandaVM *vm);

    /**
     * \brief Starts GC after initialization
     * Creates the worker thread and sets gc_running_ to true
     */
    virtual void StartGC();

    /**
     * \brief Stops GC for runtime destruction
     * Joins the GC thread and clears the queue
     */
    virtual void StopGC();

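    // Lifecycle sketch (illustrative, not part of this header), assuming `gc`
    // points at a concrete GC implementation and that GCTaskCause::EXPLICIT_CAUSE
    // is a valid cause from gc_task.h:
    //     gc->Initialize(vm);
    //     gc->StartGC();                                       // spawns the GC worker thread
    //     gc->WaitForGC(GCTask(GCTaskCause::EXPLICIT_CAUSE));  // run a collection synchronously
    //     gc->StopGC();                                        // joins the worker and clears the queue
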
    /**
     * Should be used to wait while the GC works exclusively
     * Note: for a non-MT STW GC this can be used to run the GC
     */
    virtual void WaitForGC(GCTask task) = 0;

    /**
     * Should be used to wait while the GC is executed in the managed scope
     */
    void WaitForGCInManaged(const GCTask &task) NO_THREAD_SAFETY_ANALYSIS;

    /**
     * Should only be used at the first pygote fork
     */
    void WaitForGCOnPygoteFork(const GCTask &task);

    bool IsOnPygoteFork() const;

    /**
     * Initialize GC bits on object creation.
     * Required only for GCs with switched bits
     */
    virtual void InitGCBits(panda::ObjectHeader *obj_header) = 0;

    /**
     * Initialize GC bits on object creation for the TLAB allocation.
     */
    virtual void InitGCBitsForAllocationInTLAB(panda::ObjectHeader *obj_header) = 0;

    bool IsTLABsSupported()
    {
        return tlabs_supported_;
    }

    /**
     * Triggers GC
     */
    virtual void Trigger() = 0;

    virtual bool IsFullGC() const;

    /**
     * Return true if the GC is generational, false otherwise
     */
    bool IsGenerational() const;

    PandaString DumpStatistics()
    {
        return instance_stats_.GetDump(gc_type_);
    }

    void AddListener(GCListener *listener)
    {
        ASSERT(gc_listeners_ptr_ != nullptr);
        gc_listeners_ptr_->push_back(listener);
    }

    void RemoveListener(GCListener *listener)
    {
        ASSERT(gc_listeners_ptr_ != nullptr);
        auto it = std::find(gc_listeners_ptr_->begin(), gc_listeners_ptr_->end(), listener);
        if (it != gc_listeners_ptr_->end()) {  // guard against a listener that was never registered
            *it = nullptr;
        }
    }

    GCBarrierSet *GetBarrierSet()
    {
        ASSERT(gc_barrier_set_ != nullptr);
        return gc_barrier_set_;
    }

    GCWorkersThreadPool *GetWorkersPool()
    {
        ASSERT(workers_pool_ != nullptr);
        return workers_pool_;
    }

    // Additional NativeGC support
    void NotifyNativeAllocations();

    void RegisterNativeAllocation(size_t bytes);

    void RegisterNativeFree(size_t bytes);

    int32_t GetNotifyNativeInterval()
    {
        return NOTIFY_NATIVE_INTERVAL;
    }

    // CheckGCForNative is called immediately for every NOTIFY_NATIVE_INTERVAL-th allocation
    static constexpr int32_t NOTIFY_NATIVE_INTERVAL = 32;

    // CheckGCForNative is called immediately if the registered size exceeds the following threshold
    static constexpr size_t CHECK_IMMEDIATELY_THRESHOLD = 300000;
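
    // Accounting sketch (illustrative): native wrappers report their memory so
    // that the heuristics above can trigger a collection. A registration larger
    // than CHECK_IMMEDIATELY_THRESHOLD is checked immediately; otherwise every
    // NOTIFY_NATIVE_INTERVAL-th notification triggers the check:
    //     gc->RegisterNativeAllocation(native_buffer_size);  // after allocating the native buffer
    //     ...
    //     gc->RegisterNativeFree(native_buffer_size);        // before releasing it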

    inline bool IsLogDetailedGcInfoEnabled() const
    {
        return gc_settings_.LogDetailedGCInfoEnabled();
    }

    inline GCPhase GetGCPhase() const
    {
        return phase_;
    }

    inline bool IsGCRunning()
    {
        // Atomic with seq_cst order reason: data race with gc_running_ with requirement for sequentially consistent
        // order where threads observe all modifications in the same order
        return gc_running_.load(std::memory_order_seq_cst);
    }

    void PreStartup();

    InternalAllocatorPtr GetInternalAllocator() const
    {
        return internal_allocator_;
    }

    /**
     * Enqueue all references in the ReferenceQueue. Should be done after GC to avoid a deadlock (lock in
     * ReferenceQueue.class)
     */
    void EnqueueReferences();

    /**
     * Process all references which the GC found during the marking phase.
     */
    void ProcessReferences(GCPhase gc_phase, const GCTask &task, const ReferenceClearPredicateT &pred);

    size_t GetNativeBytesRegistered()
    {
        // Atomic with relaxed order reason: data race with native_bytes_registered_ with no synchronization or
        // ordering constraints imposed on other reads or writes
        return native_bytes_registered_.load(std::memory_order_relaxed);
    }

    virtual void SetPandaVM(PandaVM *vm);

    PandaVM *GetPandaVm() const
    {
        return vm_;
    }

    virtual void PreZygoteFork()
    {
        JoinWorker();
    }

    virtual void PostZygoteFork()
    {
        CreateWorker();
    }

    virtual void OnThreadTerminate([[maybe_unused]] ManagedThread *thread) {}

    void SetCanAddGCTask(bool can_add_task)
    {
        // Atomic with relaxed order reason: data race with can_add_gc_task_ with no synchronization or ordering
        // constraints imposed on other reads or writes
        can_add_gc_task_.store(can_add_task, std::memory_order_relaxed);
    }

    GCExtensionData *GetExtensionData() const
    {
        return extension_data_;
    }

    void SetExtensionData(GCExtensionData *data)
    {
        extension_data_ = data;
    }

    virtual void PostForkCallback() {}

    /**
     * Check if the object addr is in the GC sweep range
     */
    virtual bool InGCSweepRange([[maybe_unused]] const ObjectHeader *obj) const
    {
        return true;
    }

    virtual CardTable *GetCardTable()
    {
        return nullptr;
    }

    /**
     * Called from the GCWorker thread to assign thread-specific data
     */
    virtual bool InitWorker(void **worker_data)
    {
        *worker_data = nullptr;
        return true;
    }

    /**
     * Called from the GCWorker thread to destroy thread-specific data
     */
    virtual void DestroyWorker([[maybe_unused]] void *worker_data) {}

    /**
     * Process a task sent to the GC workers thread.
     */
    virtual void WorkerTaskProcessing([[maybe_unused]] GCWorkersTask *task, [[maybe_unused]] void *worker_data)
    {
        LOG(FATAL, GC) << "Unimplemented method";
    }

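    // Worker lifecycle sketch (illustrative): each GC worker thread calls
    // InitWorker() once at startup, WorkerTaskProcessing() for every incoming
    // task, and DestroyWorker() at shutdown. `NextTask()` is hypothetical:
    //     void *worker_data = nullptr;
    //     gc->InitWorker(&worker_data);
    //     while (GCWorkersTask *task = NextTask()) {
    //         gc->WorkerTaskProcessing(task, worker_data);
    //     }
    //     gc->DestroyWorker(worker_data);
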
    virtual bool IsMutatorAllowed()
    {
        return false;
    }

    /**
     * Return true if ref is an instance of Reference or its ancestor, false otherwise
     */
    bool IsReference(const BaseClass *cls, const ObjectHeader *ref, const ReferenceCheckPredicateT &pred);

    void ProcessReference(GCMarkingStackType *objects_stack, const BaseClass *cls, const ObjectHeader *ref,
                          const ReferenceProcessPredicateT &pred);

    ALWAYS_INLINE ObjectAllocatorBase *GetObjectAllocator() const
    {
        return object_allocator_;
    }

    // Called if we fail to change the state from idle to running
    virtual void OnWaitForIdleFail();

    virtual void PendingGC() {}

    /**
     * Check if the object is marked for GC (alive)
     * @param object
     * @return true if the object is marked for GC
     */
    virtual bool IsMarked(const ObjectHeader *object) const = 0;

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param object_header
     * @return true if the object's old state was not marked
     */
    virtual bool MarkObjectIfNotMarked(ObjectHeader *object_header);

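    // Typical marking-loop usage (illustrative sketch): push an object for
    // further scanning only if this call actually marked it, so every object
    // is scanned once. The push API shown here is hypothetical:
    //     if (gc->MarkObjectIfNotMarked(obj)) {
    //         objects_stack->PushToStack(RootType::ROOT_VM, obj);
    //     }
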
    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set the GC bit to 1.
     * @param object_header
     */
    virtual void MarkObject(ObjectHeader *object_header) = 0;

    /**
     * Add a reference for later processing in the marking phase
     * @param object - object from which we start to mark
     */
    void AddReference(ObjectHeader *from_object, ObjectHeader *object);

    inline void SetGCPhase(GCPhase gc_phase)
    {
        phase_ = gc_phase;
    }

protected:
    /**
     * \brief Runs all phases
     */
    void RunPhases(GCTask &task);

    /**
     * Add a task to the GC queue to be run by the GC thread (or run in place)
     */
    void AddGCTask(bool is_managed, PandaUniquePtr<GCTask> task, bool triggered_by_threshold);

    virtual void InitializeImpl() = 0;
    virtual void PreRunPhasesImpl() = 0;
    virtual void RunPhasesImpl(GCTask &task) = 0;
    virtual void PreStartupImp() {}

    inline bool IsTracingEnabled() const
    {
        return gc_settings_.IsGcEnableTracing();
    }

    inline void BeginTracePoint(const PandaString &trace_point_name) const
    {
        if (IsTracingEnabled()) {
            trace::BeginTracePoint(trace_point_name.c_str());
        }
    }

    inline void EndTracePoint() const
    {
        if (IsTracingEnabled()) {
            trace::EndTracePoint();
        }
    }

    virtual void VisitRoots(const GCRootVisitor &gc_root_visitor, VisitGCRootFlags flags) = 0;
    virtual void VisitClassRoots(const GCRootVisitor &gc_root_visitor) = 0;
    virtual void VisitCardTableRoots(CardTable *card_table, const GCRootVisitor &gc_root_visitor,
                                     const MemRangeChecker &range_checker, const ObjectChecker &range_object_checker,
                                     const ObjectChecker &from_object_checker, uint32_t processed_flag) = 0;

    inline bool CASGCPhase(GCPhase expected, GCPhase set)
    {
        return phase_.compare_exchange_strong(expected, set);
    }

    GCInstanceStats *GetStats()
    {
        return &instance_stats_;
    }

    inline void SetType(GCType gc_type)
    {
        gc_type_ = gc_type;
    }

    inline void SetTLABsSupported()
    {
        tlabs_supported_ = true;
    }

    void SetGCBarrierSet(GCBarrierSet *barrier_set)
    {
        ASSERT(gc_barrier_set_ == nullptr);
        gc_barrier_set_ = barrier_set;
    }

    void SetWorkersPool(GCWorkersThreadPool *thread_pool)
    {
        ASSERT(workers_pool_ == nullptr);
        workers_pool_ = thread_pool;
    }

    void ClearWorkersPool()
    {
        workers_pool_ = nullptr;
    }

    /**
     * Mark all references which were added by the AddReference method
     */
    virtual void MarkReferences(GCMarkingStackType *references, GCPhase gc_phase) = 0;

    friend class HeapRootVisitor;

    virtual void UpdateRefsToMovedObjectsInPygoteSpace() = 0;
    /**
     * Update all refs to moved objects
     */
    virtual void CommonUpdateRefsToMovedObjects() = 0;

    virtual void UpdateVmRefs() = 0;

    virtual void UpdateGlobalObjectStorage() = 0;

    virtual void UpdateClassLinkerContextRoots() = 0;

    void UpdateRefsInVRegs(ManagedThread *thread);

    const ObjectHeader *PopObjectFromStack(GCMarkingStackType *objects_stack);

    Timing *GetTiming()
    {
        return &timing_;
    }

    template <GCScopeType gc_scope_type>
    friend class GCScope;

    void SetForwardAddress(ObjectHeader *src, ObjectHeader *dst);

    // A vector is used here because we can add some references during young-gc and get new refs during old-gc;
    // this is possible if we run two GCs for one safepoint.
    // The max length of this vector is 2.
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    PandaVector<panda::mem::Reference *> *cleared_references_ GUARDED_BY(cleared_references_lock_) {nullptr};

    os::memory::Mutex *cleared_references_lock_ {nullptr};  // NOLINT(misc-non-private-member-variables-in-classes)

    std::atomic<size_t> gc_counter_ {0};  // NOLINT(misc-non-private-member-variables-in-classes)
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    std::atomic<GCTaskCause> last_cause_ {GCTaskCause::INVALID_CAUSE};

    const GCSettings *GetSettings() const
    {
        return &gc_settings_;
    }

    const ReferenceProcessor *GetReferenceProcessor() const
    {
        return reference_processor_;
    }

    bool IsWorkerThreadsExist() const
    {
        return gc_settings_.GCWorkersCount() != 0;
    }

    void EnableWorkerThreads();
    void DisableWorkerThreads();

    /**
     * @return true if GC can work in concurrent mode
     */
    bool IsConcurrencyAllowed() const
    {
        return gc_settings_.IsConcurrencyEnabled();
    }

    Logger::Buffer GetLogPrefix() const;

    void FireGCPhaseStarted(GCPhase phase);
    void FireGCPhaseFinished(GCPhase phase);

    void SetFullGC(bool value);

    Timing timing_;  // NOLINT(misc-non-private-member-variables-in-classes)

    PandaVector<std::pair<PandaString, uint64_t>>
        footprint_list_;  // NOLINT(misc-non-private-member-variables-in-classes)
private:
    /**
     * Entrypoint for the GC worker thread
     * @param gc pointer to the GC structure
     * @param vm pointer to the VM structure
     */
    static void GCWorkerEntry(GC *gc, PandaVM *vm);

    void JoinWorker();
    void CreateWorker();

    /**
     * Move small objects to the pygote space at the first pygote fork
     */
    void MoveObjectsToPygoteSpace();

    size_t GetNativeBytesFromMallinfoAndRegister() const;
    virtual void ClearLocalInternalAllocatorPools() = 0;
    virtual void UpdateThreadLocals() = 0;
    virtual size_t VerifyHeap() = 0;
    NativeGcTriggerType GetNativeGcTriggerType();

    volatile std::atomic<GCPhase> phase_ {GCPhase::GC_PHASE_IDLE};
    GCType gc_type_ {GCType::INVALID_GC};
    GCSettings gc_settings_;
    PandaVector<GCListener *> *gc_listeners_ptr_ {nullptr};
    GCBarrierSet *gc_barrier_set_ {nullptr};
    ObjectAllocatorBase *object_allocator_ {nullptr};
    InternalAllocatorPtr internal_allocator_ {nullptr};
    GCInstanceStats instance_stats_;

    // Additional NativeGC support
    std::atomic<size_t> native_bytes_registered_ = 0;
    std::atomic<size_t> native_objects_notified_ = 0;

    ReferenceProcessor *reference_processor_ {nullptr};
    std::atomic_bool allow_soft_reference_processing_ = false;

    GCQueueInterface *gc_queue_ = nullptr;
    std::thread *worker_ = nullptr;
    std::atomic_bool gc_running_ = false;
    std::atomic<bool> can_add_gc_task_ = true;
    bool tlabs_supported_ = false;

    // Additional data for extensions
    GCExtensionData *extension_data_ {nullptr};

    GCWorkersThreadPool *workers_pool_ {nullptr};
    class PostForkGCTask;

    friend class ecmascript::EcmaReferenceProcessor;
    friend class panda::mem::test::MemStatsGenGCTest;
    friend class panda::mem::test::ReferenceStorageTest;
    friend class panda::mem::test::RemSetTest;
    friend class GCScopedPhase;
    friend class GlobalObjectStorage;
    // TODO(maksenov): Avoid using specific ObjectHelpers class here
    friend class GCDynamicObjectHelpers;
    friend class GCStaticObjectHelpers;
    friend class G1GCTest;
    friend class GCTestLog;

    void TriggerGCForNative();
    size_t SimpleNativeAllocationGcWatermark();
    /**
     * Waits until the current GC task (if any) is processed
     */
    void WaitForIdleGC() NO_THREAD_SAFETY_ANALYSIS;

    friend class ConcurrentScope;

    PandaVM *vm_ {nullptr};
    std::atomic<bool> is_full_gc_ {false};
};

// TODO(dtrubenkov): move configs to a more appropriate place
template <MTModeT MTMode>
class AllocConfig<GCType::STW_GC, MTMode> {
public:
    using ObjectAllocatorType = ObjectAllocatorNoGen<MTMode>;
    using CodeAllocatorType = CodeAllocator;
};

template <MTModeT MTMode>
class AllocConfig<GCType::EPSILON_GC, MTMode> {
public:
    using ObjectAllocatorType = ObjectAllocatorNoGen<MTMode>;
    using CodeAllocatorType = CodeAllocator;
};

template <MTModeT MTMode>
class AllocConfig<GCType::GEN_GC, MTMode> {
public:
    using ObjectAllocatorType = ObjectAllocatorGen<MTMode>;
    using CodeAllocatorType = CodeAllocator;
};

template <MTModeT MTMode>
class AllocConfig<GCType::HYBRID_GC, MTMode> {
public:
    using ObjectAllocatorType = HybridObjectAllocator;
    using CodeAllocatorType = CodeAllocator;
};

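// Compile-time resolution sketch (illustrative): client code obtains the
// allocator types through the specializations above. `GenGCObjectAllocatorT`
// is a hypothetical alias, not part of the runtime:
template <MTModeT MTMode>
using GenGCObjectAllocatorT = typename AllocConfig<GCType::GEN_GC, MTMode>::ObjectAllocatorType;
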
/**
 * \brief Create a GC with the given \param gc_type
 * @param gc_type - type of GC to create
 * @return pointer to the created GC on success, nullptr on failure
 */
template <class LanguageConfig>
GC *CreateGC(GCType gc_type, ObjectAllocatorBase *object_allocator, const GCSettings &settings);

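// Usage sketch (illustrative): the runtime picks the collector from its options
// and instantiates it for a concrete language config (`LanguageConfig` below
// stands for whatever config type the caller is parameterized over):
//     GC *gc = CreateGC<LanguageConfig>(GCType::GEN_GC, object_allocator, settings);
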
/**
 * Enable concurrent mode. Should be used only from STW code.
 */
class ConcurrentScope final {
public:
    explicit ConcurrentScope(GC *gc, bool auto_start = true);
    NO_COPY_SEMANTIC(ConcurrentScope);
    NO_MOVE_SEMANTIC(ConcurrentScope);
    ~ConcurrentScope();
    void Start();

private:
    GC *gc_;
    bool started_ = false;
};
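
// Usage sketch (illustrative, not part of this header): wrap the concurrent
// part of a stop-the-world collection in a ConcurrentScope so mutator threads
// may run while it executes. `GCImpl` and `DoConcurrentMark` are hypothetical:
//     void GCImpl::RunConcurrentMark()
//     {
//         ConcurrentScope concurrent_scope(this);  // starts concurrent mode (auto_start = true)
//         DoConcurrentMark();
//     }  // the destructor ends concurrent mode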

}  // namespace panda::mem

#endif  // PANDA_RUNTIME_MEM_GC_GC_H