/**
 * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_GC_G1_G1_GC_H
#define PANDA_RUNTIME_MEM_GC_G1_G1_GC_H

#include <functional>

#include "runtime/mem/gc/card_table.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "runtime/mem/gc/g1/g1-allocator.h"
#include "runtime/mem/gc/g1/g1-marker.h"
#include "runtime/mem/gc/g1/collection_set.h"
#include "runtime/mem/gc/generational-gc-base.h"
#include "runtime/mem/heap_verifier.h"
#include "runtime/mem/gc/g1/g1_pause_tracker.h"
#include "runtime/mem/gc/g1/g1_analytics.h"
#include "runtime/mem/gc/g1/update_remset_worker.h"
#include "runtime/mem/gc/g1/object_ref.h"
#include "runtime/mem/gc/g1/g1-evacuate-regions-task.h"
#include "runtime/mem/gc/g1/gc_evacuate_regions_task_stack.h"

namespace ark {
class ManagedThread;
}  // namespace ark
namespace ark::mem {

template <typename LanguageConfig>
class G1EvacuateRegionsWorkerState;

/// @brief Class for collecting reference information for rem-sets in the G1 GC
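/// Usage sketch (illustrative only):
/// @code
///     RefInfo ref(obj, offset);  // `obj` contains a reference at `offset`
///     ObjectHeader *holder = ref.GetObject();
///     uint32_t fieldOffset = ref.GetReferenceOffset();
/// @endcode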
class RefInfo {
public:
    RefInfo() = default;

    RefInfo(ObjectHeader *object, uint32_t refOffset) : object_(object), refOffset_(refOffset) {}

    ~RefInfo() = default;

    ObjectHeader *GetObject() const
    {
        return object_;
    }

    uint32_t GetReferenceOffset() const
    {
        return refOffset_;
    }

    DEFAULT_COPY_SEMANTIC(RefInfo);
    DEFAULT_MOVE_SEMANTIC(RefInfo);

private:
    ObjectHeader *object_;
    uint32_t refOffset_;
};

/// @brief G1-like GC
template <class LanguageConfig>
class G1GC : public GenerationalGC<LanguageConfig> {
    using RefVector = PandaVector<RefInfo>;
    using ReferenceCheckPredicateT = typename GC::ReferenceCheckPredicateT;
    using MemRangeRefsChecker = std::function<bool(Region *, const MemRange &)>;
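    // Per-worker containers of moved objects: PandaVector storage when VECTOR is true,
    // PandaDeque storage otherwise (instantiated below with the FULL_GC flag)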
    template <bool VECTOR>
    using MovedObjectsContainer = std::conditional_t<VECTOR, PandaVector<PandaVector<ObjectHeader *> *>,
                                                     PandaVector<PandaDeque<ObjectHeader *> *>>;

public:
    explicit G1GC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings);

    ~G1GC() override;

    void StopGC() override
    {
        GC::StopGC();
        // GC uses updateRemsetWorker_, so we need to stop GC first, before destroying the worker
        updateRemsetWorker_->DestroyWorker();
    }

    NO_MOVE_SEMANTIC(G1GC);
    NO_COPY_SEMANTIC(G1GC);

    void InitGCBits(ark::ObjectHeader *objHeader) override;

    void InitGCBitsForAllocationInTLAB(ark::ObjectHeader *object) override;

    bool IsPinningSupported() const final
    {
        // G1 GC supports region pinning, so G1 can pin objects
        return true;
    }

    void WorkerTaskProcessing(GCWorkersTask *task, void *workerData) override;

    void MarkReferences(GCMarkingStackType *references, GCPhase gcPhase) override;

    void MarkObject(ObjectHeader *object) override;

    bool MarkObjectIfNotMarked(ObjectHeader *object) override;

    void MarkObjectRecursively(ObjectHeader *object);

    bool InGCSweepRange(const ObjectHeader *object) const override;

    void OnThreadTerminate(ManagedThread *thread, mem::BuffersKeepingFlag keepBuffers) override;
    void OnThreadCreate(ManagedThread *thread) override;

    void PreZygoteFork() override;
    void PostZygoteFork() override;

    void OnWaitForIdleFail() override;

    void StartGC() override
    {
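        // Counterpart of StopGC(): the worker is created before GC starts so that
        // remset updates can be processed throughout the GC lifetime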
        updateRemsetWorker_->CreateWorker();
        GC::StartGC();
    }

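    /// Check whether @p obj is referenced by any record cached in uniqueRefsFromRemsets_ (linear scan)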
    bool HasRefFromRemset(ObjectHeader *obj)
    {
        for (auto &refVector : uniqueRefsFromRemsets_) {
            auto it = std::find_if(refVector->cbegin(), refVector->cend(),
                                   [obj](auto ref) { return ref.GetObject() == obj; });
            if (it != refVector->cend()) {
                return true;
            }
        }
        return false;
    }

    void PostponeGCStart() override;
    void PostponeGCEnd() override;
    bool IsPostponeGCSupported() const override;

    void StartConcurrentScopeRoutine() const override;
    void EndConcurrentScopeRoutine() const override;

    void ComputeNewSize() override;
    bool Trigger(PandaUniquePtr<GCTask> task) override;
    void EvacuateStartingWith(void *ref) override;
    void SetExtensionData(GCExtensionData *data) override;

    void PostForkCallback(size_t restoreLimit) override;

protected:
    ALWAYS_INLINE ObjectAllocatorG1<LanguageConfig::MT_MODE> *GetG1ObjectAllocator() const
    {
        return static_cast<ObjectAllocatorG1<LanguageConfig::MT_MODE> *>(this->GetObjectAllocator());
    }

    // NOLINTBEGIN(misc-non-private-member-variables-in-classes)
    /// Queue with updated refs info
    GCG1BarrierSet::ThreadLocalCardQueues *updatedRefsQueue_ {nullptr};
    GCG1BarrierSet::ThreadLocalCardQueues *updatedRefsQueueTemp_ {nullptr};
    os::memory::Mutex queueLock_;
    os::memory::Mutex gcWorkerQueueLock_;
    // NOLINTEND(misc-non-private-member-variables-in-classes)

private:
    using Ref = typename ObjectReference<LanguageConfig::LANG_TYPE>::Type;

    void CreateUpdateRemsetWorker();
    void ProcessDirtyCards();
    bool HaveGarbageRegions();
    size_t GetOldCollectionSetCandidatesNumber();

    template <RegionFlag REGION_TYPE, bool FULL_GC>
    void DoRegionCompacting(Region *region, bool useGcWorkers,
                            PandaVector<PandaVector<ObjectHeader *> *> *movedObjectsVector);

    template <bool ATOMIC, bool CONCURRENTLY>
    void CollectNonRegularObjects();

    template <bool ATOMIC, bool CONCURRENTLY>
    void CollectEmptyRegions(GCTask &task, PandaVector<Region *> *emptyTenuredRegions);

    template <bool ATOMIC, bool CONCURRENTLY>
    void ClearEmptyTenuredMovableRegions(PandaVector<Region *> *emptyTenuredRegions);

    bool NeedToPromote(const Region *region) const;

    template <bool ATOMIC, RegionFlag REGION_TYPE, bool FULL_GC>
    void RegionCompactingImpl(Region *region, const ObjectVisitor &movedObjectSaver);

    template <bool ATOMIC, bool FULL_GC>
    void RegionPromotionImpl(Region *region, const ObjectVisitor &movedObjectSaver);

    // Iterate over cross region references in memRange of the given region using refsHandler
    template <typename Handler>
    void IterateOverRefsInMemRange(const MemRange &memRange, Region *region, Handler &refsHandler);

    template <typename Visitor>
    void CacheRefsFromDirtyCards(GlobalRemSet &globalRemSet, Visitor visitor);

    void InitializeImpl() override;

    bool NeedFullGC(const ark::GCTask &task);

    bool NeedToRunGC(const ark::GCTask &task);

    void StartGCCollection(ark::GCTask &task);

    void RunPhasesImpl(GCTask &task) override;

    void RunFullGC(ark::GCTask &task);
    void TryRunMixedGC(ark::GCTask &task);
    void CollectAndMoveTenuredRegions(const CollectionSet &collectionSet);
    void CollectAndMoveYoungRegions(const CollectionSet &collectionSet);

    void RunMixedGC(ark::GCTask &task, const CollectionSet &collectionSet);

    /// Determine whether GC needs to run concurrent marking or mixed GC
    bool ScheduleMixedGCAndConcurrentMark(ark::GCTask &task);

    /// Start concurrent GC
    template <typename OnPauseMarker, typename ConcurrentMarker>
    void RunConcurrentGC(ark::GCTask &task, OnPauseMarker &pmarker, ConcurrentMarker &cmarker);

    void RunPhasesForRegions([[maybe_unused]] ark::GCTask &task, const CollectionSet &collectibleRegions);

    void PreStartupImp() override;

    size_t AdujustStartupLimit(size_t startupLimit) override;

    void VisitCard(CardTable::CardPtr card, const ObjectVisitor &objectVisitor, const CardVisitor &cardVisitor);

    /// GC for young generation. Runs with STW.
    void RunGC(GCTask &task, const CollectionSet &collectibleRegions);

    /**
     * Return true if garbage can be collected in a single pass (the VM supports it, there are no pinned objects,
     * GC is not postponed, etc.), otherwise false
     */
    bool SinglePassCompactionAvailable();
    void CollectInSinglePass(const GCTask &task);
    void EvacuateCollectionSet(const RemSet<> &remset);
    void MergeRemSet(RemSet<> *remset);
    void HandleReferences(const GCTask &task);
    void ResetRegionAfterMixedGC();

    /// GC for tenured generation.
    void RunTenuredGC(const GCTask &task);

    /**
     * Mark predicate which also calculates live bytes in the region
     * @tparam ATOMICALLY whether live bytes are calculated atomically or non-atomically
     * @param object marked object from the marking stack
     * @param baseKlass class of the passed object from the marking stack
     * @see MarkStack
     * @see ConcurrentMarkImpl
     */
    template <bool ATOMICALLY = true>
    static void CalcLiveBytesMarkPreprocess(const ObjectHeader *object, BaseClass *baseKlass);

    /// Caches refs from remset and marks objects in collection set (young-generation + maybe some tenured regions).
    void MixedMarkAndCacheRefs(const GCTask &task, const CollectionSet &collectibleRegions);

    void FastYoungMark(const CollectionSet &collectibleRegions);

    template <typename Marker, typename Predicate>
    GCRootVisitor CreateGCRootVisitor(GCMarkingStackType &objectsStack, Marker &marker, const Predicate &refPred);

    /**
     * Mark roots and add them to the stack
     * @param objectsStack stack to which marked roots are added
     * @param visitCardTableRoots whether to visit card table roots
     * @param flags which root types to visit
     */
    void MarkRoots(GCMarkingStackType *objectsStack, CardTableVisitFlag visitCardTableRoots,
                   VisitGCRootFlags flags = VisitGCRootFlags::ACCESS_ROOT_ALL);

    /**
     * Initially mark roots and fill the first level of objects reachable from them into the stack.
     * Runs in STW
     * @param markingStack stack to fill
     * @param marker marker to use
     */
    template <bool PROCESS_WEAK_REFS, typename Marker>
    void InitialMark(GCMarkingStackType &markingStack, Marker &marker);

    template <typename Marker>
    void UnmarkAll(Marker &marker);

    void MarkStackMixed(GCMarkingStackType *stack);

    void MarkStackFull(GCMarkingStackType *stack);

    bool IsInCollectionSet(ObjectHeader *object);

    template <bool FULL_GC>
    void UpdateRefsAndClear(const CollectionSet &collectionSet, MovedObjectsContainer<FULL_GC> *movedObjectsContainer,
                            PandaVector<PandaVector<ObjectHeader *> *> *movedObjectsVector,
                            HeapVerifierIntoGC<LanguageConfig> *collectVerifier);

    /**
     * Collect dead objects in young generation and move survivors
     * @return true if moving succeeded, false otherwise
     */
    template <bool FULL_GC>
    bool CollectAndMove(const CollectionSet &collectionSet);

    /**
     * Collect verification info for the CollectAndMove phase
     * @param collectionSet collection set for the current phase
     * @return instance of the verifier to be used to verify updated references
     */
    [[nodiscard]] HeapVerifierIntoGC<LanguageConfig> CollectVerificationInfo(const CollectionSet &collectionSet);

    /**
     * Verify updated references
     * @param collectVerifier instance of the verifier that was obtained before references were updated
     * @param collectionSet collection set for the current phase
     *
     * @see CollectVerificationInfo
     * @see UpdateRefsToMovedObjects
     */
    void VerifyCollectAndMove(HeapVerifierIntoGC<LanguageConfig> &&collectVerifier, const CollectionSet &collectionSet);

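    /// Create a reference updater: for full GC references are updated in place via UpdateRemsetRefUpdater;
    /// otherwise updated references are enqueued into @p updatedRefQueue via EnqueueRemsetRefUpdater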
    template <bool FULL_GC, bool NEED_LOCK>
    std::conditional_t<FULL_GC, UpdateRemsetRefUpdater<LanguageConfig, NEED_LOCK>,
                       EnqueueRemsetRefUpdater<LanguageConfig>>
    CreateRefUpdater(GCG1BarrierSet::ThreadLocalCardQueues *updatedRefQueue) const;

    template <class ObjectsContainer>
    void ProcessMovedObjects(ObjectsContainer *movedObjects);

    /// Update refs to objects which were moved during garbage collection
    template <bool FULL_GC, bool ENABLE_WORKERS, class Visitor>
    void UpdateMovedObjectsReferences(MovedObjectsContainer<FULL_GC> *movedObjectsContainer, const Visitor &refUpdater);

    /// Update all refs to moved objects
    template <bool FULL_GC, bool USE_WORKERS>
    void UpdateRefsToMovedObjects(MovedObjectsContainer<FULL_GC> *movedObjectsContainer);

    bool IsMarked(const ObjectHeader *object) const override;
    bool IsMarkedEx(const ObjectHeader *object) const override;

    /// Start the on-pause marking process
    void FullMarking(ark::GCTask &task);

    /**
     * Mark all objects on pause
     * @param task gc task for the current GC
     * @param objectsStack stack for marked objects
     * @param useGcWorkers whether to mark in parallel
     */
    void OnPauseMark(GCTask &task, GCMarkingStackType *objectsStack, bool useGcWorkers);

    /// Iterate over roots and mark them concurrently
    template <bool PROCESS_WEAK_REFS, bool ATOMICALLY, typename Marker>
    NO_THREAD_SAFETY_ANALYSIS void ConcurrentMarkImpl(GCMarkingStackType *objectsStack, Marker &marker);

    void PauseTimeGoalDelay();

    /// Mark the heap in concurrent mode and calculate live bytes
    template <bool PROCESS_WEAK_REFS, typename Marker>
    void ConcurrentMark(const GCTask &task, GCMarkingStackType *objectsStack, Marker &marker);

    /// Re-marks objects after concurrent marking and actualizes information about live bytes
    template <bool PROCESS_WEAK_REFS, typename Marker>
    void Remark(const GCTask &task, Marker &marker);

    /// Sweep VM refs for non-regular (humongous + nonmovable) objects
    void SweepNonRegularVmRefs();

    void SweepRegularVmRefs();

    void VerifyHeapBeforeConcurrent();

    void ConcurrentSweep(ark::GCTask &task);

    /// Return collectible regions
    CollectionSet GetCollectibleRegions(ark::GCTask const &task, bool isMixed);
    template <typename Predicate>
    void DrainOldRegions(CollectionSet &collectionSet, Predicate pred);
    void AddOldRegionsMaxAllowed(CollectionSet &collectionSet);
    void AddOldRegionsAccordingPauseTimeGoal(CollectionSet &collectionSet);
    uint64_t AddMoreOldRegionsAccordingPauseTimeGoal(CollectionSet &collectionSet, uint64_t gcPauseTimeBudget);
    void ReleasePagesInFreePools();

    CollectionSet GetFullCollectionSet();

    void UpdateCollectionSet(const CollectionSet &collectibleRegions);

    /// Interrupt the release-pages process if it is currently running
    void InterruptReleasePagesIfNeeded();

    /**
     * Start releasing pages if the current status of releasePagesInterruptFlag_ equals oldStatus
     * @param oldStatus expected status of releasePagesInterruptFlag_
     */
    void StartReleasePagesIfNeeded(ReleasePagesStatus oldStatus);

    /// Estimate whether tenured space has enough room for objects from the collectible regions
    bool HaveEnoughSpaceToMove(const CollectionSet &collectibleRegions);

    /// Check if we have enough free regions in tenured space
    bool HaveEnoughRegionsToMove(size_t num);

    /**
     * Add data from the SATB buffer to the object stack
     * @param objectStack stack to add data to
     * @param marker marker to use
     */
    template <typename Marker>
    void DrainSatb(GCAdaptiveMarkingStack *objectStack, Marker &marker);

    void HandlePendingDirtyCards();

    void ReenqueueDirtyCards();

    void ClearSatb();

    /**
     * Iterate over object references in rem sets.
     * The Visitor is a functor which accepts an object (referee), the reference value,
     * the offset of the reference in the object, and a flag indicating whether the reference is volatile.
     * The visitor can be called for the references to the collection set in the object or
     * for all references in an object which has at least one reference to the collection set.
     * The decision is implementation dependent.
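     *
     * A conforming visitor might look like this (illustrative sketch; the exact parameter
     * types are assumptions based on the description above):
     * @code
     *     auto visitor = [](ObjectHeader *object, ObjectHeader *ref, uint32_t offset, bool isVolatile) {
     *         // handle one reference stored in `object` at `offset`
     *     };
     * @endcode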
     */
    template <class Visitor>
    void VisitRemSets(const Visitor &visitor);

    template <class Visitor>
    void UpdateRefsFromRemSets(const Visitor &visitor);

    void CacheRefsFromRemsets(const MemRangeRefsChecker &refsChecker);
    bool IsCollectionSetFullyPromoted() const;

    void ClearRefsFromRemsetsCache();

    void ActualizeRemSets();

    bool ShouldRunTenuredGC(const GCTask &task) override;

    void RestoreYoungCards(const CollectionSet &collectionSet);

    void ClearYoungCards(const CollectionSet &collectionSet);

    void ClearTenuredCards(const CollectionSet &collectionSet);

    size_t GetMaxMixedRegionsCount();

    void PrepareYoungRegionsForFullGC(const CollectionSet &collectionSet);

    void RestoreYoungRegionsAfterFullGC(const CollectionSet &collectionSet);

    template <typename Container>
    void BuildCrossYoungRemSets(const Container &young);

    size_t CalculateDesiredEdenLengthByPauseDelay();
    size_t CalculateDesiredEdenLengthByPauseDuration();

    template <bool ENABLE_BARRIER>
    void UpdatePreWrbEntrypointInThreads();

    void EnablePreWrbInThreads()
    {
        UpdatePreWrbEntrypointInThreads<true>();
    }

    void DisablePreWrbInThreads()
    {
        UpdatePreWrbEntrypointInThreads<false>();
    }

    void EnsurePreWrbDisabledInThreads();

    size_t GetUniqueRemsetRefsCount() const;

    void ExecuteMarkingTask(GCMarkWorkersTask::StackType *objectsStack);
    template <typename Marker>
    void ExecuteRemarkTask(GCMarkWorkersTask::StackType *objectsStack, Marker &marker);
    /**
     * @brief Process a stack which initially contains a single array object.
     * The stack also carries information about the interval of the array
     * that should be traversed in the current task
     * @param objectsStack stack, initially holding one array object
     * @param marker marker to use
     */
    template <typename Marker>
    void ExecuteHugeArrayMarkTask(GCMarkWorkersTask::StackType *objectsStack, Marker &marker);
    void ExecuteFullMarkingTask(GCMarkWorkersTask::StackType *objectsStack);
    void ExecuteCompactingTask(Region *region, const ObjectVisitor &movedObjectsSaver);
    void ExecuteEnqueueRemsetsTask(GCUpdateRefsWorkersTask<false>::MovedObjectsRange *movedObjectsRange);
    void ExecuteEvacuateTask(typename G1EvacuateRegionsTask<Ref>::StackType *stack);

    void PrintFragmentationMetrics(const char *title);

    G1GCMarker<LanguageConfig, true> marker_;
    G1GCMarker<LanguageConfig, false> concMarker_;
    G1GCMixedMarker<LanguageConfig> mixedMarker_;
    XGCMarker<LanguageConfig, true> onPauseXMarker_;
    // NOTE(audovichenko): Do not use atomics in concXMarker_
    XGCMarker<LanguageConfig, true> concXMarker_;
    /// Flag indicating whether we are currently in the concurrent marking phase
    std::atomic<bool> concurrentMarkingFlag_ {false};
    /// Flag indicating whether we need to interrupt concurrent marking
    std::atomic<bool> interruptConcurrentFlag_ {false};
    /// Function called in the post WRB
    std::function<void(const void *, const void *)> postQueueFunc_ {nullptr};
    /// Current pre WRB entrypoint: either nullptr or the real function
    ObjRefProcessFunc currentPreWrbEntrypoint_ {nullptr};
    /**
     * After the first processing it stores only humongous objects; after they are marked,
     * it still stores them for updating pointers from humongous objects
     */
    PandaList<PandaVector<ObjectHeader *> *> satbBuffList_ GUARDED_BY(satbAndNewobjBufLock_) {};
    PandaVector<ObjectHeader *> newobjBuffer_ GUARDED_BY(satbAndNewobjBufLock_);
    // The lock guards both variables: satbBuffList_ and newobjBuffer_
    os::memory::Mutex satbAndNewobjBufLock_;
    UpdateRemsetWorker<LanguageConfig> *updateRemsetWorker_ {nullptr};
    GCMarkingStackType concurrentMarkingStack_;
    GCMarkingStackType::MarkedObjects mixedMarkedObjects_;
    std::atomic<bool> isMixedGcRequired_ {false};
    /// Number of tenured regions added at young GC
    size_t numberOfMixedTenuredRegions_ {2};
    double regionGarbageRateThreshold_ {0.0};
    double g1PromotionRegionAliveRate_ {0.0};
    bool g1TrackFreedObjects_ {false};
    bool isExplicitConcurrentGcEnabled_ {false};
    bool fullCollectionSetPromotion_ {false};
    // There may be regions with pinned objects which GC cannot collect
    PandaVector<std::pair<uint32_t, Region *>> topGarbageRegions_ {};
    CollectionSet collectionSet_;
    // Max size of the uniqueRefsFromRemsets_ buffer. It should be enough to store
    // almost all references to the collection set.
    // However, there may be humongous arrays which contain a lot of references to the collection set.
    // For such objects GC creates a new RefVector, which is cleared at the end of the collection.
    static constexpr size_t MAX_REFS = 1024;
    // Storage for references from remsets to the collection set.
    // List elements hold a RefVector each, with double the size of the previous one (starting from MAX_REFS).
    // Each vector element contains an object from the remset and the offset of
    // the field which refers to the collection set.
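    // E.g. successive RefVector capacities are MAX_REFS, 2 * MAX_REFS, 4 * MAX_REFS, ...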
    PandaList<RefVector *> uniqueRefsFromRemsets_;
    // Dirty cards which are not fully processed before collection.
    // These cards are processed later.
    PandaUnorderedSet<CardTable::CardPtr> dirtyCards_;
#ifndef NDEBUG
    bool uniqueCardsInitialized_ = false;
#endif  // NDEBUG
    size_t regionSizeBits_;
    G1PauseTracker g1PauseTracker_;
    os::memory::Mutex concurrentMarkMutex_;
    os::memory::Mutex mixedMarkedObjectsMutex_;
    os::memory::ConditionVariable concurrentMarkCondVar_;
    G1Analytics analytics_;

    /// Flag indicating whether we need to interrupt releasing physical pages to the OS
    std::atomic<ReleasePagesStatus> releasePagesInterruptFlag_ {ReleasePagesStatus::FINISHED};

    size_t copiedBytesYoung_ {0};
    size_t copiedBytesOld_ {0};
    bool singlePassCompactionEnabled_ {false};

    template <class, bool>
    friend class RefCacheBuilder;
    friend class G1GCTest;
    friend class RemSetChecker;
    template <class>
    friend class G1EvacuateRegionsWorkerState;
};

template <MTModeT MT_MODE>
class AllocConfig<GCType::G1_GC, MT_MODE> {
public:
    using ObjectAllocatorType = ObjectAllocatorG1<MT_MODE>;
    using CodeAllocatorType = CodeAllocator;
};

}  // namespace ark::mem

#endif  // PANDA_RUNTIME_MEM_GC_G1_G1_GC_H