/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_GC_G1_G1_GC_H
#define PANDA_RUNTIME_MEM_GC_G1_G1_GC_H

#include <functional>

#include "runtime/mem/gc/card_table.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "runtime/mem/gc/g1/g1-allocator.h"
#include "runtime/mem/gc/g1/g1-marker.h"
#include "runtime/mem/gc/g1/collection_set.h"
#include "runtime/mem/gc/generational-gc-base.h"
#include "runtime/mem/heap_verifier.h"
#include "runtime/mem/gc/g1/g1_pause_tracker.h"
#include "runtime/mem/gc/g1/g1_analytics.h"
#include "runtime/mem/gc/g1/update_remset_worker.h"
#include "runtime/mem/gc/g1/object_ref.h"
#include "runtime/mem/gc/g1/g1-evacuate-regions-task.h"
#include "runtime/mem/gc/g1/gc_evacuate_regions_task_stack.h"

namespace ark {
class ManagedThread;
}  // namespace ark
namespace ark::mem {

template <typename LanguageConfig>
class G1EvacuateRegionsWorkerState;

/// @brief Class for collecting reference information for rem-sets in G1 GC
class RefInfo {
public:
    RefInfo() = default;

    RefInfo(ObjectHeader *object, uint32_t refOffset) : object_(object), refOffset_(refOffset) {}

    ~RefInfo() = default;

    ObjectHeader *GetObject() const
    {
        return object_;
    }

    uint32_t GetReferenceOffset() const
    {
        return refOffset_;
    }

    DEFAULT_COPY_SEMANTIC(RefInfo);
    DEFAULT_MOVE_SEMANTIC(RefInfo);

private:
    ObjectHeader *object_;
    uint32_t refOffset_;
};

/// @brief G1-like GC
template <class LanguageConfig>
class G1GC : public GenerationalGC<LanguageConfig> {
    using RefVector = PandaVector<RefInfo>;
    using ReferenceCheckPredicateT = typename GC::ReferenceCheckPredicateT;
    using MemRangeRefsChecker = std::function<bool(Region *, const MemRange &)>;
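    // Storage for objects moved during collection: the inner containers are PandaVectors
    // when VECTOR == true (instantiated with FULL_GC, see UpdateRefsAndClear below),
    // PandaDeques otherwise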
    template <bool VECTOR>
    using MovedObjectsContainer = std::conditional_t<VECTOR, PandaVector<PandaVector<ObjectHeader *> *>,
                                                     PandaVector<PandaDeque<ObjectHeader *> *>>;

public:
    explicit G1GC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings);

    ~G1GC() override;

    void StopGC() override
    {
        GC::StopGC();
        // GC uses updateRemsetWorker_, so stop GC before destroying the worker
        updateRemsetWorker_->DestroyWorker();
    }

    NO_MOVE_SEMANTIC(G1GC);
    NO_COPY_SEMANTIC(G1GC);

    void InitGCBits(ark::ObjectHeader *objHeader) override;

    void InitGCBitsForAllocationInTLAB(ark::ObjectHeader *object) override;

    bool IsPinningSupported() const final
    {
        // G1 GC supports region pinning, so G1 can pin objects
        return true;
    }

    void WorkerTaskProcessing(GCWorkersTask *task, void *workerData) override;

    void MarkReferences(GCMarkingStackType *references, GCPhase gcPhase) override;

    void MarkObject(ObjectHeader *object) override;

    bool MarkObjectIfNotMarked(ObjectHeader *object) override;

    bool InGCSweepRange(const ObjectHeader *object) const override;

    void OnThreadTerminate(ManagedThread *thread, mem::BuffersKeepingFlag keepBuffers) override;
    void OnThreadCreate(ManagedThread *thread) override;

    void PreZygoteFork() override;
    void PostZygoteFork() override;

    void OnWaitForIdleFail() override;

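    // Counterpart of StopGC: the update-remset worker must be created before GC starts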
    void StartGC() override
    {
        updateRemsetWorker_->CreateWorker();
        GC::StartGC();
    }

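    /// Check whether any reference cached from remsets points to obj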
    bool HasRefFromRemset(ObjectHeader *obj)
    {
        for (auto &refVector : uniqueRefsFromRemsets_) {
            auto it = std::find_if(refVector->cbegin(), refVector->cend(),
                                   [obj](auto ref) { return ref.GetObject() == obj; });
            if (it != refVector->cend()) {
                return true;
            }
        }
        return false;
    }

    void PostponeGCStart() override;
    void PostponeGCEnd() override;
    bool IsPostponeGCSupported() const override;

    void StartConcurrentScopeRoutine() const override;
    void EndConcurrentScopeRoutine() const override;

    void ComputeNewSize() override;
    bool Trigger(PandaUniquePtr<GCTask> task) override;
    void EvacuateStartingWith(void *ref) override;

protected:
    ALWAYS_INLINE ObjectAllocatorG1<LanguageConfig::MT_MODE> *GetG1ObjectAllocator() const
    {
        return static_cast<ObjectAllocatorG1<LanguageConfig::MT_MODE> *>(this->GetObjectAllocator());
    }

    // NOLINTBEGIN(misc-non-private-member-variables-in-classes)
    /// Queue with updated refs info
    GCG1BarrierSet::ThreadLocalCardQueues *updatedRefsQueue_ {nullptr};
    GCG1BarrierSet::ThreadLocalCardQueues *updatedRefsQueueTemp_ {nullptr};
    os::memory::Mutex queueLock_;
    os::memory::Mutex gcWorkerQueueLock_;
    // NOLINTEND(misc-non-private-member-variables-in-classes)

private:
    using Ref = typename ObjectReference<LanguageConfig::LANG_TYPE>::Type;

    void CreateUpdateRemsetWorker();
    void ProcessDirtyCards();
    bool HaveGarbageRegions();
    size_t GetOldCollectionSetCandidatesNumber();

    template <RegionFlag REGION_TYPE, bool FULL_GC>
    void DoRegionCompacting(Region *region, bool useGcWorkers,
                            PandaVector<PandaVector<ObjectHeader *> *> *movedObjectsVector);

    template <bool ATOMIC, bool CONCURRENTLY>
    void CollectNonRegularObjects();

    template <bool ATOMIC, bool CONCURRENTLY>
    void CollectEmptyRegions(GCTask &task, PandaVector<Region *> *emptyTenuredRegions);

    template <bool ATOMIC, bool CONCURRENTLY>
    void ClearEmptyTenuredMovableRegions(PandaVector<Region *> *emptyTenuredRegions);

    bool NeedToPromote(const Region *region) const;

    template <bool ATOMIC, RegionFlag REGION_TYPE, bool FULL_GC>
    void RegionCompactingImpl(Region *region, const ObjectVisitor &movedObjectSaver);

    template <bool ATOMIC, bool FULL_GC>
    void RegionPromotionImpl(Region *region, const ObjectVisitor &movedObjectSaver);

    // Iterate over all cross-region references in memRange and pass them to refsHandler
    template <typename Handler>
    void IterateOverRefsInMemRange(const MemRange &memRange, Region *region, Handler &refsHandler);

    template <typename Visitor>
    void CacheRefsFromDirtyCards(GlobalRemSet &globalRemSet, Visitor visitor);

    void InitializeImpl() override;

    bool NeedFullGC(const ark::GCTask &task);

    bool NeedToRunGC(const ark::GCTask &task);

    void RunPhasesImpl(GCTask &task) override;

    void RunFullGC(ark::GCTask &task);
    void TryRunMixedGC(ark::GCTask &task);
    void CollectAndMoveTenuredRegions(const CollectionSet &collectionSet);
    void CollectAndMoveYoungRegions(const CollectionSet &collectionSet);

    void RunMixedGC(ark::GCTask &task, const CollectionSet &collectionSet);

    /// Determine whether GC needs to run concurrent mark or mixed GC
    bool ScheduleMixedGCAndConcurrentMark(ark::GCTask &task);

    /// Start concurrent mark
    void RunConcurrentMark(ark::GCTask &task);

    void RunPhasesForRegions([[maybe_unused]] ark::GCTask &task, const CollectionSet &collectibleRegions);

    void PreStartupImp() override;

    void VisitCard(CardTable::CardPtr card, const ObjectVisitor &objectVisitor, const CardVisitor &cardVisitor);

    /// GC for young generation. Runs with STW.
    void RunGC(GCTask &task, const CollectionSet &collectibleRegions);

    /**
     * Return true if garbage can be collected in a single pass (the VM supports it, there are no pinned objects,
     * GC is not postponed, etc.), false otherwise
     */
    bool SinglePassCompactionAvailable();
    void CollectInSinglePass(const GCTask &task);
    void EvacuateCollectionSet(const RemSet<> &remset);
    void MergeRemSet(RemSet<> *remset);
    void HandleReferences(const GCTask &task);
    void ResetRegionAfterMixedGC();

    /// GC for tenured generation.
    void RunTenuredGC(const GCTask &task);

    /**
     * Mark preprocessing that calculates live bytes in the object's region
     * @see MarkStackCond
     *
     * @param object marked object from marking-stack
     */
    static void CalcLiveBytesMarkPreprocess(const ObjectHeader *object, BaseClass *baseKlass);

    /**
     * Mark preprocessing that calculates live bytes in the object's region, not atomically
     * @see ConcurrentMarkImpl
     *
     * @param object marked object from marking-stack
     */
    static void CalcLiveBytesNotAtomicallyMarkPreprocess(const ObjectHeader *object, BaseClass *baseKlass);

    /// Caches refs from remset and marks objects in collection set (young-generation + maybe some tenured regions).
    void MixedMarkAndCacheRefs(const GCTask &task, const CollectionSet &collectibleRegions);

    GCRootVisitor CreateGCRootVisitorForMixedMark(GCMarkingStackType &objectsStack);

    /**
     * Mark roots and add them to the stack
     * @param objectsStack stack to push marked roots onto
     * @param visitCardTableRoots whether to visit card table roots
     * @param flags which root types to visit
     */
    void MarkRoots(GCMarkingStackType *objectsStack, CardTableVisitFlag visitCardTableRoots,
                   VisitGCRootFlags flags = VisitGCRootFlags::ACCESS_ROOT_ALL);

    /**
     * Initial-mark roots and fill the stack with the first level of objects reachable from them.
     * Runs in STW.
     * @param objectsStack stack for marked objects
     */
    void InitialMark(GCMarkingStackType *objectsStack);

    void MarkStackMixed(GCMarkingStackType *stack);

    void MarkStackFull(GCMarkingStackType *stack);

    bool IsInCollectionSet(ObjectHeader *object);

    template <bool FULL_GC>
    void UpdateRefsAndClear(const CollectionSet &collectionSet, MovedObjectsContainer<FULL_GC> *movedObjectsContainer,
                            PandaVector<PandaVector<ObjectHeader *> *> *movedObjectsVector,
                            HeapVerifierIntoGC<LanguageConfig> *collectVerifier);

    /**
     * Collect dead objects in young generation and move survivors
     * @return true if moving succeeded, false otherwise
     */
    template <bool FULL_GC>
    bool CollectAndMove(const CollectionSet &collectionSet);

    /**
     * Collect verification info for the CollectAndMove phase
     * @param collectionSet collection set for the current phase
     * @return instance of the verifier used to check the updated references
     */
    [[nodiscard]] HeapVerifierIntoGC<LanguageConfig> CollectVerificationInfo(const CollectionSet &collectionSet);

    /**
     * Verify updated references
     * @param collectVerifier instance of the verifier that was obtained before references were updated
     * @param collectionSet collection set for the current phase
     *
     * @see CollectVerificationInfo
     * @see UpdateRefsToMovedObjects
     */
    void VerifyCollectAndMove(HeapVerifierIntoGC<LanguageConfig> &&collectVerifier, const CollectionSet &collectionSet);

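    /// Create a reference updater: UpdateRemsetRefUpdater for full GC (NEED_LOCK selects the
    /// locking flavor), EnqueueRemsetRefUpdater otherwise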
    template <bool FULL_GC, bool NEED_LOCK>
    std::conditional_t<FULL_GC, UpdateRemsetRefUpdater<LanguageConfig, NEED_LOCK>,
                       EnqueueRemsetRefUpdater<LanguageConfig>>
    CreateRefUpdater(GCG1BarrierSet::ThreadLocalCardQueues *updatedRefQueue) const;

    template <class ObjectsContainer>
    void ProcessMovedObjects(ObjectsContainer *movedObjects);

    /// Update refs to objects which were moved during garbage collection
    template <bool FULL_GC, bool ENABLE_WORKERS, class Visitor>
    void UpdateMovedObjectsReferences(MovedObjectsContainer<FULL_GC> *movedObjectsContainer, const Visitor &refUpdater);

    /// Update all refs to moved objects
    template <bool FULL_GC, bool USE_WORKERS>
    void UpdateRefsToMovedObjects(MovedObjectsContainer<FULL_GC> *movedObjectsContainer);

    void Sweep();

    bool IsMarked(const ObjectHeader *object) const override;
    bool IsMarkedEx(const ObjectHeader *object) const override;

    /// Start the on-pause marking process
    void FullMarking(ark::GCTask &task);

    /**
     * Mark all objects on pause
     * @param task gc task for the current GC
     * @param objectsStack stack for marked objects
     * @param useGcWorkers whether to mark in parallel
     */
    void OnPauseMark(GCTask &task, GCMarkingStackType *objectsStack, bool useGcWorkers);

    /// Start the concurrent marking process
    void ConcurrentMarking(ark::GCTask &task);

    /// Iterate over roots and mark them concurrently
    template <bool PROCESS_WEAK_REFS>
    NO_THREAD_SAFETY_ANALYSIS void ConcurrentMarkImpl(GCMarkingStackType *objectsStack);

    void PauseTimeGoalDelay();

    void InitialMark(GCMarkingStackType &markingStack);

    /*
     * Mark the heap in concurrent mode and calculate live bytes
     */
    template <bool PROCESS_WEAK_REFS>
    void ConcurrentMark(GCMarkingStackType *objectsStack);

    /// Re-marks objects after concurrent marking and actualizes information about live bytes
    void Remark(ark::GCTask const &task);

    /// Sweep VM refs for non-regular (humongous + nonmovable) objects
    void SweepNonRegularVmRefs();

    void SweepRegularVmRefs();

    /// Return collectible regions
    CollectionSet GetCollectibleRegions(ark::GCTask const &task, bool isMixed);
    template <typename Predicate>
    void DrainOldRegions(CollectionSet &collectionSet, Predicate pred);
    void AddOldRegionsMaxAllowed(CollectionSet &collectionSet);
    void AddOldRegionsAccordingPauseTimeGoal(CollectionSet &collectionSet);
    uint64_t AddMoreOldRegionsAccordingPauseTimeGoal(CollectionSet &collectionSet, uint64_t gcPauseTimeBudget);
    void ReleasePagesInFreePools();

    CollectionSet GetFullCollectionSet();

    void UpdateCollectionSet(const CollectionSet &collectibleRegions);

    /// Interrupt the release-pages process if it is running
    void InterruptReleasePagesIfNeeded();

    /**
     * Start the release-pages process if the current status of releasePagesInterruptFlag_ equals oldStatus
     * @param oldStatus expected status of releasePagesInterruptFlag_
     */
    void StartReleasePagesIfNeeded(ReleasePagesStatus oldStatus);

    /// Estimate whether tenured space has enough room for objects from the collectible regions
    bool HaveEnoughSpaceToMove(const CollectionSet &collectibleRegions);

    /// Check if we have enough free regions in tenured space
    bool HaveEnoughRegionsToMove(size_t num);

    /**
     * Add data from the SATB buffer to the object stack
     * @param objectStack stack to add data to
     */
    void DrainSatb(GCAdaptiveMarkingStack *objectStack);

    void HandlePendingDirtyCards();

    void ReenqueueDirtyCards();

    void ClearSatb();

    /**
     * Iterate over object references in rem sets.
     * The visitor is a functor which accepts an object (referee), the reference value,
     * the offset of the reference in the object, and a flag telling whether the reference is volatile.
     * The visitor can be called for the references to the collection set in the object or
     * for all references in an object which has at least one reference to the collection set.
     * The decision is implementation dependent.
     */
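    // A minimal sketch of a compatible visitor (parameter names and exact types are illustrative):
    //   [](ObjectHeader *object, ObjectHeader *ref, uint32_t offset, bool isVolatile) { /* ... */ }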
    template <class Visitor>
    void VisitRemSets(const Visitor &visitor);

    template <class Visitor>
    void UpdateRefsFromRemSets(const Visitor &visitor);

    void CacheRefsFromRemsets(const MemRangeRefsChecker &refsChecker);
    bool IsCollectionSetFullyPromoted() const;

    void ClearRefsFromRemsetsCache();

    void ActualizeRemSets();

    bool ShouldRunTenuredGC(const GCTask &task) override;

    void RestoreYoungCards(const CollectionSet &collectionSet);

    void ClearYoungCards(const CollectionSet &collectionSet);

    void ClearTenuredCards(const CollectionSet &collectionSet);

    size_t GetMaxMixedRegionsCount();

    void PrepareYoungRegionsForFullGC(const CollectionSet &collectionSet);

    void RestoreYoungRegionsAfterFullGC(const CollectionSet &collectionSet);

    template <typename Container>
    void BuildCrossYoungRemSets(const Container &young);

    size_t CalculateDesiredEdenLengthByPauseDelay();
    size_t CalculateDesiredEdenLengthByPauseDuration();

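    /// Set the pre-WRB entrypoint in all managed threads: the real barrier function when
    /// ENABLE_BARRIER is true, nullptr otherwise (see currentPreWrbEntrypoint_)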
    template <bool ENABLE_BARRIER>
    void UpdatePreWrbEntrypointInThreads();

    void EnablePreWrbInThreads()
    {
        UpdatePreWrbEntrypointInThreads<true>();
    }

    void DisablePreWrbInThreads()
    {
        UpdatePreWrbEntrypointInThreads<false>();
    }

    void EnsurePreWrbDisabledInThreads();

    size_t GetUniqueRemsetRefsCount() const;

    void ExecuteMarkingTask(GCMarkWorkersTask::StackType *objectsStack);
    void ExecuteRemarkTask(GCMarkWorkersTask::StackType *objectsStack);
    void ExecuteFullMarkingTask(GCMarkWorkersTask::StackType *objectsStack);
    void ExecuteCompactingTask(Region *region, const ObjectVisitor &movedObjectsSaver);
    void ExecuteEnqueueRemsetsTask(GCUpdateRefsWorkersTask<false>::MovedObjectsRange *movedObjectsRange);
    void ExecuteEvacuateTask(typename G1EvacuateRegionsTask<Ref>::StackType *stack);

    void PrintFragmentationMetrics(const char *title);

    G1GCPauseMarker<LanguageConfig> marker_;
    G1GCConcurrentMarker<LanguageConfig> concMarker_;
    G1GCMixedMarker<LanguageConfig> mixedMarker_;
    /// Flag indicating whether we are currently in the concurrent marking phase
    std::atomic<bool> concurrentMarkingFlag_ {false};
    /// Flag indicating whether concurrent marking must be interrupted
    std::atomic<bool> interruptConcurrentFlag_ {false};
    /// Function called in the post WRB
    std::function<void(const void *, const void *)> postQueueFunc_ {nullptr};
    /// Current pre WRB entrypoint: either nullptr or the real function
    ObjRefProcessFunc currentPreWrbEntrypoint_ {nullptr};
    /**
     * After the first processing pass it stores humongous objects only; after they are marked, they are still
     * kept here so that pointers from humongous objects can be updated
     */
    PandaList<PandaVector<ObjectHeader *> *> satbBuffList_ GUARDED_BY(satbAndNewobjBufLock_) {};
    PandaVector<ObjectHeader *> newobjBuffer_ GUARDED_BY(satbAndNewobjBufLock_);
    // The lock guards both variables: satbBuffList_ and newobjBuffer_
    os::memory::Mutex satbAndNewobjBufLock_;
    UpdateRemsetWorker<LanguageConfig> *updateRemsetWorker_ {nullptr};
    GCMarkingStackType concurrentMarkingStack_;
    GCMarkingStackType::MarkedObjects mixedMarkedObjects_;
    std::atomic<bool> isMixedGcRequired_ {false};
    /// Number of tenured regions added at the young GC
    size_t numberOfMixedTenuredRegions_ {2};
    double regionGarbageRateThreshold_ {0.0};
    double g1PromotionRegionAliveRate_ {0.0};
    bool g1TrackFreedObjects_ {false};
    bool isExplicitConcurrentGcEnabled_ {false};
    // There may be some regions with pinned objects that GC cannot collect
    PandaVector<std::pair<uint32_t, Region *>> topGarbageRegions_ {};
    CollectionSet collectionSet_;
    // Max size of the uniqueRefsFromRemsets_ buffer. It should be enough to store
    // almost all references to the collection set.
    // Still, there may be humongous arrays which contain a lot of references to the collection set.
    // For such objects GC creates a new RefVector, which is cleared at the end of the collection.
    static constexpr size_t MAX_REFS = 1024;
    // Storage for references from remsets to the collection set.
    // Each list element holds a RefVector twice the size of the previous one (starting from MAX_REFS).
    // Each vector element contains an object from the remset and the offset of
    // the field which refers to the collection set.
    PandaList<RefVector *> uniqueRefsFromRemsets_;
    // Dirty cards which are not fully processed before collection.
    // These cards are processed later.
    PandaUnorderedSet<CardTable::CardPtr> dirtyCards_;
#ifndef NDEBUG
    bool uniqueCardsInitialized_ = false;
#endif  // NDEBUG
    size_t regionSizeBits_;
    G1PauseTracker g1PauseTracker_;
    os::memory::Mutex concurrentMarkMutex_;
    os::memory::Mutex mixedMarkedObjectsMutex_;
    os::memory::ConditionVariable concurrentMarkCondVar_;
    G1Analytics analytics_;

    /// Flag indicating whether releasing physical pages to the OS must be interrupted
    std::atomic<ReleasePagesStatus> releasePagesInterruptFlag_ {ReleasePagesStatus::FINISHED};

    size_t copiedBytesYoung_ {0};
    size_t copiedBytesOld_ {0};
    bool singlePassCompactionEnabled_ {false};

    template <class>
    friend class RefCacheBuilder;
    friend class G1GCTest;
    friend class RemSetChecker;
    template <class>
    friend class G1EvacuateRegionsWorkerState;
};

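/// Allocator bindings for the G1 GC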
template <MTModeT MT_MODE>
class AllocConfig<GCType::G1_GC, MT_MODE> {
public:
    using ObjectAllocatorType = ObjectAllocatorG1<MT_MODE>;
    using CodeAllocatorType = CodeAllocator;
};

}  // namespace ark::mem

#endif  // PANDA_RUNTIME_MEM_GC_G1_G1_GC_H