1 /**
2  * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "libpandabase/mem/space.h"
17 #include "runtime/include/language_config.h"
18 #include "runtime/include/class.h"
19 #include "runtime/include/mem/panda_string.h"
20 #include "runtime/include/panda_vm.h"
21 #include "runtime/mem/gc/card_table-inl.h"
22 #include "runtime/mem/gc/dynamic/gc_marker_dynamic-inl.h"
23 #include "runtime/mem/gc/gc.h"
24 #include "runtime/mem/gc/g1/g1-gc.h"
25 #include "runtime/mem/gc/g1/g1-helpers.h"
26 #include "runtime/mem/gc/g1/ref_cache_builder.h"
27 #include "runtime/mem/gc/g1/update_remset_task_queue.h"
28 #include "runtime/mem/gc/g1/update_remset_thread.h"
29 #include "runtime/mem/gc/workers/gc_workers_task_pool.h"
30 #include "runtime/mem/gc/generational-gc-base-inl.h"
31 #include "runtime/mem/gc/static/gc_marker_static-inl.h"
32 #include "runtime/mem/gc/reference-processor/reference_processor.h"
33 #include "runtime/mem/object_helpers-inl.h"
34 #include "runtime/mem/rem_set-inl.h"
35 #include "runtime/include/thread-inl.h"
36 #include "runtime/include/managed_thread.h"
37 #include "runtime/mem/gc/g1/ref_updater.h"
38 #include "runtime/mem/region_space.h"
39 #include "runtime/include/stack_walker-inl.h"
40 #include "runtime/mem/refstorage/global_object_storage.h"
41 #include "runtime/mem/gc/g1/g1-evacuate-regions-worker-state-inl.h"
42 #include "runtime/mem/gc/g1/xgc-extension-data.h"
43 
44 namespace ark::mem {
45 
46 void Unreachable([[maybe_unused]] ObjectHeader *obj)
47 {
48     UNREACHABLE();
49 }
50 
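// Note: CalcLiveBytesMarkPreprocess below is used as a mark-preprocess hook: it adds the aligned size of a
// freshly marked object to the live-bytes counter of the object's region. The ATOMICALLY parameter selects
// an atomic update of that counter (e.g. when several GC workers mark in parallel).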
51 /* static */
52 template <class LanguageConfig>
53 template <bool ATOMICALLY>
54 void G1GC<LanguageConfig>::CalcLiveBytesMarkPreprocess(const ObjectHeader *object, BaseClass *baseKlass)
55 {
56     Region *region = ObjectToRegion(object);
57     size_t objectSize = GetAlignedObjectSize(object->ObjectSize<LanguageConfig::LANG_TYPE>(baseKlass));
58     region->AddLiveBytes<ATOMICALLY>(objectSize);
59 }
60 
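// The constructor below wires up G1-specific state: it allocates the thread-local card queues used by the
// barrier set, prepares the first buffer for unique refs from remsets, and reserves a region up front
// (ReserveRegionIfNeeded), presumably to lower the risk of OOM during collection.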
61 template <class LanguageConfig>
62 G1GC<LanguageConfig>::G1GC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings)
63     : GenerationalGC<LanguageConfig>(objectAllocator, settings),
64       marker_(this),
65       concMarker_(this),
66       mixedMarker_(this),
67       onPauseXMarker_(this, Unreachable),
68       concXMarker_(this, Unreachable),
69       concurrentMarkingStack_(this),
70       numberOfMixedTenuredRegions_(settings.GetG1NumberOfTenuredRegionsAtMixedCollection()),
71       regionGarbageRateThreshold_(settings.G1RegionGarbageRateThreshold()),
72       g1PromotionRegionAliveRate_(settings.G1PromotionRegionAliveRate()),
73       g1TrackFreedObjects_(settings.G1TrackFreedObjects()),
74       isExplicitConcurrentGcEnabled_(settings.IsExplicitConcurrentGcEnabled()),
75       regionSizeBits_(ark::helpers::math::GetIntLog2(this->GetG1ObjectAllocator()->GetRegionSize())),
76       g1PauseTracker_(settings.GetG1GcPauseIntervalInMillis(), settings.GetG1MaxGcPauseInMillis()),
77       analytics_(ark::time::GetCurrentTimeInNanos())
78 {
79     InternalAllocatorPtr allocator = this->GetInternalAllocator();
80     this->SetType(GCType::G1_GC);
81     this->SetTLABsSupported();
82     updatedRefsQueue_ = allocator->New<GCG1BarrierSet::ThreadLocalCardQueues>();
83     updatedRefsQueueTemp_ = allocator->New<GCG1BarrierSet::ThreadLocalCardQueues>();
84     auto *firstRefVector = allocator->New<RefVector>();
85     ASSERT(firstRefVector != nullptr);
86     firstRefVector->reserve(MAX_REFS);
87     uniqueRefsFromRemsets_.push_back(firstRefVector);
88     GetG1ObjectAllocator()->ReserveRegionIfNeeded();
89 }
90 
91 template <class LanguageConfig>
92 G1GC<LanguageConfig>::~G1GC()
93 {
94     InternalAllocatorPtr allocator = this->GetInternalAllocator();
95     {
96         for (auto objVector : satbBuffList_) {
97             allocator->Delete(objVector);
98         }
99     }
100     allocator->Delete(updatedRefsQueue_);
101     allocator->Delete(updatedRefsQueueTemp_);
102     ASSERT(uniqueRefsFromRemsets_.size() == 1);
103     allocator->Delete(uniqueRefsFromRemsets_.front());
104     uniqueRefsFromRemsets_.clear();
105     this->GetInternalAllocator()->Delete(updateRemsetWorker_);
106 }
107 
108 template <class LanguageConfig>
109 void G1GC<LanguageConfig>::InitGCBits(ark::ObjectHeader *objHeader)
110 {
111     // The mutator may create a new object during the concurrent marking phase.
112     // In this case the GC may not mark it (for example, only vregs may contain a reference to the new object)
113     // and may collect it. To avoid such situations, add objects to a special buffer which
114     // will be processed at the remark stage.
115     if (this->GetCardTable()->GetCardPtr(ToUintPtr(objHeader))->IsYoung() ||
116         // Atomic with acquire order reason: read variable modified in GC thread
117         !concurrentMarkingFlag_.load(std::memory_order_acquire)) {
118         return;
119     }
120     os::memory::LockHolder lock(satbAndNewobjBufLock_);
121     newobjBuffer_.push_back(objHeader);
122 }
123 
124 template <class LanguageConfig>
125 void G1GC<LanguageConfig>::PreStartupImp()
126 {
127     GenerationalGC<LanguageConfig>::DisableTenuredGC();
128 }
129 
130 template <class LanguageConfig>
131 size_t G1GC<LanguageConfig>::AdujustStartupLimit(size_t startupLimit)
132 {
133     return GetG1ObjectAllocator()->GetHeapSpace()->UpdateYoungSpaceMaxSize(startupLimit);
134 }
135 
136 template <class LanguageConfig>
137 void G1GC<LanguageConfig>::PostForkCallback(size_t restoreLimit)
138 {
139     GenerationalGC<LanguageConfig>::RestoreTenuredGC();
140     GetG1ObjectAllocator()->GetHeapSpace()->UpdateYoungSpaceMaxSize(restoreLimit);
141 }
142 
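// DoRegionCompacting either compacts the region inline or offloads the work to a GC worker task.
// For a full GC the moved objects are additionally recorded into movedObjectsVector so their references
// can be updated later; if the worker pool rejects the task, the region is compacted on the current thread.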
143 template <class LanguageConfig>
144 template <RegionFlag REGION_TYPE, bool FULL_GC>
145 void G1GC<LanguageConfig>::DoRegionCompacting(Region *region, bool useGcWorkers,
146                                               PandaVector<PandaVector<ObjectHeader *> *> *movedObjectsVector)
147 {
148     auto internalAllocator = this->GetInternalAllocator();
149     ObjectVisitor movedObjectSaver;
150     if constexpr (FULL_GC) {
151         PandaVector<ObjectHeader *> *movedObjects;
152         if (useGcWorkers) {
153             movedObjects = internalAllocator->template New<PandaVector<ObjectHeader *>>();
154             ASSERT(movedObjects != nullptr);
155             movedObjectsVector->push_back(movedObjects);
156             size_t moveSize = region->GetAllocatedBytes();
157             movedObjects->reserve(moveSize / GetMinimalObjectSize());
158         } else {
159             ASSERT(movedObjectsVector->size() == 1);
160             movedObjects = movedObjectsVector->back();
161         }
162         movedObjectSaver = [movedObjects](ObjectHeader *object) { movedObjects->push_back(object); };
163     } else {
164         movedObjectSaver = []([[maybe_unused]] const ObjectHeader *object) {};
165     }
166 
167     if (useGcWorkers) {
168         auto *storage =
169             internalAllocator->template New<GCRegionCompactWorkersTask::RegionDataType>(region, movedObjectSaver);
170         if (!this->GetWorkersTaskPool()->AddTask(GCRegionCompactWorkersTask(storage))) {
171             // We couldn't send a task to workers. Therefore, do it here.
172             internalAllocator->Delete(storage);
173             RegionCompactingImpl<true, REGION_TYPE, FULL_GC>(region, movedObjectSaver);
174         }
175     } else {
176         RegionCompactingImpl<false, REGION_TYPE, FULL_GC>(region, movedObjectSaver);
177     }
178 }
179 
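// RAII helper: on destruction it logs a one-line summary of a region collection (young/tenured flag,
// region range, allocated and live bytes, remset size, moved bytes and duration), but only when detailed
// GC compaction logging is enabled.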
180 class ScopedRegionCollectionInfo {
181 public:
182     ScopedRegionCollectionInfo(const GC *gc, const char *title, const Region *region, bool isYoung,
183                                const size_t &movedSize)
184         : gc_(gc),
185           title_(title),
186           region_(region),
187           isYoung_(isYoung),
188           movedSize_(movedSize),
189           startTimeNs_(time::GetCurrentTimeInNanos())
190     {
191     }
192 
193     NO_COPY_SEMANTIC(ScopedRegionCollectionInfo);
194     NO_MOVE_SEMANTIC(ScopedRegionCollectionInfo);
195 
196     ~ScopedRegionCollectionInfo()
197     {
198         if (gc_->IsLogDetailedGcCompactionInfoEnabled()) {
199             LOG(INFO, GC) << *this;
200         }
201     }
202 
203 private:
204     const GC *gc_;
205     const char *title_;
206     const Region *region_;
207     bool isYoung_;
208     const size_t &movedSize_;
209     uint64_t startTimeNs_;
210 
211     friend std::ostream &operator<<(std::ostream &log, const ScopedRegionCollectionInfo &regionInfo)
212     {
213         auto region = regionInfo.region_;
214         log << '[' << regionInfo.gc_->GetCounter() << "] " << regionInfo.title_ << ": ";
215         // Need to use the saved isYoung_ flag since region flags can be changed during region promotion
216         if (regionInfo.isYoung_) {
217             log << 'Y';
218         } else {
219             log << 'T';
220         }
221         DumpRegionRange(log, *region) << " A " << ark::helpers::MemoryConverter(region->GetAllocatedBytes()) << " L ";
222         if (regionInfo.isYoung_) {
223             log << '-';
224         } else {
225             log << ark::helpers::MemoryConverter(region->GetLiveBytes());
226         }
227         log << " RS " << region->GetRemSetSize() << " M " << ark::helpers::MemoryConverter(regionInfo.movedSize_)
228             << " D " << ark::helpers::TimeConverter(time::GetCurrentTimeInNanos() - regionInfo.startTimeNs_);
229         return log;
230     }
231 };
232 
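// RegionPromotionImpl promotes a whole young region to tenured instead of evacuating it object by object.
// When freed-object tracking is on, every object in the region is visited so dead objects can be reported
// as well; otherwise only the number of alive objects is obtained from the allocator.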
233 template <class LanguageConfig>
234 template <bool ATOMIC, bool FULL_GC>
235 void G1GC<LanguageConfig>::RegionPromotionImpl(Region *region, const ObjectVisitor &movedObjectSaver)
236 {
237     size_t moveSize = region->GetAllocatedBytes();
238     size_t aliveMoveCount = 0;
239     size_t deadMoveCount = 0;
240     auto objectAllocator = this->GetG1ObjectAllocator();
241     auto promotionMoveChecker = [&aliveMoveCount, &movedObjectSaver](ObjectHeader *src) {
242         ++aliveMoveCount;
243         LOG_DEBUG_OBJECT_EVENTS << "PROMOTE YOUNG object " << src;
244         ASSERT(ObjectToRegion(src)->HasFlag(RegionFlag::IS_EDEN));
245         movedObjectSaver(src);
246     };
247     auto promotionDeathChecker = [this, &deadMoveCount](ObjectHeader *objectHeader) {
248         if (IsMarked(objectHeader)) {
249             return ObjectStatus::ALIVE_OBJECT;
250         }
251         ++deadMoveCount;
252         LOG_DEBUG_OBJECT_EVENTS << "PROMOTE DEAD YOUNG object " << objectHeader;
253         return ObjectStatus::DEAD_OBJECT;
254     };
255     ScopedRegionCollectionInfo collectionInfo(this, "Region promoted", region, true, moveSize);
256     if (g1TrackFreedObjects_) {
257         // We want to track all moved objects (including dead ones), therefore, iterate over all objects in the region.
258         objectAllocator->template PromoteYoungRegion<false, FULL_GC>(region, promotionDeathChecker,
259                                                                      promotionMoveChecker);
260     } else {
261         aliveMoveCount += objectAllocator->template PromoteYoungRegion<true, FULL_GC>(region, promotionDeathChecker,
262                                                                                       promotionMoveChecker);
263         ASSERT(deadMoveCount == 0);
264     }
265     region->RmvFlag(RegionFlag::IS_COLLECTION_SET);
266     this->memStats_.template RecordSizeMovedYoung<ATOMIC>(moveSize);
267     this->memStats_.template RecordCountMovedYoung<ATOMIC>(aliveMoveCount + deadMoveCount);
268     analytics_.ReportPromotedRegion();
269     analytics_.ReportLiveObjects(aliveMoveCount);
270 }
271 
272 template <class LanguageConfig>
273 template <typename Handler>
274 void G1GC<LanguageConfig>::IterateOverRefsInMemRange(const MemRange &memRange, Region *region, Handler &refsHandler)
275 {
276     MarkBitmap *bitmap = nullptr;
277     if (region->IsEden()) {
278         ASSERT(this->IsFullGC());
279         bitmap = region->GetMarkBitmap();
280     } else {
281         bitmap = region->GetLiveBitmap();
282     }
283     auto *startAddress = ToVoidPtr(memRange.GetStartAddress());
284     auto *endAddress = ToVoidPtr(memRange.GetEndAddress());
285     auto visitor = [&refsHandler, startAddress, endAddress](void *mem) {
286         ObjectHelpers<LanguageConfig::LANG_TYPE>::template TraverseAllObjectsWithInfo<false>(
287             static_cast<ObjectHeader *>(mem), refsHandler, startAddress, endAddress);
288     };
289     if (region->HasFlag(RegionFlag::IS_LARGE_OBJECT)) {
290         bitmap->CallForMarkedChunkInHumongousRegion<false>(ToVoidPtr(region->Begin()), visitor);
291     } else {
292         bitmap->IterateOverMarkedChunkInRange(startAddress, endAddress, visitor);
293     }
294 }
295 
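// Death checker for non-regular (humongous and non-movable) regions: an object is considered alive if it
// is set in its region's live bitmap. Freed humongous objects are accounted with the whole region size;
// class objects are kept alive unless COLLECT_CLASSES is set.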
296 template <class LanguageConfig, bool CONCURRENTLY, bool COLLECT_CLASSES>
297 class NonRegularObjectsDeathChecker {
298 public:
299     NonRegularObjectsDeathChecker(size_t *deleteSize, size_t *deleteCount)
300         : deleteSize_(deleteSize), deleteCount_(deleteCount)
301     {
302     }
303 
304     ~NonRegularObjectsDeathChecker() = default;
305 
306     ObjectStatus operator()(ObjectHeader *objectHeader)
307     {
308         // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
309         if constexpr (CONCURRENTLY) {
310             // We may face a newly created object without live bitmap initialization.
311             if (objectHeader->template ClassAddr<BaseClass>() == nullptr) {
312                 return ObjectStatus::ALIVE_OBJECT;
313             }
314         }
315         Region *region = ObjectToRegion(objectHeader);
316         auto liveBitmap = region->GetLiveBitmap();
317         if (liveBitmap->AtomicTest(objectHeader)) {
318             return ObjectStatus::ALIVE_OBJECT;
319         }
320         if constexpr (!COLLECT_CLASSES) {
321             if (ObjectHelpers<LanguageConfig::LANG_TYPE>::IsClassObject(objectHeader)) {
322                 LOG_DEBUG_OBJECT_EVENTS << "DELETE NON MOVABLE class object " << objectHeader
323                                         << " but don't free memory";
324                 return ObjectStatus::ALIVE_OBJECT;
325             }
326         }
327 
328         if (region->HasFlag(RegionFlag::IS_LARGE_OBJECT)) {
329             LOG_DEBUG_OBJECT_EVENTS << "DELETE HUMONGOUS object " << objectHeader;
330             // The humongous allocator increases the size by the region size
331             *deleteSize_ += region->Size();
332             ++(*deleteCount_);
333         } else {
334             ASSERT(region->HasFlag(RegionFlag::IS_NONMOVABLE));
335             LOG_DEBUG_OBJECT_EVENTS << "DELETE NON MOVABLE object " << objectHeader;
336         }
337         return ObjectStatus::DEAD_OBJECT;
338     }
339 
340     DEFAULT_COPY_SEMANTIC(NonRegularObjectsDeathChecker);
341     DEFAULT_MOVE_SEMANTIC(NonRegularObjectsDeathChecker);
342 
343 private:
344     size_t *deleteSize_;
345     size_t *deleteCount_;
346 };
347 
348 template <class LanguageConfig>
349 template <bool ATOMIC, bool CONCURRENTLY>
350 void G1GC<LanguageConfig>::CollectEmptyRegions(GCTask &task, PandaVector<Region *> *emptyTenuredRegions)
351 {
352     ScopedTiming t(__FUNCTION__, *this->GetTiming());
353     CollectNonRegularObjects<ATOMIC, CONCURRENTLY>();
354     ClearEmptyTenuredMovableRegions<ATOMIC, CONCURRENTLY>(emptyTenuredRegions);
355     task.UpdateGCCollectionType(GCCollectionType::TENURED);
356 }
357 
358 template <class LanguageConfig>
359 template <bool ATOMIC, bool CONCURRENTLY>
360 void G1GC<LanguageConfig>::CollectNonRegularObjects()
361 {
362     ScopedTiming t(__FUNCTION__, *this->GetTiming());
363     size_t deleteSize = 0;
364     size_t deleteCount = 0;
365     // Don't collect classes if --g1-track-free-objects is enabled.
366     // We need to know the size of objects while iterating over all objects in the collected region.
367     auto deathChecker =
368         g1TrackFreedObjects_
369             ? GCObjectVisitor(
370                   // CC-OFFNXT(G.FMT.06-CPP) project code style
371                   NonRegularObjectsDeathChecker<LanguageConfig, CONCURRENTLY, false>(&deleteSize, &deleteCount))
372             : GCObjectVisitor(
373                   // CC-OFFNXT(G.FMT.06-CPP) project code style
374                   NonRegularObjectsDeathChecker<LanguageConfig, CONCURRENTLY, true>(&deleteSize, &deleteCount));
375     auto regionVisitor = [this](PandaVector<Region *> &regions) {
376         if constexpr (CONCURRENTLY) {
377             updateRemsetWorker_->InvalidateRegions(&regions);
378         } else {
379             updateRemsetWorker_->GCInvalidateRegions(&regions);
380         }
381     };
382     this->GetG1ObjectAllocator()->CollectNonRegularRegions(regionVisitor, deathChecker);
383     this->memStats_.template RecordCountFreedTenured<ATOMIC>(deleteCount);
384     this->memStats_.template RecordSizeFreedTenured<ATOMIC>(deleteSize);
385 }
386 
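// Splits the trailing entries whose garbage counter equals DEFAULT_REGION_SIZE (i.e. regular tenured
// regions that look completely empty) off the garbageRegions vector and returns them, so they can be
// reclaimed without compaction (see ClearEmptyTenuredMovableRegions).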
387 static PandaVector<Region *> GetEmptyTenuredRegularRegions(PandaVector<std::pair<uint32_t, Region *>> &garbageRegions)
388 {
389     auto firstEmptyRegionIter =
390         std::find_if_not(garbageRegions.rbegin(), garbageRegions.rend(),
391                          [](const std::pair<uint32_t, Region *> &entry) { return entry.first == DEFAULT_REGION_SIZE; });
392     if (firstEmptyRegionIter == garbageRegions.rend()) {
393         return {};
394     }
395     PandaVector<Region *> emptyTenuredRegions;
396     emptyTenuredRegions.reserve(garbageRegions.end() - firstEmptyRegionIter.base());
397     for (auto iter = firstEmptyRegionIter.base(); iter != garbageRegions.end(); ++iter) {
398         emptyTenuredRegions.emplace_back(iter->second);
399     }
400     garbageRegions.erase(firstEmptyRegionIter.base(), garbageRegions.end());
401     return emptyTenuredRegions;
402 }
403 
404 template <class LanguageConfig>
405 template <bool ATOMIC, bool CONCURRENTLY>
406 void G1GC<LanguageConfig>::ClearEmptyTenuredMovableRegions(PandaVector<Region *> *emptyTenuredRegions)
407 {
408     ScopedTiming t(__FUNCTION__, *this->GetTiming());
409     {
410         ScopedTiming t1("Region Invalidation", *this->GetTiming());
411         if constexpr (CONCURRENTLY) {
412             updateRemsetWorker_->InvalidateRegions(emptyTenuredRegions);
413         } else {
414             updateRemsetWorker_->GCInvalidateRegions(emptyTenuredRegions);
415         }
416     }
417     size_t deleteSize = 0;
418     size_t deleteCount = 0;
419     auto deathVisitor = [](ObjectHeader *objectHeader) {
420         LOG_DEBUG_OBJECT_EVENTS << "DELETE tenured object " << objectHeader;
421     };
422     for (auto *region : *emptyTenuredRegions) {
423         deleteCount += region->GetAllocatedObjects();
424         deleteSize += region->GetAllocatedBytes();
425         ASSERT_PRINT(region->GetLiveBitmap()->FindFirstMarkedChunks() == nullptr,
426                      *region << " contains marked object: " << region->GetLiveBitmap()->FindFirstMarkedChunks());
427         if (g1TrackFreedObjects_) {
428             region->IterateOverObjects(deathVisitor);
429         }
430     }
431     {
432         ScopedTiming t2("Reset regions", *this->GetTiming());
433         if (CONCURRENTLY) {
434             this->GetG1ObjectAllocator()
435                 ->template ResetRegions<RegionFlag::IS_OLD, RegionSpace::ReleaseRegionsPolicy::NoRelease,
436                                         OSPagesPolicy::IMMEDIATE_RETURN, true, PandaVector<Region *>>(
437                     // CC-OFFNXT(G.FMT.06-CPP) project code style
438                     *emptyTenuredRegions);
439         } else {
440             this->GetG1ObjectAllocator()
441                 ->template ResetRegions<RegionFlag::IS_OLD, RegionSpace::ReleaseRegionsPolicy::Release,
442                                         OSPagesPolicy::NO_RETURN, false, PandaVector<Region *>>(*emptyTenuredRegions);
443         }
444     }
445     this->memStats_.template RecordCountFreedTenured<ATOMIC>(deleteCount);
446     this->memStats_.template RecordSizeFreedTenured<ATOMIC>(deleteSize);
447 }
448 
449 template <class LanguageConfig>
450 bool G1GC<LanguageConfig>::NeedToPromote(const Region *region) const
451 {
452     ASSERT(region->HasFlag(RegionFlag::IS_EDEN));
453     if (fullCollectionSetPromotion_ || region->HasPinnedObjects()) {
454         return true;
455     }
456     if ((g1PromotionRegionAliveRate_ < PERCENT_100_D) && !this->IsFullGC()) {
457         size_t aliveBytes = region->GetLiveBytes();
458         double alivePercentage = static_cast<double>(aliveBytes) / region->Size() * PERCENT_100_D;
459         if (alivePercentage >= g1PromotionRegionAliveRate_) {
460             return true;
461         }
462     }
463     return false;
464 }
465 
466 template <class LanguageConfig>
467 template <bool ATOMIC, RegionFlag REGION_TYPE, bool FULL_GC>
468 // CC-OFFNXT(G.FUN.01-CPP) solid logic, the fix will degrade the readability and maintenance of the code
469 void G1GC<LanguageConfig>::RegionCompactingImpl(Region *region, const ObjectVisitor &movedObjectSaver)
470 {
471     auto objectAllocator = this->GetG1ObjectAllocator();
472     // Live bytes in the region were calculated for all marked objects during MixedMark
473     size_t moveSize = region->GetLiveBytes();
474     size_t moveCount = 0;
475     size_t allocatedSize = region->GetAllocatedBytes();
476     ASSERT(moveSize <= allocatedSize);
477     size_t deleteSize = allocatedSize - moveSize;
478     size_t deleteCount = 0;
479 
480     auto moveChecker = [this, &moveCount, &movedObjectSaver](ObjectHeader *src, ObjectHeader *dst) {
481         LOG_DEBUG_OBJECT_EVENTS << "MOVE object " << src << " -> " << dst;
482         ASSERT(ObjectToRegion(dst)->HasFlag(RegionFlag::IS_OLD));
483         this->SetForwardAddress(src, dst);
484         ++moveCount;
485         movedObjectSaver(dst);
486     };
487 
488     auto deathChecker = [this, &deleteCount](ObjectHeader *objectHeader) {
489         if (IsMarked(objectHeader)) {
490             return ObjectStatus::ALIVE_OBJECT;
491         }
492         ++deleteCount;
493         if constexpr (REGION_TYPE == RegionFlag::IS_EDEN) {
494             LOG_DEBUG_OBJECT_EVENTS << "DELETE YOUNG object " << objectHeader;
495         } else {
496             ASSERT(REGION_TYPE == RegionFlag::IS_OLD);
497             LOG_DEBUG_OBJECT_EVENTS << "DELETE TENURED object " << objectHeader;
498         }
499         return ObjectStatus::DEAD_OBJECT;
500     };
501     if constexpr (REGION_TYPE == RegionFlag::IS_EDEN) {
502         if (!this->NeedToPromote(region)) {
503             ScopedRegionCollectionInfo collectionInfo(this, "Region compacted", region, true, moveSize);
504             if (g1TrackFreedObjects_) {
505                 // We want to track all freed objects, therefore, iterate over all objects in region.
506                 objectAllocator->template CompactRegion<RegionFlag::IS_EDEN, false>(region, deathChecker, moveChecker);
507             } else {
508                 objectAllocator->template CompactRegion<RegionFlag::IS_EDEN, true>(region, deathChecker, moveChecker);
509                 // deleteCount is equal to 0 because we don't track allocations in TLABs by default.
510                 // We do it only with the PANDA_TRACK_TLAB_ALLOCATIONS key
511                 ASSERT(deleteCount == 0);
512             }
513             this->memStats_.template RecordSizeMovedYoung<ATOMIC>(moveSize);
514             this->memStats_.template RecordCountMovedYoung<ATOMIC>(moveCount);
515             this->memStats_.template RecordSizeFreedYoung<ATOMIC>(deleteSize);
516             this->memStats_.template RecordCountFreedYoung<ATOMIC>(deleteCount);
517             analytics_.ReportEvacuatedBytes(moveSize);
518             analytics_.ReportLiveObjects(moveCount);
519         } else {
520             RegionPromotionImpl<ATOMIC, FULL_GC>(region, movedObjectSaver);
521         }
522     } else {
523         ScopedRegionCollectionInfo collectionInfo(this, "Region compacted", region, false, moveSize);
524         ASSERT(region->HasFlag(RegionFlag::IS_OLD));
525         ASSERT(!region->HasFlag(RegionFlag::IS_NONMOVABLE) && !region->HasFlag(RegionFlag::IS_LARGE_OBJECT));
526         if (g1TrackFreedObjects_) {
527             // We want to track all freed objects, therefore, iterate over all objects in region.
528             objectAllocator->template CompactRegion<RegionFlag::IS_OLD, false>(region, deathChecker, moveChecker);
529         } else {
530             objectAllocator->template CompactRegion<RegionFlag::IS_OLD, true>(region, deathChecker, moveChecker);
531             size_t allocatedObjects = region->GetAllocatedObjects();
532             ASSERT(moveCount <= allocatedObjects);
533             ASSERT(deleteCount == 0);
534             deleteCount = allocatedObjects - moveCount;
535         }
536         this->memStats_.template RecordSizeMovedTenured<ATOMIC>(moveSize);
537         this->memStats_.template RecordCountMovedTenured<ATOMIC>(moveCount);
538         this->memStats_.template RecordSizeFreedTenured<ATOMIC>(deleteSize);
539         this->memStats_.template RecordCountFreedTenured<ATOMIC>(deleteCount);
540     }
541 }
542 
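// Walks every object in the moved-objects range and lets refUpdater process its outgoing references.
// For a non-full GC an object may already have been evacuated, so the forwarded address is traversed
// instead of the stale one.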
543 template <class LanguageConfig, typename RefUpdater, bool FULL_GC>
544 void DoUpdateReferencesToMovedObjectsRange(typename GCUpdateRefsWorkersTask<FULL_GC>::MovedObjectsRange *movedObjects,
545                                            RefUpdater &refUpdater)
546 {
547     for (auto *obj : *movedObjects) {
548         if constexpr (!FULL_GC) {
549             obj = obj->IsForwarded() ? GetForwardAddress(obj) : obj;
550         }
551         ObjectHelpers<LanguageConfig::LANG_TYPE>::template TraverseAllObjectsWithInfo<false>(obj, refUpdater);
552     }
553 }
554 
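// GC worker entry point: dispatches a queued task to the matching Execute* handler below based on its
// GCWorkersTaskTypes value (marking, remark, compacting, remset enqueueing, evacuation, etc.).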
555 template <class LanguageConfig>
556 void G1GC<LanguageConfig>::WorkerTaskProcessing(GCWorkersTask *task, [[maybe_unused]] void *workerData)
557 {
558     switch (task->GetType()) {
559         case GCWorkersTaskTypes::TASK_MARKING: {
560             ExecuteMarkingTask(task->Cast<GCMarkWorkersTask>()->GetMarkingStack());
561             break;
562         }
563         case GCWorkersTaskTypes::TASK_REMARK: {
564             ExecuteRemarkTask(task->Cast<GCMarkWorkersTask>()->GetMarkingStack(), marker_);
565             break;
566         }
567         case GCWorkersTaskTypes::TASK_HUGE_ARRAY_MARKING_REMARK: {
568             auto *hugeArrayTask = task->Cast<GCMarkWorkersTask>();
569             ExecuteHugeArrayMarkTask(hugeArrayTask->GetMarkingStack(), marker_);
570             break;
571         }
572         case GCWorkersTaskTypes::TASK_XREMARK: {
573             ExecuteRemarkTask(task->Cast<GCMarkWorkersTask>()->GetMarkingStack(), onPauseXMarker_);
574             break;
575         }
576         case GCWorkersTaskTypes::TASK_FULL_MARK: {
577             ExecuteFullMarkingTask(task->Cast<GCMarkWorkersTask>()->GetMarkingStack());
578             break;
579         }
580         case GCWorkersTaskTypes::TASK_REGION_COMPACTING: {
581             auto *data = task->Cast<GCRegionCompactWorkersTask>()->GetRegionData();
582             ExecuteCompactingTask(data->first, data->second);
583             this->GetInternalAllocator()->Delete(data);
584             break;
585         }
586         case GCWorkersTaskTypes::TASK_RETURN_FREE_PAGES_TO_OS: {
587             auto wasInterrupted =
588                 PoolManager::GetMmapMemPool()->ReleaseFreePagesToOSWithInterruption(releasePagesInterruptFlag_);
589             releasePagesInterruptFlag_ =
590                 wasInterrupted ? ReleasePagesStatus::WAS_INTERRUPTED : ReleasePagesStatus::FINISHED;
591             break;
592         }
593         case GCWorkersTaskTypes::TASK_ENQUEUE_REMSET_REFS: {
594             ExecuteEnqueueRemsetsTask(task->Cast<GCUpdateRefsWorkersTask<false>>()->GetMovedObjectsRange());
595             break;
596         }
597         case GCWorkersTaskTypes::TASK_EVACUATE_REGIONS: {
598             ExecuteEvacuateTask(task->Cast<G1EvacuateRegionsTask<Ref>>()->GetMarkingStack());
599             break;
600         }
601         case GCWorkersTaskTypes::TASK_MARK_WHOLE_REGION: {
602             auto [region, markedObjDeque] = task->Cast<GCMarkWholeRegionTask>()->GetInfo();
603             auto processor = [region = region, markedObjDeque = markedObjDeque](ObjectHeader *obj) {
604                 auto *objClass = obj->template ClassAddr<BaseClass>();
605                 CalcLiveBytesMarkPreprocess<false>(obj, objClass);
606                 region->GetMarkBitmap()->Set(obj);
607                 markedObjDeque->emplace_back(obj);
608             };
609             region->IterateOverObjects(processor);
610             break;
611         }
612         default:
613             LOG(FATAL, GC) << "Unimplemented for " << GCWorkersTaskTypesToString(task->GetType());
614             UNREACHABLE();
615     }
616 }
617 
618 template <class LanguageConfig>
619 void G1GC<LanguageConfig>::ExecuteMarkingTask(GCMarkWorkersTask::StackType *objectsStack)
620 {
621     MarkStackMixed(objectsStack);
622     ASSERT(objectsStack->Empty());
623     this->GetInternalAllocator()->Delete(objectsStack);
624 }
625 
626 template <class LanguageConfig>
627 template <typename Marker>
628 void G1GC<LanguageConfig>::ExecuteRemarkTask(GCMarkWorkersTask::StackType *objectsStack, Marker &marker)
629 {
630     this->MarkStack(&marker, objectsStack, CalcLiveBytesMarkPreprocess<true>);
631     ASSERT(objectsStack->Empty());
632     this->GetInternalAllocator()->Delete(objectsStack);
633 }
634 
635 template <class LanguageConfig>
636 template <typename Marker>
637 void G1GC<LanguageConfig>::ExecuteHugeArrayMarkTask(GCMarkWorkersTask::StackType *objectsStack, Marker &marker)
638 {
639     ASSERT(objectsStack->Size() == 1);
640     auto *markingRanges = reinterpret_cast<std::pair<size_t, size_t> *>(objectsStack->GetAdditionalMarkingInfo());
641     ASSERT(markingRanges != nullptr);
642     auto [traversingStartIndex, traversingEndIndex] = *markingRanges;
643     auto *arrayObject = coretypes::Array::Cast(objectsStack->PopFromStack());
644     ASSERT(traversingEndIndex <= arrayObject->GetLength());
645     objectsStack->SetTaskType(GCWorkersTaskTypes::TASK_REMARK);
646     for (size_t i = traversingStartIndex; i < traversingEndIndex; ++i) {
647         auto *arrayElement = arrayObject->Get<ObjectHeader *>(i);
648         if (arrayElement == nullptr) {
649             continue;
650         }
651 #ifndef NDEBUG
652         auto arrayElementCls = arrayElement->ClassAddr<Class>();
653         LOG_IF(arrayElementCls == nullptr, ERROR, GC)
654             << " object's class is nullptr: " << arrayElement << " from array: " << arrayObject;
655         ASSERT(arrayElementCls != nullptr);
656 #endif
657         if (marker_.MarkIfNotMarked(arrayElement)) {
658             objectsStack->PushToStack(arrayObject, arrayElement);
659         }
660     }
661     ExecuteRemarkTask(objectsStack, marker);
662 }
663 
664 template <class LanguageConfig>
665 void G1GC<LanguageConfig>::ExecuteFullMarkingTask(GCMarkWorkersTask::StackType *objectsStack)
666 {
667     const ReferenceCheckPredicateT &refEnablePred = []([[maybe_unused]] const ObjectHeader *obj) {
668         // process all refs
669         return true;
670     };
671     this->MarkStack(&marker_, objectsStack, CalcLiveBytesMarkPreprocess<true>, refEnablePred);
672     ASSERT(objectsStack->Empty());
673     this->GetInternalAllocator()->Delete(objectsStack);
674 }
675 
676 template <class LanguageConfig>
677 void G1GC<LanguageConfig>::ExecuteCompactingTask(Region *region, const ObjectVisitor &movedObjectsSaver)
678 {
679     if (region->HasFlag(RegionFlag::IS_EDEN)) {
680         if (this->IsFullGC()) {
681             RegionCompactingImpl<true, RegionFlag::IS_EDEN, true>(region, movedObjectsSaver);
682         } else {
683             RegionCompactingImpl<true, RegionFlag::IS_EDEN, false>(region, movedObjectsSaver);
684         }
685     } else if (region->HasFlag(RegionFlag::IS_OLD)) {
686         RegionCompactingImpl<true, RegionFlag::IS_OLD, false>(region, movedObjectsSaver);
687     } else {
688         LOG(FATAL, GC) << "Unsupported region type";
689     }
690 }
691 
692 template <class LanguageConfig>
693 void G1GC<LanguageConfig>::ExecuteEnqueueRemsetsTask(
694     GCUpdateRefsWorkersTask<false>::MovedObjectsRange *movedObjectsRange)
695 {
696     auto *taskUpdatedRefsQueue = this->GetInternalAllocator()->template New<GCG1BarrierSet::ThreadLocalCardQueues>();
697     EnqueueRemsetRefUpdater<LanguageConfig> refUpdater(this->GetCardTable(), taskUpdatedRefsQueue, regionSizeBits_);
698     DoUpdateReferencesToMovedObjectsRange<LanguageConfig, decltype(refUpdater), false>(movedObjectsRange, refUpdater);
699     {
700         os::memory::LockHolder lock(gcWorkerQueueLock_);
701         updatedRefsQueue_->insert(updatedRefsQueue_->end(), taskUpdatedRefsQueue->begin(), taskUpdatedRefsQueue->end());
702     }
703     this->GetInternalAllocator()->Delete(movedObjectsRange);
704     this->GetInternalAllocator()->Delete(taskUpdatedRefsQueue);
705 }
706 
707 template <class LanguageConfig>
708 void G1GC<LanguageConfig>::ExecuteEvacuateTask(typename G1EvacuateRegionsTask<Ref>::StackType *stack)
709 {
710     G1EvacuateRegionsWorkerState<LanguageConfig> state(this, stack);
711     state.EvacuateLiveObjects();
712     ASSERT(stack->Empty());
713     this->GetInternalAllocator()->Delete(stack);
714 }
715 
716 template <class LanguageConfig>
717 void G1GC<LanguageConfig>::UpdateCollectionSet(const CollectionSet &collectibleRegions)
718 {
719     collectionSet_ = collectibleRegions;
720     for (auto r : collectionSet_) {
721         // We don't need to reset the flag, because we don't reuse collection set regions
722         r->AddFlag(RegionFlag::IS_COLLECTION_SET);
723         LOG_DEBUG_GC << "dump region: " << *r;
724     }
725 }
726 
727 template <class LanguageConfig>
728 void G1GC<LanguageConfig>::RunPhasesForRegions(ark::GCTask &task, const CollectionSet &collectibleRegions)
729 {
730     if (collectibleRegions.empty()) {
731         LOG_DEBUG_GC << "No regions specified for collection " << task.reason;
732     }
733     ASSERT(concurrentMarkingStack_.Empty());
734     this->GetObjectGenAllocator()->InvalidateSpaceData();
735     this->GetObjectGenAllocator()->UpdateSpaceData();
736     RunGC(task, collectibleRegions);
737 }
738 
739 template <class LanguageConfig>
740 bool G1GC<LanguageConfig>::NeedToRunGC(const ark::GCTask &task)
741 {
742     return (task.reason == GCTaskCause::YOUNG_GC_CAUSE) || (task.reason == GCTaskCause::OOM_CAUSE) ||
743            (task.reason == GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE) ||
744            (task.reason == GCTaskCause::STARTUP_COMPLETE_CAUSE) || (task.reason == GCTaskCause::EXPLICIT_CAUSE) ||
745            (task.reason == GCTaskCause::NATIVE_ALLOC_CAUSE) || (task.reason == GCTaskCause::MIXED);
746 }
747 
748 template <class LanguageConfig>
749 bool G1GC<LanguageConfig>::NeedFullGC(const ark::GCTask &task)
750 {
751     return this->IsExplicitFull(task) || (task.reason == GCTaskCause::OOM_CAUSE);
752 }
753 
754 template <class LanguageConfig>
755 void G1GC<LanguageConfig>::SetExtensionData(GCExtensionData *data)
756 {
757     GenerationalGC<LanguageConfig>::SetExtensionData(data);
758     auto *xdata = reinterpret_cast<XGCExtensionData *>(data);
759     onPauseXMarker_ = XGCMarker<LanguageConfig, true>(this, xdata->GetXObjectHandler());
760     concXMarker_ = XGCMarker<LanguageConfig, true>(this, xdata->GetXObjectHandler());
761 }
762 
763 template <class LanguageConfig>
764 void G1GC<LanguageConfig>::StartGCCollection(ark::GCTask &task)
765 {
766     GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats());
767     this->memStats_.Reset();
768     if (NeedToRunGC(task)) {
769         // Check that no concurrent marking is running in another thread.
770         EnsurePreWrbDisabledInThreads();
771 
772         if (this->GetSettings()->LogDetailedGCInfoEnabled()) {
773             PrintFragmentationMetrics("Fragmentation before GC: ");
774         }
775 
776         if (NeedFullGC(task)) {
777             task.collectionType = GCCollectionType::FULL;
778             RunFullGC(task);
779         } else {
780             fullCollectionSetPromotion_ = this->GetFastGCFlag();
781             TryRunMixedGC(task);
782         }
783 
784         if (this->GetSettings()->LogDetailedGCInfoEnabled()) {
785             PrintFragmentationMetrics("Fragmentation after GC: ");
786         }
787     }
788 }
789 
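// Top-level collection driver: suspends the update-remset worker, runs the on-pause collection
// (StartGCCollection), updates pause statistics if the pause-time goal is enabled, and then, depending on
// the task reason, starts concurrent marking with either the cross-reference (X) markers or the regular ones.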
790 template <class LanguageConfig>
791 void G1GC<LanguageConfig>::RunPhasesImpl(ark::GCTask &task)
792 {
793     SuspendUpdateRemsetWorkerScope stopUpdateRemsetWorkerScope(updateRemsetWorker_);
794     interruptConcurrentFlag_ = false;
795     LOG_DEBUG_GC << "G1GC start, reason: " << task.reason;
796     LOG_DEBUG_GC << "Footprint before GC: " << this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
797     task.UpdateGCCollectionType(GCCollectionType::YOUNG);
798 
799     InterruptReleasePagesIfNeeded();
800 
801     size_t bytesInHeapBeforeMove = this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
802     {
803         ScopedTiming t("G1 GC", *this->GetTiming());
804         auto startCollectionTime = ark::time::GetCurrentTimeInNanos();
805         analytics_.ReportCollectionStart(startCollectionTime);
806 
807         StartGCCollection(task);
808 
809         if (this->GetSettings()->G1EnablePauseTimeGoal()) {
810             auto endCollectionTime = ark::time::GetCurrentTimeInNanos();
811             g1PauseTracker_.AddPauseInNanos(startCollectionTime, endCollectionTime);
812             analytics_.ReportCollectionEnd(task.reason, endCollectionTime, collectionSet_,
813                                            singlePassCompactionEnabled_);
814         }
815         collectionSet_.clear();
816         singlePassCompactionEnabled_ = false;
817         if (fullCollectionSetPromotion_) {
818             isMixedGcRequired_ = true;
819         }
820         if (task.reason == GCTaskCause::CROSSREF_CAUSE) {
821             RunConcurrentGC(task, onPauseXMarker_, concXMarker_);
822         } else if (ScheduleMixedGCAndConcurrentMark(task)) {
823             RunConcurrentGC(task, marker_, concMarker_);
824         }
825     }
826     // Update global and GC memstats based on generational memstats information
827     // We will update tenured stats and record allocations, so set 'true' values
828     this->UpdateMemStats(bytesInHeapBeforeMove, true, true);
829 
830     StartReleasePagesIfNeeded(ReleasePagesStatus::WAS_INTERRUPTED);
831 
832     LOG_DEBUG_GC << "Footprint after GC: " << this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
833     this->SetFullGC(false);
834 }
835 
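// Full GC is performed iteratively: after full marking, tenured regions from the full collection set are
// collected and moved in batches bounded by the number of free regions, a region is re-reserved to avoid
// OOM, young regions are collected last, and free pool pages are then released to the OS.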
836 template <class LanguageConfig>
837 void G1GC<LanguageConfig>::RunFullGC(ark::GCTask &task)
838 {
839     ScopedTiming t("Run Full GC", *this->GetTiming());
840     GetG1ObjectAllocator()->template ReleaseEmptyRegions<RegionFlag::IS_OLD, OSPagesPolicy::NO_RETURN>();
841     LOG_DEBUG_GC << "Explicit Full GC invocation due to a reason: " << task.reason;
842     this->SetFullGC(true);
843     FullMarking(task);
844     if (!HaveEnoughRegionsToMove(1)) {
845         GetG1ObjectAllocator()->ReleaseReservedRegion();
846         // After releasing the reserved region we always have at least 1 region for the tenured collection
847         ASSERT(HaveEnoughRegionsToMove(1));
848     }
849     CollectionSet collectionSet = GetFullCollectionSet();
850     ClearTenuredCards(collectionSet);
851     PrepareYoungRegionsForFullGC(collectionSet);
852     CollectAndMoveTenuredRegions(collectionSet);
853     // Reserve a region to prevent OOM in case there is a lot of garbage in the tenured space
854     GetG1ObjectAllocator()->ReserveRegionIfNeeded();
855     CollectAndMoveYoungRegions(collectionSet);
856     ReleasePagesInFreePools();
857     this->SetFullGC(false);
858     topGarbageRegions_.clear();
859 }
860 
861 template <class LanguageConfig>
862 void G1GC<LanguageConfig>::TryRunMixedGC(ark::GCTask &task)
863 {
864     bool isMixed = false;
865     if (task.reason == GCTaskCause::MIXED && !interruptConcurrentFlag_) {
866         isMixed = true;
867     } else {
868         // Atomic with acquire order reason: to see changes made by the GC thread (which does concurrent
869         // marking and then sets isMixedGcRequired_) in the mutator thread which waits for the end of
870         // concurrent marking.
871         isMixed = isMixedGcRequired_.load(std::memory_order_acquire);
872     }
873     task.collectionType = isMixed ? GCCollectionType::MIXED : GCCollectionType::YOUNG;
874     // Handle pending dirty cards here to be able to estimate scanning time while adding old regions to
875     // collection set
876     HandlePendingDirtyCards();
877     auto collectibleRegions = GetCollectibleRegions(task, isMixed);
878     if (!collectibleRegions.empty() && HaveEnoughSpaceToMove(collectibleRegions)) {
879         // Ordinary collection flow
880         RunMixedGC(task, collectibleRegions);
881     } else if (collectibleRegions.empty()) {
882         LOG_DEBUG_GC << "Failed to run gc: nothing to collect in movable space";
883     } else {
884         // There is no space to move objects. Need to skip concurrent marking
885         // in this case, since it ignores young roots.
886         // Atomic with release order reason: to see changes made by the GC thread (which does concurrent
887         // marking and then sets isMixedGcRequired_) in the mutator thread which waits for the end of
888         // concurrent marking.
889         isMixedGcRequired_.store(true, std::memory_order_release);
890         LOG_DEBUG_GC << "Failed to run gc: not enough free regions to move";
891     }
892     ReenqueueDirtyCards();
893 }
894 
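// Tenured regions are processed in batches: each inner loop builds a collection set no larger than the
// number of regions currently available for evacuation. A region is added if it has garbage or if its
// fragmentation reaches the G1FullGCRegionFragmentationRate threshold; otherwise it is skipped.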
895 template <class LanguageConfig>
896 void G1GC<LanguageConfig>::CollectAndMoveTenuredRegions(const CollectionSet &collectionSet)
897 {
898     auto curRegionIt = collectionSet.Tenured().begin();
899     auto endRegionIt = collectionSet.Tenured().end();
900     while (curRegionIt != endRegionIt) {
901         ASSERT(HaveEnoughRegionsToMove(1));
902         CollectionSet cs;
903         while ((curRegionIt != endRegionIt) && (HaveEnoughRegionsToMove(cs.Movable().size() + 1))) {
904             Region *region = *curRegionIt;
905             curRegionIt++;
906             if (region->GetGarbageBytes() > 0) {
907                 LOG_DEBUG_GC << "Add region " << *region << " to a collection set";
908                 cs.AddRegion(region);
909                 continue;
910             }
911 
912             double regionFragmentation = region->GetFragmentation();
913             if (regionFragmentation < this->GetSettings()->G1FullGCRegionFragmentationRate()) {
914                 LOG_DEBUG_GC << "Skip region " << *region << " because it has no garbage inside";
915                 continue;
916             }
917 
918             LOG_DEBUG_GC << "Add region " << *region
919                          << " to a collection set because it has a big fragmentation = " << regionFragmentation;
920             cs.AddRegion(region);
921         }
922         UpdateCollectionSet(cs);
923         CollectAndMove<true>(cs);
924         LOG_DEBUG_GC << "Iterative full GC, collected " << cs.size() << " regions";
925     }
926 }
927 
928 template <class LanguageConfig>
929 void G1GC<LanguageConfig>::CollectAndMoveYoungRegions(const CollectionSet &collectionSet)
930 {
931     if (!collectionSet.Young().empty()) {
932         CollectionSet cs(collectionSet.Young());
933         if (HaveEnoughRegionsToMove(cs.Movable().size())) {
934             LOG_DEBUG_GC << "Iterative full GC. Collecting " << cs.size() << " young regions";
935             UpdateCollectionSet(cs);
936             CollectAndMove<true>(cs);
937         } else {
938             RestoreYoungRegionsAfterFullGC(cs);
939             LOG_INFO_GC << "Failed to run gc, not enough free regions for young";
940             LOG_INFO_GC << "Accounted total object used bytes = "
941                         << PoolManager::GetMmapMemPool()->GetObjectUsedBytes();
942         }
943     }
944 }
945 
946 template <class LanguageConfig>
947 void G1GC<LanguageConfig>::ReleasePagesInFreePools()
948 {
949     ScopedTiming releasePages("Release Pages in Free Pools", *this->GetTiming());
950     bool useGcWorkers = this->GetSettings()->GCWorkersCount() != 0;
951     if (useGcWorkers) {
952         StartReleasePagesIfNeeded(ReleasePagesStatus::FINISHED);
953     } else {
954         PoolManager::GetMmapMemPool()->ReleaseFreePagesToOS();
955     }
956 }
957 
958 template <class LanguageConfig>
959 void G1GC<LanguageConfig>::RunMixedGC(ark::GCTask &task, const CollectionSet &collectionSet)
960 {
961     auto startTime = ark::time::GetCurrentTimeInNanos();
962     LOG_DEBUG_GC << "Collect regions size:" << collectionSet.size();
963     UpdateCollectionSet(collectionSet);
964     RunPhasesForRegions(task, collectionSet);
965     auto endTime = ark::time::GetCurrentTimeInNanos();
966     this->GetStats()->AddTimeValue(endTime - startTime, TimeTypeStats::YOUNG_TOTAL_TIME);
967 }
968 
969 template <class LanguageConfig>
970 bool G1GC<LanguageConfig>::ScheduleMixedGCAndConcurrentMark(ark::GCTask &task)
971 {
972     // Atomic with acquire order reason: to see changes made by the GC thread (which does concurrent marking and then
973     // sets isMixedGcRequired_) in the mutator thread which waits for the end of concurrent marking.
974     if (isMixedGcRequired_.load(std::memory_order_acquire)) {
975         if (!HaveGarbageRegions()) {
976             // Atomic with release order reason: to see changes made by the GC thread (which does concurrent marking
977             // and then sets isMixedGcRequired_) in the mutator thread which waits for the end of concurrent marking.
978             isMixedGcRequired_.store(false, std::memory_order_release);
979         }
980         return false;  // don't run concurrent mark
981     }
982     concurrentMarkingFlag_ = !interruptConcurrentFlag_ && this->ShouldRunTenuredGC(task);
983     // Atomic with relaxed order reason: read variable modified in the same thread
984     return concurrentMarkingFlag_.load(std::memory_order_relaxed);
985 }
986 
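// Installs (ENABLE_BARRIER == true) or clears the SATB pre-write-barrier entrypoint in every managed thread.
// The entrypoint is taken from the barrier set's STORE_IN_BUFF_TO_MARK_FUNC operand, and
// currentPreWrbEntrypoint_ is updated under the same thread enumeration.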
987 template <class LanguageConfig>
988 template <bool ENABLE_BARRIER>
989 void G1GC<LanguageConfig>::UpdatePreWrbEntrypointInThreads()
990 {
991     ObjRefProcessFunc entrypointFunc = nullptr;
992     if constexpr (ENABLE_BARRIER) {
993         auto addr = this->GetBarrierSet()->GetBarrierOperand(ark::mem::BarrierPosition::BARRIER_POSITION_PRE,
994                                                              "STORE_IN_BUFF_TO_MARK_FUNC");
995         entrypointFunc = std::get<ObjRefProcessFunc>(addr.GetValue());
996     }
997     auto setEntrypoint = [this, &entrypointFunc](ManagedThread *thread) {
998         void *entrypointFuncUntyped = reinterpret_cast<void *>(entrypointFunc);
999         ASSERT(thread->GetPreWrbEntrypoint() != entrypointFuncUntyped);
1000         thread->SetPreWrbEntrypoint(entrypointFuncUntyped);
1001 
1002         // currentPreWrbEntrypoint_ is not required to be set multiple times, but this has to be done under
1003         // EnumerateThreads()'s lock, hence the repetition
1004         currentPreWrbEntrypoint_ = entrypointFunc;
1005         return true;
1006     };
1007     this->GetPandaVm()->GetThreadManager()->EnumerateThreads(setEntrypoint);
1008 }
1009 
1010 template <class LanguageConfig>
1011 void G1GC<LanguageConfig>::EnsurePreWrbDisabledInThreads()
1012 {
1013     [[maybe_unused]] auto callback = [](ManagedThread *thread) { return thread->GetPreWrbEntrypoint() == nullptr; };
1014     ASSERT(this->GetPandaVm()->GetThreadManager()->EnumerateThreads(callback));
1015 }
1016 
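// Concurrent collection flow: enable the pre-write barrier, do the initial mark on a pause, run concurrent
// marking, then (unless interrupted) remark on a pause, pick the top garbage regions to enable a future
// mixed GC, and finish with a concurrent sweep of empty regions. If marking was interrupted, the marking
// stack and SATB buffers are simply cleared.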
1017 template <class LanguageConfig>
1018 template <typename OnPauseMarker, typename ConcurrentMarker>
1019 void G1GC<LanguageConfig>::RunConcurrentGC(ark::GCTask &task, OnPauseMarker &pmarker, ConcurrentMarker &cmarker)
1020 {
1021     ASSERT(collectionSet_.empty());
1022     // Init concurrent marking
1023     EnablePreWrbInThreads();
1024 
1025     if (this->GetSettings()->BeforeG1ConcurrentHeapVerification()) {
1026         VerifyHeapBeforeConcurrent();
1027     }
1028 
1029     bool refProcess = task.reason == GCTaskCause::NATIVE_ALLOC_CAUSE || task.reason == GCTaskCause::CROSSREF_CAUSE;
1030     {
1031         PauseTimeGoalDelay();
1032         auto scopedTracker = g1PauseTracker_.CreateScope();
1033         GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats(), nullptr, PauseTypeStats::COMMON_PAUSE);
1034         if (UNLIKELY(refProcess)) {
1035             InitialMark<true>(concurrentMarkingStack_, pmarker);
1036         } else {
1037             InitialMark<false>(concurrentMarkingStack_, pmarker);
1038         }
1039     }
1040 
1041     if (UNLIKELY(refProcess)) {
1042         ConcurrentMark<true>(task, &concurrentMarkingStack_, cmarker);
1043     } else {
1044         ConcurrentMark<false>(task, &concurrentMarkingStack_, cmarker);
1045     }
1046     PauseTimeGoalDelay();
1047 
1048     DisablePreWrbInThreads();
1049 
1050     concurrentMarkingFlag_ = false;
1051     if (!interruptConcurrentFlag_) {
1052         if (UNLIKELY(refProcess)) {
1053             Remark<true>(task, pmarker);
1054         } else {
1055             Remark<false>(task, pmarker);
1056         }
1057         // Enable mixed GC
1058         topGarbageRegions_ = GetG1ObjectAllocator()->template GetTopGarbageRegions<false>(regionGarbageRateThreshold_);
1059         if (HaveGarbageRegions()) {
1060             // Atomic with release order reason: to see changes made by the GC thread (which does concurrent marking
1061             // and then sets isMixedGcRequired_) in the mutator thread which waits for the end of concurrent
1062             // marking.
1063             isMixedGcRequired_.store(true, std::memory_order_release);
1064         }
1065 
1066         if (this->GetSettings()->LogDetailedGCInfoEnabled()) {
1067             LOG_INFO_GC << "Old dead obj ratio " << this->GetG1ObjectAllocator()->CalculateOldDeadObjectsRatio();
1068 #ifdef PANDA_MEASURE_FRAGMENTATION
1069             LOG_INFO_GC << "Nonmovable dead obj ratio "
1070                         << this->GetG1ObjectAllocator()->CalculateNonMovableDeadObjectsRatio();
1071 #endif
1072         }
1073 
1074         ConcurrentSweep(task);
1075     } else {
1076         concurrentMarkingStack_.Clear();
1077         ClearSatb();
1078     }
1079     ASSERT(concurrentMarkingStack_.Empty());
1080 }
1081 
1082 template <class LanguageConfig>
1083 void G1GC<LanguageConfig>::ConcurrentSweep(ark::GCTask &task)
1084 {
1085     // NOTE(ipetrov, 20146): A cross reference can be allocated during concurrent sweep, so XGC should handle this
1086     // situation. As a hot fix, XGC does not do concurrent sweep
1087     if (task.reason == GCTaskCause::CROSSREF_CAUSE) {
1088         return;
1089     }
1090     ScopedTiming t("Concurrent Sweep", *this->GetTiming());
1091     ConcurrentScope concurrentScope(this);
1092     auto emptyTenuredRegions = GetEmptyTenuredRegularRegions(topGarbageRegions_);
1093     if (this->IsConcurrencyAllowed()) {
1094         CollectEmptyRegions<true, true>(task, &emptyTenuredRegions);
1095     } else {
1096         CollectEmptyRegions<false, false>(task, &emptyTenuredRegions);
1097     }
1098 }
1099 
1100 template <class LanguageConfig>
1101 bool G1GC<LanguageConfig>::HaveGarbageRegions()
1102 {
1103     return std::find_if(topGarbageRegions_.begin(), topGarbageRegions_.end(), [](const auto &entry) {
1104                return entry.first != DEFAULT_REGION_SIZE && !entry.second->HasPinnedObjects();
1105            }) != topGarbageRegions_.end();
1106 }
1107 
1108 template <class LanguageConfig>
1109 size_t G1GC<LanguageConfig>::GetOldCollectionSetCandidatesNumber()
1110 {
1111     return std::count_if(topGarbageRegions_.begin(), topGarbageRegions_.end(),
1112                          [](const std::pair<uint32_t, Region *> &entry) { return !entry.second->HasPinnedObjects(); });
1113 }
1114 
1115 template <class LanguageConfig>
1116 void G1GC<LanguageConfig>::ProcessDirtyCards()
1117 {
1118     ScopedTiming t(__FUNCTION__, *this->GetTiming());
1119     updateRemsetWorker_->GCProcessCards();
1120 }
1121 
1122 template <class LanguageConfig>
1123 void G1GC<LanguageConfig>::CreateUpdateRemsetWorker()
1124 {
1125     InternalAllocatorPtr allocator = this->GetInternalAllocator();
1126     // Take the lock to make TSAN happy, because we access updatedRefsQueue_ inside the constructor of UpdateRemsetWorker
1127     os::memory::LockHolder lock(queueLock_);
1128     if (this->GetSettings()->UseThreadPoolForGC()) {
1129         updateRemsetWorker_ = allocator->template New<UpdateRemsetThread<LanguageConfig>>(
1130             this, updatedRefsQueue_, &queueLock_, this->GetG1ObjectAllocator()->GetRegionSize(),
1131             this->GetSettings()->G1EnableConcurrentUpdateRemset(), this->GetSettings()->G1MinConcurrentCardsToProcess(),
1132             this->GetSettings()->G1HotCardsProcessingFrequency());
1133     } else {
1134         ASSERT(this->GetSettings()->UseTaskManagerForGC());
1135         updateRemsetWorker_ = allocator->template New<UpdateRemsetTaskQueue<LanguageConfig>>(
1136             this, updatedRefsQueue_, &queueLock_, this->GetG1ObjectAllocator()->GetRegionSize(),
1137             this->GetSettings()->G1EnableConcurrentUpdateRemset(), this->GetSettings()->G1MinConcurrentCardsToProcess(),
1138             this->GetSettings()->G1HotCardsProcessingFrequency());
1139     }
1140     ASSERT(updateRemsetWorker_ != nullptr);
1141 }
1142 
1143 template <class LanguageConfig>
1144 void G1GC<LanguageConfig>::InitializeImpl()
1145 {
1146     // GC saved the PandaVM instance, so we get the allocator from the PandaVM.
1147     InternalAllocatorPtr allocator = this->GetInternalAllocator();
1148     this->CreateCardTable(allocator, PoolManager::GetMmapMemPool()->GetMinObjectAddress(),
1149                           PoolManager::GetMmapMemPool()->GetTotalObjectSize());
1150 
1151     auto barrierSet =
1152         allocator->New<GCG1BarrierSet>(allocator, &PreWrbFuncEntrypoint, &PostWrbUpdateCardFuncEntrypoint,
1153                                        ark::helpers::math::GetIntLog2(this->GetG1ObjectAllocator()->GetRegionSize()),
1154                                        this->GetCardTable(), updatedRefsQueue_, &queueLock_);
1155     ASSERT(barrierSet != nullptr);
1156     this->SetGCBarrierSet(barrierSet);
1157 
1158     this->CreateWorkersTaskPool();
1159     CreateUpdateRemsetWorker();
1160     LOG_DEBUG_GC << "G1GC initialized";
1161 }
1162 
1163 template <class LanguageConfig>
1164 void G1GC<LanguageConfig>::MarkObject(ObjectHeader *object)
1165 {
1166     G1GCMarker<LanguageConfig, true>::Mark(object);
1167 }
1168 
1169 template <class LanguageConfig>
1170 bool G1GC<LanguageConfig>::MarkObjectIfNotMarked(ObjectHeader *object)
1171 {
1172     ASSERT(object != nullptr);
1173     if (this->GetGCPhase() == GCPhase::GC_PHASE_MARK_YOUNG) {
1174         return mixedMarker_.MarkIfNotMarked(object);
1175     }
1176     return marker_.MarkIfNotMarked(object);
1177 }
1178 
1179 template <class LanguageConfig>
1180 void G1GC<LanguageConfig>::MarkObjectRecursively(ObjectHeader *object)
1181 {
1182     ASSERT(object != nullptr);
1183     ASSERT(this->GetLastGCCause() == GCTaskCause::CROSSREF_CAUSE);
1184     [[maybe_unused]] auto phase = this->GetGCPhase();  // Load phase once to avoid false-positive assertion failure
1185     ASSERT_PRINT(phase == GCPhase::GC_PHASE_RUNNING || phase == GCPhase::GC_PHASE_INITIAL_MARK ||
1186                      phase == GCPhase::GC_PHASE_MARK || phase == GCPhase::GC_PHASE_REMARK,
1187                  GCScopedPhase::GetPhaseAbbr(phase));
1188     if (concXMarker_.MarkIfNotMarked(object)) {
1189         GCMarkingStackType stack(this);
1190         stack.PushToStack(RootType::ROOT_VM, object);
1191         this->MarkStack(&concXMarker_, &stack, CalcLiveBytesMarkPreprocess<true>);
1192     } else {
1193         LOG_DEBUG_GC << "Skip object: " << object << " since it is already marked";
1194     }
1195 }
1196 
1197 template <class LanguageConfig>
1198 void G1GC<LanguageConfig>::InitGCBitsForAllocationInTLAB([[maybe_unused]] ark::ObjectHeader *object)
1199 {
1200     LOG(FATAL, GC) << "Not implemented";
1201 }
1202 
1203 template <class LanguageConfig>
1204 bool G1GC<LanguageConfig>::IsMarked(ark::ObjectHeader const *object) const
1205 {
1206     return G1GCMarker<LanguageConfig, true>::IsMarked(object);
1207 }
1208 
1209 template <class LanguageConfig>
1210 bool G1GC<LanguageConfig>::IsMarkedEx(ark::ObjectHeader const *object) const
1211 {
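         // Single-pass compaction does not use mark bits: an object whose mark word is already forwarded
         // has been evacuated, so it is treated as marked (alive) here.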
1212     if (singlePassCompactionEnabled_) {
1213         return object->AtomicGetMark(std::memory_order_relaxed).IsForwarded();
1214     }
1215     return IsMarked(object);
1216 }
1217 
1218 template <class LanguageConfig>
1219 void G1GC<LanguageConfig>::MarkStackMixed(GCMarkingStackType *stack)
1220 {
1221     ASSERT(stack != nullptr);
1222     trace::ScopedTrace scopedTrace(__FUNCTION__);
1223     auto refPred = [this](const ObjectHeader *obj) { return InGCSweepRange(obj); };
1224     auto visitor = [this, stack, &refPred](const ObjectHeader *object) {
1225         ASSERT(mixedMarker_.IsMarked(object));
1226         ValidateObject(nullptr, object);
1227         auto *objectClass = object->template ClassAddr<BaseClass>();
1228         // We need annotation here for the FullMemoryBarrier used in InitializeClassByIdEntrypoint
1229         TSAN_ANNOTATE_HAPPENS_AFTER(objectClass);
1230         LOG_DEBUG_GC << "Current object: " << GetDebugInfoAboutObject(object);
1231 
1232         ASSERT(!object->IsForwarded());
1233         ASSERT(InGCSweepRange(object));
1234         CalcLiveBytesMarkPreprocess(object, objectClass);
1235         mixedMarker_.MarkInstance(stack, object, objectClass, refPred);
1236     };
1237     {
1238         auto markedObjects = stack->MarkObjects(visitor);
1239         os::memory::LockHolder lh(mixedMarkedObjectsMutex_);
1240         if (mixedMarkedObjects_.empty()) {
1241             mixedMarkedObjects_ = std::move(markedObjects);
1242         } else {
1243             mixedMarkedObjects_.insert(mixedMarkedObjects_.end(), markedObjects.begin(), markedObjects.end());
1244         }
1245     }
1246 }
1247 
1248 template <class LanguageConfig>
1249 void G1GC<LanguageConfig>::MarkStackFull(GCMarkingStackType *stack)
1250 {
1251     this->MarkStack(&marker_, stack, CalcLiveBytesMarkPreprocess<true>, GC::EmptyReferenceProcessPredicate);
1252 }
1253 
1254 template <class LanguageConfig>
1255 void G1GC<LanguageConfig>::MarkReferences(GCMarkingStackType *references, GCPhase gcPhase)
1256 {
1257     trace::ScopedTrace scopedTrace(__FUNCTION__);
1258     LOG_DEBUG_GC << "Start marking " << references->Size() << " references";
1259     // Mark refs only on mixed GC and on full GC. During concurrent marking we don't handle any references
1260     if (gcPhase == GCPhase::GC_PHASE_MARK_YOUNG) {
1261         MarkStackMixed(references);
1262     } else if (this->IsFullGC()) {
1263         MarkStackFull(references);
1264     } else if (gcPhase == GCPhase::GC_PHASE_INITIAL_MARK || gcPhase == GCPhase::GC_PHASE_MARK ||
1265                gcPhase == GCPhase::GC_PHASE_REMARK) {
1266         // nothing
1267     } else {
1268         LOG_DEBUG_GC << "phase: " << GCScopedPhase::GetPhaseName(gcPhase);
1269         UNREACHABLE();
1270     }
1271 }
1272 
1273 template <class LanguageConfig>
1274 bool G1GC<LanguageConfig>::InGCSweepRange(const ObjectHeader *object) const
1275 {
1276     ASSERT_DO(!this->collectionSet_.empty() || this->IsFullGC(),
1277               std::cerr << "Incorrect phase in InGCSweepRange: " << static_cast<size_t>(this->GetGCPhase()) << "\n");
1278     ASSERT(IsHeapSpace(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(object)));
1279     Region *objRegion = ObjectToRegion(object);
1280     return objRegion->IsInCollectionSet();
1281 }
1282 
1283 static bool RemsetRegionPredicate(const Region *r)
1284 {
1285     // In case of mixed GC don't process remsets of the tenured regions which are in the collection set
1286     return !r->HasFlag(IS_COLLECTION_SET);
1287 }
1288 
1289 template <class LanguageConfig>
1290 void G1GC<LanguageConfig>::CollectInSinglePass(const GCTask &task)
1291 {
1292     ScopedTiming t(__FUNCTION__, *this->GetTiming());
1293     RemSet<> remset;
1294     MergeRemSet(&remset);
1295 
1296     for (auto *region : remset.GetDirtyRegions()) {
1297         // MarkBitmap is used instead of LiveBitmap in ScanRemset. See comment there.
1298         region->GetLiveBitmap()->CopyTo(region->GetMarkBitmap());
1299     }
1300 
1301     size_t allocatedBytesYoung = 0;
1302     size_t allocatedBytesOld = 0;
1303     for (auto *region : collectionSet_) {
1304         auto allocated = region->GetAllocatedBytes();
1305         if (region->HasFlag(RegionFlag::IS_EDEN)) {
1306             allocatedBytesYoung += allocated;
1307         } else {
1308             allocatedBytesOld += allocated;
1309         }
1310     }
1311 
1312     copiedBytesYoung_ = 0;
1313     copiedBytesOld_ = 0;
1314 
1315     EvacuateCollectionSet(remset);
1316 
1317     this->CommonUpdateRefsToMovedObjects();
1318     HandleReferences(task);
1319     ActualizeRemSets();
1320 
1321     analytics_.ReportEvacuatedBytes(copiedBytesYoung_);
1322 
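         // Everything allocated in the collection set that was not copied out is garbage,
         // so (allocated - copied) is recorded as freed below.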
1323     this->memStats_.template RecordSizeMovedYoung<false>(copiedBytesYoung_);
1324     this->memStats_.template RecordSizeMovedTenured<false>(copiedBytesOld_);
1325     this->memStats_.template RecordSizeFreedYoung<false>(allocatedBytesYoung - copiedBytesYoung_);
1326     this->memStats_.template RecordSizeFreedTenured<false>(allocatedBytesOld - copiedBytesOld_);
1327 
1328     updatedRefsQueue_->insert(updatedRefsQueue_->end(), updatedRefsQueueTemp_->begin(), updatedRefsQueueTemp_->end());
1329     updatedRefsQueueTemp_->clear();
1330 
1331     auto gcRootUpdaterCallback = [](ObjectHeader **object) {
1332         if ((*object)->IsForwarded()) {
1333             *object = GetForwardAddress(*object);
1334             return true;
1335         }
1336         return false;
1337     };
1338 
1339     this->GetPandaVm()->UpdateMovedStrings(gcRootUpdaterCallback);
1340     SweepRegularVmRefs();
1341 
1342     ResetRegionAfterMixedGC();
1343 }
1344 
1345 template <class LanguageConfig>
1346 void G1GC<LanguageConfig>::EvacuateCollectionSet(const RemSet<> &remset)
1347 {
1348     auto useGcWorkers = this->GetSettings()->ParallelCompactingEnabled();
1349     GCEvacuateRegionsTaskStack<Ref> refStack(this, useGcWorkers ? this->GetSettings()->GCRootMarkingStackMaxSize() : 0,
1350                                              useGcWorkers ? this->GetSettings()->GCWorkersMarkingStackMaxSize() : 0,
1351                                              GCWorkersTaskTypes::TASK_REGION_COMPACTING,
1352                                              this->GetSettings()->GCMarkingStackNewTasksFrequency());
1353     G1EvacuateRegionsWorkerState<LanguageConfig> state(this, &refStack);
1354     state.EvacuateNonHeapRoots();
1355 
1356     auto startProcessing = ark::time::GetCurrentTimeInNanos();
1357     auto remsetSize = state.ScanRemset(remset);
1358     auto startEvacuation = ark::time::GetCurrentTimeInNanos();
1359     auto scanRemsetTime = startEvacuation - startProcessing;
1360     analytics_.ReportScanRemsetTime(remsetSize, scanRemsetTime);
1361 
1362     analytics_.ReportEvacuationStart(startEvacuation);
1363     state.EvacuateLiveObjects();
1364 
1365     ASSERT(refStack.Empty());
1366     if (useGcWorkers) {
1367         this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1368     }
1369     analytics_.ReportEvacuationEnd(ark::time::GetCurrentTimeInNanos());
1370 }
1371 
1372 template <class LanguageConfig>
1373 void G1GC<LanguageConfig>::MergeRemSet(RemSet<> *remset)
1374 {
1375     ScopedTiming t(__FUNCTION__, *this->GetTiming());
1376     for (auto *region : collectionSet_) {
1377         remset->Merge(region->GetRemSet());
1378     }
1379 
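     // MEM_SIZE is the remset bitmap granularity (region size divided by the number of bitmap bits);
     // each dirty card's memory range is registered in the remset in steps of this size in the loop below.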
1380     constexpr size_t MEM_SIZE = DEFAULT_REGION_SIZE / RemSet<>::Bitmap::GetNumBits();
1381     auto *cardTable = this->GetCardTable();
1382     for (auto *card : dirtyCards_) {
1383         auto range = cardTable->GetMemoryRange(card);
1384         auto addr = range.GetStartAddress();
1385         auto region = ark::mem::AddrToRegion(ToVoidPtr(addr));
1386         if (!RemsetRegionPredicate(region)) {
1387             // Skip cards which correspond to regions in the collection set because live objects in collection set are
1388             // traversed during evacuation anyway.
1389             continue;
1390         }
1391         auto endAddr = range.GetEndAddress();
1392         while (addr < endAddr) {
1393             remset->AddRef(ToVoidPtr(addr));
1394             addr += MEM_SIZE;
1395         }
1396     }
1397 
1398     // All dirty cards which do not correspond to regions in the collection set are processed and reenqueued in case of
1399     // cross region references during evacuation, see EvacuationObjectPointerHandler::ProcessObjectPointer
1400     dirtyCards_.clear();
1401 }
1402 
1403 template <class LanguageConfig>
1404 void G1GC<LanguageConfig>::HandleReferences([[maybe_unused]] const GCTask &task)
1405 {
1406     ScopedTiming t(__FUNCTION__, *this->GetTiming());
1407     auto refClearPred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
1408     this->ProcessReferences(refClearPred);
1409     this->GetPandaVm()->GetGlobalObjectStorage()->ClearWeakRefs(refClearPred);
1410     ProcessDirtyCards();
1411 }
1412 
1413 template <class LanguageConfig>
1414 void G1GC<LanguageConfig>::EvacuateStartingWith(void *ref)
1415 {
1416     GCEvacuateRegionsTaskStack<Ref> refStack(this);
1417     G1EvacuateRegionsWorkerState<LanguageConfig> state(this, &refStack);
1418     state.PushToQueue(reinterpret_cast<Ref>(ref));
1419     state.EvacuateLiveObjects();
1420     ASSERT(refStack.Empty());
1421 }
1422 
1423 template <class LanguageConfig>
1424 void G1GC<LanguageConfig>::ResetRegionAfterMixedGC()
1425 {
1426     auto *objectAllocator = this->GetG1ObjectAllocator();
1427     if (!collectionSet_.Young().empty()) {
1428         objectAllocator->ResetYoungAllocator();
1429     }
1430     {
1431         GCScope<TRACE_TIMING> resetRegions("ResetRegions", this);
1432         objectAllocator->template ResetRegions<RegionFlag::IS_OLD, RegionSpace::ReleaseRegionsPolicy::NoRelease,
1433                                                OSPagesPolicy::IMMEDIATE_RETURN, false>(collectionSet_.Tenured());
1434     }
1435 }
1436 
1437 template <class LanguageConfig>
1438 void G1GC<LanguageConfig>::RunGC(GCTask &task, const CollectionSet &collectibleRegions)
1439 {
1440     ASSERT(!this->IsFullGC());
1441     GCScope<TRACE_TIMING> scopedTrace(__FUNCTION__, this);
1442     LOG_DEBUG_GC << "GC start";
1443     uint64_t youngPauseTime;
1444     {
1445         time::Timer timer(&youngPauseTime, true);
1446         singlePassCompactionEnabled_ = SinglePassCompactionAvailable();
1447         if (singlePassCompactionEnabled_) {
1448             CollectInSinglePass(task);
1449         } else {
1450             if (fullCollectionSetPromotion_) {
1451                 FastYoungMark(collectibleRegions);
1452             } else {
1453                 MixedMarkAndCacheRefs(task, collectibleRegions);
1454             }
1455             ClearYoungCards(collectibleRegions);
1456             ClearTenuredCards(collectibleRegions);
1457             CollectAndMove<false>(collectibleRegions);
1458         }
1459         analytics_.ReportSurvivedBytesRatio(collectibleRegions);
1460         ClearRefsFromRemsetsCache();
1461         this->GetObjectGenAllocator()->InvalidateSpaceData();
1462     }
1463     if (youngPauseTime > 0) {
1464         this->GetStats()->AddTimeValue(youngPauseTime, TimeTypeStats::YOUNG_PAUSED_TIME);
1465     }
1466     LOG_DEBUG_GC << "G1GC RunGC end";
1467 }
1468 
1469 template <class LanguageConfig>
1470 bool G1GC<LanguageConfig>::SinglePassCompactionAvailable()
1471 {
1472     if (!this->GetSettings()->G1SinglePassCompactionEnabled()) {
1473         return false;
1474     }
1475 
1476     if (!this->GetPandaVm()->SupportGCSinglePassCompaction()) {
1477         return false;
1478     }
1479 
1480     if (g1PromotionRegionAliveRate_ <= 0) {
1481         return false;
1482     }
1483 
1484     if (fullCollectionSetPromotion_) {
1485         return false;
1486     }
1487 
1488     for (auto *region : collectionSet_) {
1489         if (region->HasPinnedObjects()) {
1490             return false;
1491         }
1492     }
1493 
1494     auto predictedSurvivedBytesRatio = analytics_.PredictSurvivedBytesRatio();
1495     if (predictedSurvivedBytesRatio == 0) {
1496         // there are no statistics yet, start with a GC which is able to promote whole regions
1497         return false;
1498     }
1499 
1500     // use single-pass collection for a low survival ratio
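     // For example (hypothetical setting): with g1PromotionRegionAliveRate_ == 75, single-pass
     // compaction is chosen while the predicted survived bytes ratio stays below 0.75.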
1501     return predictedSurvivedBytesRatio * PERCENT_100_D < g1PromotionRegionAliveRate_;
1502 }
1503 
1504 template <class LanguageConfig>
1505 void G1GC<LanguageConfig>::MixedMarkAndCacheRefs(const GCTask &task, const CollectionSet &collectibleRegions)
1506 {
1507     GCScope<TRACE_TIMING_PHASE> scopedTrace(__FUNCTION__, this, GCPhase::GC_PHASE_MARK_YOUNG);
1508     bool useGcWorkers = this->GetSettings()->ParallelMarkingEnabled();
1509     GCMarkingStackType objectsStack(this, useGcWorkers ? this->GetSettings()->GCRootMarkingStackMaxSize() : 0,
1510                                     useGcWorkers ? this->GetSettings()->GCWorkersMarkingStackMaxSize() : 0,
1511                                     GCWorkersTaskTypes::TASK_MARKING,
1512                                     this->GetSettings()->GCMarkingStackNewTasksFrequency());
1513     for (Region *region : collectibleRegions) {
1514         region->GetMarkBitmap()->ClearAllBits();
1515         // Calculate live bytes during marking phase
1516         region->SetLiveBytes(0U);
1517     }
1518     ASSERT(this->GetReferenceProcessor()->GetReferenceQueueSize() ==
1519            0);  // all references should have been processed during the previous GC
1520     // Iterate over roots and add other roots
1521     // 0. Pre-process refs queue and fill RemSets (should be done later in background)
1522     // Note: We need to process only tenured -> young refs,
1523     // since we reach the young objects via the object graph from tenured roots,
1524     // and because all young regions are processed at young GC, all required references will be found
1525     RefCacheBuilder<LanguageConfig> builder(this, &uniqueRefsFromRemsets_, regionSizeBits_, &objectsStack);
1526     auto refsChecker = [this, &builder](Region *region, const MemRange &memRange) {
1527         IterateOverRefsInMemRange(memRange, region, builder);
1528         return false;
1529     };
1530 
1531     analytics_.ReportMarkingStart(ark::time::GetCurrentTimeInNanos());
1532     CacheRefsFromRemsets(refsChecker);
1533 
1534     GCRootVisitor gcMarkCollectionSet = CreateGCRootVisitor(
1535         objectsStack, mixedMarker_, [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); });
1536 
1537     {
1538         GCScope<TRACE_TIMING> markingCollectionSetRootsTrace("Marking roots collection-set", this);
1539 
1540         this->VisitRoots(gcMarkCollectionSet, VisitGCRootFlags::ACCESS_ROOT_NONE);
1541     }
1542     {
1543         GCScope<TRACE_TIMING> markStackTiming("MarkStack", this);
1544         this->MarkStackMixed(&objectsStack);
1545         ASSERT(objectsStack.Empty());
1546         if (useGcWorkers) {
1547             GCScope<TRACE_TIMING> waitingTiming("WaitUntilTasksEnd", this);
1548             this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1549         }
1550     }
1551 
1552     auto refClearPred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
1553     this->GetPandaVm()->HandleReferences(task, refClearPred);
1554 
1555     analytics_.ReportMarkingEnd(ark::time::GetCurrentTimeInNanos(), GetUniqueRemsetRefsCount());
1556 
1557     // HandleReferences could write new barriers, so we need to handle them before moving
1558     ProcessDirtyCards();
1559 }
1560 
1561 template <class LanguageConfig>
1562 void G1GC<LanguageConfig>::FastYoungMark(const CollectionSet &collectibleRegions)
1563 {
1564     // There should be only young regions in the collection set
1565     ASSERT(collectibleRegions.Tenured().empty());
1566     auto allocator = this->GetInternalAllocator();
1567     PandaVector<Region *> youngRegions(collectibleRegions.Young().begin(), collectibleRegions.Young().end());
1568     GCMarkingStackType::MarkedObjects markedObjects(youngRegions.size(), nullptr);
1569     for (size_t idx = 0; idx < youngRegions.size(); ++idx) {
1570         auto *region = youngRegions[idx];
1571         region->SetLiveBytes(0U);
1572         markedObjects[idx] = allocator->template New<PandaDeque<ObjectHeader *>>(allocator->Adapter());
1573         auto *markedObjDeque = markedObjects[idx];
1574         ASSERT(markedObjDeque != nullptr);
1575         GCMarkWholeRegionTask gcWorkerTask(region, markedObjDeque);
1576         bool useGcWorkers = this->GetSettings()->ParallelMarkingEnabled();
1577         if (useGcWorkers && this->GetWorkersTaskPool()->AddTask(GCMarkWholeRegionTask(gcWorkerTask))) {
1578             continue;
1579         }
1580         // Couldn't add new task, so do task processing immediately
1581         this->WorkerTaskProcessing(&gcWorkerTask, nullptr);
1582     }
1583 
1584     RefCacheBuilder<LanguageConfig, true> builder(this, &uniqueRefsFromRemsets_, regionSizeBits_, nullptr);
1585     auto refsChecker = [this, &builder](Region *region, const MemRange &memRange) {
1586         IterateOverRefsInMemRange(memRange, region, builder);
1587         return false;
1588     };
1589     CacheRefsFromRemsets(refsChecker);
1590 
1591     if (mixedMarkedObjects_.empty()) {
1592         mixedMarkedObjects_ = std::move(markedObjects);
1593     } else {
1594         mixedMarkedObjects_.insert(mixedMarkedObjects_.end(), markedObjects.begin(), markedObjects.end());
1595     }
1596 }
1597 
1598 template <class LanguageConfig>
1599 template <typename Marker, typename Predicate>
1600 GCRootVisitor G1GC<LanguageConfig>::CreateGCRootVisitor(GCMarkingStackType &objectsStack, Marker &marker,
1601                                                         const Predicate &refPred)
1602 {
1603     GCRootVisitor gcMarkCollectionSet = [&objectsStack, &marker, &refPred, this](const GCRoot &gcRoot) {
1604         ObjectHeader *rootObject = gcRoot.GetObjectHeader();
1605         ObjectHeader *fromObject = gcRoot.GetFromObjectHeader();
1606         LOG_DEBUG_GC << "Handle root " << GetDebugInfoAboutObject(rootObject) << " from: " << gcRoot.GetType();
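             // If the root comes from an object that IsReference() recognizes (given refPred), the from-object
             // is marked and routed to reference processing; otherwise the root object itself is marked
             // (if not yet marked) and pushed onto the marking stack.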
1607         if (UNLIKELY(fromObject != nullptr) &&
1608             this->IsReference(fromObject->NotAtomicClassAddr<BaseClass>(), fromObject, refPred)) {
1609             LOG_DEBUG_GC << "Add reference: " << GetDebugInfoAboutObject(fromObject) << " to stack";
1610             marker.Mark(fromObject);
1611             this->ProcessReference(&objectsStack, fromObject->NotAtomicClassAddr<BaseClass>(), fromObject,
1612                                    GC::EmptyReferenceProcessPredicate);
1613         } else {
1614             // Skip non-collection-set roots
1615             auto rootObjectPtr = gcRoot.GetObjectHeader();
1616             ASSERT(rootObjectPtr != nullptr);
1617             if (marker.MarkIfNotMarked(rootObjectPtr)) {
1618                 LOG_DEBUG_GC << "root " << GetDebugInfoAboutObject(rootObjectPtr);
1619                 objectsStack.PushToStack(gcRoot.GetType(), rootObjectPtr);
1620             } else {
1621                 LOG_DEBUG_GC << "Skip root: " << std::hex << rootObjectPtr;
1622             }
1623         }
1624     };
1625 
1626     return gcMarkCollectionSet;
1627 }
1628 
1629 template <class LanguageConfig>
1630 HeapVerifierIntoGC<LanguageConfig> G1GC<LanguageConfig>::CollectVerificationInfo(const CollectionSet &collectionSet)
1631 {
1632     HeapVerifierIntoGC<LanguageConfig> collectVerifier(this->GetPandaVm()->GetHeapManager());
1633     if (this->GetSettings()->IntoGCHeapVerification()) {
1634         ScopedTiming collectVerificationTiming(__FUNCTION__, *this->GetTiming());
1635         PandaVector<MemRange> memRanges;
1636         memRanges.reserve(collectionSet.size());
1637         std::for_each(collectionSet.begin(), collectionSet.end(),
1638                       [&memRanges](const Region *region) { memRanges.emplace_back(region->Begin(), region->End()); });
1639         collectVerifier.CollectVerificationInfo(std::move(memRanges));
1640     }
1641     return collectVerifier;
1642 }
1643 
1644 template <class LanguageConfig>
1645 void G1GC<LanguageConfig>::VerifyCollectAndMove(HeapVerifierIntoGC<LanguageConfig> &&collectVerifier,
1646                                                 const CollectionSet &collectionSet)
1647 {
1648     if (this->GetSettings()->IntoGCHeapVerification()) {
1649         ScopedTiming verificationTiming(__FUNCTION__, *this->GetTiming());
1650         PandaVector<MemRange> aliveMemRange;
1651         std::for_each(collectionSet.begin(), collectionSet.end(), [&aliveMemRange](const Region *region) {
1652             if (region->HasFlag(RegionFlag::IS_PROMOTED)) {
1653                 aliveMemRange.emplace_back(region->Begin(), region->End());
1654             }
1655         });
1656         size_t failsCount = collectVerifier.VerifyAll(std::move(aliveMemRange));
1657         if (this->GetSettings()->FailOnHeapVerification() && failsCount > 0U) {
1658             PandaStringStream logStream;
1659             logStream << "Collection set size: " << collectionSet.size() << "\n";
1660             for (const auto r : collectionSet) {
1661                 logStream << *r << (r->HasFlag(RegionFlag::IS_PROMOTED) ? " was promoted\n" : "\n");
1662             }
1663             LOG(FATAL, GC) << "Heap was corrupted during CollectAndMove GC phase, HeapVerifier found " << failsCount
1664                            << " corruptions\n"
1665                            << logStream.str();
1666         }
1667     }
1668 }
1669 
1670 template <class LanguageConfig>
1671 template <bool FULL_GC>
1672 void G1GC<LanguageConfig>::UpdateRefsAndClear(const CollectionSet &collectionSet,
1673                                               MovedObjectsContainer<FULL_GC> *movedObjectsContainer,
1674                                               PandaVector<PandaVector<ObjectHeader *> *> *movedObjectsVector,
1675                                               HeapVerifierIntoGC<LanguageConfig> *collectVerifier)
1676 {
1677     {
1678         os::memory::LockHolder lock(queueLock_);
1679         analytics_.ReportUpdateRefsStart(ark::time::GetCurrentTimeInNanos());
1680         if (this->GetSettings()->ParallelRefUpdatingEnabled()) {
1681             UpdateRefsToMovedObjects<FULL_GC, true>(movedObjectsContainer);
1682         } else {
1683             UpdateRefsToMovedObjects<FULL_GC, false>(movedObjectsContainer);
1684         }
1685         analytics_.ReportUpdateRefsEnd(ark::time::GetCurrentTimeInNanos());
1686         ActualizeRemSets();
1687     }
1688 
1689     VerifyCollectAndMove(std::move(*collectVerifier), collectionSet);
1690     SweepRegularVmRefs();
1691 
1692     auto objectAllocator = this->GetG1ObjectAllocator();
1693     if (!collectionSet.Young().empty()) {
1694         objectAllocator->ResetYoungAllocator();
1695     }
1696     {
1697         GCScope<TRACE_TIMING> resetRegions("ResetRegions", this);
1698         if (!this->IsFullGC()) {
1699             objectAllocator->template ResetRegions<RegionFlag::IS_OLD, RegionSpace::ReleaseRegionsPolicy::NoRelease,
1700                                                    OSPagesPolicy::IMMEDIATE_RETURN, false>(collectionSet.Tenured());
1701         } else {
1702             objectAllocator->template ResetRegions<RegionFlag::IS_OLD, RegionSpace::ReleaseRegionsPolicy::Release,
1703                                                    OSPagesPolicy::NO_RETURN, false>(collectionSet.Tenured());
1704         }
1705     }
1706     {
1707         // Don't forget to delete all temporary elements
1708         GCScope<TRACE_TIMING> clearMovedObjects("ClearMovedObjects", this);
1709         auto internalAllocator = this->GetInternalAllocator();
1710         if constexpr (FULL_GC) {
1711             bool useGcWorkers = this->GetSettings()->ParallelCompactingEnabled();
1712             if (useGcWorkers) {
1713                 for (auto r : *movedObjectsVector) {
1714                     internalAllocator->Delete(r);
1715                 }
1716             } else {
1717                 ASSERT(movedObjectsVector->size() == 1);
1718                 internalAllocator->Delete(movedObjectsVector->back());
1719             }
1720         } else {
1721             for (auto r : mixedMarkedObjects_) {
1722                 internalAllocator->Delete(r);
1723             }
1724             mixedMarkedObjects_.clear();
1725         }
1726     }
1727 }
1728 
1729 template <class LanguageConfig>
1730 template <bool FULL_GC>
1731 // NOLINTNEXTLINE(readability-function-size)
1732 bool G1GC<LanguageConfig>::CollectAndMove(const CollectionSet &collectionSet)
1733 {
1734     GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_COLLECT_YOUNG_AND_MOVE);
1735     LOG_DEBUG_GC << "== G1GC CollectAndMove start ==";
1736     auto internalAllocator = this->GetInternalAllocator();
1737     bool useGcWorkers = this->GetSettings()->ParallelCompactingEnabled();
1738 
1739     PandaVector<PandaVector<ObjectHeader *> *> movedObjectsVector;
1740     HeapVerifierIntoGC<LanguageConfig> collectVerifier = this->CollectVerificationInfo(collectionSet);
1741     {
1742         GCScope<TRACE_TIMING> compactRegions("CompactRegions", this);
1743         analytics_.ReportEvacuationStart(ark::time::GetCurrentTimeInNanos());
1744         if constexpr (FULL_GC) {
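             // Without GC workers all moved objects are collected into a single vector
             // (see the matching ASSERT in UpdateRefsAndClear).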
1745             if (!useGcWorkers) {
1746                 auto vector = internalAllocator->template New<PandaVector<ObjectHeader *>>();
1747                 movedObjectsVector.push_back(vector);
1748             }
1749         }
1750         for (auto r : collectionSet.Young()) {
1751             this->DoRegionCompacting<RegionFlag::IS_EDEN, FULL_GC>(r, useGcWorkers, &movedObjectsVector);
1752         }
1753         for (auto r : collectionSet.Tenured()) {
1754             this->DoRegionCompacting<RegionFlag::IS_OLD, FULL_GC>(r, useGcWorkers, &movedObjectsVector);
1755         }
1756 
1757         if (useGcWorkers) {
1758             this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1759         }
1760 
1761         analytics_.ReportEvacuationEnd(ark::time::GetCurrentTimeInNanos());
1762     }
1763 
1764     MovedObjectsContainer<FULL_GC> *movedObjectsContainer = nullptr;
1765     if constexpr (FULL_GC) {
1766         movedObjectsContainer = &movedObjectsVector;
1767     } else {
1768         movedObjectsContainer = &mixedMarkedObjects_;
1769     }
1770 
1771     UpdateRefsAndClear<FULL_GC>(collectionSet, movedObjectsContainer, &movedObjectsVector, &collectVerifier);
1772 
1773     LOG_DEBUG_GC << "== G1GC CollectAndMove end ==";
1774     return true;
1775 }
1776 
1777 template <class LanguageConfig>
1778 template <bool FULL_GC, bool NEED_LOCK>
1779 // CC-OFFNXT(G.FMT.10-CPP) project code style
1780 std::conditional_t<FULL_GC, UpdateRemsetRefUpdater<LanguageConfig, NEED_LOCK>, EnqueueRemsetRefUpdater<LanguageConfig>>
1781 G1GC<LanguageConfig>::CreateRefUpdater([[maybe_unused]] GCG1BarrierSet::ThreadLocalCardQueues *updatedRefQueue) const
1782 {
1783     if constexpr (FULL_GC) {
1784         return UpdateRemsetRefUpdater<LanguageConfig, NEED_LOCK>(regionSizeBits_);
1785     } else {
1786         return EnqueueRemsetRefUpdater<LanguageConfig>(this->GetCardTable(), updatedRefQueue, regionSizeBits_);
1787     }
1788 }
1789 
1790 template <class LanguageConfig>
1791 template <class ObjectsContainer>
1792 void G1GC<LanguageConfig>::ProcessMovedObjects(ObjectsContainer *movedObjects)
1793 {
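     // Split movedObjects into ranges of RANGE_SIZE and submit each range as a GC worker task;
     // if a task cannot be enqueued, the range is processed immediately on the current thread.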
1794     auto rangeBegin = movedObjects->begin();
1795     auto rangeEnd = rangeBegin;
1796     while (rangeBegin != movedObjects->end()) {
1797         if (std::distance(rangeBegin, movedObjects->end()) < GCUpdateRefsWorkersTask<false>::RANGE_SIZE) {
1798             rangeEnd = movedObjects->end();
1799         } else {
1800             std::advance(rangeEnd, GCUpdateRefsWorkersTask<false>::RANGE_SIZE);
1801         }
1802         auto *movedObjectsRange =
1803             this->GetInternalAllocator()->template New<typename GCUpdateRefsWorkersTask<false>::MovedObjectsRange>(
1804                 rangeBegin, rangeEnd);
1805         rangeBegin = rangeEnd;
1806         GCUpdateRefsWorkersTask<false> gcWorkerTask(movedObjectsRange);
1807         if (this->GetWorkersTaskPool()->AddTask(GCUpdateRefsWorkersTask<false>(gcWorkerTask))) {
1808             continue;
1809         }
1810         // Couldn't add new task, so do task processing immediately
1811         this->WorkerTaskProcessing(&gcWorkerTask, nullptr);
1812     }
1813 }
1814 
1815 template <class LanguageConfig>
1816 template <bool FULL_GC, bool ENABLE_WORKERS, class Visitor>
1817 void G1GC<LanguageConfig>::UpdateMovedObjectsReferences(MovedObjectsContainer<FULL_GC> *movedObjectsContainer,
1818                                                         const Visitor &refUpdater)
1819 {
1820     ScopedTiming t("UpdateMovedObjectsReferences", *this->GetTiming());
1821     ASSERT(movedObjectsContainer != nullptr);
1822     for (auto *movedObjects : *movedObjectsContainer) {
1823         if constexpr (ENABLE_WORKERS) {
1824             ProcessMovedObjects(movedObjects);
1825         } else {  // GC workers are not used
1826             typename GCUpdateRefsWorkersTask<FULL_GC>::MovedObjectsRange movedObjectsRange(movedObjects->begin(),
1827                                                                                            movedObjects->end());
1828             DoUpdateReferencesToMovedObjectsRange<LanguageConfig, decltype(refUpdater), FULL_GC>(&movedObjectsRange,
1829                                                                                                  refUpdater);
1830         }
1831     }
1832 }
1833 
1834 template <class LanguageConfig>
1835 template <bool FULL_GC, bool USE_WORKERS>
1836 void G1GC<LanguageConfig>::UpdateRefsToMovedObjects(MovedObjectsContainer<FULL_GC> *movedObjectsContainer)
1837 {
1838     GCScope<TRACE_TIMING> scope(__FUNCTION__, this);
1839     // Currently the lock for RemSet influences the pause too much, so don't use workers on FULL-GC
1840     constexpr bool ENABLE_WORKERS = USE_WORKERS && !FULL_GC;
1841     auto internalAllocator = this->GetInternalAllocator();
1842     auto *updatedRefQueue =
1843         (ENABLE_WORKERS) ? internalAllocator->template New<GCG1BarrierSet::ThreadLocalCardQueues>() : updatedRefsQueue_;
1844     // NEED_LOCK is true <=> ENABLE_WORKERS is true
1845     auto refUpdater = this->CreateRefUpdater<FULL_GC, ENABLE_WORKERS>(updatedRefQueue);
1846     // Update references from objects which were moved during garbage collection
1847     LOG_DEBUG_GC << "=== Update ex-cset -> ex-cset references. START. ===";
1848     UpdateMovedObjectsReferences<FULL_GC, ENABLE_WORKERS>(movedObjectsContainer, refUpdater);
1849     LOG_DEBUG_GC << "=== Update ex-cset -> ex-cset references. END. ===";
1850 
1851     // update references from objects which are not part of collection set
1852     LOG_DEBUG_GC << "=== Update non ex-cset -> ex-cset references. START. ===";
1853     if constexpr (FULL_GC) {
1854         UpdateRefsFromRemSets(refUpdater);
1855     } else {
1856         // We don't need to create Remset for promoted regions because we already have them
1857         if (!IsCollectionSetFullyPromoted()) {
1858             VisitRemSets(refUpdater);
1859         }
1860     }
1861     LOG_DEBUG_GC << "=== Update non ex-cset -> ex-cset references. END. ===";
1862     if constexpr (ENABLE_WORKERS) {
1863         {
1864             os::memory::LockHolder lock(gcWorkerQueueLock_);
1865             updatedRefsQueue_->insert(updatedRefsQueue_->end(), updatedRefQueue->begin(), updatedRefQueue->end());
1866             internalAllocator->Delete(updatedRefQueue);
1867         }
1868         GCScope<TRACE_TIMING> waitingTiming("WaitUntilTasksEnd", this);
1869         this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1870     }
1871     this->CommonUpdateRefsToMovedObjects();
1872 }
1873 
1874 template <class LanguageConfig>
1875 NO_THREAD_SAFETY_ANALYSIS void G1GC<LanguageConfig>::OnPauseMark(GCTask &task, GCMarkingStackType *objectsStack,
1876                                                                  bool useGcWorkers)
1877 {
1878     GCScope<TRACE_TIMING> scope(__FUNCTION__, this);
1879     LOG_DEBUG_GC << "OnPause marking started";
1880     auto *objectAllocator = GetG1ObjectAllocator();
1881     this->MarkImpl(
1882         &marker_, objectsStack, CardTableVisitFlag::VISIT_DISABLED,
1883         // process references on FULL-GC
1884         GC::EmptyReferenceProcessPredicate,
1885         // non-young mem-range checker
1886         [objectAllocator](MemRange &memRange) { return !objectAllocator->IsIntersectedWithYoung(memRange); },
1887         // mark predicate
1888         CalcLiveBytesMarkPreprocess<true>);
1889     if (useGcWorkers) {
1890         this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1891     }
1892     /**
1893      * We don't collect non-movable regions right now. If there was a reference from a non-movable region to a
1894      * young/tenured region, we reset the mark bitmap for the non-movable region but don't update the live bitmap,
1895      * so we can traverse a non-reachable object (in CacheRefsFromRemsets) and visit a DEAD object in
1896      * tenured space (deleted during a young collection or in the iterative full-GC phase).
1897      */
1898     auto refClearPred = []([[maybe_unused]] const ObjectHeader *obj) { return true; };
1899     this->GetPandaVm()->HandleReferences(task, refClearPred);
1900 }
1901 
1902 template <class LanguageConfig>
1903 void G1GC<LanguageConfig>::FullMarking(ark::GCTask &task)
1904 {
1905     GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_MARK);
1906     auto *objectAllocator = GetG1ObjectAllocator();
1907     bool useGcWorkers = this->GetSettings()->ParallelMarkingEnabled();
1908 
1909     GCMarkingStackType fullCollectionStack(this, useGcWorkers ? this->GetSettings()->GCRootMarkingStackMaxSize() : 0,
1910                                            useGcWorkers ? this->GetSettings()->GCWorkersMarkingStackMaxSize() : 0,
1911                                            GCWorkersTaskTypes::TASK_FULL_MARK,
1912                                            this->GetSettings()->GCMarkingStackNewTasksFrequency());
1913 
1914     InitialMark<true>(fullCollectionStack, marker_);
1915 
1916     this->OnPauseMark(task, &fullCollectionStack, useGcWorkers);
1917     // We will sweep VM refs in tenured space during mixed collection, but only for non-empty regions.
1918     // Therefore, sweep here only NonMovable objects, Humongous objects, and empty movable regions:
1919     SweepNonRegularVmRefs();
1920     auto allRegions = objectAllocator->GetAllRegions();
1921     for (auto *r : allRegions) {
1922         if (r->GetLiveBitmap() != nullptr) {
1923             r->CloneMarkBitmapToLiveBitmap();
1924         }
1925     }
1926     // Force card updater here, after swapping bitmap, to skip dead objects
1927     ProcessDirtyCards();
1928     // We don't save to topGarbageRegions_ here, because
1929     // making the topGarbageRegions_ value reusable for FullGC
1930     // would require calling GetTopGarbageRegions<true>(), which is not allowed here
1931     auto garbageRegions = GetG1ObjectAllocator()->template GetTopGarbageRegions<false>();
1932     auto emptyTenuredRegions = GetEmptyTenuredRegularRegions(garbageRegions);
1933     CollectEmptyRegions<false, false>(task, &emptyTenuredRegions);
1934 }
1935 
1936 template <class LanguageConfig>
1937 void G1GC<LanguageConfig>::PauseTimeGoalDelay()
1938 {
1939     if (this->GetSettings()->G1EnablePauseTimeGoal() && !interruptConcurrentFlag_) {
1940         auto start = ark::time::GetCurrentTimeInMicros();
1941         // Instead of the max pause, an estimated pause should be used to calculate the delay
1942         auto remained = g1PauseTracker_.MinDelayBeforeMaxPauseInMicros(ark::time::GetCurrentTimeInMicros());
1943         if (remained > 0) {
1944             ConcurrentScope concurrentScope(this);
1945             os::memory::LockHolder lh(concurrentMarkMutex_);
1946             while (!interruptConcurrentFlag_ && remained > 0) {
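                 // Split the remaining microseconds into whole milliseconds plus leftover nanoseconds for
                 // TimedWait, e.g. 2500 us -> 2 ms + 500000 ns (assuming the usual 1000x conversion constants).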
1947                 auto ms = static_cast<uint64_t>(remained) / ark::os::time::MILLIS_TO_MICRO;
1948                 auto ns = (static_cast<uint64_t>(remained) - ms * ark::os::time::MILLIS_TO_MICRO) *
1949                           ark::os::time::MICRO_TO_NANO;
1950                 concurrentMarkCondVar_.TimedWait(&concurrentMarkMutex_, ms, ns);
1951                 auto d = static_cast<int64_t>(ark::time::GetCurrentTimeInMicros() - start);
1952                 remained -= d;
1953             }
1954         }
1955     }
1956 }
1957 
1958 template <class LanguageConfig>
1959 template <bool PROCESS_WEAK_REFS, typename Marker>
1960 void G1GC<LanguageConfig>::InitialMark(GCMarkingStackType &markingStack, Marker &marker)
1961 {
1962     UnmarkAll(marker);
1963     ASSERT(this->GetReferenceProcessor()->GetReferenceQueueSize() ==
1964            0);  // all references should have been processed during mixed GC
1965     {
1966         GCScope<TRACE_TIMING_PHASE> initialMarkScope("InitialMark", this, GCPhase::GC_PHASE_INITIAL_MARK);
1967         // Collect non-heap roots.
1968         // Mark the whole heap by using only these roots.
1969         // The inter-region roots will be processed during the pause
1970 
1971         // InitialMark. STW
1972         if constexpr (!PROCESS_WEAK_REFS) {
1973             GCRootVisitor gcMarkRoots = [&markingStack, &marker](const GCRoot &gcRoot) {
1974                 ValidateObject(gcRoot.GetType(), gcRoot.GetObjectHeader());
1975                 if (marker.MarkIfNotMarked(gcRoot.GetObjectHeader())) {
1976                     markingStack.PushToStack(gcRoot.GetType(), gcRoot.GetObjectHeader());
1977                 }
1978             };
1979             this->VisitRoots(gcMarkRoots, VisitGCRootFlags::ACCESS_ROOT_ALL);
1980         } else {
1981             GCRootVisitor gcMarkRoots = CreateGCRootVisitor(markingStack, marker, GC::EmptyReferenceProcessPredicate);
1982             this->VisitRoots(gcMarkRoots, VisitGCRootFlags::ACCESS_ROOT_ALL);
1983         }
1984     }
1985 }
1986 
1987 template <class LanguageConfig>
1988 template <typename Marker>
1989 void G1GC<LanguageConfig>::UnmarkAll([[maybe_unused]] Marker &marker)
1990 {
1991     // First we need to unmark all heap
1992     GCScope<TRACE_TIMING> unMarkScope("UnMark", this);
1993     LOG_DEBUG_GC << "Start unmark all heap before mark";
1994     auto allRegion = GetG1ObjectAllocator()->GetAllRegions();
1995     for (Region *r : allRegion) {
1996         auto *bitmap = r->GetMarkBitmap();
1997         // Calculate live bytes during mark-phase
1998         r->SetLiveBytes(0U);
1999         // Unmark the full heap except Humongous space
2000         bitmap->ClearAllBits();
2001     }
2002 #ifndef NDEBUG
2003     this->GetObjectAllocator()->IterateOverObjects([&marker](ObjectHeader *obj) { ASSERT(!marker.IsMarked(obj)); });
2004 #endif
2005 }
2006 
2007 template <class LanguageConfig>
2008 template <bool PROCESS_WEAK_REFS, typename Marker>
2009 void G1GC<LanguageConfig>::ConcurrentMark(const GCTask &task, GCMarkingStackType *objectsStack, Marker &marker)
2010 {
2011     ConcurrentScope concurrentScope(this);
2012     GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_MARK);
2013     if (task.reason == GCTaskCause::CROSSREF_CAUSE) {
2014         // Region live bytes are calculated from several GC threads, so atomic calculation is required
2015         this->ConcurrentMarkImpl<PROCESS_WEAK_REFS, true>(objectsStack, marker);
2016     } else {
2017         // Only the GC thread can modify live bytes in the region, so atomic calculation is not needed
2018         this->ConcurrentMarkImpl<PROCESS_WEAK_REFS, false>(objectsStack, marker);
2019     }
2020 }
2021 
2022 template <class LanguageConfig>
2023 template <bool PROCESS_WEAK_REFS, typename Marker>
2024 void G1GC<LanguageConfig>::Remark(const GCTask &task, Marker &marker)
2025 {
2026     /**
2027      * Make remark on pause to have all marked objects in tenured space; this makes it possible to check objects in
2028      * remsets. If an object is not marked, we don't process it, because it's already dead
2029      */
2030     auto scopedTracker = g1PauseTracker_.CreateScope();
2031     GCScope<TIMING_PHASE> gcScope(__FUNCTION__, this, GCPhase::GC_PHASE_REMARK);
2032     GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats(), nullptr, PauseTypeStats::REMARK_PAUSE);
2033     {
2034         ScopedTiming t("Stack Remarking", *this->GetTiming());
2035         bool useGcWorkers = this->GetSettings()->ParallelMarkingEnabled();
2036         GCMarkingStackType stack(this, useGcWorkers ? this->GetSettings()->GCRootMarkingStackMaxSize() : 0,
2037                                  useGcWorkers ? this->GetSettings()->GCWorkersMarkingStackMaxSize() : 0,
2038                                  task.reason == GCTaskCause::CROSSREF_CAUSE ? GCWorkersTaskTypes::TASK_XREMARK
2039                                                                             : GCWorkersTaskTypes::TASK_REMARK,
2040                                  this->GetSettings()->GCMarkingStackNewTasksFrequency());
2041 
2042         // The mutator may create new regions.
2043         // If so we should bind bitmaps of new regions.
2044         DrainSatb(&stack, marker);
2045         if constexpr (PROCESS_WEAK_REFS) {
2046             this->MarkStack(&marker, &stack, CalcLiveBytesMarkPreprocess<true>, GC::EmptyReferenceProcessPredicate);
2047         } else {
2048             this->MarkStack(&marker, &stack, CalcLiveBytesMarkPreprocess<true>);
2049         }
2050 
2051         if (useGcWorkers) {
2052             this->GetWorkersTaskPool()->WaitUntilTasksEnd();
2053         }
2054 
2055         // ConcurrentMark doesn't visit young objects, so we can't clear references which are in young space because
2056         // we don't know which objects are marked. We will process them separately later on young/mixed GC; here we
2057         // process only refs in tenured space
2058         auto refClearPred = []([[maybe_unused]] const ObjectHeader *obj) {
2059             return !ObjectToRegion(obj)->HasFlag(RegionFlag::IS_EDEN);
2060         };
2061         this->GetPandaVm()->HandleReferences(task, refClearPred);
2062     }
2063 
2064     // We will sweep VM refs in tenured space during mixed collection,
2065     // therefore, sweep here only NonMovable and Humongous objects:
2066     SweepNonRegularVmRefs();
2067     auto g1Allocator = this->GetG1ObjectAllocator();
2068     auto allRegions = g1Allocator->GetAllRegions();
2069     for (const auto &region : allRegions) {
2070         if (region->HasFlag(IS_OLD) || region->HasFlag(IS_NONMOVABLE)) {
2071             region->SwapMarkBitmap();
2072         }
2073     }
2074     // Force card updater here, after swapping bitmap, to skip dead objects
2075     ProcessDirtyCards();
2076 }
2077 
2078 template <class LanguageConfig>
2079 void G1GC<LanguageConfig>::SweepNonRegularVmRefs()
2080 {
2081     ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
2082 
2083     this->GetPandaVm()->SweepVmRefs([this](ObjectHeader *object) {
2084         Region *region = ObjectToRegion(object);
2085         if (region->HasFlag(RegionFlag::IS_EDEN)) {
2086             return ObjectStatus::ALIVE_OBJECT;
2087         }
2088         bool nonRegularObject =
2089             region->HasFlag(RegionFlag::IS_NONMOVABLE) || region->HasFlag(RegionFlag::IS_LARGE_OBJECT);
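             // For regular (movable) regions a zero live-bytes counter means nothing in the region was marked,
             // so the referenced object must be dead (see the ASSERT below).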
2090         if (!nonRegularObject) {
2091             ASSERT(region->GetLiveBytes() != 0U || !this->IsMarked(object));
2092             if (region->GetLiveBytes() == 0U) {
2093                 return ObjectStatus::DEAD_OBJECT;
2094             }
2095         }
2096         return this->IsMarked(object) ? ObjectStatus::ALIVE_OBJECT : ObjectStatus::DEAD_OBJECT;
2097     });
2098 }
2099 
2100 template <class LanguageConfig>
2101 void G1GC<LanguageConfig>::SweepRegularVmRefs()
2102 {
2103     ScopedTiming t(__FUNCTION__, *this->GetTiming());
2104 
2105     this->GetPandaVm()->SweepVmRefs([this](ObjectHeader *obj) {
2106         if (this->InGCSweepRange(obj)) {
2107             return ObjectStatus::DEAD_OBJECT;
2108         }
2109         return ObjectStatus::ALIVE_OBJECT;
2110     });
2111 }
2112 
2113 template <class LanguageConfig>
2114 void G1GC<LanguageConfig>::VerifyHeapBeforeConcurrent()
2115 {
2116     trace::ScopedTrace postHeapVerifierTrace("PostGCHeapVeriFier before concurrent");
2117     size_t failCount = this->VerifyHeap();
2118     if (this->GetSettings()->FailOnHeapVerification() && failCount > 0) {
2119         LOG(FATAL, GC) << "Heap corrupted after GC, HeapVerifier found " << failCount << " corruptions";
2120     }
2121 }
2122 
2123 template <class LanguageConfig>
2124 CollectionSet G1GC<LanguageConfig>::GetCollectibleRegions(ark::GCTask const &task, bool isMixed)
2125 {
2126     ASSERT(!this->IsFullGC());
2127     ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
2128     auto g1Allocator = this->GetG1ObjectAllocator();
2129     LOG_DEBUG_GC << "Start GetCollectibleRegions isMixed: " << isMixed << " reason: " << task.reason;
2130     CollectionSet collectionSet(g1Allocator->GetYoungRegions());
2131     if (isMixed && !fullCollectionSetPromotion_) {
2132         if (!this->GetSettings()->G1EnablePauseTimeGoal()) {
2133             AddOldRegionsMaxAllowed(collectionSet);
2134         } else {
2135             AddOldRegionsAccordingPauseTimeGoal(collectionSet);
2136         }
2137     }
2138     LOG_DEBUG_GC << "collectibleRegions size: " << collectionSet.size() << " young " << collectionSet.Young().size()
2139                  << " old " << std::distance(collectionSet.Young().end(), collectionSet.end())
2140                  << " reason: " << task.reason << " isMixed: " << isMixed;
2141     return collectionSet;
2142 }
2143 
2144 template <class LanguageConfig>
2145 template <typename UnaryPred>
2146 void G1GC<LanguageConfig>::DrainOldRegions(CollectionSet &collectionSet, UnaryPred pred)
2147 {
2148     for (auto regionsIter = topGarbageRegions_.rbegin(); regionsIter != topGarbageRegions_.rend();) {
2149         auto *garbageRegion = regionsIter->second;
2150         if (garbageRegion->HasPinnedObjects()) {
2151             // Pinned objects may occur between collection phases
2152             ++regionsIter;
2153             continue;
2154         }
2155         ASSERT(!garbageRegion->HasFlag(IS_EDEN));
2156         ASSERT(!garbageRegion->HasFlag(IS_RESERVED));
2157         ASSERT(garbageRegion->GetAllocatedBytes() != 0U);
2158         if (!pred(garbageRegion)) {
2159             break;
2160         }
2161         [[maybe_unused]] double garbageRate =
2162             static_cast<double>(garbageRegion->GetGarbageBytes()) / garbageRegion->GetAllocatedBytes();
2163         LOG_DEBUG_GC << "Garbage percentage in " << std::hex << garbageRegion << " region = " << std::dec << garbageRate
2164                      << " %, add to collection set";
2165         collectionSet.AddRegion(garbageRegion);
2166         auto eraseIter = (regionsIter + 1).base();
2167         regionsIter = std::make_reverse_iterator(topGarbageRegions_.erase(eraseIter));
2168     }
2169 }
2170 
2171 template <class LanguageConfig>
2172 void G1GC<LanguageConfig>::AddOldRegionsMaxAllowed(CollectionSet &collectionSet)
2173 {
2174     auto pred = [numberOfAddedRegions = 0U,
2175                  maxNumOfRegions = numberOfMixedTenuredRegions_]([[maybe_unused]] Region *garbageRegion) mutable {
2176         return numberOfAddedRegions++ < maxNumOfRegions;
2177     };
2178     DrainOldRegions(collectionSet, pred);
2179 }
2180 
2181 template <class LanguageConfig>
2182 void G1GC<LanguageConfig>::AddOldRegionsAccordingPauseTimeGoal(CollectionSet &collectionSet)
2183 {
2184     auto topRegionIter =
2185         std::find_if_not(topGarbageRegions_.rbegin(), topGarbageRegions_.rend(),
2186                          [](const std::pair<uint32_t, Region *> &entry) { return entry.second->HasPinnedObjects(); });
2187     if (topRegionIter == topGarbageRegions_.rend()) {
2188         return;
2189     }
2190     auto gcPauseTimeBudget = this->GetSettings()->GetG1MaxGcPauseInMillis() * ark::os::time::MILLIS_TO_MICRO;
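     // The pause budget (max GC pause converted to microseconds) is consumed in order by: the mandatory
     // top old region, the young part of the collection set, dirty card scanning, and then as many extra
     // old regions as still fit (AddMoreOldRegionsAccordingPauseTimeGoal).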
2191     // add at least one old region to guarantee progress in mixed collection
2192     collectionSet.AddRegion(topRegionIter->second);
2193     auto expectedYoungCollectionTime = analytics_.PredictYoungCollectionTimeInMicros(collectionSet);
2194     auto expectedTopRegionCollectionTime = analytics_.PredictOldCollectionTimeInMicros(topRegionIter->second);
2195     topGarbageRegions_.erase((topRegionIter + 1).base());
2196     auto totalPredictedPause = expectedYoungCollectionTime + expectedTopRegionCollectionTime;
2197     if (gcPauseTimeBudget < expectedTopRegionCollectionTime) {
2198         LOG_DEBUG_GC << "Not enough budget to add more than one old region";
2199         analytics_.ReportPredictedMixedPause(totalPredictedPause);
2200         return;
2201     }
2202     gcPauseTimeBudget -= expectedTopRegionCollectionTime;
2203     if (gcPauseTimeBudget < expectedYoungCollectionTime) {
2204         LOG_DEBUG_GC << "Not enough budget to add old regions";
2205         analytics_.ReportPredictedMixedPause(totalPredictedPause);
2206         return;
2207     }
2208     gcPauseTimeBudget -= expectedYoungCollectionTime;
2209     auto expectedScanDirtyCardsTime = analytics_.PredictScanDirtyCardsTime(dirtyCards_.size());
2210     if (gcPauseTimeBudget < expectedScanDirtyCardsTime) {
2211         LOG_DEBUG_GC << "Not enough budget to add old regions after scanning dirty cards";
2212         analytics_.ReportPredictedMixedPause(totalPredictedPause);
2213         return;
2214     }
2215     gcPauseTimeBudget -= expectedScanDirtyCardsTime;
2216     totalPredictedPause += expectedScanDirtyCardsTime;
2217 
2218     totalPredictedPause += AddMoreOldRegionsAccordingPauseTimeGoal(collectionSet, gcPauseTimeBudget);
2219     analytics_.ReportPredictedMixedPause(totalPredictedPause);
2220 }
2221 
2222 template <class LanguageConfig>
2223 uint64_t G1GC<LanguageConfig>::AddMoreOldRegionsAccordingPauseTimeGoal(CollectionSet &collectionSet,
2224                                                                        uint64_t gcPauseTimeBudget)
2225 {
2226     uint64_t time = 0;
2227     auto pred = [this, &time, &gcPauseTimeBudget](Region *garbageRegion) {
2228         auto expectedRegionCollectionTime = analytics_.PredictOldCollectionTimeInMicros(garbageRegion);
2229         if (gcPauseTimeBudget < expectedRegionCollectionTime) {
2230             LOG_DEBUG_GC << "Not enough budget to add old regions anymore";
2231             return false;
2232         }
2233 
2234         gcPauseTimeBudget -= expectedRegionCollectionTime;
2235         time += expectedRegionCollectionTime;
2236         return true;
2237     };
2238     DrainOldRegions(collectionSet, pred);
2239     return time;
2240 }
2241 
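// For a full GC the collection set contains all young regions plus the movable tenured regions
// with garbage; regions that show up in the garbage list but are still young or contain pinned
// objects are skipped, and everything else is expected to be a movable old region (see the asserts
// below).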
2242 template <class LanguageConfig>
2243 CollectionSet G1GC<LanguageConfig>::GetFullCollectionSet()
2244 {
2245     ASSERT(this->IsFullGC());
2246     // FillRemSet should always be finished before GetCollectibleRegions
2247     ASSERT(updateRemsetWorker_->GetQueueSize() == 0);
2248     auto g1Allocator = this->GetG1ObjectAllocator();
2249     g1Allocator->ClearCurrentTenuredRegion();
2250     CollectionSet collectionSet(g1Allocator->GetYoungRegions());
2251     auto movableGarbageRegions = g1Allocator->template GetTopGarbageRegions<true>();
2252     LOG_DEBUG_GC << "Regions for FullGC:";
2253     for (auto iter = movableGarbageRegions.begin(); iter != movableGarbageRegions.end(); ++iter) {
2254         auto *region = iter->second;
2255         if (region->HasFlag(IS_EDEN) || region->HasPinnedObjects()) {
2256             LOG_DEBUG_GC << (region->HasFlags(IS_EDEN) ? "Young region" : "Region with pinned objects") << " ("
2257                          << *region << ") is not added to collection set";
2258             continue;
2259         }
2260         LOG_DEBUG_GC << *region;
2261         ASSERT(!region->HasFlag(IS_NONMOVABLE) && !region->HasFlag(IS_LARGE_OBJECT));
2262         ASSERT(region->HasFlag(IS_OLD));
2263         collectionSet.AddRegion(region);
2264     }
2265     return collectionSet;
2266 }
2267 
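// InterruptReleasePagesIfNeeded and StartReleasePagesIfNeeded below implement a small state machine
// over releasePagesInterruptFlag_: a running RELEASING_PAGES task can be switched to NEED_INTERRUPT
// here, and StartReleasePagesIfNeeded later moves the flag back to RELEASING_PAGES to resume the
// work on a GC worker (or releases the pages in place and sets FINISHED if no task can be added).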
2268 template <class LanguageConfig>
2269 void G1GC<LanguageConfig>::InterruptReleasePagesIfNeeded()
2270 {
2271     if (this->GetSettings()->GCWorkersCount() != 0) {
2272         auto oldStatus = ReleasePagesStatus::RELEASING_PAGES;
2273         if (releasePagesInterruptFlag_.compare_exchange_strong(oldStatus, ReleasePagesStatus::NEED_INTERRUPT)) {
2274             /* @sync 1
2275              * @description Interrupt release pages
2276              */
2277         }
2278     }
2279 }
2280 
2281 template <class LanguageConfig>
2282 void G1GC<LanguageConfig>::StartReleasePagesIfNeeded(ReleasePagesStatus oldStatus)
2283 {
2284     if (releasePagesInterruptFlag_.compare_exchange_strong(oldStatus, ReleasePagesStatus::RELEASING_PAGES)) {
2285         ASSERT(this->GetSettings()->GCWorkersCount() != 0);
2286         if (!this->GetWorkersTaskPool()->AddTask(GCWorkersTaskTypes::TASK_RETURN_FREE_PAGES_TO_OS)) {
2287             PoolManager::GetMmapMemPool()->ReleaseFreePagesToOS();
2288             releasePagesInterruptFlag_ = ReleasePagesStatus::FINISHED;
2289         }
2290     }
2291 }
2292 
2293 template <class LanguageConfig>
2294 bool G1GC<LanguageConfig>::HaveEnoughSpaceToMove(const CollectionSet &collectibleRegions)
2295 {
2296     // extra regions are required because workers concurrently evacuate objects to several regions
2297     size_t extraRegions =
2298         this->GetPandaVm()->SupportGCSinglePassCompaction() ? this->GetSettings()->GCWorkersCount() : 0;
2299     return HaveEnoughRegionsToMove(collectibleRegions.Movable().size() + extraRegions);
2300 }
2301 
2302 template <class LanguageConfig>
2303 bool G1GC<LanguageConfig>::HaveEnoughRegionsToMove(size_t num)
2304 {
2305     return GetG1ObjectAllocator()->HaveTenuredSize(num) && GetG1ObjectAllocator()->HaveFreeRegions(num);
2306 }
2307 
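// When a thread terminates, its pre-write barrier (SATB) buffer is handed over to satbBuffList_ and
// its post-write barrier buffer is forwarded to the update-remset worker, so no barrier records are
// lost. With BuffersKeepingFlag::KEEP a copy of the pre-buffer is taken and the thread's own buffer
// stays usable; with DELETE the buffers are moved away and freed.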
2308 template <class LanguageConfig>
2309 void G1GC<LanguageConfig>::OnThreadTerminate(ManagedThread *thread, mem::BuffersKeepingFlag keepBuffers)
2310 {
2311     InternalAllocatorPtr allocator = this->GetInternalAllocator();
2312     // The method must be called while the lock which guards the thread/coroutine list is held
2313     LOG(DEBUG, GC) << "Call OnThreadTerminate";
2314     PandaVector<ObjectHeader *> *preBuff = nullptr;
2315     if (keepBuffers == mem::BuffersKeepingFlag::KEEP) {
2316         preBuff = allocator->New<PandaVector<ObjectHeader *>>(*thread->GetPreBuff());
2317         ASSERT(preBuff != nullptr);
2318         thread->GetPreBuff()->clear();
2319     } else {  // keepBuffers == mem::BuffersKeepingFlag::DELETE
2320         preBuff = thread->MovePreBuff();
2321     }
2322     ASSERT(preBuff != nullptr);
2323     {
2324         os::memory::LockHolder lock(satbAndNewobjBufLock_);
2325         satbBuffList_.push_back(preBuff);
2326     }
2327     {
2328         auto *localBuffer = thread->GetG1PostBarrierBuffer();
2329         ASSERT(localBuffer != nullptr);
2330         if (!localBuffer->IsEmpty()) {
2331             auto *tempBuffer = allocator->New<PandaVector<mem::CardTable::CardPtr>>();
2332             while (!localBuffer->IsEmpty()) {
2333                 tempBuffer->push_back(localBuffer->Pop());
2334             }
2335             updateRemsetWorker_->AddPostBarrierBuffer(tempBuffer);
2336         }
2337         if (keepBuffers == mem::BuffersKeepingFlag::DELETE) {
2338             thread->ResetG1PostBarrierBuffer();
2339             allocator->Delete(localBuffer);
2340         }
2341     }
2342 }
2343 
2344 template <class LanguageConfig>
2345 void G1GC<LanguageConfig>::OnThreadCreate(ManagedThread *thread)
2346 {
2347     // Any access to other threads' data (including MAIN's) might cause a race here
2348     // so don't do this please.
2349     thread->SetPreWrbEntrypoint(reinterpret_cast<void *>(currentPreWrbEntrypoint_));
2350 }
2351 
2352 template <class LanguageConfig>
2353 void G1GC<LanguageConfig>::PreZygoteFork()
2354 {
2355     GC::PreZygoteFork();
2356     this->DestroyWorkersTaskPool();
2357     this->DisableWorkerThreads();
2358     updateRemsetWorker_->DestroyWorker();
2359     // do not use a separate thread while we are in zygote
2360     updateRemsetWorker_->SetUpdateConcurrent(false);
2361 }
2362 
2363 template <class LanguageConfig>
2364 void G1GC<LanguageConfig>::PostZygoteFork()
2365 {
2366     this->EnableWorkerThreads();
2367     this->CreateWorkersTaskPool();
2368     GC::PostZygoteFork();
2369     // use the concurrent option after zygote
2370     updateRemsetWorker_->SetUpdateConcurrent(this->GetSettings()->G1EnableConcurrentUpdateRemset());
2371     updateRemsetWorker_->CreateWorker();
2372 }
2373 
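// Drains the SATB (snapshot-at-the-beginning) buffers into the marking stack: the pre-write barrier
// records of the active threads, the buffers left behind by terminated threads (satbBuffList_) and
// the objects allocated during concurrent marking (newobjBuffer_). Marking all of them keeps the
// SATB invariant: every object reachable at marking start, or allocated during marking, stays live.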
2374 template <class LanguageConfig>
2375 template <typename Marker>
2376 void G1GC<LanguageConfig>::DrainSatb(GCAdaptiveMarkingStack *objectStack, Marker &marker)
2377 {
2378     ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
2379     // Process satb buffers of the active threads
2380     auto callback = [this, objectStack, &marker](ManagedThread *thread) {
2381         // Acquire the lock here to avoid data races with the threads
2382         // which are terminating now.
2383         // The data race happens on thread.pre_buf_: the terminating thread may
2384         // release its own pre_buf_ while the GC thread iterates over threads and reads their
2385         // pre_buf_.
2386         os::memory::LockHolder lock(satbAndNewobjBufLock_);
2387         auto preBuff = thread->GetPreBuff();
2388         if (preBuff == nullptr) {
2389             // This can happen when the thread has given us its own satb_buffer but
2390             // has not unregistered from the ThreadManager yet.
2391             // During this period a GC can happen and pre_buff is null here.
2392             return true;
2393         }
2394         for (auto obj : *preBuff) {
2395             if (marker.MarkIfNotMarked(obj)) {
2396                 objectStack->PushToStack(RootType::SATB_BUFFER, obj);
2397             }
2398         }
2399         preBuff->clear();
2400         return true;
2401     };
2402     this->GetPandaVm()->GetThreadManager()->EnumerateThreads(callback);
2403 
2404     // Process satb buffers of the terminated threads
2405     os::memory::LockHolder lock(satbAndNewobjBufLock_);
2406     for (auto objVector : satbBuffList_) {
2407         ASSERT(objVector != nullptr);
2408         for (auto obj : *objVector) {
2409             if (marker.MarkIfNotMarked(obj)) {
2410                 objectStack->PushToStack(RootType::SATB_BUFFER, obj);
2411             }
2412         }
2413         this->GetInternalAllocator()->Delete(objVector);
2414     }
2415     satbBuffList_.clear();
2416     for (auto obj : newobjBuffer_) {
2417         if (marker.MarkIfNotMarked(obj)) {
2418             objectStack->PushToStack(RootType::SATB_BUFFER, obj);
2419         }
2420     }
2421     newobjBuffer_.clear();
2422 }
2423 
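// HandlePendingDirtyCards and ReenqueueDirtyCards below are a pair: the first drains all pending
// cards from the update-remset worker into dirtyCards_ and unmarks them; the second marks the cards
// again and pushes them back onto updatedRefsQueue_ so they are processed later.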
2424 template <class LanguageConfig>
2425 void G1GC<LanguageConfig>::HandlePendingDirtyCards()
2426 {
2427     ScopedTiming t(__FUNCTION__, *this->GetTiming());
2428     updateRemsetWorker_->DrainAllCards(&dirtyCards_);
2429     std::for_each(dirtyCards_.cbegin(), dirtyCards_.cend(), [](auto card) { card->UnMark(); });
2430 }
2431 
2432 template <class LanguageConfig>
2433 void G1GC<LanguageConfig>::ReenqueueDirtyCards()
2434 {
2435     ScopedTiming t(__FUNCTION__, *this->GetTiming());
2436     os::memory::LockHolder lock(queueLock_);
2437     std::for_each(dirtyCards_.cbegin(), dirtyCards_.cend(), [this](auto card) {
2438         card->Mark();
2439         updatedRefsQueue_->push_back(card);
2440     });
2441     dirtyCards_.clear();
2442 }
2443 
2444 template <class LanguageConfig>
2445 void G1GC<LanguageConfig>::ClearSatb()
2446 {
2447     ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
2448     // Acquire the lock here to avoid data races with the threads
2449     // which are terminating now.
2450     // The data race happens on thread.pre_buf_: the terminating thread may
2451     // release its own pre_buf_ while the GC thread iterates over threads and reads their
2452     // pre_buf_.
2453     // Process satb buffers of the active threads
2454     auto threadCallback = [this](ManagedThread *thread) {
2455         os::memory::LockHolder lock(satbAndNewobjBufLock_);
2456         auto preBuff = thread->GetPreBuff();
2457         if (preBuff != nullptr) {
2458             preBuff->clear();
2459         }
2460         return true;
2461     };
2462     this->GetPandaVm()->GetThreadManager()->EnumerateThreads(threadCallback);
2463 
2464     os::memory::LockHolder lock(satbAndNewobjBufLock_);
2465     // Process satb buffers of the terminated threads
2466     for (auto objVector : satbBuffList_) {
2467         this->GetInternalAllocator()->Delete(objVector);
2468     }
2469     satbBuffList_.clear();
2470     newobjBuffer_.clear();
2471 }
2472 
2473 template <class LanguageConfig>
2474 template <class Visitor>
2475 void G1GC<LanguageConfig>::VisitRemSets(const Visitor &visitor)
2476 {
2477     GCScope<TRACE_TIMING> visitRemsetScope(__FUNCTION__, this);
2478 
2479     ASSERT(uniqueCardsInitialized_);
2480     // Iterate over stored references to the collection set
2481     for (auto &entryVector : uniqueRefsFromRemsets_) {
2482         for (auto &entry : *entryVector) {
2483             ObjectHeader *object = entry.GetObject();
2484             uint32_t offset = entry.GetReferenceOffset();
2485             visitor(object, ObjectAccessor::GetObject(object, offset), offset);
2486         }
2487     }
2488 }
2489 
2490 template <class LanguageConfig>
2491 template <class Visitor>
2492 void G1GC<LanguageConfig>::UpdateRefsFromRemSets(const Visitor &visitor)
2493 {
2494     auto fieldVisitor = [this, &visitor](ObjectHeader *object, ObjectHeader *field, uint32_t offset,
2495                                          [[maybe_unused]] bool isVolatile) {
2496         if (!InGCSweepRange(field)) {
2497             return true;
2498         }
2499         visitor(object, ObjectAccessor::GetObject(object, offset), offset);
2500         return true;
2501     };
2502     auto refsChecker = [this, &fieldVisitor](Region *region, const MemRange &memRange) {
2503         IterateOverRefsInMemRange(memRange, region, fieldVisitor);
2504         return true;
2505     };
2506     CacheRefsFromRemsets(refsChecker);
2507 }
2508 
2509 template <class LanguageConfig>
2510 bool G1GC<LanguageConfig>::IsCollectionSetFullyPromoted() const
2511 {
2512     if (!collectionSet_.Tenured().empty()) {
2513         return false;
2514     }
2515     for (Region *region : collectionSet_.Young()) {
2516         if (!region->HasFlag(RegionFlag::IS_PROMOTED)) {
2517             return false;
2518         }
2519     }
2520     return true;
2521 }
2522 
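// Builds the cache of unique references into the collection set: the remsets of all collected
// regions are walked through GlobalRemSet, and for non-full collections the dirty cards accumulated
// since the last pause are scanned as well (see CacheRefsFromDirtyCards below). The visitor also
// counts the processed mem ranges so analytics can correlate remset size with the references found.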
2523 template <class LanguageConfig>
2524 void G1GC<LanguageConfig>::CacheRefsFromRemsets(const MemRangeRefsChecker &refsChecker)
2525 {
2526     GCScope<TRACE_TIMING> cacheRefsFromRemsetScope(__FUNCTION__, this);
2527     // Collect only unique objects so that they are not processed more than once.
2528     ASSERT(!uniqueCardsInitialized_);
2529 
2530     size_t remsetSize = 0;
2531     auto visitor = [&remsetSize, &refsChecker](Region *r, const MemRange &range) {
2532         remsetSize++;
2533         return refsChecker(r, range);
2534     };
2535 
2536     GlobalRemSet globalRemSet;
2537     globalRemSet.ProcessRemSets(collectionSet_, RemsetRegionPredicate, visitor);
2538 
2539     analytics_.ReportRemsetSize(remsetSize, GetUniqueRemsetRefsCount());
2540 
2541     if (!this->IsFullGC()) {
2542         auto dirtyCardsCount = dirtyCards_.size();
2543         analytics_.ReportScanDirtyCardsStart(ark::time::GetCurrentTimeInNanos());
2544         CacheRefsFromDirtyCards(globalRemSet, refsChecker);
2545         analytics_.ReportScanDirtyCardsEnd(ark::time::GetCurrentTimeInNanos(), dirtyCardsCount);
2546 #ifndef NDEBUG
2547         uniqueCardsInitialized_ = true;
2548 #endif  // NDEBUG
2549     }
2550 }
2551 
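// Scans the dirty cards collected during the pause. A card is dropped from dirtyCards_ either when
// its region is not interesting for the current collection (RemsetRegionPredicate fails) or when all
// cross-region references in its range were processed; otherwise it is kept for re-enqueueing.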
2552 template <class LanguageConfig>
2553 template <typename Visitor>
2554 void G1GC<LanguageConfig>::CacheRefsFromDirtyCards(GlobalRemSet &globalRemSet, Visitor visitor)
2555 {
2556     ScopedTiming t(__FUNCTION__, *this->GetTiming());
2557     auto cardTable = this->GetCardTable();
2558     for (auto it = dirtyCards_.cbegin(); it != dirtyCards_.cend();) {
2559         auto range = cardTable->GetMemoryRange(*it);
2560         auto addr = range.GetStartAddress();
2561         ASSERT_DO(IsHeapSpace(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(ToVoidPtr(addr))),
2562                   std::cerr << "Invalid space type for the " << addr << std::endl);
2563         auto region = ark::mem::AddrToRegion(ToVoidPtr(addr));
2564         if (!RemsetRegionPredicate(region)) {
2565             it = dirtyCards_.erase(it);
2566             continue;
2567         }
2568 
2569         auto allCrossRegionRefsProcessed = globalRemSet.IterateOverUniqueRange(region, range, visitor);
2570         if (allCrossRegionRefsProcessed) {
2571             it = dirtyCards_.erase(it);
2572             continue;
2573         }
2574         ++it;
2575     }
2576 }
2577 
2578 template <class LanguageConfig>
2579 void G1GC<LanguageConfig>::RestoreYoungCards(const CollectionSet &collectionSet)
2580 {
2581     CardTable *cardTable = this->GetCardTable();
2582     for (Region *region : collectionSet.Young()) {
2583         cardTable->MarkCardsAsYoung(MemRange(region->Begin(), region->End()));
2584     }
2585 }
2586 
2587 template <class LanguageConfig>
2588 void G1GC<LanguageConfig>::ClearYoungCards(const CollectionSet &collectionSet)
2589 {
2590     auto *cardTable = this->GetCardTable();
2591     for (Region *region : collectionSet.Young()) {
2592         cardTable->ClearCardRange(ToUintPtr(region), ToUintPtr(region) + DEFAULT_REGION_SIZE);
2593     }
2594 }
2595 
2596 template <class LanguageConfig>
2597 void G1GC<LanguageConfig>::ClearTenuredCards(const CollectionSet &collectionSet)
2598 {
2599     auto *cardTable = this->GetCardTable();
2600     for (Region *region : collectionSet.Tenured()) {
2601         cardTable->ClearCardRange(ToUintPtr(region), ToUintPtr(region) + DEFAULT_REGION_SIZE);
2602     }
2603 }
2604 
2605 template <class LanguageConfig>
2606 void G1GC<LanguageConfig>::ClearRefsFromRemsetsCache()
2607 {
2608     ASSERT(!uniqueRefsFromRemsets_.empty());
2609     // Resize the list of unique refs from remsets down to one entry to reduce memory usage
2610     size_t elementsToRemove = uniqueRefsFromRemsets_.size() - 1;
2611     for (size_t i = 0; i < elementsToRemove; i++) {
2612         RefVector *entry = uniqueRefsFromRemsets_.back();
2613         this->GetInternalAllocator()->Delete(entry);
2614         uniqueRefsFromRemsets_.pop_back();
2615     }
2616     ASSERT(uniqueRefsFromRemsets_.size() == 1);
2617     uniqueRefsFromRemsets_.front()->clear();
2618     ASSERT(uniqueRefsFromRemsets_.front()->capacity() == MAX_REFS);
2619 #ifndef NDEBUG
2620     uniqueCardsInitialized_ = false;
2621 #endif  // NDEBUG
2622 }
2623 
2624 template <class LanguageConfig>
2625 void G1GC<LanguageConfig>::ActualizeRemSets()
2626 {
2627     ScopedTiming t(__FUNCTION__, *this->GetTiming());
2628     auto *objectAllocator = this->GetG1ObjectAllocator();
2629     // Invalidate regions from collection set in all remsets
2630     for (Region *region : collectionSet_.Young()) {
2631         if (!region->HasFlag(RegionFlag::IS_PROMOTED)) {
2632             RemSet<>::template InvalidateRegion<false>(region);
2633         } else {
2634             objectAllocator->AddPromotedRegionToQueueIfPinned(region);
2635             region->RmvFlag(RegionFlag::IS_PROMOTED);
2636         }
2637     }
2638     for (Region *region : collectionSet_.Tenured()) {
2639         RemSet<>::template InvalidateRegion<false>(region);
2640     }
2641 }
2642 
2643 template <class LanguageConfig>
2644 bool G1GC<LanguageConfig>::ShouldRunTenuredGC(const GCTask &task)
2645 {
2646     return this->IsOnPygoteFork() || task.reason == GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE ||
2647            task.reason == GCTaskCause::STARTUP_COMPLETE_CAUSE || task.reason == GCTaskCause::NATIVE_ALLOC_CAUSE;
2648 }
2649 
2650 template <class LanguageConfig>
2651 void G1GC<LanguageConfig>::OnWaitForIdleFail()
2652 {
2653     // NOTE(ipetrov, #22715): Don't interrupt XGC while interruption is not supported for it
2654     if (this->GetGCPhase() == GCPhase::GC_PHASE_MARK && this->GetLastGCCause() != GCTaskCause::CROSSREF_CAUSE) {
2655         // Atomic with release order reason: write to this variable should become visible in concurrent marker check
2656         interruptConcurrentFlag_.store(true, std::memory_order_release);
2657         if (this->GetSettings()->G1EnablePauseTimeGoal()) {
2658             os::memory::LockHolder lh(concurrentMarkMutex_);
2659             concurrentMarkCondVar_.Signal();
2660         }
2661     }
2662 }
2663 
2664 template <class LanguageConfig>
2665 void G1GC<LanguageConfig>::PostponeGCStart()
2666 {
2667     regionGarbageRateThreshold_ = 0;
2668     g1PromotionRegionAliveRate_ = 0;
2669     GC::PostponeGCStart();
2670 }
2671 
2672 template <class LanguageConfig>
2673 void G1GC<LanguageConfig>::PostponeGCEnd()
2674 {
2675     ASSERT(!this->IsPostponeEnabled() || (regionGarbageRateThreshold_ == 0 && g1PromotionRegionAliveRate_ == 0));
2676     regionGarbageRateThreshold_ = this->GetSettings()->G1RegionGarbageRateThreshold();
2677     g1PromotionRegionAliveRate_ = this->GetSettings()->G1PromotionRegionAliveRate();
2678     GC::PostponeGCEnd();
2679 }
2680 
2681 template <class LanguageConfig>
2682 bool G1GC<LanguageConfig>::IsPostponeGCSupported() const
2683 {
2684     return true;
2685 }
2686 
2687 template <class LanguageConfig>
2688 size_t G1GC<LanguageConfig>::GetMaxMixedRegionsCount()
2689 {
2690     return this->GetG1ObjectAllocator()->GetMaxYoungRegionsCount() + numberOfMixedTenuredRegions_;
2691 }
2692 
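// PrepareYoungRegionsForFullGC records cross-region references from live young objects into remsets
// and clears the young cards; RestoreYoungRegionsAfterFullGC below reverts this by restoring the
// young cards and invalidating the references added from those regions.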
2693 template <class LanguageConfig>
2694 void G1GC<LanguageConfig>::PrepareYoungRegionsForFullGC(const CollectionSet &collectionSet)
2695 {
2696     BuildCrossYoungRemSets(collectionSet.Young());
2697     ClearYoungCards(collectionSet);
2698 }
2699 
2700 template <class LanguageConfig>
2701 void G1GC<LanguageConfig>::RestoreYoungRegionsAfterFullGC(const CollectionSet &collectionSet)
2702 {
2703     RestoreYoungCards(collectionSet);
2704     for (Region *region : collectionSet.Young()) {
2705         RemSet<>::template InvalidateRefsFromRegion<false>(region);
2706     }
2707 }
2708 
2709 template <class LanguageConfig>
2710 template <typename Container>
2711 void G1GC<LanguageConfig>::BuildCrossYoungRemSets(const Container &young)
2712 {
2713     ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
2714     ASSERT(this->IsFullGC());
2715     auto allocator = this->GetG1ObjectAllocator();
2716     size_t regionSizeBits = ark::helpers::math::GetIntLog2(allocator->GetRegionSize());
2717     auto updateRemsets = [regionSizeBits](ObjectHeader *object, ObjectHeader *ref, size_t offset,
2718                                           [[maybe_unused]] bool isVolatile) {
2719         if (!IsSameRegion(object, ref, regionSizeBits) && !ObjectToRegion(ref)->IsYoung()) {
2720             RemSet<>::AddRefWithAddr<false>(object, offset, ref);
2721         }
2722         return true;
2723     };
2724     for (Region *region : young) {
2725         region->GetMarkBitmap()->IterateOverMarkedChunks([&updateRemsets](void *addr) {
2726             ObjectHelpers<LanguageConfig::LANG_TYPE>::template TraverseAllObjectsWithInfo<false>(
2727                 reinterpret_cast<ObjectHeader *>(addr), updateRemsets);
2728         });
2729     }
2730 }
2731 
2732 template <class LanguageConfig>
2733 void G1GC<LanguageConfig>::StartConcurrentScopeRoutine() const
2734 {
2735     updateRemsetWorker_->ResumeWorkerAfterGCPause();
2736 }
2737 
2738 template <class LanguageConfig>
2739 void G1GC<LanguageConfig>::EndConcurrentScopeRoutine() const
2740 {
2741     updateRemsetWorker_->SuspendWorkerForGCPause();
2742 }
2743 
2744 template <class LanguageConfig>
2745 void G1GC<LanguageConfig>::ComputeNewSize()
2746 {
2747     if (this->GetSettings()->G1EnablePauseTimeGoal()) {
2748         auto desiredEdenLengthByPauseDelay = CalculateDesiredEdenLengthByPauseDelay();
2749         auto desiredEdenLengthByPauseDuration = CalculateDesiredEdenLengthByPauseDuration();
2750         auto desiredEdenLength = std::max(desiredEdenLengthByPauseDelay, desiredEdenLengthByPauseDuration);
2751         GetG1ObjectAllocator()->GetHeapSpace()->UpdateSize(desiredEdenLength * GetG1ObjectAllocator()->GetRegionSize());
2752         GetG1ObjectAllocator()->SetDesiredEdenLength(desiredEdenLength);
2753     } else {
2754         GenerationalGC<LanguageConfig>::ComputeNewSize();
2755     }
2756 }
2757 
2758 template <class LanguageConfig>
2759 size_t G1GC<LanguageConfig>::CalculateDesiredEdenLengthByPauseDelay()
2760 {
2761     auto delayBeforePause = g1PauseTracker_.MinDelayBeforeMaxPauseInMicros(ark::time::GetCurrentTimeInMicros());
2762     return static_cast<size_t>(ceil(analytics_.PredictAllocationRate() * delayBeforePause));
2763 }
2764 
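// Picks the largest eden length (in regions) whose predicted young collection time still fits into
// G1MaxGcPauseInMillis. The predicate is expected to be monotonic in the eden length, so a binary
// search between minEdenLength and maxEdenLength is used; the loop keeps the invariant that
// minEdenLength satisfies the predicate while maxEdenLength does not.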
2765 template <class LanguageConfig>
2766 size_t G1GC<LanguageConfig>::CalculateDesiredEdenLengthByPauseDuration()
2767 {
2768     // Calculate the desired eden length according to the pause time goal
2769     size_t minEdenLength = 1;
2770     size_t maxEdenLength =
2771         GetG1ObjectAllocator()->GetHeapSpace()->GetMaxYoungSize() / GetG1ObjectAllocator()->GetRegionSize();
2772 
2773     // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
2774     // on other reads or writes
2775     if (isMixedGcRequired_.load(std::memory_order_relaxed)) {
2776         auto oldCandidates = GetOldCollectionSetCandidatesNumber();
2777         if (oldCandidates >= maxEdenLength) {
2778             // Schedule the next mixed collections as often as possible to maximize collection of old regions
2779             return 1;
2780         }
2781         maxEdenLength -= oldCandidates;
2782     }
2783 
2784     auto maxPause = this->GetSettings()->GetG1MaxGcPauseInMillis() * ark::os::time::MILLIS_TO_MICRO;
2785     auto edenLengthPredicate = [this, maxPause](size_t edenLength) {
2786         if (!HaveEnoughRegionsToMove(edenLength)) {
2787             return false;
2788         }
2789         auto pauseTime = analytics_.PredictYoungCollectionTimeInMicros(edenLength);
2790         return pauseTime <= maxPause;
2791     };
2792     if (!edenLengthPredicate(minEdenLength)) {
2793         return minEdenLength;
2794     }
2795     if (edenLengthPredicate(maxEdenLength)) {
2796         return maxEdenLength;
2797     }
2798     auto delta = (maxEdenLength - minEdenLength) / 2U;
2799     while (delta > 0) {
2800         auto edenLength = minEdenLength + delta;
2801         if (edenLengthPredicate(edenLength)) {
2802             minEdenLength = edenLength;
2803         } else {
2804             maxEdenLength = edenLength;
2805         }
2806         ASSERT(minEdenLength < maxEdenLength);
2807         delta = (maxEdenLength - minEdenLength) / 2U;
2808     }
2809     return minEdenLength;
2810 }
2811 
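// Concurrent marking loop: class roots and the string table are scanned first, then the marking
// stack is drained until it is empty or interruptConcurrentFlag_ is set (see OnWaitForIdleFail
// above). Live bytes per region are accumulated along the way via CalcLiveBytesMarkPreprocess.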
2812 template <class LanguageConfig>
2813 template <bool PROCESS_WEAK_REFS, bool ATOMICALLY, typename Marker>
2814 NO_THREAD_SAFETY_ANALYSIS void G1GC<LanguageConfig>::ConcurrentMarkImpl(GCMarkingStackType *objectsStack,
2815                                                                         Marker &marker)
2816 {
2817     {
2818         ScopedTiming t("VisitClassRoots", *this->GetTiming());
2819         this->VisitClassRoots([this, objectsStack, marker](const GCRoot &gcRoot) {
2820             if (marker.MarkIfNotMarked(gcRoot.GetObjectHeader())) {
2821                 ASSERT(gcRoot.GetObjectHeader() != nullptr);
2822                 objectsStack->PushToStack(RootType::ROOT_CLASS, gcRoot.GetObjectHeader());
2823             } else {
2824                 LOG_DEBUG_GC << "Skip root: " << gcRoot.GetObjectHeader();
2825             }
2826         });
2827     }
2828     {
2829         ScopedTiming t("VisitInternalStringTable", *this->GetTiming());
2830         this->GetPandaVm()->VisitStringTable(
2831             [objectsStack, &marker](ObjectHeader *str) {
2832                 if (marker.MarkIfNotMarked(str)) {
2833                     ASSERT(str != nullptr);
2834                     objectsStack->PushToStack(RootType::STRING_TABLE, str);
2835                 }
2836             },
2837             VisitGCRootFlags::ACCESS_ROOT_ALL | VisitGCRootFlags::START_RECORDING_NEW_ROOT);
2838     }
2839     // Atomic with acquire order reason: the write to this variable should become visible to this load
2840     while (!objectsStack->Empty() && !interruptConcurrentFlag_.load(std::memory_order_acquire)) {
2841         auto *object = this->PopObjectFromStack(objectsStack);
2842         ASSERT(marker.IsMarked(object));
2843         ValidateObject(nullptr, object);
2844         auto *objectClass = object->template ClassAddr<BaseClass>();
2845         // We need annotation here for the FullMemoryBarrier used in InitializeClassByIdEntrypoint
2846         TSAN_ANNOTATE_HAPPENS_AFTER(objectClass);
2847         LOG_DEBUG_GC << "Current object: " << GetDebugInfoAboutObject(object);
2848 
2849         ASSERT(!object->IsForwarded());
2850         CalcLiveBytesMarkPreprocess<ATOMICALLY>(object, objectClass);
2851         if constexpr (PROCESS_WEAK_REFS) {
2852             marker.MarkInstance(objectsStack, object, objectClass, GC::EmptyReferenceProcessPredicate);
2853         } else {
2854             marker.MarkInstance(objectsStack, object, objectClass);
2855         }
2856     }
2857 }
2858 
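// With the pause time goal enabled, a new GC is not triggered while the pause tracker still requires
// a delay before the next pause; otherwise triggering is delegated to the generational GC logic.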
2859 template <class LanguageConfig>
2860 bool G1GC<LanguageConfig>::Trigger(PandaUniquePtr<GCTask> task)
2861 {
2862     if (this->GetSettings()->G1EnablePauseTimeGoal() &&
2863         g1PauseTracker_.MinDelayBeforeMaxPauseInMicros(ark::time::GetCurrentTimeInMicros()) > 0) {
2864         return false;
2865     }
2866     return GenerationalGC<LanguageConfig>::Trigger(std::move(task));
2867 }
2868 
2869 template <class LanguageConfig>
2870 size_t G1GC<LanguageConfig>::GetUniqueRemsetRefsCount() const
2871 {
2872     size_t count = 0;
2873     for (const auto *v : uniqueRefsFromRemsets_) {
2874         count += v->size();
2875     }
2876     return count;
2877 }
2878 
2879 template <class LanguageConfig>
2880 void G1GC<LanguageConfig>::PrintFragmentationMetrics(const char *title)
2881 {
2882     LOG_INFO_GC << title << "internal Old fragmentation "
2883                 << this->GetG1ObjectAllocator()->CalculateInternalOldFragmentation();
2884     LOG_INFO_GC << title << "internal humongous fragmentation "
2885                 << this->GetG1ObjectAllocator()->CalculateInternalHumongousFragmentation();
2886     LOG_INFO_GC << title << "nonmovable external fragmentation "
2887                 << this->GetG1ObjectAllocator()->CalculateNonMovableExternalFragmentation();
2888 }
2889 
2890 TEMPLATE_CLASS_LANGUAGE_CONFIG(G1GC);
2891 TEMPLATE_CLASS_LANGUAGE_CONFIG(G1GCMixedMarker);
2892 
2893 }  // namespace ark::mem
2894