1 /**
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "libpandabase/mem/space.h"
17 #include "runtime/include/language_config.h"
18 #include "runtime/include/class.h"
19 #include "runtime/include/mem/panda_string.h"
20 #include "runtime/include/panda_vm.h"
21 #include "runtime/mem/gc/card_table-inl.h"
22 #include "runtime/mem/gc/dynamic/gc_marker_dynamic-inl.h"
23 #include "runtime/mem/gc/gc.h"
24 #include "runtime/mem/gc/g1/g1-gc.h"
25 #include "runtime/mem/gc/g1/g1-helpers.h"
26 #include "runtime/mem/gc/g1/ref_cache_builder.h"
27 #include "runtime/mem/gc/g1/update_remset_task_queue.h"
28 #include "runtime/mem/gc/g1/update_remset_thread.h"
29 #include "runtime/mem/gc/workers/gc_workers_task_pool.h"
30 #include "runtime/mem/gc/generational-gc-base-inl.h"
31 #include "runtime/mem/gc/static/gc_marker_static-inl.h"
32 #include "runtime/mem/gc/reference-processor/reference_processor.h"
33 #include "runtime/mem/object_helpers-inl.h"
34 #include "runtime/mem/rem_set-inl.h"
35 #include "runtime/include/thread.h"
36 #include "runtime/include/managed_thread.h"
37 #include "runtime/mem/gc/g1/ref_updater.h"
38 #include "runtime/mem/region_space.h"
39
40 namespace panda::mem {
41
42 #ifndef NDEBUG
43 static bool IsCardTableClear(CardTable *cardTable)
44 {
45 bool clear = true;
46 cardTable->VisitMarked(
47 [&clear](const MemRange &range) {
48 LOG(ERROR, GC) << "Card [" << ToVoidPtr(range.GetStartAddress()) << " - "
49 << ToVoidPtr(range.GetEndAddress()) << "] is not clear";
50 clear = false;
51 },
52 CardTableProcessedFlag::VISIT_MARKED | CardTableProcessedFlag::VISIT_PROCESSED);
53 return clear;
54 }
55 #endif
56
57 /* static */
58 template <class LanguageConfig>
59 void G1GC<LanguageConfig>::CalcLiveBytesMarkPreprocess(const ObjectHeader *object, BaseClass *baseKlass)
60 {
61 Region *region = ObjectToRegion(object);
62 size_t objectSize = GetAlignedObjectSize(object->ObjectSize<LanguageConfig::LANG_TYPE>(baseKlass));
63 region->AddLiveBytes<true>(objectSize);
64 }
65
66 /* static */
67 template <class LanguageConfig>
68 void G1GC<LanguageConfig>::CalcLiveBytesNotAtomicallyMarkPreprocess(const ObjectHeader *object, BaseClass *baseKlass)
69 {
70 Region *region = ObjectToRegion(object);
71 size_t objectSize = GetAlignedObjectSize(object->ObjectSize<LanguageConfig::LANG_TYPE>(baseKlass));
72 region->AddLiveBytes<false>(objectSize);
73 }
74
75 template <class LanguageConfig>
76 G1GC<LanguageConfig>::G1GC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings)
77 : GenerationalGC<LanguageConfig>(objectAllocator, settings),
78 marker_(this),
79 concMarker_(this),
80 mixedMarker_(this),
81 concurrentMarkingStack_(this),
82 numberOfMixedTenuredRegions_(settings.GetG1NumberOfTenuredRegionsAtMixedCollection()),
83 regionGarbageRateThreshold_(settings.G1RegionGarbageRateThreshold()),
84 g1PromotionRegionAliveRate_(settings.G1PromotionRegionAliveRate()),
85 g1TrackFreedObjects_(settings.G1TrackFreedObjects()),
86 isExplicitConcurrentGcEnabled_(settings.IsExplicitConcurrentGcEnabled()),
87 regionSizeBits_(panda::helpers::math::GetIntLog2(this->GetG1ObjectAllocator()->GetRegionSize())),
88 g1PauseTracker_(settings.GetG1GcPauseIntervalInMillis(), settings.GetG1MaxGcPauseInMillis()),
89 analytics_(panda::time::GetCurrentTimeInNanos())
90 {
91 InternalAllocatorPtr allocator = this->GetInternalAllocator();
92 this->SetType(GCType::G1_GC);
93 this->SetTLABsSupported();
94 updatedRefsQueue_ = allocator->New<GCG1BarrierSet::ThreadLocalCardQueues>();
95 auto *firstRefVector = allocator->New<RefVector>();
96 firstRefVector->reserve(MAX_REFS);
97 uniqueRefsFromRemsets_.push_back(firstRefVector);
98 GetG1ObjectAllocator()->ReserveRegionIfNeeded();
99 }
100
101 template <class LanguageConfig>
102 G1GC<LanguageConfig>::~G1GC()
103 {
104 InternalAllocatorPtr allocator = this->GetInternalAllocator();
105 {
106 for (auto objVector : satbBuffList_) {
107 allocator->Delete(objVector);
108 }
109 }
110 allocator->Delete(updatedRefsQueue_);
111 ASSERT(uniqueRefsFromRemsets_.size() == 1);
112 allocator->Delete(uniqueRefsFromRemsets_.front());
113 uniqueRefsFromRemsets_.clear();
114 this->GetInternalAllocator()->Delete(updateRemsetWorker_);
115 }
116
117 template <class LanguageConfig>
118 void G1GC<LanguageConfig>::InitGCBits(panda::ObjectHeader *objHeader)
119 {
120 // The mutator may create a new object during the concurrent marking phase.
121 // In this case the GC may not mark it (for example, only vregs may contain a reference to the new object)
122 // and may collect it. To avoid such situations, add the object to a special buffer which
123 // will be processed at the remark stage.
124 if (this->GetCardTable()->GetCardPtr(ToUintPtr(objHeader))->IsYoung() ||
125 // Atomic with acquire order reason: read variable modified in GC thread
126 !concurrentMarkingFlag_.load(std::memory_order_acquire)) {
127 return;
128 }
129 os::memory::LockHolder lock(satbAndNewobjBufLock_);
130 newobjBuffer_.push_back(objHeader);
131 }
132
133 template <class LanguageConfig>
134 void G1GC<LanguageConfig>::PreStartupImp()
135 {
136 GenerationalGC<LanguageConfig>::DisableTenuredGC();
137 }
138
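// Compacts or promotes a single collection-set region. For full GC the moved objects are additionally
// recorded into movedObjectsVector; with GC workers enabled the compaction is offloaded to the worker
// task pool, otherwise it is performed in the current thread.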
139 template <class LanguageConfig>
140 template <RegionFlag REGION_TYPE, bool FULL_GC>
141 void G1GC<LanguageConfig>::DoRegionCompacting(Region *region, bool useGcWorkers,
142 PandaVector<PandaVector<ObjectHeader *> *> *movedObjectsVector)
143 {
144 auto internalAllocator = this->GetInternalAllocator();
145 ObjectVisitor movedObjectSaver;
146 if constexpr (FULL_GC) {
147 PandaVector<ObjectHeader *> *movedObjects;
148 if (useGcWorkers) {
149 movedObjects = internalAllocator->template New<PandaVector<ObjectHeader *>>();
150 movedObjectsVector->push_back(movedObjects);
151 size_t moveSize = region->GetAllocatedBytes();
152 movedObjects->reserve(moveSize / GetMinimalObjectSize());
153 } else {
154 ASSERT(movedObjectsVector->size() == 1);
155 movedObjects = movedObjectsVector->back();
156 }
157 movedObjectSaver = [movedObjects](ObjectHeader *object) { movedObjects->push_back(object); };
158 } else {
159 movedObjectSaver = []([[maybe_unused]] const ObjectHeader *object) {};
160 }
161
162 if (useGcWorkers) {
163 auto *storage =
164 internalAllocator->template New<GCRegionCompactWorkersTask::RegionDataType>(region, movedObjectSaver);
165 if (!this->GetWorkersTaskPool()->AddTask(GCRegionCompactWorkersTask(storage))) {
166 // We couldn't send a task to workers. Therefore, do it here.
167 internalAllocator->Delete(storage);
168 RegionCompactingImpl<true, REGION_TYPE>(region, movedObjectSaver);
169 }
170 } else {
171 RegionCompactingImpl<false, REGION_TYPE>(region, movedObjectSaver);
172 }
173 }
174
175 class ScopedRegionCollectionInfo {
176 public:
177 ScopedRegionCollectionInfo(const GC *gc, const char *title, const Region *region, bool isYoung,
178 const size_t &movedSize)
179 : gc_(gc),
180 title_(title),
181 region_(region),
182 isYoung_(isYoung),
183 movedSize_(movedSize),
184 startTimeNs_(time::GetCurrentTimeInNanos())
185 {
186 }
187
188 NO_COPY_SEMANTIC(ScopedRegionCollectionInfo);
189 NO_MOVE_SEMANTIC(ScopedRegionCollectionInfo);
190
191 ~ScopedRegionCollectionInfo()
192 {
193 if (gc_->IsLogDetailedGcCompactionInfoEnabled()) {
194 LOG(INFO, GC) << *this;
195 }
196 }
197
198 private:
199 const GC *gc_;
200 const char *title_;
201 const Region *region_;
202 bool isYoung_;
203 const size_t &movedSize_;
204 uint64_t startTimeNs_;
205
206 friend std::ostream &operator<<(std::ostream &log, const ScopedRegionCollectionInfo &regionInfo)
207 {
208 auto region = regionInfo.region_;
209 log << '[' << regionInfo.gc_->GetCounter() << "] " << regionInfo.title_ << ": ";
210 // Need to use the saved isYoung_ flag since region flags can be changed during region promotion
211 if (regionInfo.isYoung_) {
212 log << 'Y';
213 } else {
214 log << 'T';
215 }
216 DumpRegionRange(log, *region) << " A " << panda::helpers::MemoryConverter(region->GetAllocatedBytes()) << " L ";
217 if (regionInfo.isYoung_) {
218 log << '-';
219 } else {
220 log << panda::helpers::MemoryConverter(region->GetLiveBytes());
221 }
222 log << " RS " << region->GetRemSetSize() << " M " << panda::helpers::MemoryConverter(regionInfo.movedSize_)
223 << " D " << panda::helpers::TimeConverter(time::GetCurrentTimeInNanos() - regionInfo.startTimeNs_);
224 return log;
225 }
226 };
227
228 template <class LanguageConfig>
229 template <bool ATOMIC>
230 void G1GC<LanguageConfig>::RegionPromotionImpl(Region *region, const ObjectVisitor &movedObjectSaver)
231 {
232 size_t moveSize = region->GetAllocatedBytes();
233 size_t aliveMoveCount = 0;
234 size_t deadMoveCount = 0;
235 auto objectAllocator = this->GetG1ObjectAllocator();
236 auto promotionMoveChecker = [&aliveMoveCount, &movedObjectSaver](ObjectHeader *src) {
237 ++aliveMoveCount;
238 LOG_DEBUG_OBJECT_EVENTS << "PROMOTE YOUNG object " << src;
239 ASSERT(ObjectToRegion(src)->HasFlag(RegionFlag::IS_EDEN));
240 movedObjectSaver(src);
241 };
242 auto promotionDeathChecker = [this, &deadMoveCount](ObjectHeader *objectHeader) {
243 if (IsMarked(objectHeader)) {
244 return ObjectStatus::ALIVE_OBJECT;
245 }
246 ++deadMoveCount;
247 LOG_DEBUG_OBJECT_EVENTS << "PROMOTE DEAD YOUNG object " << objectHeader;
248 return ObjectStatus::DEAD_OBJECT;
249 };
250 ScopedRegionCollectionInfo collectionInfo(this, "Region promoted", region, true, moveSize);
251 if (g1TrackFreedObjects_) {
252 // We want to track all moved objects (including dead ones), therefore, iterate over all objects in the region.
253 objectAllocator->template PromoteYoungRegion<false>(region, promotionDeathChecker, promotionMoveChecker);
254 } else {
255 objectAllocator->template PromoteYoungRegion<true>(region, promotionDeathChecker, promotionMoveChecker);
256 ASSERT(deadMoveCount == 0);
257 }
258 region->RmvFlag(RegionFlag::IS_COLLECTION_SET);
259 this->memStats_.template RecordSizeMovedYoung<ATOMIC>(moveSize);
260 this->memStats_.template RecordCountMovedYoung<ATOMIC>(aliveMoveCount + deadMoveCount);
261 analytics_.ReportPromotedRegion();
262 analytics_.ReportLiveObjects(aliveMoveCount);
263 }
264
265 template <class LanguageConfig>
266 template <typename Handler>
267 void G1GC<LanguageConfig>::IterateOverRefsInMemRange(const MemRange &memRange, Region *region, Handler &refsHandler)
268 {
269 MarkBitmap *bitmap = nullptr;
270 if (region->IsEden()) {
271 ASSERT(this->IsFullGC());
272 bitmap = region->GetMarkBitmap();
273 } else {
274 bitmap = region->GetLiveBitmap();
275 }
276 auto *startAddress = ToVoidPtr(memRange.GetStartAddress());
277 auto *endAddress = ToVoidPtr(memRange.GetEndAddress());
278 auto visitor = [&refsHandler, startAddress, endAddress](void *mem) {
279 ObjectHelpers<LanguageConfig::LANG_TYPE>::template TraverseAllObjectsWithInfo<false>(
280 static_cast<ObjectHeader *>(mem), refsHandler, startAddress, endAddress);
281 };
282 if (region->HasFlag(RegionFlag::IS_LARGE_OBJECT)) {
283 bitmap->CallForMarkedChunkInHumongousRegion<false>(ToVoidPtr(region->Begin()), visitor);
284 } else {
285 bitmap->IterateOverMarkedChunkInRange(startAddress, endAddress, visitor);
286 }
287 }
288
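// Death checker for non-regular (humongous and non-movable) regions: an object is dead if it is not set
// in the region's live bitmap. For humongous regions the freed size and object count are accumulated
// into the provided counters.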
289 template <class LanguageConfig, bool CONCURRENTLY, bool COLLECT_CLASSES>
290 class NonRegularObjectsDeathChecker {
291 public:
292 NonRegularObjectsDeathChecker(size_t *deleteSize, size_t *deleteCount)
293 : deleteSize_(deleteSize), deleteCount_(deleteCount)
294 {
295 }
296
297 ~NonRegularObjectsDeathChecker() = default;
298
299 ObjectStatus operator()(ObjectHeader *objectHeader)
300 {
301 // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
302 if constexpr (CONCURRENTLY) {
303 // We may face a newly created object without live bitmap initialization.
304 if (objectHeader->template ClassAddr<BaseClass>() == nullptr) {
305 return ObjectStatus::ALIVE_OBJECT;
306 }
307 }
308 Region *region = ObjectToRegion(objectHeader);
309 auto liveBitmap = region->GetLiveBitmap();
310 if (liveBitmap->AtomicTest(objectHeader)) {
311 return ObjectStatus::ALIVE_OBJECT;
312 }
313 if constexpr (!COLLECT_CLASSES) {
314 if (ObjectHelpers<LanguageConfig::LANG_TYPE>::IsClassObject(objectHeader)) {
315 LOG_DEBUG_OBJECT_EVENTS << "DELETE NON MOVABLE class object " << objectHeader
316 << " but don't free memory";
317 return ObjectStatus::ALIVE_OBJECT;
318 }
319 }
320
321 if (region->HasFlag(RegionFlag::IS_LARGE_OBJECT)) {
322 LOG_DEBUG_OBJECT_EVENTS << "DELETE HUMONGOUS object " << objectHeader;
323 // humongous allocator increases size by region size
324 *deleteSize_ += region->Size();
325 ++(*deleteCount_);
326 } else {
327 ASSERT(region->HasFlag(RegionFlag::IS_NONMOVABLE));
328 LOG_DEBUG_OBJECT_EVENTS << "DELETE NON MOVABLE object " << objectHeader;
329 }
330 return ObjectStatus::DEAD_OBJECT;
331 }
332
333 DEFAULT_COPY_SEMANTIC(NonRegularObjectsDeathChecker);
334 DEFAULT_MOVE_SEMANTIC(NonRegularObjectsDeathChecker);
335
336 private:
337 size_t *deleteSize_;
338 size_t *deleteCount_;
339 };
340
341 template <class LanguageConfig>
342 template <bool ATOMIC, bool CONCURRENTLY>
343 void G1GC<LanguageConfig>::CollectEmptyRegions(GCTask &task, PandaVector<Region *> *emptyTenuredRegions)
344 {
345 ScopedTiming t(__FUNCTION__, *this->GetTiming());
346 CollectNonRegularObjects<ATOMIC, CONCURRENTLY>();
347 ClearEmptyTenuredMovableRegions<ATOMIC, CONCURRENTLY>(emptyTenuredRegions);
348 task.UpdateGCCollectionType(GCCollectionType::TENURED);
349 }
350
351 template <class LanguageConfig>
352 template <bool ATOMIC, bool CONCURRENTLY>
353 void G1GC<LanguageConfig>::CollectNonRegularObjects()
354 {
355 ScopedTiming t(__FUNCTION__, *this->GetTiming());
356 size_t deleteSize = 0;
357 size_t deleteCount = 0;
358 // Don't collect classes if --g1-track-free-objects is enabled.
359 // We need to know the size of objects while iterating over all objects in the collected region.
360 auto deathChecker =
361 g1TrackFreedObjects_
362 ? GCObjectVisitor(
363 NonRegularObjectsDeathChecker<LanguageConfig, CONCURRENTLY, false>(&deleteSize, &deleteCount))
364 : GCObjectVisitor(
365 NonRegularObjectsDeathChecker<LanguageConfig, CONCURRENTLY, true>(&deleteSize, &deleteCount));
366 auto regionVisitor = [this](PandaVector<Region *> &regions) {
367 if constexpr (CONCURRENTLY) {
368 updateRemsetWorker_->InvalidateRegions(&regions);
369 } else {
370 updateRemsetWorker_->GCInvalidateRegions(&regions);
371 }
372 };
373 this->GetG1ObjectAllocator()->CollectNonRegularRegions(regionVisitor, deathChecker);
374 this->memStats_.template RecordCountFreedTenured<ATOMIC>(deleteCount);
375 this->memStats_.template RecordSizeFreedTenured<ATOMIC>(deleteSize);
376 }
377
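// Drains the garbage-region priority queue and returns the tenured regular regions that have no live bytes.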
378 PandaVector<Region *> GetEmptyTenuredRegularRegionsFromQueue(
379 PandaPriorityQueue<std::pair<uint32_t, Region *>> garbageRegions)
380 {
381 PandaVector<Region *> emptyTenuredRegions;
382 while (!garbageRegions.empty()) {
383 auto *topRegion = garbageRegions.top().second;
384 if (topRegion->GetLiveBytes() == 0U) {
385 emptyTenuredRegions.push_back(topRegion);
386 }
387 garbageRegions.pop();
388 }
389 return emptyTenuredRegions;
390 }
391
392 template <class LanguageConfig>
393 template <bool ATOMIC, bool CONCURRENTLY>
394 void G1GC<LanguageConfig>::ClearEmptyTenuredMovableRegions(PandaVector<Region *> *emptyTenuredRegions)
395 {
396 ScopedTiming t(__FUNCTION__, *this->GetTiming());
397 {
398 ScopedTiming t1("Region Invalidation", *this->GetTiming());
399 if constexpr (CONCURRENTLY) {
400 updateRemsetWorker_->InvalidateRegions(emptyTenuredRegions);
401 } else {
402 updateRemsetWorker_->GCInvalidateRegions(emptyTenuredRegions);
403 }
404 }
405 size_t deleteSize = 0;
406 size_t deleteCount = 0;
407 auto deathVisitor = [](ObjectHeader *objectHeader) {
408 LOG_DEBUG_OBJECT_EVENTS << "DELETE tenured object " << objectHeader;
409 };
410 for (auto i : *emptyTenuredRegions) {
411 deleteCount += i->GetAllocatedObjects();
412 deleteSize += i->GetAllocatedBytes();
413 ASSERT(i->GetLiveBitmap()->FindFirstMarkedChunks() == nullptr);
414 if (g1TrackFreedObjects_) {
415 i->IterateOverObjects(deathVisitor);
416 }
417 }
418 {
419 ScopedTiming t2("Reset regions", *this->GetTiming());
420 if (CONCURRENTLY) {
421 this->GetG1ObjectAllocator()
422 ->template ResetRegions<RegionFlag::IS_OLD, RegionSpace::ReleaseRegionsPolicy::NoRelease,
423 OSPagesPolicy::IMMEDIATE_RETURN, true, PandaVector<Region *>>(
424 *emptyTenuredRegions);
425 } else {
426 this->GetG1ObjectAllocator()
427 ->template ResetRegions<RegionFlag::IS_OLD, RegionSpace::ReleaseRegionsPolicy::Release,
428 OSPagesPolicy::NO_RETURN, false, PandaVector<Region *>>(*emptyTenuredRegions);
429 }
430 }
431 this->memStats_.template RecordCountFreedTenured<ATOMIC>(deleteCount);
432 this->memStats_.template RecordSizeFreedTenured<ATOMIC>(deleteSize);
433 }
434
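// A young region is promoted as a whole (instead of being compacted) if it contains pinned objects or,
// outside of full GC, if its live bytes ratio reaches g1PromotionRegionAliveRate_.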
435 template <class LanguageConfig>
436 bool G1GC<LanguageConfig>::NeedToPromote(const Region *region) const
437 {
438 ASSERT(region->HasFlag(RegionFlag::IS_EDEN));
439 if (region->HasPinnedObjects()) {
440 return true;
441 }
442 if ((g1PromotionRegionAliveRate_ < PERCENT_100_D) && !this->IsFullGC()) {
443 size_t aliveBytes = region->GetLiveBytes();
444 double alivePercentage = static_cast<double>(aliveBytes) / region->Size() * PERCENT_100_D;
445 if (alivePercentage >= g1PromotionRegionAliveRate_) {
446 return true;
447 }
448 }
449 return false;
450 }
451
452 template <class LanguageConfig>
453 template <bool ATOMIC, RegionFlag REGION_TYPE>
454 void G1GC<LanguageConfig>::RegionCompactingImpl(Region *region, const ObjectVisitor &movedObjectSaver)
455 {
456 auto objectAllocator = this->GetG1ObjectAllocator();
457 // Live bytes in the region were already calculated for all marked objects during MixedMark
458 size_t moveSize = region->GetLiveBytes();
459 size_t moveCount = 0;
460 size_t allocatedSize = region->GetAllocatedBytes();
461 ASSERT(moveSize <= allocatedSize);
462 size_t deleteSize = allocatedSize - moveSize;
463 size_t deleteCount = 0;
464
465 auto moveChecker = [this, &moveCount, &movedObjectSaver](ObjectHeader *src, ObjectHeader *dst) {
466 LOG_DEBUG_OBJECT_EVENTS << "MOVE object " << src << " -> " << dst;
467 ASSERT(ObjectToRegion(dst)->HasFlag(RegionFlag::IS_OLD));
468 this->SetForwardAddress(src, dst);
469 ++moveCount;
470 movedObjectSaver(dst);
471 };
472
473 auto deathChecker = [this, &deleteCount](ObjectHeader *objectHeader) {
474 if (IsMarked(objectHeader)) {
475 return ObjectStatus::ALIVE_OBJECT;
476 }
477 ++deleteCount;
478 if constexpr (REGION_TYPE == RegionFlag::IS_EDEN) {
479 LOG_DEBUG_OBJECT_EVENTS << "DELETE YOUNG object " << objectHeader;
480 } else {
481 ASSERT(REGION_TYPE == RegionFlag::IS_OLD);
482 LOG_DEBUG_OBJECT_EVENTS << "DELETE TENURED object " << objectHeader;
483 }
484 return ObjectStatus::DEAD_OBJECT;
485 };
486 if constexpr (REGION_TYPE == RegionFlag::IS_EDEN) {
487 if (!this->NeedToPromote(region)) {
488 ScopedRegionCollectionInfo collectionInfo(this, "Region compacted", region, true, moveSize);
489 if (g1TrackFreedObjects_) {
490 // We want to track all freed objects, therefore, iterate over all objects in region.
491 objectAllocator->template CompactRegion<RegionFlag::IS_EDEN, false>(region, deathChecker, moveChecker);
492 } else {
493 objectAllocator->template CompactRegion<RegionFlag::IS_EDEN, true>(region, deathChecker, moveChecker);
494 // deleteCount is equal to 0 because we don't track allocations in TLABs by default.
495 // We will do it only when the PANDA_TRACK_TLAB_ALLOCATIONS key is set
496 ASSERT(deleteCount == 0);
497 }
498 this->memStats_.template RecordSizeMovedYoung<ATOMIC>(moveSize);
499 this->memStats_.template RecordCountMovedYoung<ATOMIC>(moveCount);
500 this->memStats_.template RecordSizeFreedYoung<ATOMIC>(deleteSize);
501 this->memStats_.template RecordCountFreedYoung<ATOMIC>(deleteCount);
502 analytics_.ReportEvacuatedBytes(moveSize);
503 analytics_.ReportLiveObjects(moveCount);
504 } else {
505 RegionPromotionImpl<ATOMIC>(region, movedObjectSaver);
506 }
507 } else {
508 ScopedRegionCollectionInfo collectionInfo(this, "Region compacted", region, false, moveSize);
509 ASSERT(region->HasFlag(RegionFlag::IS_OLD));
510 ASSERT(!region->HasFlag(RegionFlag::IS_NONMOVABLE) && !region->HasFlag(RegionFlag::IS_LARGE_OBJECT));
511 if (g1TrackFreedObjects_) {
512 // We want to track all freed objects, therefore, iterate over all objects in region.
513 objectAllocator->template CompactRegion<RegionFlag::IS_OLD, false>(region, deathChecker, moveChecker);
514 } else {
515 objectAllocator->template CompactRegion<RegionFlag::IS_OLD, true>(region, deathChecker, moveChecker);
516 size_t allocatedObjects = region->GetAllocatedObjects();
517 ASSERT(moveCount <= allocatedObjects);
518 ASSERT(deleteCount == 0);
519 deleteCount = allocatedObjects - moveCount;
520 }
521 this->memStats_.template RecordSizeMovedTenured<ATOMIC>(moveSize);
522 this->memStats_.template RecordCountMovedTenured<ATOMIC>(moveCount);
523 this->memStats_.template RecordSizeFreedTenured<ATOMIC>(deleteSize);
524 this->memStats_.template RecordCountFreedTenured<ATOMIC>(deleteCount);
525 }
526 }
527
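// Walks a range of moved objects and lets refUpdater fix the references they contain; for non-full GC
// the forwarded address is resolved first.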
528 template <class LanguageConfig, typename RefUpdater, bool FULL_GC>
529 void DoUpdateReferencesToMovedObjectsRange(typename GCUpdateRefsWorkersTask<FULL_GC>::MovedObjectsRange *movedObjects,
530 RefUpdater &refUpdater)
531 {
532 for (auto *obj : *movedObjects) {
533 if constexpr (!FULL_GC) {
534 obj = obj->IsForwarded() ? GetForwardAddress(obj) : obj;
535 }
536 ObjectHelpers<LanguageConfig::LANG_TYPE>::template TraverseAllObjectsWithInfo<false>(obj, refUpdater);
537 }
538 }
539
540 template <class LanguageConfig>
541 void G1GC<LanguageConfig>::WorkerTaskProcessing(GCWorkersTask *task, [[maybe_unused]] void *workerData)
542 {
543 switch (task->GetType()) {
544 case GCWorkersTaskTypes::TASK_MARKING: {
545 auto objectsStack = task->Cast<GCMarkWorkersTask>()->GetMarkingStack();
546 MarkStackMixed(objectsStack);
547 ASSERT(objectsStack->Empty());
548 this->GetInternalAllocator()->Delete(objectsStack);
549 break;
550 }
551 case GCWorkersTaskTypes::TASK_REMARK: {
552 auto *objectsStack = task->Cast<GCMarkWorkersTask>()->GetMarkingStack();
553 this->MarkStack(&marker_, objectsStack, CalcLiveBytesMarkPreprocess);
554 ASSERT(objectsStack->Empty());
555 this->GetInternalAllocator()->Delete(objectsStack);
556 break;
557 }
558 case GCWorkersTaskTypes::TASK_FULL_MARK: {
559 const ReferenceCheckPredicateT &refEnablePred = []([[maybe_unused]] const ObjectHeader *obj) {
560 // process all refs
561 return true;
562 };
563 auto *objectsStack = task->Cast<GCMarkWorkersTask>()->GetMarkingStack();
564 this->MarkStack(&marker_, objectsStack, CalcLiveBytesMarkPreprocess, refEnablePred);
565 ASSERT(objectsStack->Empty());
566 this->GetInternalAllocator()->Delete(objectsStack);
567 break;
568 }
569 case GCWorkersTaskTypes::TASK_REGION_COMPACTING: {
570 auto *data = task->Cast<GCRegionCompactWorkersTask>()->GetRegionData();
571 Region *region = data->first;
572 const ObjectVisitor &movedObjectsSaver = data->second;
573 if (region->HasFlag(RegionFlag::IS_EDEN)) {
574 RegionCompactingImpl<true, RegionFlag::IS_EDEN>(region, movedObjectsSaver);
575 } else if (region->HasFlag(RegionFlag::IS_OLD)) {
576 RegionCompactingImpl<true, RegionFlag::IS_OLD>(region, movedObjectsSaver);
577 } else {
578 LOG(FATAL, GC) << "Unsupported region type";
579 }
580 this->GetInternalAllocator()->Delete(data);
581 break;
582 }
583 case GCWorkersTaskTypes::TASK_RETURN_FREE_PAGES_TO_OS: {
584 PoolManager::GetMmapMemPool()->ReleasePagesInFreePools();
585 break;
586 }
587 case GCWorkersTaskTypes::TASK_ENQUEUE_REMSET_REFS: {
588 auto *movedObjectsRange = task->Cast<GCUpdateRefsWorkersTask<false>>()->GetMovedObjectsRange();
589 auto *taskUpdatedRefsQueue =
590 this->GetInternalAllocator()->template New<GCG1BarrierSet::ThreadLocalCardQueues>();
591 EnqueueRemsetRefUpdater<LanguageConfig> refUpdater(this->GetCardTable(), taskUpdatedRefsQueue,
592 regionSizeBits_);
593 DoUpdateReferencesToMovedObjectsRange<LanguageConfig, decltype(refUpdater), false>(movedObjectsRange,
594 refUpdater);
595 {
596 os::memory::LockHolder lock(gcWorkerQueueLock_);
597 updatedRefsQueue_->insert(updatedRefsQueue_->end(), taskUpdatedRefsQueue->begin(),
598 taskUpdatedRefsQueue->end());
599 }
600 this->GetInternalAllocator()->Delete(movedObjectsRange);
601 this->GetInternalAllocator()->Delete(taskUpdatedRefsQueue);
602 break;
603 }
604 default:
605 LOG(FATAL, GC) << "Unimplemented for " << GCWorkersTaskTypesToString(task->GetType());
606 UNREACHABLE();
607 }
608 }
609
610 template <class LanguageConfig>
611 void G1GC<LanguageConfig>::UpdateCollectionSet(const CollectionSet &collectibleRegions)
612 {
613 collectionSet_ = collectibleRegions;
614 for (auto r : collectionSet_) {
615 // we don't need to reset the flag, because we don't reuse collection-set regions
616 r->AddFlag(RegionFlag::IS_COLLECTION_SET);
617 LOG_DEBUG_GC << "dump region: " << *r;
618 }
619 }
620
621 template <class LanguageConfig>
622 void G1GC<LanguageConfig>::RunPhasesForRegions(panda::GCTask &task, const CollectionSet &collectibleRegions)
623 {
624 if (collectibleRegions.empty()) {
625 LOG_DEBUG_GC << "No regions specified for collection " << task.reason;
626 }
627 ASSERT(concurrentMarkingStack_.Empty());
628 this->GetObjectGenAllocator()->InvalidateSpaceData();
629 this->GetObjectGenAllocator()->UpdateSpaceData();
630 RunGC(task, collectibleRegions);
631 }
632
633 template <class LanguageConfig>
634 bool G1GC<LanguageConfig>::NeedToRunGC(const panda::GCTask &task)
635 {
636 return (task.reason == GCTaskCause::YOUNG_GC_CAUSE) || (task.reason == GCTaskCause::OOM_CAUSE) ||
637 (task.reason == GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE) ||
638 (task.reason == GCTaskCause::STARTUP_COMPLETE_CAUSE) || (task.reason == GCTaskCause::EXPLICIT_CAUSE) ||
639 (task.reason == GCTaskCause::NATIVE_ALLOC_CAUSE) || (task.reason == GCTaskCause::MIXED);
640 }
641
642 template <class LanguageConfig>
643 bool G1GC<LanguageConfig>::NeedFullGC(const panda::GCTask &task)
644 {
645 return this->IsExplicitFull(task) || (task.reason == GCTaskCause::OOM_CAUSE);
646 }
647
648 template <class LanguageConfig>
649 void G1GC<LanguageConfig>::RunPhasesImpl(panda::GCTask &task)
650 {
651 SuspendUpdateRemsetWorkerScope stopUpdateRemsetWorkerScope(updateRemsetWorker_);
652 interruptConcurrentFlag_ = false;
653 LOG_DEBUG_GC << "G1GC start, reason: " << task.reason;
654 LOG_DEBUG_GC << "Footprint before GC: " << this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
655 task.UpdateGCCollectionType(GCCollectionType::YOUNG);
656
657 size_t bytesInHeapBeforeMove = this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
658 {
659 ScopedTiming t("G1 GC", *this->GetTiming());
660 {
661 GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats());
662 this->memStats_.Reset();
663 if (NeedToRunGC(task)) {
664 // Check there is no concurrent mark running by another thread.
665 EnsurePreWrbDisabledInThreads();
666
667 if (NeedFullGC(task)) {
668 task.collectionType = GCCollectionType::FULL;
669 RunFullGC(task);
670 } else {
671 bool isMixed = false;
672 if (task.reason == GCTaskCause::MIXED && !interruptConcurrentFlag_) {
673 regionGarbageRateThreshold_ = 0;
674 isMixed = true;
675 } else {
676 // Atomic with acquire order reason: to see changes made by the GC thread (which does concurrent
677 // marking and then sets isMixedGcRequired_) in the mutator thread which waits for the end of
678 // concurrent marking.
679 isMixed = isMixedGcRequired_.load(std::memory_order_acquire);
680 }
681 task.collectionType = isMixed ? GCCollectionType::MIXED : GCCollectionType::YOUNG;
682 auto collectibleRegions = GetCollectibleRegions(task, isMixed);
683 if (!collectibleRegions.empty() && HaveEnoughSpaceToMove(collectibleRegions)) {
684 // Ordinary collection flow
685 RunMixedGC(task, collectibleRegions);
686 } else {
687 LOG_DEBUG_GC << "Failed to run gc: "
688 << (collectibleRegions.empty() ? "nothing to collect in movable space"
689 : "not enough free regions to move");
690 }
691 }
692 }
693 }
694 if (task.reason == GCTaskCause::MIXED) {
695 // A mixed GC was forced. This GC type overrides specific settings,
696 // so we need to restore them.
697 regionGarbageRateThreshold_ = this->GetSettings()->G1RegionGarbageRateThreshold();
698 }
699 if (ScheduleMixedGCAndConcurrentMark(task)) {
700 RunConcurrentMark(task);
701 }
702 }
703 // Update global and GC memstats based on generational memstats information
704 // We will update tenured stats and record allocations, so set 'true' values
705 this->UpdateMemStats(bytesInHeapBeforeMove, true, true);
706
707 LOG_DEBUG_GC << "Footprint after GC: " << this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
708 this->SetFullGC(false);
709 }
710
711 template <class LanguageConfig>
712 void G1GC<LanguageConfig>::RunFullGC(panda::GCTask &task)
713 {
714 ScopedTiming t("Run Full GC", *this->GetTiming());
715 GetG1ObjectAllocator()->template ReleaseEmptyRegions<RegionFlag::IS_OLD, OSPagesPolicy::NO_RETURN>();
716 LOG_DEBUG_GC << "Explicit Full GC invocation due to a reason: " << task.reason;
717 this->SetFullGC(true);
718 FullMarking(task);
719 if (!HaveEnoughRegionsToMove(1)) {
720 GetG1ObjectAllocator()->ReleaseReservedRegion();
721 // After releasing the reserved region we always have at least 1 region for tenured collection
722 ASSERT(HaveEnoughRegionsToMove(1));
723 }
724 CollectionSet collectionSet = GetFullCollectionSet();
725 PrepareYoungRegionsForFullGC(collectionSet);
726 auto curRegionIt = collectionSet.Tenured().begin();
727 auto endRegionIt = collectionSet.Tenured().end();
728 while (curRegionIt != endRegionIt) {
729 ASSERT(HaveEnoughRegionsToMove(1));
730 CollectionSet cs;
731 while ((curRegionIt != endRegionIt) && (HaveEnoughRegionsToMove(cs.Movable().size() + 1))) {
732 Region *region = *curRegionIt;
733 curRegionIt++;
734 if (region->GetGarbageBytes() == 0) {
735 double regionFragmentation = region->GetFragmentation();
736 if (regionFragmentation < this->GetSettings()->G1FullGCRegionFragmentationRate()) {
737 LOG_DEBUG_GC << "Skip region " << *region << " because it has no garbage inside";
738 continue;
739 }
740 LOG_DEBUG_GC << "Add region " << *region
741 << " to a collection set because it has a big fragmentation = " << regionFragmentation;
742 } else {
743 LOG_DEBUG_GC << "Add region " << *region << " to a collection set";
744 }
745 cs.AddRegion(region);
746 }
747 UpdateCollectionSet(cs);
748 CollectAndMove<true>(cs);
749 LOG_DEBUG_GC << "Iterative full GC, collected " << cs.size() << " regions";
750 }
751 // Reserve a region to prevent OOM in case there is a lot of garbage in tenured space
752 GetG1ObjectAllocator()->ReserveRegionIfNeeded();
753 if (!collectionSet.Young().empty()) {
754 CollectionSet cs(collectionSet.Young());
755 if (HaveEnoughSpaceToMove(cs)) {
756 LOG_DEBUG_GC << "Iterative full GC. Collecting " << cs.size() << " young regions";
757 UpdateCollectionSet(cs);
758 CollectAndMove<true>(cs);
759 } else {
760 RestoreYoungRegionsAfterFullGC(cs);
761 LOG_INFO_GC << "Failed to run gc, not enough free regions for young";
762 LOG_INFO_GC << "Accounted total object used bytes = "
763 << PoolManager::GetMmapMemPool()->GetObjectUsedBytes();
764 }
765 }
766 {
767 ScopedTiming releasePages("Release Pages in Free Pools", *this->GetTiming());
768 bool useGcWorkers = this->GetSettings()->GCWorkersCount() != 0;
769 if (useGcWorkers) {
770 if (!this->GetWorkersTaskPool()->AddTask(GCWorkersTaskTypes::TASK_RETURN_FREE_PAGES_TO_OS)) {
771 PoolManager::GetMmapMemPool()->ReleasePagesInFreePools();
772 }
773 } else {
774 PoolManager::GetMmapMemPool()->ReleasePagesInFreePools();
775 }
776 }
777 this->SetFullGC(false);
778 collectionSet_.clear();
779 }
780
781 template <class LanguageConfig>
782 void G1GC<LanguageConfig>::RunMixedGC(panda::GCTask &task, const CollectionSet &collectionSet)
783 {
784 auto startTime = panda::time::GetCurrentTimeInNanos();
785 analytics_.ReportCollectionStart(startTime);
786 LOG_DEBUG_GC << "Collect regions size:" << collectionSet.size();
787 UpdateCollectionSet(collectionSet);
788 RunPhasesForRegions(task, collectionSet);
789 auto endTime = panda::time::GetCurrentTimeInNanos();
790 this->GetStats()->AddTimeValue(endTime - startTime, TimeTypeStats::YOUNG_TOTAL_TIME);
791 g1PauseTracker_.AddPauseInNanos(startTime, endTime);
792 analytics_.ReportCollectionEnd(endTime, collectionSet);
793 collectionSet_.clear();
794 }
795
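// Decides what to do after the current pause: returns true if concurrent marking should be started,
// returns false if a mixed GC is already scheduled or nothing else needs to be done.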
796 template <class LanguageConfig>
797 bool G1GC<LanguageConfig>::ScheduleMixedGCAndConcurrentMark(panda::GCTask &task)
798 {
799 // Atomic with acquire order reason: to see changes made by the GC thread (which does concurrent marking and then sets
800 // isMixedGcRequired_) in the mutator thread which waits for the end of concurrent marking.
801 if (isMixedGcRequired_.load(std::memory_order_acquire)) {
802 if (!HaveGarbageRegions()) {
803 // Atomic with release order reason: to make changes made by the GC thread (which does concurrent marking and
804 // then sets isMixedGcRequired_) visible to the mutator thread which waits for the end of concurrent marking.
805 isMixedGcRequired_.store(false, std::memory_order_release);
806 }
807 return false; // don't run concurrent mark
808 }
809 concurrentMarkingFlag_ = !interruptConcurrentFlag_ && this->ShouldRunTenuredGC(task);
810 // Atomic with relaxed order reason: read variable modified in the same thread
811 return concurrentMarkingFlag_.load(std::memory_order_relaxed);
812 }
813
814 template <class LanguageConfig>
815 template <bool ENABLE_BARRIER>
816 void G1GC<LanguageConfig>::UpdatePreWrbEntrypointInThreads()
817 {
818 ObjRefProcessFunc entrypointFunc = nullptr;
819 if constexpr (ENABLE_BARRIER) {
820 auto addr = this->GetBarrierSet()->GetBarrierOperand(panda::mem::BarrierPosition::BARRIER_POSITION_PRE,
821 "STORE_IN_BUFF_TO_MARK_FUNC");
822 entrypointFunc = std::get<ObjRefProcessFunc>(addr.GetValue());
823 }
824 auto setEntrypoint = [this, &entrypointFunc](ManagedThread *thread) {
825 void *entrypointFuncUntyped = reinterpret_cast<void *>(entrypointFunc);
826 ASSERT(thread->GetPreWrbEntrypoint() != entrypointFuncUntyped);
827 thread->SetPreWrbEntrypoint(entrypointFuncUntyped);
828
829 // currentPreWrbEntrypoint_ is not required to be set multiple times, but this has to be done under the
830 // EnumerateThreads()'s lock, hence the repetition
831 currentPreWrbEntrypoint_ = entrypointFunc;
832 return true;
833 };
834 this->GetPandaVm()->GetThreadManager()->EnumerateThreads(setEntrypoint);
835 }
836
837 template <class LanguageConfig>
838 void G1GC<LanguageConfig>::EnsurePreWrbDisabledInThreads()
839 {
840 [[maybe_unused]] auto callback = [](ManagedThread *thread) { return thread->GetPreWrbEntrypoint() == nullptr; };
841 ASSERT(this->GetPandaVm()->GetThreadManager()->EnumerateThreads(callback));
842 }
843
844 template <class LanguageConfig>
845 void G1GC<LanguageConfig>::RunConcurrentMark(panda::GCTask &task)
846 {
847 ASSERT(collectionSet_.empty());
848 // Init concurrent marking
849 EnablePreWrbInThreads();
850
851 if (this->GetSettings()->BeforeG1ConcurrentHeapVerification()) {
852 trace::ScopedTrace postHeapVerifierTrace("PostGCHeapVeriFier before concurrent");
853 size_t failCount = this->VerifyHeap();
854 if (this->GetSettings()->FailOnHeapVerification() && failCount > 0) {
855 LOG(FATAL, GC) << "Heap corrupted after GC, HeapVerifier found " << failCount << " corruptions";
856 }
857 }
858 ConcurrentMarking(task);
859 }
860
861 template <class LanguageConfig>
862 bool G1GC<LanguageConfig>::HaveGarbageRegions()
863 {
864 // Use GetTopGarbageRegions because it doesn't return current regions
865 auto regions = GetG1ObjectAllocator()->template GetTopGarbageRegions<false>();
866 return HaveGarbageRegions(regions);
867 }
868
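// GetTopGarbageRegions() keeps the region with the most garbage on top of the queue, so it is enough to
// check whether the top region reaches regionGarbageRateThreshold_.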
869 template <class LanguageConfig>
870 bool G1GC<LanguageConfig>::HaveGarbageRegions(const PandaPriorityQueue<std::pair<uint32_t, Region *>> &regions)
871 {
872 if (regions.empty()) {
873 return false;
874 }
875 auto *topRegion = regions.top().second;
876 double garbageRate = static_cast<double>(topRegion->GetGarbageBytes()) / topRegion->Size();
877 return garbageRate >= regionGarbageRateThreshold_;
878 }
879
880 template <class LanguageConfig>
881 void G1GC<LanguageConfig>::ProcessDirtyCards()
882 {
883 ScopedTiming t(__FUNCTION__, *this->GetTiming());
884 updateRemsetWorker_->GCProcessCards();
885 }
886
887 template <class LanguageConfig>
888 void G1GC<LanguageConfig>::CreateUpdateRemsetWorker()
889 {
890 InternalAllocatorPtr allocator = this->GetInternalAllocator();
891 // to make TSAN happy, because we access updatedRefsQueue_ inside the constructor of UpdateRemsetWorker
892 os::memory::LockHolder lock(queueLock_);
893 if (this->GetSettings()->UseThreadPoolForGC()) {
894 updateRemsetWorker_ = allocator->template New<UpdateRemsetThread<LanguageConfig>>(
895 this, updatedRefsQueue_, &queueLock_, this->GetG1ObjectAllocator()->GetRegionSize(),
896 this->GetSettings()->G1EnableConcurrentUpdateRemset(),
897 this->GetSettings()->G1MinConcurrentCardsToProcess());
898 } else {
899 ASSERT(this->GetSettings()->UseTaskManagerForGC());
900 updateRemsetWorker_ = allocator->template New<UpdateRemsetTaskQueue<LanguageConfig>>(
901 this, updatedRefsQueue_, &queueLock_, this->GetG1ObjectAllocator()->GetRegionSize(),
902 this->GetSettings()->G1EnableConcurrentUpdateRemset(),
903 this->GetSettings()->G1MinConcurrentCardsToProcess());
904 }
905 ASSERT(updateRemsetWorker_ != nullptr);
906 }
907
908 template <class LanguageConfig>
909 void G1GC<LanguageConfig>::InitializeImpl()
910 {
911 // GC saved the PandaVM instance, so we get allocator from the PandaVM.
912 InternalAllocatorPtr allocator = this->GetInternalAllocator();
913 this->CreateCardTable(allocator, PoolManager::GetMmapMemPool()->GetMinObjectAddress(),
914 PoolManager::GetMmapMemPool()->GetTotalObjectSize());
915
916 auto barrierSet =
917 allocator->New<GCG1BarrierSet>(allocator, &PreWrbFuncEntrypoint, &PostWrbUpdateCardFuncEntrypoint,
918 panda::helpers::math::GetIntLog2(this->GetG1ObjectAllocator()->GetRegionSize()),
919 this->GetCardTable(), updatedRefsQueue_, &queueLock_);
920 ASSERT(barrierSet != nullptr);
921 this->SetGCBarrierSet(barrierSet);
922
923 this->CreateWorkersTaskPool();
924 CreateUpdateRemsetWorker();
925 LOG_DEBUG_GC << "G1GC initialized";
926 }
927
928 template <class LanguageConfig>
929 void G1GC<LanguageConfig>::MarkObject(ObjectHeader *object)
930 {
931 G1GCPauseMarker<LanguageConfig>::Mark(object);
932 }
933
934 template <class LanguageConfig>
935 bool G1GC<LanguageConfig>::MarkObjectIfNotMarked(ObjectHeader *object)
936 {
937 ASSERT(object != nullptr);
938 if (this->GetGCPhase() == GCPhase::GC_PHASE_MARK_YOUNG) {
939 return mixedMarker_.MarkIfNotMarked(object);
940 }
941 return marker_.MarkIfNotMarked(object);
942 }
943
944 template <class LanguageConfig>
945 void G1GC<LanguageConfig>::InitGCBitsForAllocationInTLAB([[maybe_unused]] panda::ObjectHeader *object)
946 {
947 LOG(FATAL, GC) << "Not implemented";
948 }
949
950 template <class LanguageConfig>
951 bool G1GC<LanguageConfig>::IsMarked(panda::ObjectHeader const *object) const
952 {
953 return G1GCPauseMarker<LanguageConfig>::IsMarked(object);
954 }
955
956 template <class LanguageConfig>
957 void G1GC<LanguageConfig>::MarkStackMixed(GCMarkingStackType *stack)
958 {
959 ASSERT(stack != nullptr);
960 trace::ScopedTrace scopedTrace(__FUNCTION__);
961 auto refPred = [this](const ObjectHeader *obj) { return InGCSweepRange(obj); };
962 auto visitor = [this, stack, &refPred](const ObjectHeader *object) {
963 ASSERT(mixedMarker_.IsMarked(object));
964 ValidateObject(nullptr, object);
965 auto *objectClass = object->template ClassAddr<BaseClass>();
966 // We need annotation here for the FullMemoryBarrier used in InitializeClassByIdEntrypoint
967 TSAN_ANNOTATE_HAPPENS_AFTER(objectClass);
968 LOG_DEBUG_GC << "Current object: " << GetDebugInfoAboutObject(object);
969
970 ASSERT(!object->IsForwarded());
971 ASSERT(InGCSweepRange(object));
972 CalcLiveBytesMarkPreprocess(object, objectClass);
973 mixedMarker_.MarkInstance(stack, object, objectClass, refPred);
974 };
975 {
976 auto markedObjects = stack->TraverseObjects(visitor);
977 os::memory::LockHolder lh(mixedMarkedObjectsMutex_);
978 if (mixedMarkedObjects_.empty()) {
979 mixedMarkedObjects_ = std::move(markedObjects);
980 } else {
981 mixedMarkedObjects_.insert(mixedMarkedObjects_.end(), markedObjects.begin(), markedObjects.end());
982 }
983 }
984 }
985
986 template <class LanguageConfig>
987 void G1GC<LanguageConfig>::MarkStackFull(GCMarkingStackType *stack)
988 {
989 this->MarkStack(&marker_, stack, CalcLiveBytesMarkPreprocess, GC::EmptyReferenceProcessPredicate);
990 }
991
992 template <class LanguageConfig>
993 void G1GC<LanguageConfig>::MarkReferences(GCMarkingStackType *references, GCPhase gcPhase)
994 {
995 trace::ScopedTrace scopedTrace(__FUNCTION__);
996 LOG_DEBUG_GC << "Start marking " << references->Size() << " references";
997 // mark refs only on mixed-gc and on full_gc. On concurrent mark we don't handle any references
998 if (gcPhase == GCPhase::GC_PHASE_MARK_YOUNG) {
999 MarkStackMixed(references);
1000 } else if (this->IsFullGC()) {
1001 MarkStackFull(references);
1002 } else if (gcPhase == GCPhase::GC_PHASE_INITIAL_MARK || gcPhase == GCPhase::GC_PHASE_MARK ||
1003 gcPhase == GCPhase::GC_PHASE_REMARK) {
1004 // nothing
1005 } else {
1006 LOG_DEBUG_GC << "phase: " << GCScopedPhase::GetPhaseName(gcPhase);
1007 UNREACHABLE();
1008 }
1009 }
1010
1011 template <class LanguageConfig>
1012 bool G1GC<LanguageConfig>::InGCSweepRange(const ObjectHeader *object) const
1013 {
1014 ASSERT_DO(!this->collectionSet_.empty() || this->IsFullGC(),
1015 std::cerr << "Incorrect phase in InGCSweepRange: " << static_cast<size_t>(this->GetGCPhase()) << "\n");
1016 ASSERT(IsHeapSpace(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(object)));
1017 Region *objRegion = ObjectToRegion(object);
1018 return objRegion->IsInCollectionSet();
1019 }
1020
1021 static bool RemsetRegionPredicate(const Region *r)
1022 {
1023 // In case of mixed GC don't process remsets of the tenured regions which are in the collection set
1024 return !r->HasFlag(IS_COLLECTION_SET);
1025 }
1026
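// Pause of a young/mixed collection: handles pending dirty cards, marks and evacuates the collection set,
// re-enqueues dirty cards and clears the cached remset references.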
1027 template <class LanguageConfig>
1028 void G1GC<LanguageConfig>::RunGC(GCTask &task, const CollectionSet &collectibleRegions)
1029 {
1030 ASSERT(!this->IsFullGC());
1031 GCScope<TRACE_TIMING> scopedTrace(__FUNCTION__, this);
1032 LOG_DEBUG_GC << "GC start";
1033 uint64_t youngPauseTime;
1034 {
1035 time::Timer timer(&youngPauseTime, true);
1036 HandlePendingDirtyCards();
1037 MemRange dirtyCardsRange = MixedMarkAndCacheRefs(task, collectibleRegions);
1038 ClearDirtyAndYoungCards(dirtyCardsRange);
1039 CollectAndMove<false>(collectibleRegions);
1040 ReenqueueDirtyCards();
1041 ClearRefsFromRemsetsCache();
1042 this->GetObjectGenAllocator()->InvalidateSpaceData();
1043 }
1044 if (youngPauseTime > 0) {
1045 this->GetStats()->AddTimeValue(youngPauseTime, TimeTypeStats::YOUNG_PAUSED_TIME);
1046 }
1047 LOG_DEBUG_GC << "G1GC RunGC end";
1048 }
1049
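// Marks the collection set on pause: clears the mark bitmaps of the collected regions, caches cross-region
// references from remsets, marks objects reachable from roots and processes references.
// Returns the memory range of the dirty cards collected while caching remsets.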
1050 template <class LanguageConfig>
1051 MemRange G1GC<LanguageConfig>::MixedMarkAndCacheRefs(const GCTask &task, const CollectionSet &collectibleRegions)
1052 {
1053 GCScope<TRACE_TIMING_PHASE> scopedTrace(__FUNCTION__, this, GCPhase::GC_PHASE_MARK_YOUNG);
1054 bool useGcWorkers = this->GetSettings()->ParallelMarkingEnabled();
1055 GCMarkingStackType objectsStack(this, useGcWorkers ? this->GetSettings()->GCRootMarkingStackMaxSize() : 0,
1056 useGcWorkers ? this->GetSettings()->GCWorkersMarkingStackMaxSize() : 0,
1057 GCWorkersTaskTypes::TASK_MARKING,
1058 this->GetSettings()->GCMarkingStackNewTasksFrequency());
1059 for (Region *region : collectibleRegions) {
1060 region->GetMarkBitmap()->ClearAllBits();
1061 // Calculate live bytes during marking phase
1062 region->SetLiveBytes(0U);
1063 }
1064 ASSERT(this->GetReferenceProcessor()->GetReferenceQueueSize() ==
1065 0); // all references should have been processed during the previous GC
1066 // Iterate over roots and add other roots
1067 // 0. Pre-process refs queue and fill RemSets (should be done later in background)
1068 // Note: we need to process only tenured -> young refs,
1069 // since we reach them via the object graph from tenured roots;
1070 // because we process all young regions at young GC, we will find all required references
1071 RefCacheBuilder<LanguageConfig> builder(this, &uniqueRefsFromRemsets_, regionSizeBits_, &objectsStack);
1072 auto refsChecker = [this, &builder](const MemRange &memRange, Region *region) {
1073 IterateOverRefsInMemRange(memRange, region, builder);
1074 return builder.AllCrossRegionRefsProcessed();
1075 };
1076 MemRange dirtyCardsRange = CacheRefsFromRemsets(refsChecker);
1077
1078 auto refPred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
1079 GCRootVisitor gcMarkCollectionSet = [&objectsStack, this, &refPred](const GCRoot &gcRoot) {
1080 ObjectHeader *rootObject = gcRoot.GetObjectHeader();
1081 ObjectHeader *fromObject = gcRoot.GetFromObjectHeader();
1082 LOG_DEBUG_GC << "Handle root " << GetDebugInfoAboutObject(rootObject) << " from: " << gcRoot.GetType();
1083 if (UNLIKELY(fromObject != nullptr) &&
1084 this->IsReference(fromObject->NotAtomicClassAddr<BaseClass>(), fromObject, refPred)) {
1085 LOG_DEBUG_GC << "Add reference: " << GetDebugInfoAboutObject(fromObject) << " to stack";
1086 mixedMarker_.Mark(fromObject);
1087 this->ProcessReference(&objectsStack, fromObject->NotAtomicClassAddr<BaseClass>(), fromObject,
1088 GC::EmptyReferenceProcessPredicate);
1089 } else {
1090 // Skip non-collection-set roots
1091 auto rootObjectPtr = gcRoot.GetObjectHeader();
1092 ASSERT(rootObjectPtr != nullptr);
1093 if (mixedMarker_.MarkIfNotMarked(rootObjectPtr)) {
1094 ASSERT(this->InGCSweepRange(rootObjectPtr));
1095 LOG_DEBUG_GC << "root " << GetDebugInfoAboutObject(rootObjectPtr);
1096 objectsStack.PushToStack(gcRoot.GetType(), rootObjectPtr);
1097 } else {
1098 LOG_DEBUG_GC << "Skip root for young mark: " << std::hex << rootObjectPtr;
1099 }
1100 }
1101 };
1102
1103 analytics_.ReportMarkingStart(panda::time::GetCurrentTimeInNanos());
1104 {
1105 GCScope<TRACE_TIMING> markingCollectionSetRootsTrace("Marking roots collection-set", this);
1106
1107 this->VisitRoots(gcMarkCollectionSet, VisitGCRootFlags::ACCESS_ROOT_NONE);
1108 }
1109 {
1110 GCScope<TRACE_TIMING> markStackTiming("MarkStack", this);
1111 this->MarkStackMixed(&objectsStack);
1112 ASSERT(objectsStack.Empty());
1113 if (useGcWorkers) {
1114 this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1115 }
1116 }
1117
1118 auto refClearPred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
1119 this->GetPandaVm()->HandleReferences(task, refClearPred);
1120
1121 analytics_.ReportMarkingEnd(panda::time::GetCurrentTimeInNanos());
1122
1123 // HandleReferences could write new barriers, so we need to handle them before moving
1124 ProcessDirtyCards();
1125 return dirtyCardsRange;
1126 }
1127
1128 template <class LanguageConfig>
1129 HeapVerifierIntoGC<LanguageConfig> G1GC<LanguageConfig>::CollectVerificationInfo(const CollectionSet &collectionSet)
1130 {
1131 HeapVerifierIntoGC<LanguageConfig> collectVerifier(this->GetPandaVm()->GetHeapManager());
1132 if (this->GetSettings()->IntoGCHeapVerification()) {
1133 ScopedTiming collectVerificationTiming(__FUNCTION__, *this->GetTiming());
1134 PandaVector<MemRange> memRanges;
1135 memRanges.reserve(collectionSet.size());
1136 std::for_each(collectionSet.begin(), collectionSet.end(),
1137 [&memRanges](const Region *region) { memRanges.emplace_back(region->Begin(), region->End()); });
1138 collectVerifier.CollectVerificationInfo(std::move(memRanges));
1139 }
1140 return collectVerifier;
1141 }
1142
1143 template <class LanguageConfig>
1144 void G1GC<LanguageConfig>::VerifyCollectAndMove(HeapVerifierIntoGC<LanguageConfig> &&collectVerifier,
1145 const CollectionSet &collectionSet)
1146 {
1147 if (this->GetSettings()->IntoGCHeapVerification()) {
1148 ScopedTiming verificationTiming(__FUNCTION__, *this->GetTiming());
1149 PandaVector<MemRange> aliveMemRange;
1150 std::for_each(collectionSet.begin(), collectionSet.end(), [&aliveMemRange](const Region *region) {
1151 if (region->HasFlag(RegionFlag::IS_PROMOTED)) {
1152 aliveMemRange.emplace_back(region->Begin(), region->End());
1153 }
1154 });
1155 size_t failsCount = collectVerifier.VerifyAll(std::move(aliveMemRange));
1156 if (this->GetSettings()->FailOnHeapVerification() && failsCount > 0U) {
1157 PandaStringStream logStream;
1158 logStream << "Collection set size: " << collectionSet.size() << "\n";
1159 for (const auto r : collectionSet) {
1160 logStream << *r << (r->HasFlag(RegionFlag::IS_PROMOTED) ? " was promoted\n" : "\n");
1161 }
1162 LOG(FATAL, GC) << "Heap was corrupted during CollectAndMove GC phase, HeapVerifier found " << failsCount
1163 << " corruptions\n"
1164 << logStream.str();
1165 }
1166 }
1167 }
1168
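// Evacuation pause: compacts or promotes every region of the collection set, updates references to the
// moved objects, verifies the result if requested and resets the collected regions.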
1169 template <class LanguageConfig>
1170 template <bool FULL_GC>
1171 // NOLINTNEXTLINE(readability-function-size)
1172 bool G1GC<LanguageConfig>::CollectAndMove(const CollectionSet &collectionSet)
1173 {
1174 GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_COLLECT_YOUNG_AND_MOVE);
1175 LOG_DEBUG_GC << "== G1GC CollectAndMove start ==";
1176 auto internalAllocator = this->GetInternalAllocator();
1177 bool useGcWorkers = this->GetSettings()->ParallelCompactingEnabled();
1178
1179 PandaVector<PandaVector<ObjectHeader *> *> movedObjectsVector;
1180 HeapVerifierIntoGC<LanguageConfig> collectVerifier = this->CollectVerificationInfo(collectionSet);
1181 {
1182 GCScope<TRACE_TIMING> compactRegions("CompactRegions", this);
1183 analytics_.ReportEvacuationStart(panda::time::GetCurrentTimeInNanos());
1184 if constexpr (FULL_GC) {
1185 if (!useGcWorkers) {
1186 auto vector = internalAllocator->template New<PandaVector<ObjectHeader *>>();
1187 movedObjectsVector.push_back(vector);
1188 }
1189 }
1190 for (auto r : collectionSet.Young()) {
1191 this->DoRegionCompacting<RegionFlag::IS_EDEN, FULL_GC>(r, useGcWorkers, &movedObjectsVector);
1192 }
1193 for (auto r : collectionSet.Tenured()) {
1194 this->DoRegionCompacting<RegionFlag::IS_OLD, FULL_GC>(r, useGcWorkers, &movedObjectsVector);
1195 }
1196
1197 if (useGcWorkers) {
1198 this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1199 }
1200
1201 analytics_.ReportEvacuationEnd(panda::time::GetCurrentTimeInNanos());
1202 }
1203
1204 MovedObjectsContainer<FULL_GC> *movedObjectsContainer = nullptr;
1205 if constexpr (FULL_GC) {
1206 movedObjectsContainer = &movedObjectsVector;
1207 } else {
1208 movedObjectsContainer = &mixedMarkedObjects_;
1209 }
1210
1211 {
1212 os::memory::LockHolder lock(queueLock_);
1213 analytics_.ReportUpdateRefsStart(panda::time::GetCurrentTimeInNanos());
1214 if (this->GetSettings()->ParallelRefUpdatingEnabled()) {
1215 UpdateRefsToMovedObjects<FULL_GC, true>(movedObjectsContainer);
1216 } else {
1217 UpdateRefsToMovedObjects<FULL_GC, false>(movedObjectsContainer);
1218 }
1219 analytics_.ReportUpdateRefsEnd(panda::time::GetCurrentTimeInNanos());
1220 ActualizeRemSets();
1221 }
1222
1223 VerifyCollectAndMove(std::move(collectVerifier), collectionSet);
1224 SweepRegularVmRefs();
1225
1226 auto objectAllocator = this->GetG1ObjectAllocator();
1227 if (!collectionSet.Young().empty()) {
1228 objectAllocator->ResetYoungAllocator();
1229 }
1230 {
1231 GCScope<TRACE_TIMING> resetRegions("ResetRegions", this);
1232 if (!this->IsFullGC()) {
1233 objectAllocator->template ResetRegions<RegionFlag::IS_OLD, RegionSpace::ReleaseRegionsPolicy::NoRelease,
1234 OSPagesPolicy::IMMEDIATE_RETURN, false>(collectionSet.Tenured());
1235 } else {
1236 objectAllocator->template ResetRegions<RegionFlag::IS_OLD, RegionSpace::ReleaseRegionsPolicy::Release,
1237 OSPagesPolicy::NO_RETURN, false>(collectionSet.Tenured());
1238 }
1239 }
1240 {
1241 // Don't forget to delete all temporary elements
1242 GCScope<TRACE_TIMING> clearMovedObjects("ClearMovedObjects", this);
1243 if constexpr (FULL_GC) {
1244 if (useGcWorkers) {
1245 for (auto r : movedObjectsVector) {
1246 internalAllocator->Delete(r);
1247 }
1248 } else {
1249 ASSERT(movedObjectsVector.size() == 1);
1250 internalAllocator->Delete(movedObjectsVector.back());
1251 }
1252 } else {
1253 for (auto r : mixedMarkedObjects_) {
1254 internalAllocator->Delete(r);
1255 }
1256 mixedMarkedObjects_.clear();
1257 }
1258 }
1259
1260 LOG_DEBUG_GC << "== G1GC CollectAndMove end ==";
1261 return true;
1262 }
1263
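// Chooses the reference updater: UpdateRemsetRefUpdater for full GC, otherwise EnqueueRemsetRefUpdater,
// which records the updated cards into updatedRefQueue.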
1264 template <class LanguageConfig>
1265 template <bool FULL_GC, bool NEED_LOCK>
1266 std::conditional_t<FULL_GC, UpdateRemsetRefUpdater<LanguageConfig, NEED_LOCK>, EnqueueRemsetRefUpdater<LanguageConfig>>
1267 G1GC<LanguageConfig>::CreateRefUpdater([[maybe_unused]] GCG1BarrierSet::ThreadLocalCardQueues *updatedRefQueue) const
1268 {
1269 if constexpr (FULL_GC) {
1270 return UpdateRemsetRefUpdater<LanguageConfig, NEED_LOCK>(regionSizeBits_);
1271 } else {
1272 return EnqueueRemsetRefUpdater<LanguageConfig>(this->GetCardTable(), updatedRefQueue, regionSizeBits_);
1273 }
1274 }
1275
1276 template <class LanguageConfig>
1277 template <bool FULL_GC, bool USE_WORKERS>
1278 void G1GC<LanguageConfig>::UpdateRefsToMovedObjects(MovedObjectsContainer<FULL_GC> *movedObjectsContainer)
1279 {
1280 GCScope<TRACE_TIMING> scope(__FUNCTION__, this);
1281 // Currently the lock for RemSet affects the pause too much, so don't use workers on FULL-GC
1282 constexpr bool ENABLE_WORKERS = USE_WORKERS && !FULL_GC;
1283 auto internalAllocator = this->GetInternalAllocator();
1284 auto *updatedRefQueue =
1285 (ENABLE_WORKERS) ? internalAllocator->template New<GCG1BarrierSet::ThreadLocalCardQueues>() : updatedRefsQueue_;
1286 // NEED_LOCK is true if and only if ENABLE_WORKERS is true
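// Illustrative expansion: ENABLE_WORKERS == USE_WORKERS && !FULL_GC, so with USE_WORKERS == true a mixed GC
// hands ranges to GC workers via a temporary queue, while a full GC processes everything in the current
// thread and writes straight into updatedRefsQueue_.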
1287 auto refUpdater = this->CreateRefUpdater<FULL_GC, ENABLE_WORKERS>(updatedRefQueue);
1288 // Update references from objects which were moved during garbage collection
1289 LOG_DEBUG_GC << "=== Update ex-cset -> ex-cset references. START. ===";
1290 {
1291 ScopedTiming t("UpdateMovedObjectsReferences", *this->GetTiming());
1292 for (auto *movedObjects : *movedObjectsContainer) {
1293 if constexpr (ENABLE_WORKERS) {
1294 auto rangeBegin = movedObjects->begin();
1295 auto rangeEnd = rangeBegin;
1296 while (rangeBegin != movedObjects->end()) {
1297 if (std::distance(rangeBegin, movedObjects->end()) < GCUpdateRefsWorkersTask<false>::RANGE_SIZE) {
1298 rangeEnd = movedObjects->end();
1299 } else {
1300 std::advance(rangeEnd, GCUpdateRefsWorkersTask<false>::RANGE_SIZE);
1301 }
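// Worked example (illustrative count; the real value is GCUpdateRefsWorkersTask<false>::RANGE_SIZE): with
// RANGE_SIZE == 1000 and 2500 moved objects this loop produces ranges [0, 1000), [1000, 2000), [2000, 2500).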
1302 auto *movedObjectsRange =
1303 internalAllocator->template New<typename GCUpdateRefsWorkersTask<false>::MovedObjectsRange>(
1304 rangeBegin, rangeEnd);
1305 rangeBegin = rangeEnd;
1306 GCUpdateRefsWorkersTask<false> gcWorkerTask(movedObjectsRange);
1307 if (this->GetWorkersTaskPool()->AddTask(GCUpdateRefsWorkersTask<false>(gcWorkerTask))) {
1308 continue;
1309 }
1310 // Couldn't add new task, so do task processing immediately
1311 this->WorkerTaskProcessing(&gcWorkerTask, nullptr);
1312 }
1313 } else { // GC workers are not used
1314 typename GCUpdateRefsWorkersTask<FULL_GC>::MovedObjectsRange movedObjectsRange(movedObjects->begin(),
1315 movedObjects->end());
1316 DoUpdateReferencesToMovedObjectsRange<LanguageConfig, decltype(refUpdater), FULL_GC>(&movedObjectsRange,
1317 refUpdater);
1318 }
1319 }
1320 }
1321 LOG_DEBUG_GC << "=== Update ex-cset -> ex-cset references. END. ===";
1322
1323 // update references from objects which are not part of collection set
1324 LOG_DEBUG_GC << "=== Update non ex-cset -> ex-cset references. START. ===";
1325 if constexpr (FULL_GC) {
1326 UpdateRefsFromRemSets(refUpdater);
1327 } else {
1328 VisitRemSets(refUpdater);
1329 }
1330 LOG_DEBUG_GC << "=== Update non ex-cset -> ex-cset references. END. ===";
1331 if constexpr (ENABLE_WORKERS) {
1332 {
1333 os::memory::LockHolder lock(gcWorkerQueueLock_);
1334 updatedRefsQueue_->insert(updatedRefsQueue_->end(), updatedRefQueue->begin(), updatedRefQueue->end());
1335 this->GetInternalAllocator()->Delete(updatedRefQueue);
1336 }
1337 this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1338 }
1339 this->CommonUpdateRefsToMovedObjects();
1340 }
1341
1342 template <class LanguageConfig>
1343 NO_THREAD_SAFETY_ANALYSIS void G1GC<LanguageConfig>::OnPauseMark(GCTask &task, GCMarkingStackType *objectsStack,
1344 bool useGcWorkers)
1345 {
1346 GCScope<TRACE_TIMING> scope(__FUNCTION__, this);
1347 LOG_DEBUG_GC << "OnPause marking started";
1348 auto *objectAllocator = GetG1ObjectAllocator();
1349 this->MarkImpl(
1350 &marker_, objectsStack, CardTableVisitFlag::VISIT_DISABLED,
1351 // process references on FULL-GC
1352 GC::EmptyReferenceProcessPredicate,
1353 // non-young mem-range checker
1354 [objectAllocator](MemRange &memRange) { return !objectAllocator->IsIntersectedWithYoung(memRange); },
1355 // mark predicate
1356 CalcLiveBytesMarkPreprocess);
1357 if (useGcWorkers) {
1358 this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1359 }
1360 /**
1361 * We don't collect non-movable regions right now. If there was a reference from a non-movable region to a
1362 * young/tenured region, we reset the mark bitmap for the non-movable region but don't update its live bitmap,
1363 * so we can traverse a non-reachable object (in CacheRefsFromRemsets) and visit a DEAD object in
1364 * tenured space (deleted during a young collection or in the iterative full-GC phase).
1365 */
1366 auto refClearPred = []([[maybe_unused]] const ObjectHeader *obj) { return true; };
1367 this->GetPandaVm()->HandleReferences(task, refClearPred);
1368 }
1369
1370 template <class LanguageConfig>
1371 void G1GC<LanguageConfig>::FullMarking(panda::GCTask &task)
1372 {
1373 GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_MARK);
1374 auto *objectAllocator = GetG1ObjectAllocator();
1375 bool useGcWorkers = this->GetSettings()->ParallelMarkingEnabled();
1376
1377 GCMarkingStackType fullCollectionStack(this, useGcWorkers ? this->GetSettings()->GCRootMarkingStackMaxSize() : 0,
1378 useGcWorkers ? this->GetSettings()->GCWorkersMarkingStackMaxSize() : 0,
1379 GCWorkersTaskTypes::TASK_FULL_MARK,
1380 this->GetSettings()->GCMarkingStackNewTasksFrequency());
1381
1382 InitialMark(fullCollectionStack);
1383
1384 this->OnPauseMark(task, &fullCollectionStack, useGcWorkers);
1385 // We will sweep VM refs in tenured space during mixed collection, but only for non-empty regions.
1386 // Therefore, sweep here only NonMovable and Humongous objects, and empty movable regions:
1387 SweepNonRegularVmRefs();
1388 auto allRegions = objectAllocator->GetAllRegions();
1389 for (auto *r : allRegions) {
1390 if (r->GetLiveBitmap() != nullptr) {
1391 r->CloneMarkBitmapToLiveBitmap();
1392 }
1393 }
1394 // Force card updater here, after swapping bitmap, to skip dead objects
1395 ProcessDirtyCards();
1396 auto garbageRegions = GetG1ObjectAllocator()->template GetTopGarbageRegions<false>();
1397 auto emptyTenuredRegions = GetEmptyTenuredRegularRegionsFromQueue(std::move(garbageRegions));
1398 CollectEmptyRegions<false, false>(task, &emptyTenuredRegions);
1399 }
1400
1401 template <class LanguageConfig>
1402 void G1GC<LanguageConfig>::ConcurrentMarking(panda::GCTask &task)
1403 {
1404 {
1405 PauseTimeGoalDelay();
1406 auto scopedTracker = g1PauseTracker_.CreateScope();
1407 GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats(), nullptr, PauseTypeStats::COMMON_PAUSE);
1408 InitialMark(concurrentMarkingStack_);
1409 }
1410
1411 LOG_DEBUG_GC << "Concurrent marking started";
1412 ConcurrentMark(&concurrentMarkingStack_);
1413 PauseTimeGoalDelay();
1414 // weak refs shouldn't be added to the queue on concurrent-mark
1415 ASSERT(this->GetReferenceProcessor()->GetReferenceQueueSize() == 0);
1416
1417 DisablePreWrbInThreads();
1418
1419 concurrentMarkingFlag_ = false;
1420 if (!interruptConcurrentFlag_) {
1421 Remark(task);
1422 // Enable mixed GC
1423 auto garbageRegions = GetG1ObjectAllocator()->template GetTopGarbageRegions<false>();
1424 if (HaveGarbageRegions(garbageRegions)) {
1425 // Atomic with release order reason: to make the changes made by the GC thread (which does concurrent
1426 // marking and then sets isMixedGcRequired_) visible to the mutator thread which waits for the end of
1427 // concurrent marking.
1428 isMixedGcRequired_.store(true, std::memory_order_release);
1429 }
1430
1431 {
1432 ScopedTiming t("Concurrent Sweep", *this->GetTiming());
1433 ConcurrentScope concurrentScope(this);
1434 auto emptyTenuredRegions = GetEmptyTenuredRegularRegionsFromQueue(std::move(garbageRegions));
1435 if (this->IsConcurrencyAllowed()) {
1436 CollectEmptyRegions<true, true>(task, &emptyTenuredRegions);
1437 } else {
1438 CollectEmptyRegions<false, false>(task, &emptyTenuredRegions);
1439 }
1440 }
1441 } else {
1442 concurrentMarkingStack_.Clear();
1443 ClearSatb();
1444 }
1445 ASSERT(concurrentMarkingStack_.Empty());
1446 }
1447
1448 template <class LanguageConfig>
1449 void G1GC<LanguageConfig>::PauseTimeGoalDelay()
1450 {
1451 if (this->GetSettings()->G1EnablePauseTimeGoal() && !interruptConcurrentFlag_) {
1452 auto start = panda::time::GetCurrentTimeInMicros();
1453 // The delay should be calculated from an estimated pause time rather than from the max pause
1454 auto remained = g1PauseTracker_.MinDelayBeforeMaxPauseInMicros(panda::time::GetCurrentTimeInMicros());
1455 if (remained > 0) {
1456 ConcurrentScope concurrentScope(this);
1457 os::memory::LockHolder lh(concurrentMarkMutex_);
1458 while (!interruptConcurrentFlag_ && remained > 0) {
1459 auto ms = remained / panda::os::time::MILLIS_TO_MICRO;
1460 auto ns = (remained - ms * panda::os::time::MILLIS_TO_MICRO) * panda::os::time::MICRO_TO_NANO;
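// Worked example (illustrative, assuming MILLIS_TO_MICRO == 1000 and MICRO_TO_NANO == 1000):
// remained == 2500 us gives ms == 2 and ns == 500000.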
1461 concurrentMarkCondVar_.TimedWait(&concurrentMarkMutex_, ms, ns);
1462 remained -= panda::time::GetCurrentTimeInMicros() - start;
1463 }
1464 }
1465 }
1466 }
1467
1468 template <class LanguageConfig>
1469 void G1GC<LanguageConfig>::InitialMark(GCMarkingStackType &markingStack)
1470 {
1471 {
1472 // First we need to unmark the whole heap
1473 GCScope<TRACE_TIMING> unMarkScope("UnMark", this);
1474 LOG_DEBUG_GC << "Start unmark all heap before mark";
1475 auto allRegion = GetG1ObjectAllocator()->GetAllRegions();
1476 for (Region *r : allRegion) {
1477 auto *bitmap = r->GetMarkBitmap();
1478 // Calculate live bytes during mark-phase
1479 r->SetLiveBytes(0U);
1480 // unmark full-heap except Humongous-space
1481 bitmap->ClearAllBits();
1482 }
1483 #ifndef NDEBUG
1484 this->GetObjectAllocator()->IterateOverObjects(
1485 [this](ObjectHeader *obj) { ASSERT(!this->marker_.IsMarked(obj)); });
1486 #endif
1487 }
1488 ASSERT(this->GetReferenceProcessor()->GetReferenceQueueSize() ==
1489 0); // all references should be processed on mixed-gc
1490 {
1491 GCScope<TRACE_TIMING_PHASE> initialMarkScope("InitialMark", this, GCPhase::GC_PHASE_INITIAL_MARK);
1492 // Collect non-heap roots.
1493 // Mark the whole heap by using only these roots.
1494 // The interregion roots will be processed at pause
1495
1496 // InitialMark. STW
1497 GCRootVisitor gcMarkRoots = [this, &markingStack](const GCRoot &gcRoot) {
1498 ValidateObject(gcRoot.GetType(), gcRoot.GetObjectHeader());
1499 if (marker_.MarkIfNotMarked(gcRoot.GetObjectHeader())) {
1500 markingStack.PushToStack(gcRoot.GetType(), gcRoot.GetObjectHeader());
1501 }
1502 };
1503 this->VisitRoots(gcMarkRoots, VisitGCRootFlags::ACCESS_ROOT_ALL);
1504 }
1505 }
1506
1507 template <class LanguageConfig>
1508 void G1GC<LanguageConfig>::ConcurrentMark(GCMarkingStackType *objectsStack)
1509 {
1510 ConcurrentScope concurrentScope(this);
1511 GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_MARK);
1512 this->ConcurentMarkImpl(objectsStack);
1513 }
1514
1515 template <class LanguageConfig>
1516 void G1GC<LanguageConfig>::Remark(panda::GCTask const &task)
1517 {
1518 /**
1519 * Perform remark at a pause so that all live objects in tenured space are marked; this makes it possible to
1520 * check objects in remsets. If an object is not marked, we don't process it, because it is already dead.
1521 */
1522 auto scopedTracker = g1PauseTracker_.CreateScope();
1523 GCScope<TIMING_PHASE> gcScope(__FUNCTION__, this, GCPhase::GC_PHASE_REMARK);
1524 GCScopedPauseStats scopedPauseStats(this->GetPandaVm()->GetGCStats(), nullptr, PauseTypeStats::REMARK_PAUSE);
1525 {
1526 ScopedTiming t("Stack Remarking", *this->GetTiming());
1527 bool useGcWorkers = this->GetSettings()->ParallelMarkingEnabled();
1528 GCMarkingStackType stack(this, useGcWorkers ? this->GetSettings()->GCRootMarkingStackMaxSize() : 0,
1529 useGcWorkers ? this->GetSettings()->GCWorkersMarkingStackMaxSize() : 0,
1530 GCWorkersTaskTypes::TASK_REMARK,
1531 this->GetSettings()->GCMarkingStackNewTasksFrequency());
1532
1533 // The mutator may create new regions.
1534 // If so, we should bind the bitmaps of the new regions.
1535 DrainSatb(&stack);
1536 this->MarkStack(&marker_, &stack, CalcLiveBytesMarkPreprocess);
1537
1538 if (useGcWorkers) {
1539 this->GetWorkersTaskPool()->WaitUntilTasksEnd();
1540 }
1541
1542 // ConcurrentMark doesn't visit young objects, so we can't clear references located in young space because we
1543 // don't know which young objects are marked. We will process them separately during a later young/mixed GC;
1544 // here we process only refs in tenured space.
1545 auto refClearPred = []([[maybe_unused]] const ObjectHeader *obj) {
1546 return !ObjectToRegion(obj)->HasFlag(RegionFlag::IS_EDEN);
1547 };
1548 this->GetPandaVm()->HandleReferences(task, refClearPred);
1549 }
1550
1551 // We will sweep VM refs in tenured space during mixed collection,
1552 // therefore, sweep it here only for NonMovable and Humongous objects:
1553 SweepNonRegularVmRefs();
1554 auto g1Allocator = this->GetG1ObjectAllocator();
1555 auto allRegions = g1Allocator->GetAllRegions();
1556 for (const auto ®ion : allRegions) {
1557 if (region->HasFlag(IS_OLD) || region->HasFlag(IS_NONMOVABLE)) {
1558 region->SwapMarkBitmap();
1559 }
1560 }
1561 // Force card updater here, after swapping bitmap, to skip dead objects
1562 ProcessDirtyCards();
1563 }
1564
1565 template <class LanguageConfig>
1566 void G1GC<LanguageConfig>::SweepNonRegularVmRefs()
1567 {
1568 ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
1569
1570 this->GetPandaVm()->SweepVmRefs([this](ObjectHeader *object) {
1571 Region *region = ObjectToRegion(object);
1572 if (region->HasFlag(RegionFlag::IS_EDEN)) {
1573 return ObjectStatus::ALIVE_OBJECT;
1574 }
1575 bool nonRegularObject =
1576 region->HasFlag(RegionFlag::IS_NONMOVABLE) || region->HasFlag(RegionFlag::IS_LARGE_OBJECT);
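// Regular (movable) tenured objects are judged below by the region's live-bytes counter, while
// non-movable and humongous objects are checked individually against the mark bitmap.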
1577 if (!nonRegularObject) {
1578 ASSERT(region->GetLiveBytes() != 0U || !this->IsMarked(object));
1579 return region->GetLiveBytes() == 0U ? ObjectStatus::DEAD_OBJECT : ObjectStatus::ALIVE_OBJECT;
1580 }
1581 return this->IsMarked(object) ? ObjectStatus::ALIVE_OBJECT : ObjectStatus::DEAD_OBJECT;
1582 });
1583 }
1584
1585 template <class LanguageConfig>
1586 void G1GC<LanguageConfig>::SweepRegularVmRefs()
1587 {
1588 ScopedTiming t(__FUNCTION__, *this->GetTiming());
1589
1590 this->GetPandaVm()->SweepVmRefs([this](ObjectHeader *obj) {
1591 if (this->InGCSweepRange(obj)) {
1592 return ObjectStatus::DEAD_OBJECT;
1593 }
1594 return ObjectStatus::ALIVE_OBJECT;
1595 });
1596 }
1597
1598 template <class LanguageConfig>
1599 CollectionSet G1GC<LanguageConfig>::GetCollectibleRegions(panda::GCTask const &task, bool isMixed)
1600 {
1601 ASSERT(!this->IsFullGC());
1602 ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
1603 auto g1Allocator = this->GetG1ObjectAllocator();
1604 LOG_DEBUG_GC << "Start GetCollectibleRegions is_mixed: " << isMixed << " reason: " << task.reason;
1605 CollectionSet collectionSet(g1Allocator->GetYoungRegions());
1606 if (isMixed) {
1607 if (!this->GetSettings()->G1EnablePauseTimeGoal()) {
1608 AddOldRegionsMaxAllowed(collectionSet);
1609 } else {
1610 AddOldRegionsAccordingPauseTimeGoal(collectionSet);
1611 }
1612 }
1613 LOG_DEBUG_GC << "collectible_regions size: " << collectionSet.size() << " young " << collectionSet.Young().size()
1614 << " old " << std::distance(collectionSet.Young().end(), collectionSet.end())
1615 << " reason: " << task.reason << " is_mixed: " << isMixed;
1616 return collectionSet;
1617 }
1618
1619 template <class LanguageConfig>
1620 void G1GC<LanguageConfig>::AddOldRegionsMaxAllowed(CollectionSet &collectionSet)
1621 {
1622 auto regions = this->GetG1ObjectAllocator()->template GetTopGarbageRegions<false>();
1623 for (size_t i = 0; i < numberOfMixedTenuredRegions_ && !regions.empty(); i++) {
1624 auto *garbageRegion = regions.top().second;
1625 regions.pop();
1626 ASSERT(!garbageRegion->HasFlag(IS_EDEN));
1627 ASSERT(!garbageRegion->HasPinnedObjects());
1628 ASSERT(!garbageRegion->HasFlag(IS_RESERVED));
1629 ASSERT(garbageRegion->GetAllocatedBytes() != 0U);
1630 double garbageRate = static_cast<double>(garbageRegion->GetGarbageBytes()) / garbageRegion->GetAllocatedBytes();
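// Worked example (illustrative sizes): 192 KiB of garbage out of 256 KiB allocated gives garbageRate == 0.75,
// which is then compared against regionGarbageRateThreshold_.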
1631 if (garbageRate >= regionGarbageRateThreshold_) {
1632 LOG_DEBUG_GC << "Garbage percentage in " << std::hex << garbageRegion << " region = " << std::dec
1633 << garbageRate << " %, add to collection set";
1634 collectionSet.AddRegion(garbageRegion);
1635 } else {
1636 LOG_DEBUG_GC << "Garbage percentage in " << std::hex << garbageRegion << " region = " << std::dec
1637 << garbageRate << " %, don't add to collection set";
1638 break;
1639 }
1640 }
1641 }
1642
1643 template <class LanguageConfig>
1644 void G1GC<LanguageConfig>::AddOldRegionsAccordingPauseTimeGoal(CollectionSet &collectionSet)
1645 {
1646 auto gcPauseTimeBudget =
1647 static_cast<int64_t>(this->GetSettings()->GetG1MaxGcPauseInMillis() * panda::os::time::MILLIS_TO_MICRO);
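// Worked example (illustrative): with G1MaxGcPauseInMillis == 10 the initial budget is 10000 us; the predicted
// cost of the first old region, the prediction error and the young part are subtracted below, and further old
// regions are added only while the remaining budget covers their predicted collection time.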
1648 auto regions = this->GetG1ObjectAllocator()->template GetTopGarbageRegions<false>();
1649 // add at least one old region to guarantee progress in the mixed collection
1650 auto *topRegion = regions.top().second;
1651 collectionSet.AddRegion(topRegion);
1652 auto expectedTopRegionCollectionTime = analytics_.PredictOldCollectionTimeInMicros(topRegion);
1653 if (gcPauseTimeBudget < expectedTopRegionCollectionTime) {
1654 LOG_DEBUG_GC << "Not enough budget to add more than one old region";
1655 return;
1656 }
1657 gcPauseTimeBudget -= expectedTopRegionCollectionTime;
1658 auto predictionError = analytics_.EstimatePredictionErrorInMicros();
1659 if (gcPauseTimeBudget < predictionError) {
1660 LOG_DEBUG_GC << "Not enough budget to add old regions";
1661 return;
1662 }
1663 gcPauseTimeBudget -= predictionError;
1664 auto expectedYoungCollectionTime = analytics_.PredictYoungCollectionTimeInMicros(collectionSet.Young().size());
1665 if (gcPauseTimeBudget < expectedYoungCollectionTime) {
1666 LOG_DEBUG_GC << "Not enough budget to add old regions";
1667 return;
1668 }
1669 gcPauseTimeBudget -= expectedYoungCollectionTime;
1670
1671 regions.pop();
1672 while (!regions.empty()) {
1673 auto &scoreAndRegion = regions.top();
1674 auto *garbageRegion = scoreAndRegion.second;
1675 ASSERT(!garbageRegion->HasFlag(IS_EDEN));
1676 ASSERT(!garbageRegion->HasPinnedObjects());
1677 ASSERT(!garbageRegion->HasFlag(IS_RESERVED));
1678 ASSERT(garbageRegion->GetAllocatedBytes() != 0U);
1679
1680 regions.pop();
1681
1682 double garbageRate = static_cast<double>(garbageRegion->GetGarbageBytes()) / garbageRegion->GetAllocatedBytes();
1683 if (garbageRate < regionGarbageRateThreshold_) {
1684 LOG_DEBUG_GC << "Garbage percentage in " << std::hex << garbageRegion << " region = " << std::dec
1685 << garbageRate << " %, don't add to collection set";
1686 break;
1687 }
1688
1689 auto expectedRegionCollectionTime = analytics_.PredictOldCollectionTimeInMicros(garbageRegion);
1690 if (gcPauseTimeBudget < expectedRegionCollectionTime) {
1691 LOG_DEBUG_GC << "Not enough budget to add old regions anymore";
1692 break;
1693 }
1694
1695 gcPauseTimeBudget -= expectedRegionCollectionTime;
1696
1697 LOG_DEBUG_GC << "Garbage percentage in " << std::hex << garbageRegion << " region = " << std::dec << garbageRate
1698 << " %, add to collection set";
1699 collectionSet.AddRegion(garbageRegion);
1700 }
1701 }
1702
1703 template <class LanguageConfig>
1704 CollectionSet G1GC<LanguageConfig>::GetFullCollectionSet()
1705 {
1706 ASSERT(this->IsFullGC());
1707 // FillRemSet should always be finished before GetCollectibleRegions
1708 ASSERT(updateRemsetWorker_->GetQueueSize() == 0);
1709 auto g1Allocator = this->GetG1ObjectAllocator();
1710 g1Allocator->ClearCurrentTenuredRegion();
1711 CollectionSet collectionSet(g1Allocator->GetYoungRegions());
1712 auto movableGarbageRegions = g1Allocator->template GetTopGarbageRegions<true>();
1713 LOG_DEBUG_GC << "Regions for FullGC:";
1714 while (!movableGarbageRegions.empty()) {
1715 auto *region = movableGarbageRegions.top().second;
1716 movableGarbageRegions.pop();
1717 if (region->HasFlag(IS_EDEN) || region->HasPinnedObjects()) {
1718 LOG_DEBUG_GC << (region->HasFlags(IS_EDEN) ? "Young regions" : "Region with pinned objects") << " ("
1719 << *region << ") is not added to collection set";
1720 continue;
1721 }
1722 LOG_DEBUG_GC << *region;
1723 ASSERT(!region->HasFlag(IS_NONMOVABLE) && !region->HasFlag(IS_LARGE_OBJECT));
1724 ASSERT(region->HasFlag(IS_OLD));
1725 collectionSet.AddRegion(region);
1726 }
1727 return collectionSet;
1728 }
1729
1730 template <class LanguageConfig>
1731 bool G1GC<LanguageConfig>::HaveEnoughSpaceToMove(const CollectionSet &collectibleRegions)
1732 {
1733 return HaveEnoughRegionsToMove(collectibleRegions.Movable().size());
1734 }
1735
1736 template <class LanguageConfig>
1737 bool G1GC<LanguageConfig>::HaveEnoughRegionsToMove(size_t num)
1738 {
1739 return GetG1ObjectAllocator()->HaveTenuredSize(num) && GetG1ObjectAllocator()->HaveFreeRegions(num);
1740 }
1741
1742 template <class LanguageConfig>
1743 void G1GC<LanguageConfig>::OnThreadTerminate(ManagedThread *thread, mem::BuffersKeepingFlag keepBuffers)
1744 {
1745 InternalAllocatorPtr allocator = this->GetInternalAllocator();
1746 // The method must be called while the lock which guards the thread/coroutine list is held
1747 LOG(DEBUG, GC) << "Call OnThreadTerminate";
1748 PandaVector<ObjectHeader *> *preBuff = nullptr;
1749 if (keepBuffers == mem::BuffersKeepingFlag::KEEP) {
1750 preBuff = allocator->New<PandaVector<ObjectHeader *>>(*thread->GetPreBuff());
1751 thread->GetPreBuff()->clear();
1752 } else { // keepBuffers == mem::BuffersKeepingFlag::DELETE
1753 preBuff = thread->MovePreBuff();
1754 }
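// With KEEP the thread keeps using its own pre-buffer, so we take a copy and clear the original;
// with DELETE the thread is going away, so we simply take ownership of the buffer.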
1755 ASSERT(preBuff != nullptr);
1756 {
1757 os::memory::LockHolder lock(satbAndNewobjBufLock_);
1758 satbBuffList_.push_back(preBuff);
1759 }
1760 {
1761 auto *localBuffer = thread->GetG1PostBarrierBuffer();
1762 ASSERT(localBuffer != nullptr);
1763 if (!localBuffer->IsEmpty()) {
1764 auto *tempBuffer = allocator->New<PandaVector<mem::CardTable::CardPtr>>();
1765 while (!localBuffer->IsEmpty()) {
1766 tempBuffer->push_back(localBuffer->Pop());
1767 }
1768 updateRemsetWorker_->AddPostBarrierBuffer(tempBuffer);
1769 }
1770 if (keepBuffers == mem::BuffersKeepingFlag::DELETE) {
1771 thread->ResetG1PostBarrierBuffer();
1772 allocator->Delete(localBuffer);
1773 }
1774 }
1775 }
1776
1777 template <class LanguageConfig>
1778 void G1GC<LanguageConfig>::OnThreadCreate(ManagedThread *thread)
1779 {
1780 // Any access to other threads' data (including MAIN's) might cause a race here,
1781 // so please don't do that.
1782 thread->SetPreWrbEntrypoint(reinterpret_cast<void *>(currentPreWrbEntrypoint_));
1783 }
1784
1785 template <class LanguageConfig>
1786 void G1GC<LanguageConfig>::PreZygoteFork()
1787 {
1788 GC::PreZygoteFork();
1789 this->DestroyWorkersTaskPool();
1790 this->DisableWorkerThreads();
1791 updateRemsetWorker_->DestroyWorker();
1792 // don't use a separate worker thread while we are in zygote
1793 updateRemsetWorker_->SetUpdateConcurrent(false);
1794 }
1795
1796 template <class LanguageConfig>
1797 void G1GC<LanguageConfig>::PostZygoteFork()
1798 {
1799 this->EnableWorkerThreads();
1800 this->CreateWorkersTaskPool();
1801 GC::PostZygoteFork();
1802 // use the concurrent option after zygote
1803 updateRemsetWorker_->SetUpdateConcurrent(this->GetSettings()->G1EnableConcurrentUpdateRemset());
1804 updateRemsetWorker_->CreateWorker();
1805 }
1806
1807 template <class LanguageConfig>
1808 void G1GC<LanguageConfig>::DrainSatb(GCAdaptiveStack *objectStack)
1809 {
1810 ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
1811 // Process satb buffers of the active threads
1812 auto callback = [this, objectStack](ManagedThread *thread) {
1813 // Acquire the lock here to avoid data races with the threads
1814 // which are terminating now.
1815 // The data race happens on thread.pre_buf_: a terminating thread may
1816 // release its own pre_buf_ while the GC thread iterates over the threads and reads their
1817 // pre_buf_.
1818 os::memory::LockHolder lock(satbAndNewobjBufLock_);
1819 auto preBuff = thread->GetPreBuff();
1820 if (preBuff == nullptr) {
1821 // This can happen when the thread has already given us its own satb_buffer but
1822 // hasn't unregistered from the ThreadManager yet.
1823 // During this period a GC can happen and we get a null pre_buff here.
1824 return true;
1825 }
1826 for (auto obj : *preBuff) {
1827 if (marker_.MarkIfNotMarked(obj)) {
1828 objectStack->PushToStack(RootType::SATB_BUFFER, obj);
1829 }
1830 }
1831 preBuff->clear();
1832 return true;
1833 };
1834 this->GetPandaVm()->GetThreadManager()->EnumerateThreads(callback);
1835
1836 // Process satb buffers of the terminated threads
1837 os::memory::LockHolder lock(satbAndNewobjBufLock_);
1838 for (auto objVector : satbBuffList_) {
1839 ASSERT(objVector != nullptr);
1840 for (auto obj : *objVector) {
1841 if (marker_.MarkIfNotMarked(obj)) {
1842 objectStack->PushToStack(RootType::SATB_BUFFER, obj);
1843 }
1844 }
1845 this->GetInternalAllocator()->Delete(objVector);
1846 }
1847 satbBuffList_.clear();
1848 for (auto obj : newobjBuffer_) {
1849 if (marker_.MarkIfNotMarked(obj)) {
1850 objectStack->PushToStack(RootType::SATB_BUFFER, obj);
1851 }
1852 }
1853 newobjBuffer_.clear();
1854 }
1855
1856 template <class LanguageConfig>
1857 void G1GC<LanguageConfig>::HandlePendingDirtyCards()
1858 {
1859 ScopedTiming t(__FUNCTION__, *this->GetTiming());
1860 updateRemsetWorker_->DrainAllCards(&dirtyCards_);
1861 std::for_each(dirtyCards_.cbegin(), dirtyCards_.cend(), [](auto card) { card->Clear(); });
1862 }
1863
1864 template <class LanguageConfig>
1865 void G1GC<LanguageConfig>::ReenqueueDirtyCards()
1866 {
1867 ScopedTiming t(__FUNCTION__, *this->GetTiming());
1868 os::memory::LockHolder lock(queueLock_);
1869 std::for_each(dirtyCards_.cbegin(), dirtyCards_.cend(), [this](auto card) {
1870 card->Mark();
1871 updatedRefsQueue_->push_back(card);
1872 });
1873 dirtyCards_.clear();
1874 }
1875
1876 template <class LanguageConfig>
1877 void G1GC<LanguageConfig>::ClearSatb()
1878 {
1879 ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
1880 // Acquire the lock here to avoid data races with the threads
1881 // which are terminating now.
1882 // The data race happens on thread.pre_buf_: a terminating thread may
1883 // release its own pre_buf_ while the GC thread iterates over the threads and reads their
1884 // pre_buf_.
1885 os::memory::LockHolder lock(satbAndNewobjBufLock_);
1886 // Process satb buffers of the active threads
1887 auto threadCallback = [](ManagedThread *thread) {
1888 auto preBuff = thread->GetPreBuff();
1889 if (preBuff != nullptr) {
1890 preBuff->clear();
1891 }
1892 return true;
1893 };
1894 this->GetPandaVm()->GetThreadManager()->EnumerateThreads(threadCallback);
1895
1896 // Process satb buffers of the terminated threads
1897 for (auto objVector : satbBuffList_) {
1898 this->GetInternalAllocator()->Delete(objVector);
1899 }
1900 satbBuffList_.clear();
1901 newobjBuffer_.clear();
1902 }
1903
1904 template <class LanguageConfig>
1905 template <class Visitor>
1906 void G1GC<LanguageConfig>::VisitRemSets(const Visitor &visitor)
1907 {
1908 GCScope<TRACE_TIMING> visitRemsetScope(__FUNCTION__, this);
1909
1910 ASSERT(uniqueCardsInitialized_);
1911 // Iterate over stored references to the collection set
1912 for (auto &entryVector : uniqueRefsFromRemsets_) {
1913 for (auto &entry : *entryVector) {
1914 ObjectHeader *object = entry.GetObject();
1915 uint32_t offset = entry.GetReferenceOffset();
1916 visitor(object, ObjectAccessor::GetObject(object, offset), offset);
1917 }
1918 }
1919 }
1920
1921 template <class LanguageConfig>
1922 template <class Visitor>
1923 void G1GC<LanguageConfig>::UpdateRefsFromRemSets(const Visitor &visitor)
1924 {
1925 auto fieldVisitor = [this, &visitor](ObjectHeader *object, ObjectHeader *field, uint32_t offset,
1926 [[maybe_unused]] bool isVolatile) {
1927 if (!InGCSweepRange(field)) {
1928 return true;
1929 }
1930 visitor(object, ObjectAccessor::GetObject(object, offset), offset);
1931 return true;
1932 };
1933 auto refsChecker = [this, &fieldVisitor](const MemRange &memRange, Region *region) {
1934 IterateOverRefsInMemRange(memRange, region, fieldVisitor);
1935 return true;
1936 };
1937 MemRange dirtyCards = CacheRefsFromRemsets(refsChecker);
1938 ClearDirtyAndYoungCards(dirtyCards);
1939 }
1940
1941 template <class LanguageConfig>
1942 MemRange G1GC<LanguageConfig>::CacheRefsFromRemsets(const MemRangeRefsChecker &refsChecker)
1943 {
1944 GCScope<TRACE_TIMING> cacheRefsFromRemsetScope(__FUNCTION__, this);
1945 // Collect only unique objects to avoid processing them more than once.
1946 ASSERT(!uniqueCardsInitialized_);
1947 CardTable *cardTable = this->GetCardTable();
1948 uintptr_t minDirtyAddr = cardTable->GetMinAddress() + cardTable->GetCardsCount() * cardTable->GetCardSize();
1949 uintptr_t maxDirtyAddr = cardTable->GetMinAddress();
1950
1951 ASSERT(IsCardTableClear(cardTable));
1952 auto visitor = [cardTable, &minDirtyAddr, &maxDirtyAddr, &refsChecker](Region *r, const MemRange &range) {
1953 // Use the card table to mark the ranges we have already processed.
1954 // Each card is a uint8_t which we use as a bitmap: a set bit means the corresponding memory
1955 // range has been processed.
1956 CardTable::CardPtr card = cardTable->GetCardPtr(range.GetStartAddress());
1957 uintptr_t cardAddr = cardTable->GetCardStartAddress(card);
1958 size_t memSize = DEFAULT_REGION_SIZE / RemSet<>::Bitmap::GetNumBits();
1959 size_t bitIdx = (range.GetStartAddress() - cardAddr) / memSize;
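// Worked example (illustrative, assuming an 8-bit bitmap): each bit then covers DEFAULT_REGION_SIZE / 8 bytes,
// so a range starting 3 * memSize bytes past the card start maps to bitIdx == 3.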
1960 if ((card->GetCard() & (1U << bitIdx)) == 0) {
1961 card->SetCard(card->GetCard() | (1U << bitIdx));
1962 if (minDirtyAddr > cardAddr) {
1963 minDirtyAddr = cardAddr;
1964 }
1965 if (maxDirtyAddr < cardAddr + cardTable->GetCardSize()) {
1966 maxDirtyAddr = cardAddr + cardTable->GetCardSize();
1967 }
1968 return refsChecker(range, r);
1969 }
1970 // some cross-region refs might not be processed
1971 return false;
1972 };
1973 analytics_.ReportScanRemsetStart(panda::time::GetCurrentTimeInNanos());
1974 for (auto region : collectionSet_) {
1975 region->GetRemSet()->Iterate(RemsetRegionPredicate, visitor);
1976 }
1977 analytics_.ReportScanRemsetEnd(panda::time::GetCurrentTimeInNanos());
1978
1979 if (!this->IsFullGC()) {
1980 CacheRefsFromDirtyCards(visitor);
1981 #ifndef NDEBUG
1982 uniqueCardsInitialized_ = true;
1983 #endif // NDEBUG
1984 }
1985 if (minDirtyAddr > maxDirtyAddr) {
1986 minDirtyAddr = maxDirtyAddr;
1987 }
1988 return MemRange(minDirtyAddr, maxDirtyAddr);
1989 }
1990
1991 template <class LanguageConfig>
1992 template <typename Visitor>
1993 void G1GC<LanguageConfig>::CacheRefsFromDirtyCards(Visitor visitor)
1994 {
1995 ScopedTiming t(__FUNCTION__, *this->GetTiming());
1996 auto cardTable = this->GetCardTable();
1997 constexpr size_t MEM_SIZE = DEFAULT_REGION_SIZE / RemSet<>::Bitmap::GetNumBits();
1998 for (auto it = dirtyCards_.cbegin(); it != dirtyCards_.cend();) {
1999 auto range = cardTable->GetMemoryRange(*it);
2000 auto addr = range.GetStartAddress();
2001 ASSERT_DO(IsHeapSpace(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(ToVoidPtr(addr))),
2002 std::cerr << "Invalid space type for the " << addr << std::endl);
2003 auto endAddr = range.GetEndAddress();
2004 auto region = panda::mem::AddrToRegion(ToVoidPtr(addr));
2005 if (!RemsetRegionPredicate(region)) {
2006 it = dirtyCards_.erase(it);
2007 continue;
2008 }
2009
2010 auto allCrossRegionRefsProcessed = true;
2011 while (addr < endAddr) {
2012 if (!visitor(region, MemRange(addr, addr + MEM_SIZE))) {
2013 allCrossRegionRefsProcessed = false;
2014 }
2015 addr += MEM_SIZE;
2016 }
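// The card stays in dirtyCards_ only if the visitor skipped at least one of its sub-ranges
// (returned false); fully processed cards are erased below.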
2017 if (allCrossRegionRefsProcessed) {
2018 it = dirtyCards_.erase(it);
2019 continue;
2020 }
2021 ++it;
2022 }
2023 }
2024
2025 template <class LanguageConfig>
2026 void G1GC<LanguageConfig>::RestoreYoungCards(const CollectionSet &collectionSet)
2027 {
2028 CardTable *cardTable = this->GetCardTable();
2029 for (Region *region : collectionSet.Young()) {
2030 cardTable->MarkCardsAsYoung(MemRange(region->Begin(), region->End()));
2031 }
2032 }
2033
2034 template <class LanguageConfig>
2035 void G1GC<LanguageConfig>::ClearYoungCards(const CollectionSet &collectionSet)
2036 {
2037 CardTable *cardTable = this->GetCardTable();
2038 for (Region *region : collectionSet.Young()) {
2039 cardTable->ClearCardRange(ToUintPtr(region), ToUintPtr(region) + DEFAULT_REGION_SIZE);
2040 }
2041 }
2042
2043 template <class LanguageConfig>
2044 void G1GC<LanguageConfig>::ClearDirtyAndYoungCards(const MemRange &dirtyCardsRange)
2045 {
2046 CardTable *cardTable = this->GetCardTable();
2047 ClearYoungCards(collectionSet_);
2048 cardTable->ClearCardRange(dirtyCardsRange.GetStartAddress(), dirtyCardsRange.GetEndAddress());
2049 }
2050
2051 template <class LanguageConfig>
2052 void G1GC<LanguageConfig>::ClearRefsFromRemsetsCache()
2053 {
2054 ASSERT(!uniqueRefsFromRemsets_.empty());
2055 // Resize list of unique refs from remset to 1, to reduce memory usage
2056 size_t elementsToRemove = uniqueRefsFromRemsets_.size() - 1;
2057 for (size_t i = 0; i < elementsToRemove; i++) {
2058 RefVector *entry = uniqueRefsFromRemsets_.back();
2059 this->GetInternalAllocator()->Delete(entry);
2060 uniqueRefsFromRemsets_.pop_back();
2061 }
2062 ASSERT(uniqueRefsFromRemsets_.size() == 1);
2063 uniqueRefsFromRemsets_.front()->clear();
2064 ASSERT(uniqueRefsFromRemsets_.front()->capacity() == MAX_REFS);
2065 #ifndef NDEBUG
2066 uniqueCardsInitialized_ = false;
2067 #endif // NDEBUG
2068 }
2069
2070 template <class LanguageConfig>
2071 void G1GC<LanguageConfig>::ActualizeRemSets()
2072 {
2073 ScopedTiming t(__FUNCTION__, *this->GetTiming());
2074
2075 // Invalidate regions from collection set in all remsets
2076 for (Region *region : collectionSet_.Young()) {
2077 if (!region->HasFlag(RegionFlag::IS_PROMOTED)) {
2078 RemSet<>::template InvalidateRegion<false>(region);
2079 } else {
2080 region->RmvFlag(RegionFlag::IS_PROMOTED);
2081 }
2082 }
2083 for (Region *region : collectionSet_.Tenured()) {
2084 RemSet<>::template InvalidateRegion<false>(region);
2085 }
2086 }
2087
2088 template <class LanguageConfig>
2089 bool G1GC<LanguageConfig>::ShouldRunTenuredGC(const GCTask &task)
2090 {
2091 return this->IsOnPygoteFork() || task.reason == GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE ||
2092 task.reason == GCTaskCause::STARTUP_COMPLETE_CAUSE;
2093 }
2094
2095 template <class LanguageConfig>
2096 void G1GC<LanguageConfig>::OnWaitForIdleFail()
2097 {
2098 if (this->GetGCPhase() == GCPhase::GC_PHASE_MARK) {
2099 // Atomic with release order reason: write to this variable should become visible in concurrent marker check
2100 interruptConcurrentFlag_.store(true, std::memory_order_release);
2101 if (this->GetSettings()->G1EnablePauseTimeGoal()) {
2102 os::memory::LockHolder lh(concurrentMarkMutex_);
2103 concurrentMarkCondVar_.Signal();
2104 }
2105 }
2106 }
2107
2108 template <class LanguageConfig>
2109 void G1GC<LanguageConfig>::PostponeGCStart()
2110 {
2111 regionGarbageRateThreshold_ = 0;
2112 g1PromotionRegionAliveRate_ = 0;
2113 GC::PostponeGCStart();
2114 }
2115
2116 template <class LanguageConfig>
2117 void G1GC<LanguageConfig>::PostponeGCEnd()
2118 {
2119 ASSERT(!this->IsPostponeEnabled() || (regionGarbageRateThreshold_ == 0 && g1PromotionRegionAliveRate_ == 0));
2120 regionGarbageRateThreshold_ = this->GetSettings()->G1RegionGarbageRateThreshold();
2121 g1PromotionRegionAliveRate_ = this->GetSettings()->G1PromotionRegionAliveRate();
2122 GC::PostponeGCEnd();
2123 }
2124
2125 template <class LanguageConfig>
2126 bool G1GC<LanguageConfig>::IsPostponeGCSupported() const
2127 {
2128 return true;
2129 }
2130
2131 template <class LanguageConfig>
2132 size_t G1GC<LanguageConfig>::GetMaxMixedRegionsCount()
2133 {
2134 return this->GetG1ObjectAllocator()->GetMaxYoungRegionsCount() + numberOfMixedTenuredRegions_;
2135 }
2136
2137 template <class LanguageConfig>
2138 void G1GC<LanguageConfig>::PrepareYoungRegionsForFullGC(const CollectionSet &collectionSet)
2139 {
2140 BuildCrossYoungRemSets(collectionSet.Young());
2141 ClearYoungCards(collectionSet);
2142 }
2143
2144 template <class LanguageConfig>
2145 void G1GC<LanguageConfig>::RestoreYoungRegionsAfterFullGC(const CollectionSet &collectionSet)
2146 {
2147 RestoreYoungCards(collectionSet);
2148 for (Region *region : collectionSet.Young()) {
2149 RemSet<>::template InvalidateRefsFromRegion<false>(region);
2150 }
2151 }
2152
2153 template <class LanguageConfig>
2154 template <typename Container>
2155 void G1GC<LanguageConfig>::BuildCrossYoungRemSets(const Container &young)
2156 {
2157 ScopedTiming scopedTiming(__FUNCTION__, *this->GetTiming());
2158 ASSERT(this->IsFullGC());
2159 auto allocator = this->GetG1ObjectAllocator();
2160 size_t regionSizeBits = panda::helpers::math::GetIntLog2(allocator->GetRegionSize());
2161 auto updateRemsets = [regionSizeBits](ObjectHeader *object, ObjectHeader *ref, size_t offset,
2162 [[maybe_unused]] bool isVolatile) {
2163 if (!IsSameRegion(object, ref, regionSizeBits) && !ObjectToRegion(ref)->IsYoung()) {
2164 RemSet<>::AddRefWithAddr<false>(object, offset, ref);
2165 }
2166 return true;
2167 };
2168 for (Region *region : young) {
2169 region->GetMarkBitmap()->IterateOverMarkedChunks([&updateRemsets](void *addr) {
2170 ObjectHelpers<LanguageConfig::LANG_TYPE>::template TraverseAllObjectsWithInfo<false>(
2171 reinterpret_cast<ObjectHeader *>(addr), updateRemsets);
2172 });
2173 }
2174 }
2175
2176 template <class LanguageConfig>
2177 void G1GC<LanguageConfig>::StartConcurrentScopeRoutine() const
2178 {
2179 updateRemsetWorker_->ResumeWorkerAfterGCPause();
2180 }
2181
2182 template <class LanguageConfig>
2183 void G1GC<LanguageConfig>::EndConcurrentScopeRoutine() const
2184 {
2185 updateRemsetWorker_->SuspendWorkerForGCPause();
2186 }
2187
2188 template <class LanguageConfig>
2189 void G1GC<LanguageConfig>::ComputeNewSize()
2190 {
2191 if (this->GetSettings()->G1EnablePauseTimeGoal()) {
2192 auto desiredEdenLengthByPauseDelay = CalculateDesiredEdenLengthByPauseDelay();
2193 auto desiredEdenLengthByPauseDuration = CalculateDesiredEdenLengthByPauseDuration();
2194 auto desiredEdenLength = std::max(desiredEdenLengthByPauseDelay, desiredEdenLengthByPauseDuration);
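// Worked example (illustrative): a delay-based estimate of 8 regions and a duration-based estimate of
// 12 regions give desiredEdenLength == 12, which is then applied to the heap space and the allocator.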
2195 GetG1ObjectAllocator()->GetHeapSpace()->UpdateSize(desiredEdenLength * GetG1ObjectAllocator()->GetRegionSize());
2196 GetG1ObjectAllocator()->SetDesiredEdenLength(desiredEdenLength);
2197 } else {
2198 GenerationalGC<LanguageConfig>::ComputeNewSize();
2199 }
2200 }
2201
2202 template <class LanguageConfig>
2203 size_t G1GC<LanguageConfig>::CalculateDesiredEdenLengthByPauseDelay()
2204 {
2205 auto delayBeforePause = g1PauseTracker_.MinDelayBeforeMaxPauseInMicros(panda::time::GetCurrentTimeInMicros());
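// Sketch (illustrative numbers): a predicted allocation rate of 0.002 regions/us and a 5000 us delay
// give ceil(0.002 * 5000) == 10 regions.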
2206 return static_cast<size_t>(ceil(analytics_.PredictAllocationRate() * delayBeforePause));
2207 }
2208
2209 template <class LanguageConfig>
2210 size_t G1GC<LanguageConfig>::CalculateDesiredEdenLengthByPauseDuration()
2211 {
2212 // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
2213 // on other reads or writes
2214 if (isMixedGcRequired_.load(std::memory_order_relaxed)) {
2215 // Schedule next mixed collections as often as possible to maximize old regions collection
2216 return 1;
2217 }
2218
2219 // Calculate the desired eden length according to the pause time goal
2220 size_t minEdenLength = 1;
2221 size_t maxEdenLength =
2222 GetG1ObjectAllocator()->GetHeapSpace()->GetMaxYoungSize() / GetG1ObjectAllocator()->GetRegionSize();
2223
2224 auto predictionError = analytics_.EstimatePredictionErrorInMicros();
2225 auto maxPause =
2226 static_cast<int64_t>(this->GetSettings()->GetG1MaxGcPauseInMillis() * panda::os::time::MILLIS_TO_MICRO);
2227 auto edenLengthPredicate = [this, predictionError, maxPause](size_t edenLength) {
2228 if (!HaveEnoughRegionsToMove(edenLength)) {
2229 return false;
2230 }
2231 auto pauseTime = predictionError + analytics_.PredictYoungCollectionTimeInMicros(edenLength);
2232 return pauseTime <= maxPause;
2233 };
2234 if (!edenLengthPredicate(minEdenLength)) {
2235 return minEdenLength;
2236 }
2237
2238 if (edenLengthPredicate(maxEdenLength)) {
2239 return maxEdenLength;
2240 }
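// Binary search for the largest eden length that still satisfies the pause-time predicate,
// e.g. (illustrative) minEdenLength == 1, maxEdenLength == 64 -> probe 32, then 48 or 16, and so on
// until delta reaches 0.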
2241 auto delta = (maxEdenLength - minEdenLength) / 2U;
2242 while (delta > 0) {
2243 auto edenLength = minEdenLength + delta;
2244 if (edenLengthPredicate(edenLength)) {
2245 minEdenLength = edenLength;
2246 } else {
2247 maxEdenLength = edenLength;
2248 }
2249 ASSERT(minEdenLength < maxEdenLength);
2250 delta = (maxEdenLength - minEdenLength) / 2U;
2251 }
2252 return minEdenLength;
2253 }
2254
2255 template <class LanguageConfig>
2256 NO_THREAD_SAFETY_ANALYSIS void G1GC<LanguageConfig>::ConcurentMarkImpl(GCMarkingStackType *objectsStack)
2257 {
2258 {
2259 ScopedTiming t("VisitClassRoots", *this->GetTiming());
2260 this->VisitClassRoots([this, objectsStack](const GCRoot &gcRoot) {
2261 if (concMarker_.MarkIfNotMarked(gcRoot.GetObjectHeader())) {
2262 ASSERT(gcRoot.GetObjectHeader() != nullptr);
2263 objectsStack->PushToStack(RootType::ROOT_CLASS, gcRoot.GetObjectHeader());
2264 } else {
2265 LOG_DEBUG_GC << "Skip root: " << gcRoot.GetObjectHeader();
2266 }
2267 });
2268 }
2269 {
2270 ScopedTiming t("VisitInternalStringTable", *this->GetTiming());
2271 this->GetPandaVm()->VisitStringTable(
2272 [this, objectsStack](ObjectHeader *str) {
2273 if (concMarker_.MarkIfNotMarked(str)) {
2274 ASSERT(str != nullptr);
2275 objectsStack->PushToStack(RootType::STRING_TABLE, str);
2276 }
2277 },
2278 VisitGCRootFlags::ACCESS_ROOT_ALL | VisitGCRootFlags::START_RECORDING_NEW_ROOT);
2279 }
2280 // Atomic with acquire order reason: the load of this variable should observe the store which interrupts concurrent marking
2281 while (!objectsStack->Empty() && !interruptConcurrentFlag_.load(std::memory_order_acquire)) {
2282 auto *object = this->PopObjectFromStack(objectsStack);
2283 ASSERT(concMarker_.IsMarked(object));
2284 ValidateObject(nullptr, object);
2285 auto *objectClass = object->template ClassAddr<BaseClass>();
2286 // We need annotation here for the FullMemoryBarrier used in InitializeClassByIdEntrypoint
2287 TSAN_ANNOTATE_HAPPENS_AFTER(objectClass);
2288 LOG_DEBUG_GC << "Current object: " << GetDebugInfoAboutObject(object);
2289
2290 ASSERT(!object->IsForwarded());
2291 CalcLiveBytesNotAtomicallyMarkPreprocess(object, objectClass);
2292 concMarker_.MarkInstance(objectsStack, object, objectClass);
2293 }
2294 }
2295
2296 template <class LanguageConfig>
2297 bool G1GC<LanguageConfig>::Trigger(PandaUniquePtr<GCTask> task)
2298 {
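// With the pause time goal enabled, skip triggering while we are still within the minimum delay before
// the next allowed pause; otherwise defer to the generational trigger logic.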
2299 if (this->GetSettings()->G1EnablePauseTimeGoal() &&
2300 g1PauseTracker_.MinDelayBeforeMaxPauseInMicros(panda::time::GetCurrentTimeInMicros()) > 0) {
2301 return false;
2302 }
2303 return GenerationalGC<LanguageConfig>::Trigger(std::move(task));
2304 }
2305
2306 TEMPLATE_CLASS_LANGUAGE_CONFIG(G1GC);
2307 TEMPLATE_CLASS_LANGUAGE_CONFIG(G1GCConcurrentMarker);
2308 TEMPLATE_CLASS_LANGUAGE_CONFIG(G1GCMixedMarker);
2309 TEMPLATE_CLASS_LANGUAGE_CONFIG(G1GCPauseMarker);
2310
2311 } // namespace panda::mem
2312