/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/mem/gc/gen-gc/gen-gc.h"
#include "runtime/include/hclass.h"
#include "runtime/include/coretypes/array-inl.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/runtime.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/mem/gc/gc_root-inl.h"
#include "runtime/mem/object_helpers-inl.h"
#include "runtime/mem/refstorage/global_object_storage.h"
#include "runtime/mem/rendezvous.h"
#include "runtime/include/panda_vm.h"
#include "runtime/mem/gc/card_table-inl.h"
#include "runtime/mem/gc/gc_workers_thread_pool.h"
#include "runtime/timing.h"
#include "runtime/include/exceptions.h"
#include "runtime/mem/pygote_space_allocator-inl.h"
#include "runtime/mem/gc/static/gc_marker_static-inl.h"
#include "runtime/mem/gc/dynamic/gc_marker_dynamic-inl.h"
#include "runtime/mem/gc/generational-gc-base-inl.h"

namespace panda::mem {

template <class LanguageConfig>
GenGC<LanguageConfig>::GenGC(ObjectAllocatorBase *object_allocator, const GCSettings &settings)
    : GenerationalGC<LanguageConfig>(object_allocator, settings), marker_(this)
{
    this->SetType(GCType::GEN_GC);
    this->SetTLABsSupported();
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::InitializeImpl()
{
    // The GC stores the PandaVM instance, so we take the internal allocator from the VM
    InternalAllocatorPtr allocator = this->GetInternalAllocator();
    this->CreateCardTable(allocator, PoolManager::GetMmapMemPool()->GetMinObjectAddress(),
                          PoolManager::GetMmapMemPool()->GetTotalObjectSize());
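    // The generational barrier set implements a post-store card-marking barrier: conceptually,
    // a store into a tenured object dirties the card covering that object, e.g.
    //   card_table[(ToUintPtr(obj) - min_object_address) >> CardTable::GetCardBits()] =
    //       CardTable::GetCardDirtyValue();
    // Marked cards are later visited to find tenured -> young references (see MarkYoung below).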
    auto barrier_set =
        allocator->New<GCGenBarrierSet>(allocator, PoolManager::GetMmapMemPool()->GetAddressOfMinObjectAddress(),
                                        this->GetCardTable(), CardTable::GetCardBits(), CardTable::GetCardDirtyValue());
    ASSERT(barrier_set != nullptr);
    this->SetGCBarrierSet(barrier_set);
    LOG_DEBUG_GC << "GenGC initialized";
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::ShouldRunTenuredGC(const GCTask &task)
{
    return task.reason_ == GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE;
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::ShouldRunFullGC(const GCTask &task, bool have_enough_space_for_young) const
{
    return !have_enough_space_for_young || task.reason_ == GCTaskCause::OOM_CAUSE ||
           task.reason_ == GCTaskCause::EXPLICIT_CAUSE || this->IsOnPygoteFork() ||
           task.reason_ == GCTaskCause::STARTUP_COMPLETE_CAUSE;
}

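// Collection dispatch: a full GC runs when there is not enough free space to move the young
// generation, on OOM/explicit/startup-complete causes, or at pygote fork; otherwise a young
// GC runs first, followed by a tenured GC if the trigger was the heap-usage threshold.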
template <class LanguageConfig>
void GenGC<LanguageConfig>::RunPhasesImpl(GCTask &task)
{
    LOG(DEBUG, GC) << "GenGC start";
    uint64_t footprint_before = this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
    LOG_DEBUG_GC << "Footprint before GC: " << footprint_before;
    if (this->IsLogDetailedGcInfoEnabled()) {
        this->footprint_list_.clear();
        this->footprint_list_.push_back({"Footprint before GC", footprint_before});
    }
    uint64_t young_total_time = 0;
    this->GetTiming()->Reset();
    {
        ScopedTiming t("Generational GC", *this->GetTiming());
        this->mem_stats_.Reset();
        // We trigger a full GC at the first pygote fork
        if (ShouldRunFullGC(task, HaveEnoughSpaceToMove())) {
            marker_.BindBitmaps(true);  // clear pygote live bitmaps, they will be rebuilt
            this->GetObjectGenAllocator()->InvalidateSpaceData();
            this->GetObjectGenAllocator()->UpdateSpaceData();
            RunFullGC(task);
        } else {
            {
                GCScopedPauseStats scoped_pause_stats(this->GetPandaVm()->GetGCStats());
                time::Timer timer(&young_total_time, true);
                this->GetPandaVm()->GetMemStats()->RecordGCPauseStart();
                marker_.BindBitmaps(false);
                this->GetObjectGenAllocator()->InvalidateSpaceData();
                this->GetObjectGenAllocator()->UpdateSpaceData();
                LOG_DEBUG_GC << "Young range: " << this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
                RunYoungGC(task);
                this->GetPandaVm()->GetMemStats()->RecordGCPhaseEnd();
                if (young_total_time > 0) {
                    this->GetStats()->AddTimeValue(young_total_time, TimeTypeStats::YOUNG_TOTAL_TIME);
                }
                uint64_t footprint_young = this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
                LOG_DEBUG_GC << "Footprint after young: " << footprint_young;
                if (this->IsLogDetailedGcInfoEnabled()) {
                    this->footprint_list_.push_back({"Footprint after young", footprint_young});
                }
            }
            if (ShouldRunTenuredGC(task)) {
                marker_.BindBitmaps(true);  // clear pygote live bitmaps, they will be rebuilt
                this->GetObjectGenAllocator()->InvalidateSpaceData();
                this->GetObjectGenAllocator()->UpdateSpaceData();
                RunTenuredGC(task);
            }
        }
    }
    uint64_t footprint_after = this->GetPandaVm()->GetMemStats()->GetFootprintHeap();
    LOG_DEBUG_GC << "Footprint after GC: " << footprint_after;
    if (this->IsLogDetailedGcInfoEnabled()) {
        this->footprint_list_.push_back({"Footprint after GC", footprint_after});
    }
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::PreStartupImp()
{
    GenerationalGC<LanguageConfig>::DisableTenuredGC();
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::InitGCBits(panda::ObjectHeader *obj_header)
{
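    // Objects allocated in the tenured space while the sweeper is running must start out
    // marked, so that the sweeper's MarkChecker treats them as live; the phase is re-checked
    // after marking because the sweep may have finished in between.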
    if (UNLIKELY(this->GetGCPhase() == GCPhase::GC_PHASE_SWEEP) &&
        (!this->GetObjectAllocator()->IsAddressInYoungSpace(ToUintPtr(obj_header)))) {
        obj_header->SetMarkedForGC();
        // un-mark if the sweep phase ended in between; otherwise we may miss this object in sweep
        if (UNLIKELY(this->GetGCPhase() != GCPhase::GC_PHASE_SWEEP)) {
            obj_header->SetUnMarkedForGC();
        }
    } else {
        obj_header->SetUnMarkedForGC();
    }
    LOG_DEBUG_GC << "Init gc bits for object: " << std::hex << obj_header << " bit: " << obj_header->IsMarkedForGC()
                 << ", is marked = " << IsMarked(obj_header);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::InitGCBitsForAllocationInTLAB(panda::ObjectHeader *obj_header)
{
    // The compiler allocates objects in TLABs only in the young space,
    // therefore we set them unmarked for GC here.
    obj_header->SetUnMarkedForGC();
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::Trigger()
{
    // Check the current heap size and collect the young generation;
    // if the threshold for the tenured generation is reached, collect the tenured generation too.
    auto task = MakePandaUnique<GCTask>(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE, time::GetCurrentTimeInNanos());
    // TODO(dtrubenkov): change for concurrent mode
    this->AddGCTask(true, std::move(task), true);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::RunYoungGC(GCTask &task)
{
    GCScope<TRACE_TIMING> scoped_trace(__FUNCTION__, this);
    LOG_DEBUG_GC << "GenGC RunYoungGC start";
    uint64_t young_pause_time;
    {
        NoAtomicGCMarkerScope scope(&this->marker_);
        // TODO: Measure only the parts that run on pause
        time::Timer timer(&young_pause_time, true);
        // NOLINTNEXTLINE(performance-unnecessary-value-param)
        MarkYoung(task);
        CollectYoungAndMove();
        this->GetCardTable()->ClearAll();
    }
    if (young_pause_time > 0) {
        this->GetStats()->AddTimeValue(young_pause_time, TimeTypeStats::YOUNG_PAUSED_TIME);
    }
    LOG_DEBUG_GC << "GenGC RunYoungGC end";
    task.collection_type_ = GCCollectionType::YOUNG;
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkYoung(const GCTask &task)
{
    GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_MARK_YOUNG);

    // Iterate over the roots; marking may push further objects onto the stack
    GCMarkingStackType objects_stack(this);
    ASSERT(this->GetObjectAllocator()->GetYoungSpaceMemRanges().size() == 1);
    auto young_mr = this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
    GCRootVisitor gc_mark_young = [&objects_stack, &young_mr, this](const GCRoot &gc_root) {
        // Skip non-young roots
        auto root_object_ptr = gc_root.GetObjectHeader();
        ASSERT(root_object_ptr != nullptr);
        if (!young_mr.IsAddressInRange(ToUintPtr(root_object_ptr))) {
            LOG_DEBUG_GC << "Skip root for young mark: " << std::hex << root_object_ptr;
            return;
        }
        LOG(DEBUG, GC) << "root " << GetDebugInfoAboutObject(root_object_ptr);
        if (this->MarkObjectIfNotMarked(root_object_ptr)) {
            objects_stack.PushToStack(gc_root.GetType(), root_object_ptr);
            this->MarkYoungStack(&objects_stack);
        }
    };
    {
        GCScope<TRACE_TIMING> marking_young_roots_trace("Marking roots young", this);
        this->VisitRoots(gc_mark_young,
                         VisitGCRootFlags::ACCESS_ROOT_NONE | VisitGCRootFlags::ACCESS_ROOT_AOT_STRINGS_ONLY_YOUNG);
    }
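    // Tenured -> young references are not visible through the VM roots; the write barrier
    // dirtied a card for every tenured-space store, so visiting the marked cards finds these
    // references without scanning the whole tenured space.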
    {
        ScopedTiming visit_card_table_roots_timing("VisitCardTableRoots", *this->GetTiming());
        LOG_DEBUG_GC << "START Marking tenured -> young roots";
        MemRangeChecker tenured_range_checker = [&young_mr](MemRange &mem_range) -> bool {
            return !young_mr.IsIntersect(mem_range);
        };
        ObjectChecker tenured_range_young_object_checker = [&young_mr](const ObjectHeader *object_header) -> bool {
            return young_mr.IsAddressInRange(ToUintPtr(object_header));
        };

        ObjectChecker from_object_checker = []([[maybe_unused]] const ObjectHeader *object_header) -> bool {
            return true;
        };

        this->VisitCardTableRoots(this->GetCardTable(), gc_mark_young, tenured_range_checker,
                                  tenured_range_young_object_checker, from_object_checker,
                                  CardTableProcessedFlag::VISIT_MARKED | CardTableProcessedFlag::VISIT_PROCESSED);
    }
    // The reference processor in VisitCardTableRoots can add new objects to the stack
    this->MarkYoungStack(&objects_stack);
    LOG_DEBUG_GC << "END Marking tenured -> young roots";
    auto ref_clear_pred = [this]([[maybe_unused]] const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
    this->GetPandaVm()->HandleReferences(task, ref_clear_pred);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkYoungStack(GCMarkingStackType *stack)
{
    trace::ScopedTrace scoped_trace(__FUNCTION__);
    ASSERT(stack != nullptr);
    auto allocator = this->GetObjectAllocator();
    auto &young_ranges = allocator->GetYoungSpaceMemRanges();
    auto ref_pred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
    while (!stack->Empty()) {
        auto *object = this->PopObjectFromStack(stack);
        ValidateObject(nullptr, object);
        auto *cls = object->template ClassAddr<BaseClass>();
        LOG_DEBUG_GC << "current object " << GetDebugInfoAboutObject(object);

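        // Trace only objects that reside in the young space; references leading out of it
        // are left to the tenured and full collections.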
        bool in_range = false;
        for (const auto &r : young_ranges) {
            if (r.IsAddressInRange(ToUintPtr(object))) {
                in_range = true;
                break;
            }
        }
        if (in_range) {
            marker_.MarkInstance(stack, ref_pred, object, cls);
        }
    }
}

template <class LanguageConfig>
HeapVerifierIntoGC<LanguageConfig> GenGC<LanguageConfig>::CollectVerificationInfo(const MemRange &young_mem_range)
{
    HeapVerifierIntoGC<LanguageConfig> young_verifier(this->GetPandaVm()->GetHeapManager());
    if (this->GetSettings()->IntoGCHeapVerification() && !this->IsFullGC()) {
        ScopedTiming collect_verification_timing("CollectVerificationInfo", *this->GetTiming());
        young_verifier.CollectVerificationInfo(PandaVector<MemRange>(1U, young_mem_range));
    }
    return young_verifier;
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::VerifyCollectAndMove(HeapVerifierIntoGC<LanguageConfig> &&young_verifier)
{
    if (this->GetSettings()->IntoGCHeapVerification() && !this->IsFullGC()) {
        ScopedTiming verification_timing("Verification", *this->GetTiming());
        size_t fails_count = young_verifier.VerifyAll();
        if (this->GetSettings()->FailOnHeapVerification() && fails_count > 0) {
            LOG(FATAL, GC) << "Heap was corrupted during GC, HeapVerifier found " << fails_count << " corruptions";
        }
    }
}

// NOLINTNEXTLINE(readability-function-size)
template <class LanguageConfig>
void GenGC<LanguageConfig>::CollectYoungAndMove()
{
    GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_COLLECT_YOUNG_AND_MOVE);
    LOG_DEBUG_GC << "== GenGC CollectYoungAndMove start ==";
    // TODO(dtrubenkov): add an assert that we are in STW
    PandaVector<ObjectHeader *> moved_objects;
    size_t prev_moved_size = this->GetPandaVm()->GetMemStats()->GetLastYoungObjectsMovedBytes();
    constexpr size_t MINIMAL_PREALLOC_MOVE_OBJ = 32U;
    // Adaptively preallocate the buffer for moved_objects to avoid useless reallocations
    moved_objects.reserve(std::max(MINIMAL_PREALLOC_MOVE_OBJ, prev_moved_size / GetMinimalObjectSize()));
    size_t young_move_size = 0;
    size_t young_move_count = 0;
    size_t young_delete_size = 0;
    size_t young_delete_count = 0;
    size_t bytes_in_heap_before_move = this->GetPandaVm()->GetMemStats()->GetFootprintHeap();

    auto *object_allocator = this->GetObjectGenAllocator();
    ASSERT(this->GetObjectAllocator()->GetYoungSpaceMemRanges().size() == 1);
    auto young_mem_range = this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
    HeapVerifierIntoGC<LanguageConfig> young_verifier = CollectVerificationInfo(young_mem_range);

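    // Marked (live) objects are promoted: copied into the tenured space with a forwarding
    // address installed in the old header. Unmarked objects are dead and only counted;
    // the whole young space is reclaimed in one shot by ResetYoungAllocator below.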
    std::function<void(ObjectHeader * object_header)> move_visitor(
        [this, &object_allocator, &moved_objects, &young_move_size, &young_move_count, &young_delete_size,
         &young_delete_count](ObjectHeader *object_header) -> void {
            size_t size = GetObjectSize(object_header);
            ASSERT(size <= ObjectAllocatorGen<>::GetYoungAllocMaxSize());
            // Use the aligned size here, because MemStats must be updated with it
            size_t aligned_size = GetAlignedObjectSize(size);
            if (object_header->IsMarkedForGC<false>()) {
                auto dst = reinterpret_cast<ObjectHeader *>(object_allocator->AllocateTenuredWithoutLocks(size));
                ASSERT(dst != nullptr);
                memcpy_s(dst, size, object_header, size);
                young_move_size += aligned_size;
                young_move_count++;
                LOG_DEBUG_OBJECT_EVENTS << "MOVE object " << object_header << " -> " << dst << ", size = " << size;
                moved_objects.push_back(dst);
                // set dst unmarked
                UnMarkObject(dst);
                this->SetForwardAddress(object_header, dst);
            } else {
                LOG_DEBUG_OBJECT_EVENTS << "DELETE OBJECT young: " << object_header;
                ++young_delete_count;
                young_delete_size += aligned_size;
            }
            // We record all objects in MemStats as SPACE_TYPE_OBJECT, so check it
            ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(object_header) == SpaceType::SPACE_TYPE_OBJECT);
        });
    {
        ScopedTiming move_timing("MoveAndSweep", *this->GetTiming());
        object_allocator->IterateOverYoungObjects(move_visitor);
    }
    this->mem_stats_.RecordSizeMovedYoung(young_move_size);
    this->mem_stats_.RecordCountMovedYoung(young_move_count);
    this->mem_stats_.RecordSizeFreedYoung(young_delete_size);
    this->mem_stats_.RecordCountFreedYoung(young_delete_count);
    UpdateRefsToMovedObjects(&moved_objects);
    this->VerifyCollectAndMove(std::move(young_verifier));
    // Sweep the string table here to avoid dangling references
    this->SweepStringTableYoung([&young_mem_range](ObjectHeader *object_header) {
        return young_mem_range.IsAddressInRange(ToUintPtr(object_header));
    });
    // Remove the young space
    object_allocator->ResetYoungAllocator();

    this->UpdateMemStats(bytes_in_heap_before_move, false);

    LOG_DEBUG_GC << "== GenGC CollectYoungAndMove end ==";
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::SweepStringTable()
{
    GCScope<TRACE_TIMING_PHASE> scope(__FUNCTION__, this, GCPhase::GC_PHASE_SWEEP_STRING_TABLE);

    ASSERT(this->GetObjectAllocator()->GetYoungSpaceMemRanges().size() == 1);
    // New strings may be created in the young space during tenured GC; we shouldn't collect them
    auto young_mem_range = this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
    this->GetPandaVm()->SweepStringTable([this, &young_mem_range](ObjectHeader *object) {
        if (young_mem_range.IsAddressInRange(ToUintPtr(object))) {
            return ObjectStatus::ALIVE_OBJECT;
        }
        return this->marker_.MarkChecker(object);
    });
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::UpdateRefsToMovedObjects(PandaVector<ObjectHeader *> *moved_objects)
{
    GCScope<TRACE_TIMING> scope("UpdateRefsToMovedObjects", this);

    auto obj_allocator = this->GetObjectAllocator();
    // Update references ex-young (moved) -> young
    LOG_DEBUG_GC << "process moved objects cnt = " << std::dec << moved_objects->size();
    LOG_DEBUG_GC << "=== Update exyoung -> young references. START. ===";
    for (auto obj : *moved_objects) {
        ObjectHelpers<LanguageConfig::LANG_TYPE>::UpdateRefsToMovedObjects(obj);
    }

    LOG_DEBUG_GC << "=== Update exyoung -> young references. END. ===";
    // Update references tenured -> young
    LOG_DEBUG_GC << "=== Update tenured -> young references. START. ===";
    auto young_space = obj_allocator->GetYoungSpaceMemRanges().at(0);
    auto update_refs_in_object(
        [](ObjectHeader *obj) { ObjectHelpers<LanguageConfig::LANG_TYPE>::UpdateRefsToMovedObjects(obj); });
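    // Only tenured ranges whose cards are marked can hold references to moved young objects,
    // so the fix-up scan is limited to those ranges.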
    this->GetCardTable()->VisitMarked(
        [&update_refs_in_object, &obj_allocator, &young_space](const MemRange &mem_range) {
            if (!young_space.Contains(mem_range)) {
                obj_allocator->IterateOverObjectsInRange(mem_range, update_refs_in_object);
            }
        },
        CardTableProcessedFlag::VISIT_MARKED | CardTableProcessedFlag::VISIT_PROCESSED);
    LOG_DEBUG_GC << "=== Update tenured -> young references. END. ===";
    this->CommonUpdateRefsToMovedObjects();
}

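// Tenured GC phase sequence: unmark plus initial root marking on pause, concurrent marking
// with card-table revisiting, remark of the roots recorded during the concurrent phase on
// pause, then string table sweep and concurrent sweep.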
template <class LanguageConfig>
void GenGC<LanguageConfig>::RunTenuredGC(GCTask &task)
{
    GCScope<TRACE_TIMING> scope(__FUNCTION__, this);
    LOG_DEBUG_GC << "GC tenured start";
    this->GetPandaVm()->GetMemStats()->RecordGCPauseStart();
    {
        ScopedTiming un_mark_timing("UnMark", *this->GetTiming());
        // Unmark everything, because young marking does not filter out tenured objects
        // TODO(dtrubenk): remove this
        this->GetObjectAllocator()->IterateOverObjects([this](ObjectHeader *obj) { this->marker_.UnMark(obj); });
    }
    GCMarkingStackType objects_stack(this);
    InitialMark(&objects_stack);
    auto object_allocator = this->GetObjectAllocator();
    this->GetPandaVm()->GetMemStats()->RecordGCPauseEnd();
    auto ref_pred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
    this->ConcurrentMark(
        &marker_, &objects_stack, CardTableVisitFlag::VISIT_ENABLED,
        [&objects_stack]() { return !objects_stack.Empty(); }, ref_pred,
        [&object_allocator](MemRange &mem_range) { return !object_allocator->IsIntersectedWithYoung(mem_range); });
    this->GetPandaVm()->GetMemStats()->RecordGCPauseStart();
    // NOLINTNEXTLINE(performance-unnecessary-value-param)
    ReMark(&objects_stack, task);
    ASSERT(objects_stack.Empty());
    {
        ScopedTiming un_mark_young_timing("UnMarkYoung", *this->GetTiming());
        // TODO(yxr): remove this after young objects are no longer marked in tenured GC
        this->GetObjectAllocator()->IterateOverYoungObjects([this](ObjectHeader *obj) { this->marker_.UnMark(obj); });
    }
    // TODO(dtrubenkov): make concurrent
    SweepStringTable();
    ConcurrentSweep();
    this->GetPandaVm()->GetMemStats()->RecordGCPauseEnd();
    LOG_DEBUG_GC << "GC tenured end";
    task.collection_type_ = GCCollectionType::TENURED;
}

// Full GC is run on pause
template <class LanguageConfig>
void GenGC<LanguageConfig>::RunFullGC(GCTask &task)
{
    GCScope<TRACE_TIMING> full_gc_scope(__FUNCTION__, this);
    LOG_DEBUG_GC << "Full GC start";
    this->GetPandaVm()->GetMemStats()->RecordGCPauseStart();
    this->SetFullGC(true);
    {
        ScopedTiming un_mark_timing("UnMark", *this->GetTiming());
        this->GetObjectAllocator()->IterateOverObjects([this](ObjectHeader *obj) { this->marker_.UnMark(obj); });
    }
    FullMark(task);
    // Sweep dead objects from the tenured space
    SweepStringTable();
    Sweep();
    // Young GC
    if (LIKELY(HaveEnoughSpaceToMove())) {
        GCScopedPauseStats scoped_pause_stats(this->GetPandaVm()->GetGCStats());  // Not clear young pause
        // We have already marked objects above, so just collect and move
        CollectYoungAndMove();
        this->GetCardTable()->ClearAll();
    }
    this->SetFullGC(false);
    this->GetPandaVm()->GetMemStats()->RecordGCPauseEnd();
    LOG_DEBUG_GC << "Full GC end";
    task.collection_type_ = GCCollectionType::FULL;
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkRoots(GCMarkingStackType *objects_stack, CardTableVisitFlag visit_card_table_roots,
                                      const ReferenceCheckPredicateT &ref_pred, VisitGCRootFlags flags)
{
    trace::ScopedTrace scoped_trace(__FUNCTION__);
    GCRootVisitor gc_mark_roots = [this, &objects_stack, &ref_pred](const GCRoot &gc_root) {
        ObjectHeader *root_object = gc_root.GetObjectHeader();
        ObjectHeader *from_object = gc_root.GetFromObjectHeader();
        LOG_DEBUG_GC << "Handle root " << GetDebugInfoAboutObject(root_object);
        if (UNLIKELY(from_object != nullptr) &&
            this->IsReference(from_object->ClassAddr<BaseClass>(), from_object, ref_pred)) {
            LOG_DEBUG_GC << "Add reference: " << GetDebugInfoAboutObject(from_object) << " to stack";
            marker_.Mark(from_object);
            this->ProcessReference(objects_stack, from_object->ClassAddr<BaseClass>(), from_object,
                                   GC::EmptyReferenceProcessPredicate);
        } else {
            // We should always add this object to the stack: it may have been marked in InitialMark,
            // but some of its fields may have been written during ConcurrentMark, so all fields must
            // be iterated over again; MarkObjectIfNotMarked can't be used here
            marker_.Mark(root_object);
            objects_stack->PushToStack(gc_root.GetType(), root_object);
        }
    };
    this->VisitRoots(gc_mark_roots, flags);
    if (visit_card_table_roots == CardTableVisitFlag::VISIT_ENABLED) {
        auto allocator = this->GetObjectAllocator();
        ASSERT(allocator->GetYoungSpaceMemRanges().size() == 1);
        MemRange young_mr = allocator->GetYoungSpaceMemRanges().at(0);
        MemRangeChecker young_range_checker = []([[maybe_unused]] MemRange &mem_range) -> bool { return true; };
        ObjectChecker young_range_tenured_object_checker = [&young_mr](const ObjectHeader *object_header) -> bool {
            return !young_mr.IsAddressInRange(ToUintPtr(object_header));
        };
        ObjectChecker from_object_checker = [&young_mr, this](const ObjectHeader *object_header) -> bool {
            // Don't visit objects that are in the tenured space and not marked
            return young_mr.IsAddressInRange(ToUintPtr(object_header)) || IsMarked(object_header);
        };
        this->VisitCardTableRoots(this->GetCardTable(), gc_mark_roots, young_range_checker,
                                  young_range_tenured_object_checker, from_object_checker,
                                  CardTableProcessedFlag::VISIT_MARKED);
    }
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::InitialMark(GCMarkingStackType *objects_stack)
{
    GCScope<TRACE_TIMING_PHASE> gc_scope(__FUNCTION__, this, GCPhase::GC_PHASE_INITIAL_MARK);
    {
        NoAtomicGCMarkerScope scope(&this->marker_);
        auto ref_pred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
        MarkRoots(objects_stack, CardTableVisitFlag::VISIT_DISABLED, ref_pred,
                  VisitGCRootFlags::ACCESS_ROOT_NONE | VisitGCRootFlags::START_RECORDING_NEW_ROOT);
    }
}

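// ReMark runs on pause: it revisits the roots recorded during the concurrent phase
// (ACCESS_ROOT_ONLY_NEW / END_RECORDING_NEW_ROOT), drains the mark stack and processes
// pending references, so marking is complete before the sweep starts.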
template <class LanguageConfig>
void GenGC<LanguageConfig>::ReMark(GCMarkingStackType *objects_stack, const GCTask &task)
{
    GCScope<TRACE_TIMING_PHASE> gc_scope(__FUNCTION__, this, GCPhase::GC_PHASE_REMARK);

    // TODO(dtrubenkov): consider iterative concurrent marking of the card table
    {
        NoAtomicGCMarkerScope scope(&this->marker_);
        auto ref_pred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
        MarkRoots(objects_stack, CardTableVisitFlag::VISIT_ENABLED, ref_pred,
                  VisitGCRootFlags::ACCESS_ROOT_ONLY_NEW | VisitGCRootFlags::END_RECORDING_NEW_ROOT);
        this->MarkStack(&marker_, objects_stack, ref_pred,
                        []([[maybe_unused]] const ObjectHeader *obj) { return true; });
        {
            ScopedTiming t1("VisitInternalStringTable", *this->GetTiming());
            this->GetPandaVm()->VisitStringTable(
                [this, &objects_stack](ObjectHeader *str) {
                    if (this->MarkObjectIfNotMarked(str)) {
                        ASSERT(str != nullptr);
                        objects_stack->PushToStack(RootType::STRING_TABLE, str);
                    }
                },
                VisitGCRootFlags::ACCESS_ROOT_ONLY_NEW | VisitGCRootFlags::END_RECORDING_NEW_ROOT);
            this->MarkStack(&marker_, objects_stack, ref_pred, GC::EmptyReferenceProcessPredicate);
        }
        // NOLINTNEXTLINE(performance-unnecessary-value-param)
        this->GetPandaVm()->HandleReferences(task, GC::EmptyReferenceProcessPredicate);
    }
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::FullMark(const GCTask &task)
{
    GCScope<TRACE_TIMING_PHASE> full_mark_scope(__FUNCTION__, this, GCPhase::GC_PHASE_MARK);
    NoAtomicGCMarkerScope marker_scope(&this->marker_);

    GCMarkingStackType objects_stack(this);
    VisitGCRootFlags flags = VisitGCRootFlags::ACCESS_ROOT_ALL;
    auto ref_pred = GC::EmptyReferenceProcessPredicate;
    // Mark all reachable objects
    MarkRoots(&objects_stack, CardTableVisitFlag::VISIT_DISABLED, ref_pred, flags);
    this->GetPandaVm()->VisitStringTable(
        [this, &objects_stack](ObjectHeader *str) {
            if (this->MarkObjectIfNotMarked(str)) {
                ASSERT(str != nullptr);
                objects_stack.PushToStack(RootType::STRING_TABLE, str);
            }
        },
        flags);
    this->MarkStack(&marker_, &objects_stack, ref_pred, []([[maybe_unused]] const ObjectHeader *obj) { return true; });
    auto ref_clear_pred = []([[maybe_unused]] const ObjectHeader *obj) { return true; };
    // NOLINTNEXTLINE(performance-unnecessary-value-param)
    this->GetPandaVm()->HandleReferences(task, ref_clear_pred);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkReferences(GCMarkingStackType *references, GCPhase gc_phase)
{
    trace::ScopedTrace scoped_trace(__FUNCTION__);
    LOG_DEBUG_GC << "Start marking " << references->Size() << " references";
    auto ref_pred = [this](const ObjectHeader *obj) { return this->InGCSweepRange(obj); };
    if (gc_phase == GCPhase::GC_PHASE_MARK_YOUNG) {
        this->MarkYoungStack(references);
    } else if (gc_phase == GCPhase::GC_PHASE_INITIAL_MARK || gc_phase == GCPhase::GC_PHASE_MARK ||
               gc_phase == GCPhase::GC_PHASE_REMARK) {
        this->MarkStack(&marker_, references, ref_pred, GC::EmptyReferenceProcessPredicate);
    } else {
        UNREACHABLE();
    }
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::MarkObject(ObjectHeader *object)
{
    marker_.Mark(object);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::UnMarkObject(ObjectHeader *object_header)
{
    LOG_DEBUG_GC << "Set unmark for GC " << GetDebugInfoAboutObject(object_header);
    this->marker_.UnMark(object_header);
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::IsMarked(const ObjectHeader *object) const
{
    return this->marker_.IsMarked(object);
}

template <class LanguageConfig>
void GenGC<LanguageConfig>::Sweep()
{
    GCScope<TRACE_TIMING_PHASE> gc_sweep_scope(__FUNCTION__, this, GCPhase::GC_PHASE_SWEEP);

    size_t freed_object_size = 0U;
    size_t freed_object_count = 0U;

    this->GetObjectAllocator()->Collect(
        [this, &freed_object_size, &freed_object_count](ObjectHeader *object) {
            auto status = this->marker_.MarkChecker(object);
            if (status == ObjectStatus::DEAD_OBJECT) {
                freed_object_size += GetAlignedObjectSize(GetObjectSize(object));
                freed_object_count++;
            }
            return status;
        },
        GCCollectMode::GC_ALL);
    this->GetObjectAllocator()->VisitAndRemoveFreePools([this](void *mem, size_t size) {
        this->GetCardTable()->ClearCardRange(ToUintPtr(mem), ToUintPtr(mem) + size);
        PoolManager::GetMmapMemPool()->FreePool(mem, size);
    });
    this->mem_stats_.RecordSizeFreedTenured(freed_object_size);
    this->mem_stats_.RecordCountFreedTenured(freed_object_count);
}

// NO_THREAD_SAFETY_ANALYSIS because clang thread safety analysis cannot reason about ConcurrentScope's locking
template <class LanguageConfig>
NO_THREAD_SAFETY_ANALYSIS void GenGC<LanguageConfig>::ConcurrentSweep()
{
    GCScope<TRACE_TIMING> gc_scope(__FUNCTION__, this);
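    // Concurrency is not started in the constructor; it is enabled via Start() below,
    // only after the GC_PHASE_SWEEP phase has been set.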
    ConcurrentScope concurrent_scope(this, false);
    size_t freed_object_size = 0U;
    size_t freed_object_count = 0U;

    // NB! The block can't be moved out of the braces: we have to make sure GC_PHASE_SWEEP is cleared
    {
        GCScopedPhase scoped_phase(this->GetPandaVm()->GetMemStats(), this, GCPhase::GC_PHASE_SWEEP);
        concurrent_scope.Start();  // enable concurrency after GC_PHASE_SWEEP has been set

        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (LanguageConfig::MT_MODE == MT_MODE_MULTI) {
            // Run monitor deflation again to avoid reclaiming an object before its monitor is deflated
            auto young_mr = this->GetObjectAllocator()->GetYoungSpaceMemRanges().at(0);
            this->GetPandaVm()->GetMonitorPool()->DeflateMonitorsWithCallBack([&young_mr, this](Monitor *monitor) {
                ObjectHeader *object_header = monitor->GetObject();
                return (!IsMarked(object_header)) && (!young_mr.IsAddressInRange(ToUintPtr(object_header)));
            });
        }

        this->GetObjectAllocator()->Collect(
            [this, &freed_object_size, &freed_object_count](ObjectHeader *object) {
                auto status = this->marker_.MarkChecker(object);
                if (status == ObjectStatus::DEAD_OBJECT) {
                    LOG_DEBUG_OBJECT_EVENTS << "DELETE OBJECT tenured: " << object;
                    freed_object_size += GetAlignedObjectSize(GetObjectSize(object));
                    freed_object_count++;
                }
                return status;
            },
            GCCollectMode::GC_ALL);
        this->GetObjectAllocator()->VisitAndRemoveFreePools([this](void *mem, size_t size) {
            this->GetCardTable()->ClearCardRange(ToUintPtr(mem), ToUintPtr(mem) + size);
            PoolManager::GetMmapMemPool()->FreePool(mem, size);
        });
    }

    this->mem_stats_.RecordSizeFreedTenured(freed_object_size);
    this->mem_stats_.RecordCountFreedTenured(freed_object_count);

    // During the concurrent sweep phase, newly created objects may be marked in InitGCBits,
    // so we have to wait until the sweep is done; only then can we safely unmark objects
    // concurrently with the mutator.
    ASSERT(this->GetGCPhase() != GCPhase::GC_PHASE_SWEEP);  // Make sure we are out of the sweep scope
    this->GetObjectAllocator()->IterateOverTenuredObjects([this](ObjectHeader *obj) { this->marker_.UnMark(obj); });
}

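// Returns true if the object can be collected (swept) by the collection that is currently
// running: young-space objects during young marking, tenured objects otherwise, and
// everything during a full GC.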
template <class LanguageConfig>
bool GenGC<LanguageConfig>::InGCSweepRange(const ObjectHeader *obj) const
{
    bool in_young_space = this->GetObjectAllocator()->IsAddressInYoungSpace(ToUintPtr(obj));
    auto phase = this->GetGCPhase();

    // A young GC is running and the object is in the young space
    if (phase == GCPhase::GC_PHASE_MARK_YOUNG && in_young_space) {
        return true;
    }

    // A tenured GC is running and the object is in the tenured space
    if (phase != GCPhase::GC_PHASE_MARK_YOUNG && !in_young_space) {
        return true;
    }

    return this->IsFullGC();
}

template <class LanguageConfig>
bool GenGC<LanguageConfig>::HaveEnoughSpaceToMove() const
{
    // This is a workaround for pools: the tenured space has two types of pools, and in bad cases
    // objects can be moved to different spaces. Moving 4M of objects from the bump allocator to
    // another allocator may need more than 4M of space there, so we require 3 empty pools.
    // TODO(xucheng) : remove the checker when we can do partial young collection.
    // The minimal number that can guarantee that we move all objects in the young space.
    constexpr size_t POOLS_NUM = 3;
    return this->GetObjectAllocator()->HaveEnoughPoolsInObjectSpace(POOLS_NUM);
}

TEMPLATE_CLASS_LANGUAGE_CONFIG(GenGC);

}  // namespace panda::mem