/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <memory>

#include "libpandabase/os/mem.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/utils/time.h"
#include "runtime/assert_gc_scope.h"
#include "runtime/include/class.h"
#include "runtime/include/coretypes/dyn_objects.h"
#include "runtime/include/locks.h"
#include "runtime/include/runtime.h"
#include "runtime/include/runtime_notification.h"
#include "runtime/include/stack_walker-inl.h"
#include "runtime/mem/gc/epsilon/epsilon.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/gc/gc_root-inl.h"
#include "runtime/mem/gc/gc_queue.h"
#include "runtime/mem/gc/g1/g1-gc.h"
#include "runtime/mem/gc/gen-gc/gen-gc.h"
#include "runtime/mem/gc/stw-gc/stw-gc.h"
#include "runtime/mem/pygote_space_allocator-inl.h"
#include "runtime/mem/heap_manager.h"
#include "runtime/mem/gc/reference-processor/reference_processor.h"
#include "runtime/include/panda_vm.h"
#include "runtime/include/object_accessor-inl.h"
#include "runtime/include/coretypes/class.h"
#include "runtime/thread_manager.h"

namespace panda::mem {
using TaggedValue = coretypes::TaggedValue;
using TaggedType = coretypes::TaggedType;
using DynClass = coretypes::DynClass;

GCListener::~GCListener() = default;

GC::GC(ObjectAllocatorBase *object_allocator, const GCSettings &settings)
    : gc_settings_(settings),
      object_allocator_(object_allocator),
      internal_allocator_(InternalAllocator<>::GetInternalAllocatorFromRuntime())
{
}

GC::~GC()
{
    InternalAllocatorPtr allocator = GetInternalAllocator();
    if (gc_queue_ != nullptr) {
        allocator->Delete(gc_queue_);
    }
    if (gc_listeners_ptr_ != nullptr) {
        allocator->Delete(gc_listeners_ptr_);
    }
    if (gc_barrier_set_ != nullptr) {
        allocator->Delete(gc_barrier_set_);
    }
    if (cleared_references_ != nullptr) {
        allocator->Delete(cleared_references_);
    }
    if (cleared_references_lock_ != nullptr) {
        allocator->Delete(cleared_references_lock_);
    }
}

GCType GC::GetType()
{
    return gc_type_;
}

void GC::SetPandaVM(PandaVM *vm)
{
    vm_ = vm;
    reference_processor_ = vm->GetReferenceProcessor();
}

NativeGcTriggerType GC::GetNativeGcTriggerType()
{
    return gc_settings_.native_gc_trigger_type;
}

size_t GC::SimpleNativeAllocationGcWatermark()
{
    return GetPandaVm()->GetOptions().GetMaxFree();
}

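// Spins until the GC phase can be switched from IDLE to RUNNING. While another
// collection is in flight, the loop releases the rendezvous safepoint, sleeps
// briefly, and re-acquires it so that mutators can make progress in between.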
NO_THREAD_SAFETY_ANALYSIS void GC::WaitForIdleGC()
{
    while (!CASGCPhase(GCPhase::GC_PHASE_IDLE, GCPhase::GC_PHASE_RUNNING)) {
        GetPandaVm()->GetRendezvous()->SafepointEnd();
        constexpr uint64_t WAIT_FINISHED = 10;
        // Use NativeSleep for all threads, as this thread shouldn't hold the mutator lock here
        os::thread::NativeSleep(WAIT_FINISHED);
        GetPandaVm()->GetRendezvous()->SafepointBegin();
    }
}

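// Native-allocation trigger: under SIMPLE_STRATEGY, once the registered native bytes
// exceed the watermark, a NATIVE_ALLOC_CAUSE task is queued and the current managed
// thread polls the safepoint so the collection can start promptly.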
inline void GC::TriggerGCForNative()
{
    auto native_gc_trigger_type = GetNativeGcTriggerType();
    ASSERT_PRINT((native_gc_trigger_type == NativeGcTriggerType::NO_NATIVE_GC_TRIGGER) ||
                     (native_gc_trigger_type == NativeGcTriggerType::SIMPLE_STRATEGY),
                 "Unknown Native GC Trigger type");
    switch (native_gc_trigger_type) {
        case NativeGcTriggerType::NO_NATIVE_GC_TRIGGER:
            break;
        case NativeGcTriggerType::SIMPLE_STRATEGY:
            if (native_bytes_registered_ > SimpleNativeAllocationGcWatermark()) {
                auto task = MakePandaUnique<GCTask>(GCTaskCause::NATIVE_ALLOC_CAUSE, time::GetCurrentTimeInNanos());
                AddGCTask(false, std::move(task), true);
                MTManagedThread::GetCurrent()->SafepointPoll();
            }
            break;
        default:
            LOG(FATAL, GC) << "Unknown Native GC Trigger type";
            break;
    }
}

void GC::Initialize()
{
    trace::ScopedTrace scoped_trace(__PRETTY_FUNCTION__);
    // The GC has saved the PandaVM instance, so we get the allocator from the PandaVM.
    auto allocator = GetInternalAllocator();
    gc_listeners_ptr_ = allocator->template New<PandaVector<GCListener *>>(allocator->Adapter());
    cleared_references_lock_ = allocator->New<os::memory::Mutex>();
    os::memory::LockHolder holder(*cleared_references_lock_);
    cleared_references_ = allocator->New<PandaVector<panda::mem::Reference *>>(allocator->Adapter());
    gc_queue_ = allocator->New<GCQueueWithTime>(this);
    InitializeImpl();
}

void GC::StartGC()
{
    CreateWorker();
}

void GC::StopGC()
{
    JoinWorker();
    ASSERT(gc_queue_ != nullptr);
    gc_queue_->Finalize();
}

void GC::BindBitmaps(bool clear_pygote_space_bitmaps)
{
    // Set marking bitmaps
    marker_.ClearMarkBitMaps();
    auto pygote_space_allocator = object_allocator_->GetPygoteSpaceAllocator();
    if (pygote_space_allocator != nullptr) {
        // Clear the live bitmaps if we decide to rebuild them in a full GC;
        // they will be used as mark bitmaps and updated at the end of the GC
        if (clear_pygote_space_bitmaps) {
            pygote_space_allocator->ClearLiveBitmaps();
        }
        auto &bitmaps = pygote_space_allocator->GetLiveBitmaps();
        marker_.AddMarkBitMaps(bitmaps.begin(), bitmaps.end());
    }
}

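// Drives one full collection cycle: waits for the IDLE -> RUNNING phase transition,
// bails out early if another collection completed in the meantime (gc_counter_ moved),
// optionally verifies/dumps the heap before and after, delegates the real work to
// RunPhasesImpl, and releases unused internal-allocator pools while still paused.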
// NOLINTNEXTLINE(performance-unnecessary-value-param)
void GC::RunPhases(const GCTask &task)
{
    DCHECK_ALLOW_GARBAGE_COLLECTION;
    trace::ScopedTrace s_trace(__FUNCTION__);
    auto old_counter = gc_counter_.load(std::memory_order_acquire);
    WaitForIdleGC();
    auto new_counter = gc_counter_.load(std::memory_order_acquire);
    if (new_counter > old_counter) {
        SetGCPhase(GCPhase::GC_PHASE_IDLE);
        return;
    }
    last_cause_ = task.reason_;
    if (gc_settings_.pre_gc_heap_verification) {
        trace::ScopedTrace s_trace2("PreGCHeapVerifier");
        size_t fail_count = VerifyHeap();
        if (gc_settings_.fail_on_heap_verification && fail_count > 0) {
            LOG(FATAL, GC) << "Heap corrupted before GC, HeapVerifier found " << fail_count << " corruptions";
        }
    }
    gc_counter_.fetch_add(1, std::memory_order_acq_rel);
    if (gc_settings_.is_dump_heap) {
        PandaOStringStream os;
        os << "Heap dump before GC" << std::endl;
        GetPandaVm()->GetHeapManager()->DumpHeap(&os);
        std::cerr << os.str() << std::endl;
    }
    size_t bytes_in_heap_before_gc = GetPandaVm()->GetMemStats()->GetFootprintHeap();
    LOG_DEBUG_GC << "Bytes in heap before GC " << std::dec << bytes_in_heap_before_gc;
    {
        GCScopedStats scoped_stats(GetPandaVm()->GetGCStats(), gc_type_ == GCType::STW_GC ? GetStats() : nullptr);
        for (auto listener : *gc_listeners_ptr_) {
            listener->GCStarted(bytes_in_heap_before_gc);
        }

        PreRunPhasesImpl();
        // NOLINTNEXTLINE(performance-unnecessary-value-param)
        RunPhasesImpl(task);

        // Clear the internal allocator's unused pools (must be done during the pause to avoid race conditions):
        // - Clear the global part:
        InternalAllocator<>::GetInternalAllocatorFromRuntime()->VisitAndRemoveFreePools(
            [](void *mem, [[maybe_unused]] size_t size) { PoolManager::GetMmapMemPool()->FreePool(mem, size); });
        // - Clear the thread-local part:
        GetPandaVm()->GetThreadManager()->EnumerateThreads(
            [](ManagedThread *thread) {
                InternalAllocator<>::RemoveFreePoolsForLocalInternalAllocator(thread->GetLocalInternalAllocator());
                return true;
            },
            static_cast<unsigned int>(EnumerationFlag::ALL));

        size_t bytes_in_heap_after_gc = GetPandaVm()->GetMemStats()->GetFootprintHeap();
        // There is a case when bytes_in_heap_after_gc > 0 and bytes_in_heap_before_gc == 0,
        // because TLABs are registered during GC
        if (bytes_in_heap_after_gc > 0 && bytes_in_heap_before_gc > 0) {
            GetStats()->AddReclaimRatioValue(1 - static_cast<double>(bytes_in_heap_after_gc) / bytes_in_heap_before_gc);
        }
        LOG_DEBUG_GC << "Bytes in heap after GC " << std::dec << bytes_in_heap_after_gc;
        for (auto listener : *gc_listeners_ptr_) {
            listener->GCFinished(task, bytes_in_heap_before_gc, bytes_in_heap_after_gc);
        }
    }
    last_gc_reclaimed_bytes.store(vm_->GetGCStats()->GetObjectsFreedBytes());

    LOG(INFO, GC) << task.reason_ << " " << GetPandaVm()->GetGCStats()->GetStatistics();
    if (gc_settings_.is_dump_heap) {
        PandaOStringStream os;
        os << "Heap dump after GC" << std::endl;
        GetPandaVm()->GetHeapManager()->DumpHeap(&os);
        std::cerr << os.str() << std::endl;
    }

    if (gc_settings_.post_gc_heap_verification) {
        trace::ScopedTrace s_trace2("PostGCHeapVerifier");
        size_t fail_count = VerifyHeap();
        if (gc_settings_.fail_on_heap_verification && fail_count > 0) {
            LOG(FATAL, GC) << "Heap corrupted after GC, HeapVerifier found " << fail_count << " corruptions";
        }
    }

    SetGCPhase(GCPhase::GC_PHASE_IDLE);
}

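// Factory that instantiates the collector matching gc_type (Epsilon, STW, generational
// or G1) through the runtime's internal allocator.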
template <class LanguageConfig>
GC *CreateGC(GCType gc_type, ObjectAllocatorBase *object_allocator, const GCSettings &settings)
{
    GC *ret = nullptr;
    ASSERT_PRINT((gc_type == GCType::EPSILON_GC) || (gc_type == GCType::STW_GC) || (gc_type == GCType::GEN_GC) ||
                     (gc_type == GCType::G1_GC),
                 "Unknown GC type");
    InternalAllocatorPtr allocator {InternalAllocator<>::GetInternalAllocatorFromRuntime()};

    switch (gc_type) {
        case GCType::EPSILON_GC:
            ret = allocator->New<EpsilonGC<LanguageConfig>>(object_allocator, settings);
            break;
        case GCType::STW_GC:
            ret = allocator->New<StwGC<LanguageConfig>>(object_allocator, settings);
            break;
        case GCType::GEN_GC:
            ret = allocator->New<GenGC<LanguageConfig>>(object_allocator, settings);
            break;
        case GCType::G1_GC:
            ret = allocator->New<G1GC<LanguageConfig>>(object_allocator, settings);
            break;
        default:
            LOG(FATAL, GC) << "Unknown GC type";
            break;
    }
    return ret;
}

void GC::MarkObject(ObjectHeader *object_header)
{
    marker_.Mark(object_header);
}

bool GC::MarkObjectIfNotMarked(ObjectHeader *object_header)
{
    ASSERT(object_header != nullptr);
    if (IsMarked(object_header)) {
        return false;
    }
    MarkObject(object_header);
    return true;
}

void GC::UnMarkObject(ObjectHeader *object_header)
{
    marker_.UnMark(object_header);
}

void GC::ProcessReference(PandaStackTL<ObjectHeader *> *objects_stack, BaseClass *cls, const ObjectHeader *object)
{
    ASSERT(reference_processor_ != nullptr);
    reference_processor_->DelayReferenceProcessing(cls, object);
    reference_processor_->HandleReference(this, objects_stack, cls, object);
}

bool GC::IsMarked(const ObjectHeader *object) const
{
    return marker_.IsMarked(object);
}

void GC::AddReference(ObjectHeader *object)
{
    ASSERT(IsMarked(object));
    PandaStackTL<ObjectHeader *> references;
    AddToStack(&references, object);
    MarkReferences(&references, phase_);
    if (gc_type_ != GCType::EPSILON_GC) {
        ASSERT(references.empty());
    }
}

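// Processes discovered references for this cycle. Soft references are cleared eagerly
// when the collection was caused by OOM or an explicit request; whatever the reference
// processor reports as cleared is stashed under cleared_references_lock_ so it can be
// enqueued later by EnqueueReferences.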
// NOLINTNEXTLINE(performance-unnecessary-value-param)
void GC::ProcessReferences(GCPhase gc_phase, const GCTask &task)
{
    LOG(DEBUG, REF_PROC) << "Start processing cleared references";
    ASSERT(reference_processor_ != nullptr);
    bool clear_soft_references = task.reason_ == GCTaskCause::OOM_CAUSE || task.reason_ == GCTaskCause::EXPLICIT_CAUSE;
    reference_processor_->ProcessReferences(false, clear_soft_references, gc_phase);
    Reference *processed_ref = reference_processor_->CollectClearedReferences();

    if (processed_ref != nullptr) {
        os::memory::LockHolder holder(*cleared_references_lock_);
        cleared_references_->push_back(processed_ref);
    }
}

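// Entry point of the dedicated GC thread: binds a THREAD_TYPE_GC Thread to the VM (so
// ObjectAccessor::GetBarrierSet() works from this thread), then loops pulling tasks
// from the queue until the GC is stopped, releasing each task after it runs.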
void GC::GCWorkerEntry(GC *gc, PandaVM *vm)
{
    // We need to attach the VM to the current thread, since the GC can call ObjectAccessor::GetBarrierSet() methods
    Thread gc_thread(vm, Thread::ThreadType::THREAD_TYPE_GC);
    ScopedCurrentThread sct(&gc_thread);
    while (true) {
        auto task = gc->gc_queue_->GetTask();
        if (!gc->IsGCRunning()) {
            LOG(DEBUG, GC) << "Stopping GC thread";
            if (task != nullptr) {
                task->Release(Runtime::GetCurrent()->GetInternalAllocator());
            }
            break;
        }
        if (task == nullptr) {
            continue;
        }
        if (task->reason_ == GCTaskCause::INVALID_CAUSE) {
            task->Release(Runtime::GetCurrent()->GetInternalAllocator());
            continue;
        }
        LOG(DEBUG, GC) << "Running GC task, reason " << task->reason_;
        task->Run(*gc);
        task->Release(Runtime::GetCurrent()->GetInternalAllocator());
    }
}

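// Worker lifecycle: CreateWorker spawns the GC thread (unless the GC runs in place);
// JoinWorker clears gc_running_, signals the queue so a blocked GetTask() wakes up,
// then joins and frees the thread.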
void GC::JoinWorker()
{
    gc_running_.store(false);
    if (!gc_settings_.run_gc_in_place) {
        ASSERT(worker_ != nullptr);
    }
    if (worker_ != nullptr && !gc_settings_.run_gc_in_place) {
        ASSERT(gc_queue_ != nullptr);
        gc_queue_->Signal();
        worker_->join();
        InternalAllocatorPtr allocator = GetInternalAllocator();
        allocator->Delete(worker_);
        worker_ = nullptr;
    }
}

void GC::CreateWorker()
{
    gc_running_.store(true);
    ASSERT(worker_ == nullptr);
    if (worker_ == nullptr && !gc_settings_.run_gc_in_place) {
        InternalAllocatorPtr allocator = GetInternalAllocator();
        worker_ = allocator->New<std::thread>(GC::GCWorkerEntry, this, this->GetPandaVm());
        if (worker_ == nullptr) {
            LOG(FATAL, RUNTIME) << "Cannot create a GC thread";
        }
        int res = os::thread::SetThreadName(worker_->native_handle(), "GCThread");
        if (res != 0) {
            LOG(ERROR, RUNTIME) << "Failed to set a name for the gc thread";
        }
        ASSERT(gc_queue_ != nullptr);
    }
}

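// Task scheduled by PreStartup: once the startup grace period expires, it restores the
// normal GC trigger footprint and runs the collector's post-fork callback.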
class GC::PostForkGCTask : public GCTask {
public:
    PostForkGCTask(GCTaskCause reason, uint64_t target_time) : GCTask(reason, target_time) {}

    void Run(mem::GC &gc) override
    {
        LOG(INFO, GC) << "Running PostForkGCTask";
        gc.GetPandaVm()->GetGCTrigger()->RestoreMinTargetFootprint();
        gc.PostForkCallback();
        GCTask::Run(gc);
    }

    ~PostForkGCTask() override = default;

    NO_COPY_SEMANTIC(PostForkGCTask);
    NO_MOVE_SEMANTIC(PostForkGCTask);
};

void GC::PreStartup()
{
    // Add a delayed GCTask.
    if ((!Runtime::GetCurrent()->IsZygote()) && (!gc_settings_.run_gc_in_place)) {
        // Divide by 2 to temporarily set the target footprint to a high value and disable GC during app startup.
        GetPandaVm()->GetGCTrigger()->SetMinTargetFootprint(Runtime::GetOptions().GetHeapSizeLimit() / 2);
        PreStartupImp();
        constexpr uint64_t DISABLE_GC_DURATION_NS = 2000 * 1000 * 1000;
        auto task = MakePandaUnique<PostForkGCTask>(GCTaskCause::STARTUP_COMPLETE_CAUSE,
                                                    time::GetCurrentTimeInNanos() + DISABLE_GC_DURATION_NS);
        AddGCTask(true, std::move(task), false);
        LOG(INFO, GC) << "Add PostForkGCTask";
    }
}

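// In in-place mode the task is executed synchronously on the calling thread; otherwise
// it is handed to the worker queue. Threshold-triggered tasks are throttled through
// can_add_gc_task_ so at most one of them is pending at a time.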
// NOLINTNEXTLINE(performance-unnecessary-value-param)
void GC::AddGCTask(bool is_managed, PandaUniquePtr<GCTask> task, bool triggered_by_threshold)
{
    if (gc_settings_.run_gc_in_place) {
        auto *gc_task = task.release();
        if (IsGCRunning()) {
            if (is_managed) {
                WaitForGCInManaged(*gc_task);
            } else {
                WaitForGC(*gc_task);
            }
        }
        gc_task->Release(Runtime::GetCurrent()->GetInternalAllocator());
    } else {
        if (triggered_by_threshold) {
            bool expect = true;
            if (can_add_gc_task_.compare_exchange_strong(expect, false, std::memory_order_seq_cst)) {
                gc_queue_->AddTask(task.release());
            }
        } else {
            gc_queue_->AddTask(task.release());
        }
    }
}

bool GC::IsReference(BaseClass *cls, const ObjectHeader *ref)
{
    ASSERT(reference_processor_ != nullptr);
    return reference_processor_->IsReference(cls, ref);
}

void GC::EnqueueReferences()
{
    while (true) {
        panda::mem::Reference *ref = nullptr;
        {
            os::memory::LockHolder holder(*cleared_references_lock_);
            if (cleared_references_->empty()) {
                break;
            }
            ref = cleared_references_->back();
            cleared_references_->pop_back();
        }
        ASSERT(ref != nullptr);
        ASSERT(reference_processor_ != nullptr);
        reference_processor_->ScheduleForEnqueue(ref);
    }
}

void GC::NotifyNativeAllocations()
{
    native_objects_notified_.fetch_add(NOTIFY_NATIVE_INTERVAL, std::memory_order_relaxed);
    TriggerGCForNative();
}

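// Saturating atomic add of native allocation bytes; the counter is clamped to
// SIZE_MAX if the addition wrapped around. A hypothetical caller (sketch, not from
// this file):
//     gc->RegisterNativeAllocation(buffer_size);  // after a successful native malloc
//     ...
//     gc->RegisterNativeFree(buffer_size);        // when the native buffer is freed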
void GC::RegisterNativeAllocation(size_t bytes)
{
    size_t allocated;
    do {
        allocated = native_bytes_registered_.load(std::memory_order_relaxed);
    } while (!native_bytes_registered_.compare_exchange_weak(allocated, allocated + bytes));
    if (allocated > std::numeric_limits<size_t>::max() - bytes) {
        native_bytes_registered_.store(std::numeric_limits<size_t>::max(), std::memory_order_relaxed);
    }
    TriggerGCForNative();
}

void GC::RegisterNativeFree(size_t bytes)
{
    size_t allocated;
    size_t new_freed_bytes;
    do {
        allocated = native_bytes_registered_.load(std::memory_order_relaxed);
        new_freed_bytes = std::min(allocated, bytes);
    } while (!native_bytes_registered_.compare_exchange_weak(allocated, allocated - new_freed_bytes));
}

size_t GC::GetNativeBytesFromMallinfoAndRegister() const
{
    size_t mallinfo_bytes = panda::os::mem::GetNativeBytesFromMallinfo();
    size_t all_bytes = mallinfo_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
    return all_bytes;
}

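// Releases the mutator lock around WaitForGC so the collector can stop the world,
// then re-acquires it in read mode before returning to managed code.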
void GC::WaitForGCInManaged(const GCTask &task)
{
    MTManagedThread *thread = MTManagedThread::GetCurrent();
    if (thread != nullptr) {
        ASSERT(Locks::mutator_lock->HasLock());
        ASSERT(!thread->IsDaemon() || thread->GetStatus() == ThreadStatus::RUNNING);
        Locks::mutator_lock->Unlock();
        thread->PrintSuspensionStackIfNeeded();
        WaitForGC(task);
        Locks::mutator_lock->ReadLock();
        ASSERT(Locks::mutator_lock->HasLock());
    }
}

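// ConcurrentScope bounds a window in which mutators run concurrently with the GC:
// Start() ends the safepoint (and the recorded pause), the destructor re-enters it.
// A hypothetical call site inside a collector phase (sketch, not from this file;
// ConcurrentMarkImpl is an assumed collector-specific method named for illustration):
//
//     {
//         ConcurrentScope concurrent_scope(this);  // auto_start: ends the safepoint
//         ConcurrentMarkImpl();                    // runs with mutators resumed
//     }  // destructor re-enters the safepoint for the next STW phase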
ConcurrentScope::ConcurrentScope(GC *gc, bool auto_start)
{
    gc_ = gc;
    if (auto_start) {
        Start();
    }
}

ConcurrentScope::~ConcurrentScope()
{
    if (started_ && gc_->IsConcurrencyAllowed()) {
        gc_->GetPandaVm()->GetRendezvous()->SafepointBegin();
        gc_->GetPandaVm()->GetMemStats()->RecordGCPauseStart();
    }
}

NO_THREAD_SAFETY_ANALYSIS void ConcurrentScope::Start()
{
    if (!started_ && gc_->IsConcurrencyAllowed()) {
        gc_->GetPandaVm()->GetRendezvous()->SafepointEnd();
        gc_->GetPandaVm()->GetMemStats()->RecordGCPauseEnd();
        started_ = true;
    }
}

void GC::WaitForGCOnPygoteFork(const GCTask &task)
{
    // Do nothing if there is no pygote space
    auto pygote_space_allocator = object_allocator_->GetPygoteSpaceAllocator();
    if (pygote_space_allocator == nullptr) {
        return;
    }

    // Do nothing if this is not the first pygote fork
    if (pygote_space_allocator->GetState() != PygoteSpaceState::STATE_PYGOTE_INIT) {
        return;
    }

    LOG(INFO, GC) << "== GC WaitForGCOnPygoteFork Start ==";

    // Do we need a lock here?
    // It looks like all other threads have been stopped before the pygote fork.

    // 0. Indicate that we're rebuilding the pygote space
    pygote_space_allocator->SetState(PygoteSpaceState::STATE_PYGOTE_FORKING);

    // 1. Trigger a GC
    WaitForGC(task);

    // 2. Move objects from the other spaces to the pygote space
    MoveObjectsToPygoteSpace();

    // 3. Indicate that we are done
    pygote_space_allocator->SetState(PygoteSpaceState::STATE_PYGOTE_FORKED);

    // 4. Disable the pygote space for allocation
    object_allocator_->DisablePygoteAlloc();

    LOG(INFO, GC) << "== GC WaitForGCOnPygoteFork End ==";
}

bool GC::IsOnPygoteFork()
{
    auto pygote_space_allocator = object_allocator_->GetPygoteSpaceAllocator();
    return pygote_space_allocator != nullptr &&
           pygote_space_allocator->GetState() == PygoteSpaceState::STATE_PYGOTE_FORKING;
}

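// Compacts small movable objects into the pygote space right after the first fork:
// each object is copied, a forwarding address is installed in its mark word, and
// references to the moved objects are updated via CommonUpdateRefsToMovedObjects
// before the old copies are freed.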
void GC::MoveObjectsToPygoteSpace()
{
    trace::ScopedTrace scoped_trace(__FUNCTION__);
    LOG(INFO, GC) << "MoveObjectsToPygoteSpace: start";

    size_t all_size_move = 0;
    size_t moved_objects_num = 0;
    size_t bytes_in_heap_before_move = GetPandaVm()->GetMemStats()->GetFootprintHeap();
    auto pygote_space_allocator = object_allocator_->GetPygoteSpaceAllocator();
    ObjectVisitor move_visitor(
        [this, &pygote_space_allocator, &moved_objects_num, &all_size_move](ObjectHeader *src) -> void {
            size_t size = GetObjectSize(src);
            auto dst = reinterpret_cast<ObjectHeader *>(pygote_space_allocator->Alloc(size));
            ASSERT(dst != nullptr);
            (void)memcpy_s(dst, size, src, size);
            all_size_move += size;
            moved_objects_num++;
            SetForwardAddress(src, dst);
            LOG_DEBUG_GC << "object MOVED from " << std::hex << src << " to " << dst << ", size = " << std::dec << size;
        });

    // Move all small movable objects to the pygote space
    object_allocator_->IterateRegularSizeObjects(move_visitor);

    LOG(INFO, GC) << "MoveObjectsToPygoteSpace: move_num = " << moved_objects_num << ", move_size = " << all_size_move;

    if (all_size_move > 0) {
        GetStats()->AddMemoryValue(all_size_move, MemoryTypeStats::MOVED_BYTES);
        GetStats()->AddObjectsValue(moved_objects_num, ObjectTypeStats::MOVED_OBJECTS);
    }
    if (bytes_in_heap_before_move > 0) {
        GetStats()->AddCopiedRatioValue(static_cast<double>(all_size_move) / bytes_in_heap_before_move);
    }

    // Update references, because we moved objects from the object allocator to the pygote space
    CommonUpdateRefsToMovedObjects([this](const UpdateRefInObject &update_refs_in_object) {
        object_allocator_->IterateNonRegularSizeObjects(update_refs_in_object);
    });

    // Free the moved objects in the old space
    object_allocator_->FreeObjectsMovedToPygoteSpace();

    LOG(INFO, GC) << "MoveObjectsToPygoteSpace: finish";
}

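// Installs a forwarding address: the source object's mark word is atomically replaced
// with one encoding the destination address, retrying on contention. For dynamic
// HClass objects the 'manage-object' field of the copy is additionally patched to
// point at the copy itself (see the note in the body).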
void GC::SetForwardAddress(ObjectHeader *src, ObjectHeader *dst)
{
    auto base_cls = src->ClassAddr<BaseClass>();
    if (base_cls->IsDynamicClass()) {
        auto cls = static_cast<HClass *>(base_cls);
        // Note: During the moving phase, 'src' => 'dst'. If 'src' is a DynClass, then,
        //       since 'dst' is not in GC-status, the 'manage-object' inside 'dst' won't be
        //       updated to 'dst'. To fix it, we update 'manage-object' here rather than in the update phase.
        if (cls->IsHClass()) {
            size_t offset = ObjectHeader::ObjectHeaderSize() + HClass::OffsetOfManageObject();
            dst->SetFieldObject<false, false, true>(GetPandaVm()->GetAssociatedThread(), offset, dst);
        }
    }

    // Set the forwarding address in 'src'
    bool update_res = false;
    do {
        MarkWord mark_word = src->AtomicGetMark();
        MarkWord fwd_mark_word =
            mark_word.DecodeFromForwardingAddress(static_cast<MarkWord::markWordSize>(ToUintPtr(dst)));
        update_res = src->AtomicSetMark(mark_word, fwd_mark_word);
    } while (!update_res);
}

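// Walks every frame of the thread's stack and rewrites virtual registers that still
// reference moved objects (mark word state STATE_GC) to their forwarding addresses.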
void GC::UpdateRefsInVRegs(ManagedThread *thread)
{
    LOG_DEBUG_GC << "Update frames for thread: " << thread->GetId();
    for (StackWalker pframe(thread); pframe.HasFrame(); pframe.NextFrame()) {
        LOG_DEBUG_GC << "Frame for method " << pframe.GetMethod()->GetFullName();
        pframe.IterateObjectsWithInfo([&pframe, this](auto &reg_info, auto &vreg) {
            ObjectHeader *object_header = vreg.GetReference();
            if (object_header == nullptr) {
                return true;
            }

            MarkWord mark_word = object_header->AtomicGetMark();
            if (mark_word.GetState() != MarkWord::ObjectState::STATE_GC) {
                return true;
            }

            MarkWord::markWordSize addr = mark_word.GetForwardingAddress();
            LOG_DEBUG_GC << "Update vreg, vreg old val = " << std::hex << object_header << ", new val = 0x" << addr;
            LOG_IF(reg_info.IsAccumulator(), DEBUG, GC) << "^ acc reg";
            if (!pframe.IsCFrame() && reg_info.IsAccumulator()) {
                LOG_DEBUG_GC << "^ acc updated";
                vreg.SetReference(reinterpret_cast<ObjectHeader *>(addr));
            } else {
                pframe.SetVRegValue(reg_info, reinterpret_cast<ObjectHeader *>(addr));
            }
            return true;
        });
    }
}

void GC::AddToStack(PandaStackTL<ObjectHeader *> *objects_stack, ObjectHeader *object)
{
    ASSERT(object != nullptr);
    ASSERT(IsMarked(object));
    LOG_DEBUG_GC << "Add object to stack: " << GetDebugInfoAboutObject(object);
    objects_stack->push(object);
}

ObjectHeader *GC::PopObjectFromStack(PandaStackTL<ObjectHeader *> *objects_stack)
{
    LOG_DEBUG_GC << "Stack size is: " << objects_stack->size() << ", popping an object";
    auto *object = objects_stack->top();
    ASSERT(object != nullptr);
    objects_stack->pop();
    return object;
}

bool GC::IsGenerational() const
{
    return IsGenerationalGCType(gc_type_);
}

uint64_t GC::GetLastGCReclaimedBytes()
{
    return last_gc_reclaimed_bytes.load();
}

template GC *CreateGC<PandaAssemblyLanguageConfig>(GCType gc_type, ObjectAllocatorBase *object_allocator,
                                                   const GCSettings &settings);

}  // namespace panda::mem