/*
 * Copyright (c) 2022-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/checkpoint/thread_state_transition.h"
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
#include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
#endif

#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/partial_gc.h"
#include "ecmascript/mem/parallel_evacuator.h"
#include "ecmascript/mem/parallel_marker.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/mem/shared_heap/shared_gc_evacuator.h"
#include "ecmascript/mem/shared_heap/shared_gc_marker-inl.h"
#include "ecmascript/mem/shared_heap/shared_gc.h"
#include "ecmascript/mem/shared_heap/shared_full_gc.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_marker.h"
#include "ecmascript/mem/verification.h"
#include "ecmascript/runtime_call_id.h"
#include "ecmascript/jit/jit.h"
#if !WIN_OR_MAC_OR_IOS_PLATFORM
#include "ecmascript/dfx/hprof/heap_profiler_interface.h"
#include "ecmascript/dfx/hprof/heap_profiler.h"
#endif
#include "ecmascript/dfx/tracing/tracing.h"
#if defined(ENABLE_DUMP_IN_FAULTLOG)
#include "syspara/parameter.h"
#endif

#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
#include "parameters.h"
#include "hisysevent.h"
static constexpr uint32_t DEC_TO_INT = 100;
static size_t g_threshold = OHOS::system::GetUintParameter<size_t>("persist.dfx.leak.threshold", 85);
static uint64_t g_lastHeapDumpTime = 0;
static bool g_debugLeak = OHOS::system::GetBoolParameter("debug.dfx.tags.enableleak", false);
static constexpr uint64_t HEAP_DUMP_REPORT_INTERVAL = 24 * 3600 * 1000;
static bool g_betaVersion = OHOS::system::GetParameter("const.logsystem.versiontype", "unknown") == "beta";
static bool g_developMode = (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "enable") ||
                            (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "true");
static bool g_futVersion = OHOS::system::GetIntParameter("const.product.dfx.fans.stage", 0) == 1;
#endif
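// These system parameters appear to gate the leak-detection heap-dump reporting further down in
// this file: g_threshold reads as a heap-usage percentage and HEAP_DUMP_REPORT_INTERVAL (24h in ms)
// as a throttle on how often a dump may be reported. This summary is inferred from the parameter
// names; the authoritative semantics live in the hiview/DFX configuration.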

namespace panda::ecmascript {
SharedHeap *SharedHeap::instance_ = nullptr;

void SharedHeap::CreateNewInstance()
{
    ASSERT(instance_ == nullptr);
    size_t heapShared = 0;
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    heapShared = OHOS::system::GetUintParameter<size_t>("persist.ark.heap.sharedsize", 0) * 1_MB;
#endif
    EcmaParamConfiguration config(EcmaParamConfiguration::HeapType::SHARED_HEAP,
        MemMapAllocator::GetInstance()->GetCapacity(), heapShared);
    instance_ = new SharedHeap(config);
}

SharedHeap *SharedHeap::GetInstance()
{
    ASSERT(instance_ != nullptr);
    return instance_;
}

void SharedHeap::DestroyInstance()
{
    ASSERT(instance_ != nullptr);
    instance_->Destroy();
    delete instance_;
    instance_ = nullptr;
}

void SharedHeap::ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread)
{
    ASSERT(!dThread_->IsRunning());
    SuspendAllScope scope(thread);
    SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
    CheckInHeapProfiler();
    GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
    if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
        // pre gc heap verify
        LOG_ECMA(DEBUG) << "pre gc shared heap verify";
        sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
        SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
    }
    switch (gcType) { // LCOV_EXCL_BR_LINE
        case TriggerGCType::SHARED_PARTIAL_GC:
        case TriggerGCType::SHARED_GC: {
            sharedGC_->RunPhases();
            break;
        }
        case TriggerGCType::SHARED_FULL_GC: {
            sharedFullGC_->RunPhases();
            break;
        }
        default: // LCOV_EXCL_BR_LINE
            LOG_ECMA(FATAL) << "this branch is unreachable";
            UNREACHABLE();
            break;
    }
    if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
        // post gc heap verify
        LOG_ECMA(DEBUG) << "after gc shared heap verify";
        SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
    }
    CollectGarbageFinish(false, gcType);
    InvokeSharedNativePointerCallbacks();
}

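// CheckAndTriggerSharedGC / CheckHugeAndTriggerSharedGC below decide whether an allocation should
// kick off a shared GC: while a shared concurrent mark is running (or has just finished), a GC is
// only forced once the heap exceeds its hard maximum; otherwise a GC triggers when the old space
// exceeds its limit (or, for huge allocations, the huge space would exceed its committed budget),
// or when the live object size passes globalSpaceAllocLimit_ plus the current overshoot.
// NeedStopCollection can still veto the trigger during startup- or sensitive-state phases.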
bool SharedHeap::CheckAndTriggerSharedGC(JSThread *thread)
{
    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
        return false;
    }
    size_t sharedGCThreshold = globalSpaceAllocLimit_ + spaceOvershoot_.load(std::memory_order_relaxed);
    if ((OldSpaceExceedLimit() || GetHeapObjectSize() > sharedGCThreshold) &&
        !NeedStopCollection()) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        return true;
    }
    return false;
}

bool SharedHeap::CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size)
{
    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
        return false;
    }
    size_t sharedGCThreshold = globalSpaceAllocLimit_ + spaceOvershoot_.load(std::memory_order_relaxed);
    if ((sHugeObjectSpace_->CommittedSizeExceed(size) || GetHeapObjectSize() > sharedGCThreshold) &&
        !NeedStopCollection()) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        return true;
    }
    return false;
}

void SharedHeap::CollectGarbageNearOOM(JSThread *thread)
{
    auto fragmentationSize = sOldSpace_->GetCommittedSize() - sOldSpace_->GetHeapObjectSize();
    if (fragmentationSize >= fragmentationLimitForSharedFullGC_) {
        CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::ALLOCATION_FAILED>(thread);
    } else {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
    }
}
// Shared gc trigger
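// AdjustGlobalSpaceAllocLimit recomputes the shared-GC trigger after each collection:
//   limit = min(max(liveSize * growingFactor_, defaultLimit), committedSize + growingStep_, maxHeapSize)
// and the concurrent-mark trigger is the larger of limit * TRIGGER_..._RATE and liveSize * 1.1.
// Illustrative arithmetic (assumed values, not taken from the source): with liveSize = 40MB,
// growingFactor_ = 2, defaultLimit = 64MB, committed = 70MB, growingStep_ = 16MB and a 300MB max,
// the new limit is min(max(80, 64), 86, 300) = 80MB; with a 0.75 rate the mark limit is
// max(60, 44) = 60MB.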
void SharedHeap::AdjustGlobalSpaceAllocLimit()
{
    globalSpaceAllocLimit_ = std::max(GetHeapObjectSize() * growingFactor_,
                                      config_.GetDefaultGlobalAllocLimit());
    globalSpaceAllocLimit_ = std::min(std::min(globalSpaceAllocLimit_, GetCommittedSize() + growingStep_),
                                      config_.GetMaxHeapSize());
    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);
    constexpr double OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT = 1.1;
    size_t markLimitByIncrement = static_cast<size_t>(GetHeapObjectSize() * OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT);
    globalSpaceConcurrentMarkLimit_ = std::max(globalSpaceConcurrentMarkLimit_, markLimitByIncrement);
    LOG_ECMA_IF(optionalLogEnabled_, INFO) << "Shared gc adjust global space alloc limit to: "
        << globalSpaceAllocLimit_;
}

bool SharedHeap::ObjectExceedMaxHeapSize() const
{
    return OldSpaceExceedLimit() || sHugeObjectSpace_->CommittedSizeExceed();
}

void SharedHeap::StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason)
{
    ASSERT(JSThread::GetCurrent() == dThread_);
    sConcurrentMarker_->Mark(gcType, gcReason);
}

bool SharedHeap::CheckCanTriggerConcurrentMarking(JSThread *thread)
{
    return thread->IsReadyToSharedConcurrentMark() &&
           sConcurrentMarker_ != nullptr && sConcurrentMarker_->IsEnabled();
}

void SharedHeap::Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
    const JSRuntimeOptions &option, DaemonThread *dThread)
{
    sGCStats_ = new SharedGCStats(this, option.EnableGCTracer());
    nativeAreaAllocator_ = nativeAreaAllocator;
    heapRegionAllocator_ = heapRegionAllocator;
    shouldVerifyHeap_ = option.EnableHeapVerify();
    parallelGC_ = option.EnableParallelGC();
    optionalLogEnabled_ = option.EnableOptionalLog();
    size_t maxHeapSize = config_.GetMaxHeapSize();
    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    sNonMovableSpace_ = new SharedNonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);

    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    size_t oldSpaceCapacity =
        AlignUp((maxHeapSize - nonmovableSpaceCapacity - readOnlySpaceCapacity) / 2, DEFAULT_REGION_SIZE); // 2: half
    globalSpaceAllocLimit_ = config_.GetDefaultGlobalAllocLimit();
    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);

    sOldSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    sCompressSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    sReadOnlySpace_ = new SharedReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    sHugeObjectSpace_ = new SharedHugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    sharedMemController_ = new SharedMemController(this);
    sAppSpawnSpace_ = new SharedAppSpawnSpace(this, oldSpaceCapacity);
    growingFactor_ = config_.GetSharedHeapLimitGrowingFactor();
    growingStep_ = config_.GetSharedHeapLimitGrowingStep();
    incNativeSizeTriggerSharedCM_ = config_.GetStepNativeSizeInc();
    incNativeSizeTriggerSharedGC_ = config_.GetMaxNativeSizeInc();
    fragmentationLimitForSharedFullGC_ = config_.GetFragmentationLimitForSharedFullGC();
    dThread_ = dThread;
}

void SharedHeap::Destroy()
{
    if (sWorkManager_ != nullptr) {
        delete sWorkManager_;
        sWorkManager_ = nullptr;
    }
    if (sOldSpace_ != nullptr) {
        sOldSpace_->Reset();
        delete sOldSpace_;
        sOldSpace_ = nullptr;
    }
    if (sCompressSpace_ != nullptr) {
        sCompressSpace_->Reset();
        delete sCompressSpace_;
        sCompressSpace_ = nullptr;
    }
    if (sNonMovableSpace_ != nullptr) {
        sNonMovableSpace_->Reset();
        delete sNonMovableSpace_;
        sNonMovableSpace_ = nullptr;
    }
    if (sHugeObjectSpace_ != nullptr) {
        sHugeObjectSpace_->Destroy();
        delete sHugeObjectSpace_;
        sHugeObjectSpace_ = nullptr;
    }
    if (sReadOnlySpace_ != nullptr) {
        sReadOnlySpace_->ClearReadOnly();
        sReadOnlySpace_->Destroy();
        delete sReadOnlySpace_;
        sReadOnlySpace_ = nullptr;
    }
    if (sAppSpawnSpace_ != nullptr) {
        sAppSpawnSpace_->Reset();
        delete sAppSpawnSpace_;
        sAppSpawnSpace_ = nullptr;
    }
    if (sharedGC_ != nullptr) {
        delete sharedGC_;
        sharedGC_ = nullptr;
    }
    if (sharedFullGC_ != nullptr) {
        delete sharedFullGC_;
        sharedFullGC_ = nullptr;
    }
    if (sEvacuator_ != nullptr) {
        delete sEvacuator_;
        sEvacuator_ = nullptr;
    }
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (sSweeper_ != nullptr) {
        delete sSweeper_;
        sSweeper_ = nullptr;
    }
    if (sConcurrentMarker_ != nullptr) {
        delete sConcurrentMarker_;
        sConcurrentMarker_ = nullptr;
    }
    if (sharedGCMarker_ != nullptr) {
        delete sharedGCMarker_;
        sharedGCMarker_ = nullptr;
    }
    if (sharedGCMovableMarker_ != nullptr) {
        delete sharedGCMovableMarker_;
        sharedGCMovableMarker_ = nullptr;
    }
    if (sharedMemController_ != nullptr) {
        delete sharedMemController_;
        sharedMemController_ = nullptr;
    }

    dThread_ = nullptr;
}

void SharedHeap::PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option)
{
    globalEnvConstants_ = globalEnvConstants;
    uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = totalThreadNum - 1;
    sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
    sharedGCMarker_ = new SharedGCMarker(sWorkManager_);
    sharedGCMovableMarker_ = new SharedGCMovableMarker(sWorkManager_, this);
    sConcurrentMarker_ = new SharedConcurrentMarker(option.EnableSharedConcurrentMark() ?
        EnableConcurrentMarkType::ENABLE : EnableConcurrentMarkType::CONFIG_DISABLE);
    sSweeper_ = new SharedConcurrentSweeper(this, option.EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    sharedGC_ = new SharedGC(this);
    sEvacuator_ = new SharedGCEvacuator(this);
    sharedFullGC_ = new SharedFullGC(this);
}

void SharedHeap::PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase)
{
    IncreaseTaskCount();
    Taskpool::GetCurrentTaskpool()->PostTask(std::make_unique<ParallelMarkTask>(dThread_->GetThreadId(),
                                                                                this, sharedTaskPhase));
}

bool SharedHeap::ParallelMarkTask::Run(uint32_t threadIndex)
{
    // Synchronizes-with WorkManager::Initialize: spin until the work manager's initialization
    // is visible to this marker thread.
    while (!sHeap_->GetWorkManager()->HasInitialized());
    switch (taskPhase_) {
        case SharedParallelMarkPhase::SHARED_MARK_TASK:
            sHeap_->GetSharedGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case SharedParallelMarkPhase::SHARED_COMPRESS_TASK:
            sHeap_->GetSharedGCMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        default: // LCOV_EXCL_BR_LINE
            break;
    }
    sHeap_->ReduceTaskCount();
    return true;
}

bool SharedHeap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedHeap::AsyncClearTask::Run");
    sHeap_->ReclaimRegions(gcType_);
    return true;
}

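// GC-completion handshake: the daemon thread sets gcFinished_ and signals waitGCFinishedCV_
// under waitGCFinishedMutex_; mutator threads block in WaitGCFinished (after entering a
// suspended state, so they do not stall a concurrent suspend-all) until the flag flips.
// The predicate is re-checked in a loop, so spurious wakeups are harmless.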
void SharedHeap::NotifyGCCompleted()
{
    ASSERT(JSThread::GetCurrent() == dThread_);
    LockHolder lock(waitGCFinishedMutex_);
    gcFinished_ = true;
    waitGCFinishedCV_.SignalAll();
}

void SharedHeap::WaitGCFinished(JSThread *thread)
{
    ASSERT(thread->GetThreadId() != dThread_->GetThreadId());
    ASSERT(thread->IsInRunningState());
    ThreadSuspensionScope scope(thread);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::WaitGCFinished");
    LockHolder lock(waitGCFinishedMutex_);
    while (!gcFinished_) {
        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
    }
}

void SharedHeap::WaitGCFinishedAfterAllJSThreadEliminated()
{
    ASSERT(Runtime::GetInstance()->vmCount_ == 0);
    LockHolder lock(waitGCFinishedMutex_);
    while (!gcFinished_) {
        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
    }
}

void SharedHeap::DaemonCollectGarbage([[maybe_unused]] TriggerGCType gcType, [[maybe_unused]] GCReason gcReason)
{
    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
    ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_PARTIAL_GC ||
        gcType == TriggerGCType::SHARED_FULL_GC);
    ASSERT(JSThread::GetCurrent() == dThread_);
    {
        ThreadManagedScope runningScope(dThread_);
        SuspendAllScope scope(dThread_);
        SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
        CheckInHeapProfiler();
        gcType_ = gcType;
        GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
        if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
            // pre gc heap verify
            LOG_ECMA(DEBUG) << "pre gc shared heap verify";
            sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
            SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
        }
        switch (gcType) {
            case TriggerGCType::SHARED_PARTIAL_GC:
            case TriggerGCType::SHARED_GC: {
                sharedGC_->RunPhases();
                break;
            }
            case TriggerGCType::SHARED_FULL_GC: {
                sharedFullGC_->RunPhases();
                break;
            }
            default: // LCOV_EXCL_BR_LINE
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }

        if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
            // post gc heap verify
            LOG_ECMA(DEBUG) << "after gc shared heap verify";
            SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
        }
        CollectGarbageFinish(true, gcType);
    }
    InvokeSharedNativePointerCallbacks();
    // Don't process weak-node nativeFinalizeCallbacks here; they will be invoked after the local GC.
}

void SharedHeap::WaitAllTasksFinished(JSThread *thread)
{
    WaitGCFinished(thread);
    sSweeper_->WaitAllTaskFinished();
    WaitClearTaskFinished();
}

void SharedHeap::WaitAllTasksFinishedAfterAllJSThreadEliminated()
{
    WaitGCFinishedAfterAllJSThreadEliminated();
    sSweeper_->WaitAllTaskFinished();
    WaitClearTaskFinished();
}

bool SharedHeap::CheckOngoingConcurrentMarking()
{
    if (sConcurrentMarker_->IsEnabled() && !dThread_->IsReadyToConcurrentMark() &&
        sConcurrentMarker_->IsTriggeredConcurrentMark()) {
        // This is only called in SharedGC to decide whether to remark, so there is no need to
        // wait for marking to finish here.
        return true;
    }
    return false;
}

void SharedHeap::CheckInHeapProfiler()
{
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    Runtime::GetInstance()->GCIterateThreadList([this](JSThread *thread) {
        if (thread->GetEcmaVM()->GetHeapProfile() != nullptr) {
            inHeapProfiler_ = true;
            return;
        }
    });
#else
    inHeapProfiler_ = false;
#endif
}

void SharedHeap::Prepare(bool inTriggerGCThread)
{
    WaitRunningTaskFinished();
    if (inTriggerGCThread) {
        sSweeper_->EnsureAllTaskFinished();
    } else {
        sSweeper_->WaitAllTaskFinished();
    }
    WaitClearTaskFinished();
}

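// SharedGCScope brackets a stop-the-world shared collection: on entry it pauses every VM's PGO
// profiler (and, with CPU profiling support, flags each thread as being in GC); on exit it runs
// the per-heap GC listeners and resumes the profilers. Per the comments at its use sites it must
// be constructed after SuspendAllScope, presumably so the thread list it iterates stays stable.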
SharedHeap::SharedGCScope::SharedGCScope()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
        if (pgoProfiler != nullptr) {
            pgoProfiler->SuspendByGC();
        }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
        thread->SetGcState(true);
#endif
    });
}

SharedHeap::SharedGCScope::~SharedGCScope()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        ASSERT(!thread->IsInRunningState());
        const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
        std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
        if (pgoProfiler != nullptr) {
            pgoProfiler->ResumeByGC();
        }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
        thread->SetGcState(false);
#endif
    });
}

void SharedHeap::PrepareRecordRegionsForReclaim()
{
    sOldSpace_->SetRecordRegion();
    sNonMovableSpace_->SetRecordRegion();
    sHugeObjectSpace_->SetRecordRegion();
}

void SharedHeap::Reclaim(TriggerGCType gcType)
{
    PrepareRecordRegionsForReclaim();
    sHugeObjectSpace_->ReclaimHugeRegion();

    if (parallelGC_) {
        clearTaskFinished_ = false;
        Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(dThread_->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}

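// ReclaimRegions runs either inline (non-parallel GC) or on a taskpool worker via AsyncClearTask;
// in both cases it finishes by setting clearTaskFinished_ and signalling waitClearTaskFinishedCV_,
// which the WaitClearTaskFinished calls elsewhere in this file block on.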
void SharedHeap::ReclaimRegions(TriggerGCType gcType)
{
    if (gcType == TriggerGCType::SHARED_FULL_GC) {
        sCompressSpace_->Reset();
    }
    sOldSpace_->ReclaimCSets();
    sSweeper_->WaitAllTaskFinished();
    EnumerateOldSpaceRegionsWithRecord([] (Region *region) {
        region->ClearMarkGCBitset();
        region->ClearCrossRegionRSet();
    });
    if (!clearTaskFinished_) {
        LockHolder holder(waitClearTaskFinishedMutex_);
        clearTaskFinished_ = true;
        waitClearTaskFinishedCV_.SignalAll();
    }
}

void SharedHeap::DisableParallelGC(JSThread *thread)
{
    WaitAllTasksFinished(thread);
    dThread_->WaitFinished();
    parallelGC_ = false;
    maxMarkTaskCount_ = 0;
    sSweeper_->ConfigConcurrentSweep(false);
    sConcurrentMarker_->ConfigConcurrentMark(false);
}

void SharedHeap::EnableParallelGC(JSRuntimeOptions &option)
{
    uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = totalThreadNum - 1;
    parallelGC_ = option.EnableParallelGC();
    if (auto workThreadNum = sWorkManager_->GetTotalThreadNum();
        workThreadNum != totalThreadNum + 1) {
        LOG_ECMA_MEM(ERROR) << "ThreadNum mismatch, totalThreadNum(sWorkerManager): " << workThreadNum << ", "
                            << "totalThreadNum(taskpool): " << (totalThreadNum + 1);
        delete sWorkManager_;
        sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
        UpdateWorkManager(sWorkManager_);
    }
    sConcurrentMarker_->ConfigConcurrentMark(option.EnableSharedConcurrentMark());
    sSweeper_->ConfigConcurrentSweep(option.EnableConcurrentSweep());
}

void SharedHeap::UpdateWorkManager(SharedGCWorkManager *sWorkManager)
{
    sConcurrentMarker_->ResetWorkManager(sWorkManager);
    sharedGCMarker_->ResetWorkManager(sWorkManager);
    sharedGCMovableMarker_->ResetWorkManager(sWorkManager);
    sharedGC_->ResetWorkManager(sWorkManager);
    sharedFullGC_->ResetWorkManager(sWorkManager);
}

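// TryTriggerLocalConcurrentMarking below uses a cheap unsynchronized read followed by a relaxed
// atomic exchange: the plain read skips the exchange on the hot path, and the exchange guarantees
// that exactly one caller observes the false -> true transition and requests a full mark on every
// thread. Relaxed ordering appears sufficient here because the flag carries no data dependency;
// this reading of the memory-ordering choice is editorial, not from the source.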
void SharedHeap::TryTriggerLocalConcurrentMarking()
{
    if (localFullMarkTriggered_) {
        return;
    }
    if (reinterpret_cast<std::atomic<bool>*>(&localFullMarkTriggered_)->exchange(true, std::memory_order_relaxed)
            != false) { // LCOV_EXCL_BR_LINE
        return;
    }
    ASSERT(localFullMarkTriggered_ == true);
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        thread->SetFullMarkRequest();
    });
}

size_t SharedHeap::VerifyHeapObjects(VerifyKind verifyKind) const
{
    size_t failCount = 0;
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sOldSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sNonMovableSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sHugeObjectSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sAppSpawnSpace_->IterateOverMarkedObjects(verifier);
    }
    return failCount;
}

void SharedHeap::CollectGarbageFinish(bool inDaemon, TriggerGCType gcType)
{
    if (inDaemon) {
        ASSERT(JSThread::GetCurrent() == dThread_);
#ifndef NDEBUG
        ASSERT(dThread_->HasLaunchedSuspendAll());
#endif
        dThread_->FinishRunningTask();
        NotifyGCCompleted();
        // Updates to forceGC_ happen in DaemonSuspendAll and are protected by Runtime::mutatorLock_,
        // so no lock is needed here.
        smartGCStats_.forceGC_ = false;
    }
    localFullMarkTriggered_ = false;
    // Record the alive object size and other stats after shared gc
    UpdateHeapStatsAfterGC(gcType);
    // Adjust the shared gc trigger threshold
    AdjustGlobalSpaceAllocLimit();
    spaceOvershoot_.store(0, std::memory_order_relaxed);
    GetEcmaGCStats()->RecordStatisticAfterGC();
    GetEcmaGCStats()->PrintGCStatistic();
    ProcessAllGCListeners();
    if (shouldThrowOOMError_ || shouldForceThrowOOMError_) {
        // A local Heap that only sets `shouldThrowOOMError_` can still run a FullGC later instead
        // of failing immediately, because the local Heap has a partial compress GC as a fallback;
        // the SharedHeap has no such fallback, so OOM here is fatal.
        DumpHeapSnapshotBeforeOOM(false, Runtime::GetInstance()->GetMainThread(), SharedHeapOOMSource::SHARED_GC);
        LOG_GC(FATAL) << "SharedHeap OOM";
        UNREACHABLE();
    }
}

bool SharedHeap::IsReadyToConcurrentMark() const
{
    return dThread_->IsReadyToConcurrentMark();
}

bool SharedHeap::ObjectExceedJustFinishStartupThresholdForGC() const
{
    size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_SHARED_THRESHOLD_RATIO;
    return ObjectExceedMaxHeapSize() || GetHeapObjectSize() > heapObjectSizeThresholdForGC;
}

bool SharedHeap::ObjectExceedJustFinishStartupThresholdForCM() const
{
    size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_SHARED_THRESHOLD_RATIO;
    size_t heapObjectSizeThresholdForCM = heapObjectSizeThresholdForGC
                                        * JUST_FINISH_STARTUP_SHARED_CONCURRENT_MARK_RATIO;
    return ObjectExceedMaxHeapSize() || GetHeapObjectSize() > heapObjectSizeThresholdForCM;
}

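// Startup-sensitive GC suppression: while the app is starting (ON_STARTUP), collections are
// skipped until the heap exceeds its hard maximum; just after startup (JUST_FINISH_STARTUP),
// they are skipped until the smaller just-finish-startup thresholds above are crossed.
// NeedStopCollection applies the same pattern to the process-sensitive state.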
bool SharedHeap::CheckIfNeedStopCollectionByStartup()
{
    StartupStatus startupStatus = GetStartupStatus();
    switch (startupStatus) {
        case StartupStatus::ON_STARTUP:
            if (!ObjectExceedMaxHeapSize()) {
                return true;
            }
            break;
        case StartupStatus::JUST_FINISH_STARTUP:
            if (!ObjectExceedJustFinishStartupThresholdForGC()) {
                return true;
            }
            break;
        default:
            break;
    }
    return false;
}

bool SharedHeap::NeedStopCollection()
{
    if (CheckIfNeedStopCollectionByStartup()) {
        return true;
    }

    if (!InSensitiveStatus()) {
        return false;
    }

    if (!ObjectExceedMaxHeapSize()) {
        return true;
    }
    return false;
}

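// TryAdjustSpaceOvershootByConfigSize reserves headroom so the next shared GC does not fire
// immediately: overshoot = stepOvershootSize - (limit - liveSize), clamped at 0. Illustrative
// arithmetic (assumed values, not from the source): with an 8MB step overshoot, a 64MB limit
// and 60MB live, the remaining budget is 4MB, so a 4MB overshoot is stored; with 10MB still
// free, the result would be negative and is clamped to 0.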
void SharedHeap::TryAdjustSpaceOvershootByConfigSize()
{
    if (InGC() || !IsReadyToConcurrentMark()) {
        // no need to reserve space if SharedGC or SharedConcurrentMark is already triggered
        return;
    }
    // Set the overshoot size so that the gc threshold is 8MB (OldSpaceStepOvershootSize) larger
    // than the current heap object size.
    int64_t heapObjectSize = static_cast<int64_t>(GetHeapObjectSize());
    int64_t remainSizeBeforeGC = static_cast<int64_t>(globalSpaceAllocLimit_) - heapObjectSize;
    int64_t overshootSize = static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - remainSizeBeforeGC;
    // The overshoot size should not be negative.
    spaceOvershoot_.store(std::max(overshootSize, (int64_t)0), std::memory_order_relaxed);
}

void SharedHeap::CompactHeapBeforeFork(JSThread *thread)
{
    ThreadManagedScope managedScope(thread);
    WaitGCFinished(thread);
    sharedFullGC_->SetForAppSpawn(true);
    CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
    sharedFullGC_->SetForAppSpawn(false);
}

void SharedHeap::MoveOldSpaceToAppspawn()
{
    auto committedSize = sOldSpace_->GetCommittedSize();
    sAppSpawnSpace_->SetInitialCapacity(committedSize);
    sAppSpawnSpace_->SetMaximumCapacity(committedSize);
    sOldSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity() - committedSize);
    sOldSpace_->SetMaximumCapacity(sOldSpace_->GetMaximumCapacity() - committedSize);
    sCompressSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity());
    sCompressSpace_->SetMaximumCapacity(sOldSpace_->GetMaximumCapacity());
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    sAppSpawnSpace_->SwapAllocationCounter(sOldSpace_);
#endif
    sOldSpace_->EnumerateRegions([&](Region *region) {
        region->SetRegionSpaceFlag(RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE);
        // Regions in the SharedHeap do not need a PageTag threadId.
        PageTag(region, region->GetCapacity(), PageTagType::HEAP, region->GetSpaceTypeName());
        sAppSpawnSpace_->AddRegion(region);
        sAppSpawnSpace_->IncreaseLiveObjectSize(region->AliveObject());
    });
    sOldSpace_->GetRegionList().Clear();
    sOldSpace_->Reset();
}

void SharedHeap::ReclaimForAppSpawn()
{
    sSweeper_->WaitAllTaskFinished();
    sHugeObjectSpace_->ReclaimHugeRegion();
    sCompressSpace_->Reset();
    MoveOldSpaceToAppspawn();
    auto cb = [] (Region *region) {
        region->ClearMarkGCBitset();
    };
    sNonMovableSpace_->EnumerateRegions(cb);
    sHugeObjectSpace_->EnumerateRegions(cb);
}

void SharedHeap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] bool isFullGC, [[maybe_unused]] JSThread *thread,
                                           [[maybe_unused]] SharedHeapOOMSource source)
{
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(ENABLE_DUMP_IN_FAULTLOG)
    AppFreezeFilterCallback appfreezeCallback = Runtime::GetInstance()->GetAppFreezeFilterCallback();
    if (appfreezeCallback != nullptr && !appfreezeCallback(getprocpid())) {
        LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, no dump quota.";
        return;
    }
#endif
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    EcmaVM *vm = thread->GetEcmaVM();
    HeapProfilerInterface *heapProfile = nullptr;
    if (source == SharedHeapOOMSource::SHARED_GC) {
#ifndef NDEBUG
        // If OOM happens during SharedGC, use the main JSThread and create a new HeapProfile
        // instance to dump once the GC has completed.
        ASSERT(thread == Runtime::GetInstance()->GetMainThread() && JSThread::GetCurrent()->HasLaunchedSuspendAll());
#endif
        heapProfile = HeapProfilerInterface::CreateNewInstance(vm);
    } else {
        if (vm->GetHeapProfile() != nullptr) {
            LOG_ECMA(ERROR) << "SharedHeap::DumpHeapSnapshotBeforeOOM, HeapProfile already exists";
            return;
        }
        heapProfile = HeapProfilerInterface::GetInstance(vm);
    }
    // Filter appfreeze when dumping.
    LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, isFullGC = " << isFullGC;
    base::BlockHookScope blockScope;
    vm->GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetEcmaParamConfiguration().GetMaxHeapSize(),
                                                    GetHeapObjectSize());
    DumpSnapShotOption dumpOption;
    dumpOption.dumpFormat = DumpFormat::BINARY;
    dumpOption.isVmMode = true;
    dumpOption.isPrivate = false;
    dumpOption.captureNumericValue = false;
    dumpOption.isFullGC = isFullGC;
    dumpOption.isSimplify = true;
    dumpOption.isSync = true;
    dumpOption.isBeforeFill = false;
    dumpOption.isDumpOOM = true;
    if (source == SharedHeapOOMSource::SHARED_GC) {
        heapProfile->DumpHeapSnapshotForOOM(dumpOption, true);
        HeapProfilerInterface::DestroyInstance(heapProfile);
    } else {
        heapProfile->DumpHeapSnapshotForOOM(dumpOption);
        HeapProfilerInterface::Destroy(vm);
    }
#endif // ENABLE_DUMP_IN_FAULTLOG
#endif // ECMASCRIPT_SUPPORT_SNAPSHOT
}

Heap::Heap(EcmaVM *ecmaVm)
    : BaseHeap(ecmaVm->GetEcmaParamConfiguration()),
      ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()), sHeap_(SharedHeap::GetInstance()) {}

void Heap::Initialize()
{
    enablePageTagThreadId_ = ecmaVm_->GetJSOptions().EnablePageTagThreadId();
    memController_ = new MemController(this);
    nativeAreaAllocator_ = ecmaVm_->GetNativeAreaAllocator();
    heapRegionAllocator_ = ecmaVm_->GetHeapRegionAllocator();
    size_t maxHeapSize = config_.GetMaxHeapSize();
    size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
    size_t maxSemiSpaceCapacity = config_.GetMaxSemiSpaceSize();
    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    activeSemiSpace_->Restart();
    activeSemiSpace_->SetWaterLine();

    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
    sOldTlab_ = new ThreadLocalAllocationBuffer(this);
    thread_->ReSetSOldSpaceAllocationAddress(sOldTlab_->GetTopAddress(), sOldTlab_->GetEndAddress());
    sNonMovableTlab_ = new ThreadLocalAllocationBuffer(this);
    thread_->ReSetSNonMovableSpaceAllocationAddress(sNonMovableTlab_->GetTopAddress(),
                                                    sNonMovableTlab_->GetEndAddress());
    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);

    // whether the heap should be verified during gc
    shouldVerifyHeap_ = ecmaVm_->GetJSOptions().EnableHeapVerify();
    // the from-space is not set up here

    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) { // LCOV_EXCL_BR_LINE
        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
    nonMovableSpace_->Initialize();
    size_t snapshotSpaceCapacity = config_.GetDefaultSnapshotSpaceSize();
    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
    size_t machineCodeSpaceCapacity = config_.GetDefaultMachineCodeSpaceSize();
    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);

    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) { // LCOV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
    }
    size_t oldSpaceCapacity = maxHeapSize - capacities;
    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
    globalSpaceNativeLimit_ = INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT;
    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    oldSpace_->Initialize();

    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    hugeMachineCodeSpace_ = new HugeMachineCodeSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);

    LOG_GC(DEBUG) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
                  << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
                  << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
                  << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
                  << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
                  << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
                  << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
                  << ", gcThreadNum = " << maxMarkTaskCount_;
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
    markType_ = MarkType::MARK_YOUNG;
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
    fullGC_ = new FullGC(this);

    partialGC_ = new PartialGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
        EnableConcurrentMarkType::CONFIG_DISABLE);
    nonMovableMarker_ = new NonMovableMarker(this);
    compressGCMarker_ = new CompressGCMarker(this);
    evacuator_ = new ParallelEvacuator(this);
    incrementalMarker_ = new IncrementalMarker(this);
    gcListeners_.reserve(16U);
    nativeSizeTriggerGCThreshold_ = config_.GetMaxNativeSizeInc();
    incNativeSizeTriggerGC_ = config_.GetStepNativeSizeInc();
    nativeSizeOvershoot_ = config_.GetNativeSizeOvershoot();
    asyncClearNativePointerThreshold_ = config_.GetAsyncClearNativePointerThreshold();
    idleGCTrigger_ = new IdleGCTrigger(this, sHeap_, thread_, GetEcmaVM()->GetJSOptions().EnableOptionalLog());
}

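// sOldTlab_ and sNonMovableTlab_ are thread-local allocation buffers carved out of the shared
// old and shared non-movable spaces: the owning JSThread bump-allocates from its buffer without
// taking the shared-heap lock (see the ReSetS*SpaceAllocationAddress calls in Heap::Initialize),
// and the buffers are reset around shared collections via ResetTlab below. This description is
// an editorial reading of the code in this file.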
void Heap::ResetTlab()
{
    sOldTlab_->Reset();
    sNonMovableTlab_->Reset();
}

void Heap::FillBumpPointerForTlab()
{
    sOldTlab_->FillBumpPointer();
    sNonMovableTlab_->FillBumpPointer();
}

void Heap::ProcessSharedGCMarkingLocalBuffer()
{
    if (sharedGCData_.sharedConcurrentMarkingLocalBuffer_ != nullptr) {
        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
        sHeap_->GetWorkManager()->PushLocalBufferToGlobal(sharedGCData_.sharedConcurrentMarkingLocalBuffer_);
        ASSERT(sharedGCData_.sharedConcurrentMarkingLocalBuffer_ == nullptr);
    }
}

void Heap::ProcessSharedGCRSetWorkList()
{
    if (sharedGCData_.rSetWorkListHandler_ != nullptr) {
        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
        ASSERT(this == sharedGCData_.rSetWorkListHandler_->GetHeap());
        sHeap_->GetSharedGCMarker()->ProcessThenMergeBackRSetFromBoundJSThread(sharedGCData_.rSetWorkListHandler_);
        // The current thread may finish earlier than the daemon thread. To keep the state range
        // accurate, the flag is set to true on both the js thread and the daemon thread; the reset
        // here is reentrant-safe because both sides simply set the value to false.
        thread_->SetProcessingLocalToSharedRset(false);
        ASSERT(sharedGCData_.rSetWorkListHandler_ == nullptr);
    }
}

const GlobalEnvConstants *Heap::GetGlobalConst() const
{
    return thread_->GlobalConstants();
}

void Heap::Destroy()
{
    ProcessSharedGCRSetWorkList();
    ProcessSharedGCMarkingLocalBuffer();
    if (sOldTlab_ != nullptr) {
        sOldTlab_->Reset();
        delete sOldTlab_;
        sOldTlab_ = nullptr;
    }
    if (sNonMovableTlab_ != nullptr) {
        sNonMovableTlab_->Reset();
        delete sNonMovableTlab_;
        sNonMovableTlab_ = nullptr;
    }
    if (workManager_ != nullptr) {
        delete workManager_;
        workManager_ = nullptr;
    }
    if (activeSemiSpace_ != nullptr) {
        activeSemiSpace_->Destroy();
        delete activeSemiSpace_;
        activeSemiSpace_ = nullptr;
    }
    if (inactiveSemiSpace_ != nullptr) {
        inactiveSemiSpace_->Destroy();
        delete inactiveSemiSpace_;
        inactiveSemiSpace_ = nullptr;
    }
    if (oldSpace_ != nullptr) {
        oldSpace_->Reset();
        delete oldSpace_;
        oldSpace_ = nullptr;
    }
    if (compressSpace_ != nullptr) {
        compressSpace_->Destroy();
        delete compressSpace_;
        compressSpace_ = nullptr;
    }
    if (nonMovableSpace_ != nullptr) {
        nonMovableSpace_->Reset();
        delete nonMovableSpace_;
        nonMovableSpace_ = nullptr;
    }
    if (snapshotSpace_ != nullptr) {
        snapshotSpace_->Destroy();
        delete snapshotSpace_;
        snapshotSpace_ = nullptr;
    }
    if (machineCodeSpace_ != nullptr) {
        machineCodeSpace_->Reset();
        delete machineCodeSpace_;
        machineCodeSpace_ = nullptr;
    }
    if (hugeObjectSpace_ != nullptr) {
        hugeObjectSpace_->Destroy();
        delete hugeObjectSpace_;
        hugeObjectSpace_ = nullptr;
    }
    if (hugeMachineCodeSpace_ != nullptr) {
        hugeMachineCodeSpace_->Destroy();
        delete hugeMachineCodeSpace_;
        hugeMachineCodeSpace_ = nullptr;
    }
    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
        readOnlySpace_->ClearReadOnly();
        readOnlySpace_->Destroy();
        delete readOnlySpace_;
        readOnlySpace_ = nullptr;
    }
    if (appSpawnSpace_ != nullptr) {
        appSpawnSpace_->Reset();
        delete appSpawnSpace_;
        appSpawnSpace_ = nullptr;
    }
    if (partialGC_ != nullptr) {
        delete partialGC_;
        partialGC_ = nullptr;
    }
    if (fullGC_ != nullptr) {
        delete fullGC_;
        fullGC_ = nullptr;
    }

    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (memController_ != nullptr) {
        delete memController_;
        memController_ = nullptr;
    }
    if (sweeper_ != nullptr) {
        delete sweeper_;
        sweeper_ = nullptr;
    }
    if (concurrentMarker_ != nullptr) {
        delete concurrentMarker_;
        concurrentMarker_ = nullptr;
    }
    if (incrementalMarker_ != nullptr) {
        delete incrementalMarker_;
        incrementalMarker_ = nullptr;
    }
    if (nonMovableMarker_ != nullptr) {
        delete nonMovableMarker_;
        nonMovableMarker_ = nullptr;
    }
    if (compressGCMarker_ != nullptr) {
        delete compressGCMarker_;
        compressGCMarker_ = nullptr;
    }
    if (evacuator_ != nullptr) {
        delete evacuator_;
        evacuator_ = nullptr;
    }
}

void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}

void Heap::GetHeapPrepare()
{
    // Ensure both the local and the shared heap are prepared.
    Prepare();
    SharedHeap *sHeap = SharedHeap::GetInstance();
    sHeap->Prepare(false);
}

void Heap::Resume(TriggerGCType gcType)
{
    activeSemiSpace_->SetWaterLine();

    if (mode_ != HeapMode::SPAWN &&
        activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC(), thread_)) {
        // if the activeSpace capacity changes, the oldSpace maximumCapacity should change too.
        size_t multiple = 2;
        size_t oldSpaceMaxLimit = 0;
        if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
            size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
        } else {
            size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
        }
        inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
    }

    PrepareRecordRegionsForReclaim();
    hugeObjectSpace_->ReclaimHugeRegion();
    hugeMachineCodeSpace_->ReclaimHugeRegion();
    if (parallelGC_) {
        if (gcType == TriggerGCType::OLD_GC) {
            isCSetClearing_.store(true, std::memory_order_release);
        }
        clearTaskFinished_ = false;
        Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}

void Heap::ResumeForAppSpawn()
{
    sweeper_->WaitAllTaskFinished();
    hugeObjectSpace_->ReclaimHugeRegion();
    hugeMachineCodeSpace_->ReclaimHugeRegion();
    inactiveSemiSpace_->ReclaimRegions();
    oldSpace_->Reset();
    auto cb = [] (Region *region) {
        region->ClearMarkGCBitset();
    };
    nonMovableSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

void Heap::CompactHeapBeforeFork()
{
    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
}

void Heap::DisableParallelGC()
{
    WaitAllTasksFinished();
    parallelGC_ = false;
    maxEvacuateTaskCount_ = 0;
    maxMarkTaskCount_ = 0;
    sweeper_->ConfigConcurrentSweep(false);
    concurrentMarker_->ConfigConcurrentMark(false);
    Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
}

void Heap::EnableParallelGC()
{
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    if (auto totalThreadNum = workManager_->GetTotalThreadNum();
        totalThreadNum != maxEvacuateTaskCount_ + 1) {
        LOG_ECMA_MEM(WARN) << "ThreadNum mismatch, totalThreadNum(workerManager): " << totalThreadNum << ", "
                           << "totalThreadNum(taskpool): " << (maxEvacuateTaskCount_ + 1);
        delete workManager_;
        workManager_ = new WorkManager(this, maxEvacuateTaskCount_ + 1);
        UpdateWorkManager(workManager_);
    }
    ASSERT(maxEvacuateTaskCount_ > 0);
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
                                         maxEvacuateTaskCount_ - 1);
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
    concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
}

TriggerGCType Heap::SelectGCType() const
{
    // If concurrent mark is enabled, TryTriggerConcurrentMarking decides which GC to choose.
    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark()) {
        return YOUNG_GC;
    }
    if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
        GetHeapObjectSize() <= globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize() &&
        !GlobalNativeSizeLargerThanLimit()) {
        return YOUNG_GC;
    }
    return OLD_GC;
}

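// CollectGarbageImpl is the central dispatcher for local collections: it may upgrade the
// requested gcType (pending full/old requests, imminent OOM), may bail out entirely in favor
// of a full concurrent mark, and finally runs the partial, full, or appspawn collector and
// recomputes limits. Callers reach it through the public CollectGarbage entry points, e.g.
// CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC) in CompactHeapBeforeFork above.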
CollectGarbageImpl(TriggerGCType gcType,GCReason reason)1190 void Heap::CollectGarbageImpl(TriggerGCType gcType, GCReason reason)
1191 {
1192     Jit::JitGCLockHolder lock(GetEcmaVM()->GetJSThread());
1193     {
1194 #if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
1195         if (UNLIKELY(!thread_->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
1196             LOG_ECMA(FATAL) << "Local GC must be in jsthread running state";
1197             UNREACHABLE();
1198         }
1199 #endif
1200         if (thread_->IsCrossThreadExecutionEnable() || GetOnSerializeEvent()) {
1201             ProcessGCListeners();
1202             return;
1203         }
1204         RecursionScope recurScope(this, HeapType::LOCAL_HEAP);
1205 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
1206         [[maybe_unused]] GcStateScope scope(thread_);
1207 #endif
1208         CHECK_NO_GC;
1209         if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
1210             // pre gc heap verify
1211             LOG_ECMA(DEBUG) << "pre gc heap verify";
1212             ProcessSharedGCRSetWorkList();
1213             Verification(this, VerifyKind::VERIFY_PRE_GC).VerifyAll();
1214         }
1215 
1216 #if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
1217         gcType = TriggerGCType::FULL_GC;
1218 #endif
1219         if (fullGCRequested_ && thread_->IsReadyToConcurrentMark() && gcType != TriggerGCType::FULL_GC) {
1220             gcType = TriggerGCType::FULL_GC;
1221         }
1222         if (oldGCRequested_ && gcType != TriggerGCType::FULL_GC) {
1223             gcType = TriggerGCType::OLD_GC;
1224         }
1225         if (shouldThrowOOMError_) {
1226             // Force Full GC after failed Old GC to avoid OOM
1227             LOG_ECMA(INFO) << "Old space is almost OOM, attempt trigger full gc to avoid OOM.";
1228             gcType = TriggerGCType::FULL_GC;
1229         }
1230         oldGCRequested_ = false;
1231         oldSpace_->AdjustOvershootSize();
1232 
1233         size_t originalNewSpaceSize = activeSemiSpace_->GetHeapObjectSize();
1234         if (!GetJSThread()->IsReadyToConcurrentMark() && markType_ == MarkType::MARK_FULL) {
1235             GetEcmaGCStats()->SetGCReason(reason);
1236         } else {
1237             GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, reason);
1238         }
1239         memController_->StartCalculationBeforeGC();
1240         StatisticHeapObject(gcType);
1241         gcType_ = gcType;
1242         {
1243             pgo::PGODumpPauseScope pscope(GetEcmaVM()->GetPGOProfiler());
1244             switch (gcType) {
1245                 case TriggerGCType::YOUNG_GC:
1246                     // Use partial GC for young generation.
1247                     if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1248                         SetMarkType(MarkType::MARK_YOUNG);
1249                     }
1250                     if (markType_ == MarkType::MARK_FULL) {
1251                         // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
1252                         gcType_ = TriggerGCType::OLD_GC;
1253                     }
1254                     partialGC_->RunPhases();
1255                     break;
1256                 case TriggerGCType::OLD_GC: {
1257                     bool fullConcurrentMarkRequested = false;
1258                     // Check whether it's needed to trigger full concurrent mark instead of trigger old gc
1259                     if (concurrentMarker_->IsEnabled() &&
1260                         (thread_->IsReadyToConcurrentMark() || markType_ == MarkType::MARK_YOUNG) &&
1261                         reason == GCReason::ALLOCATION_LIMIT) {
1262                         fullConcurrentMarkRequested = true;
1263                     }
1264                     if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
1265                         // Wait for existing concurrent marking tasks to be finished (if any),
1266                         // and reset concurrent marker's status for full mark.
1267                         bool concurrentMark = CheckOngoingConcurrentMarking();
1268                         if (concurrentMark) {
1269                             concurrentMarker_->Reset();
1270                         }
1271                     }
1272                     SetMarkType(MarkType::MARK_FULL);
1273                     if (fullConcurrentMarkRequested && idleTask_ == IdleTaskType::NO_TASK) {
1274                         LOG_ECMA(INFO)
1275                             << "Triggering old GC here may take a long time; trigger full concurrent mark instead";
1276                         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1277                         TriggerConcurrentMarking();
1278                         oldGCRequested_ = true;
1279                         ProcessGCListeners();
1280                         memController_->ResetCalculationWithoutGC();
1281                         return;
1282                     }
1283                     partialGC_->RunPhases();
1284                     break;
1285                 }
1286                 case TriggerGCType::FULL_GC:
1287                     fullGC_->SetForAppSpawn(false);
1288                     fullGC_->RunPhases();
1289                     if (fullGCRequested_) {
1290                         fullGCRequested_ = false;
1291                     }
1292                     break;
1293                 case TriggerGCType::APPSPAWN_FULL_GC:
1294                     fullGC_->SetForAppSpawn(true);
1295                     fullGC_->RunPhasesForAppSpawn();
1296                     break;
1297                 default: // LCOV_EXCL_BR_LINE
1298                     LOG_ECMA(FATAL) << "this branch is unreachable";
1299                     UNREACHABLE();
1300                     break;
1301             }
1302             ASSERT(thread_->IsPropertyCacheCleared());
1303         }
1304         UpdateHeapStatsAfterGC(gcType_);
1305         ClearIdleTask();
1306         // Adjust the old space capacity and global limit for the first partial GC with full mark.
1307         // Trigger full mark next time if the current survival rate is much less than half the average survival rates.
1308         AdjustBySurvivalRate(originalNewSpaceSize);
1309         memController_->StopCalculationAfterGC(gcType);
1310         if (gcType == TriggerGCType::FULL_GC || IsConcurrentFullMark()) {
1311             // The old space and global space limits can be recomputed only when the GC is not a semi GC
1312             // and the old space sweeping has finished.
1313             RecomputeLimits();
1314             ResetNativeSizeAfterLastGC();
1315             OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark " << IsConcurrentFullMark()
1316                                         << " global object size " << GetHeapObjectSize()
1317                                         << " global committed size " << GetCommittedSize()
1318                                         << " global limit " << globalSpaceAllocLimit_;
1319             markType_ = MarkType::MARK_YOUNG;
1320         }
1321         if (concurrentMarker_->IsRequestDisabled()) {
1322             concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
1323         }
1324         // GC log
1325         GetEcmaGCStats()->RecordStatisticAfterGC();
1326 #ifdef ENABLE_HISYSEVENT
1327         GetEcmaGCKeyStats()->IncGCCount();
1328         if (GetEcmaGCKeyStats()->CheckIfMainThread() && GetEcmaGCKeyStats()->CheckIfKeyPauseTime()) {
1329             GetEcmaGCKeyStats()->AddGCStatsToKey();
1330         }
1331 #endif
1332         GetEcmaGCStats()->PrintGCStatistic();
1333     }
1334 
1335     if (gcType_ == TriggerGCType::OLD_GC) {
1336         // During full concurrent mark, the non movable space may temporarily have a 2M overshoot, so its max
1337         // heap size can reach 18M. After the partial old GC the size must retract to below 16M; otherwise old GC
1338         // would be triggered frequently. Outside concurrent mark, the non movable space max heap size is 16M,
1339         // and exceeding it throws an OOM exception. This check enforces that limit.
1340         CheckNonMovableSpaceOOM();
1341     }
1342     // An OOMError object must not be allocated during the GC process, so throw it after the GC
1343     if (shouldThrowOOMError_ && gcType_ == TriggerGCType::FULL_GC) {
1344         oldSpace_->ResetCommittedOverSizeLimit();
1345         if (oldSpace_->CommittedSizeExceed()) { // LCOV_EXCL_BR_LINE
1346             sweeper_->EnsureAllTaskFinished();
1347             DumpHeapSnapshotBeforeOOM(false);
1348             StatisticHeapDetail();
1349             ThrowOutOfMemoryError(thread_, oldSpace_->GetMergeSize(), " OldSpace::Merge");
1350         }
1351         oldSpace_->ResetMergeSize();
1352         shouldThrowOOMError_ = false;
1353     }
1354     // Region allocation failed during GC, so an OOM MUST be thrown here
1355     if (shouldForceThrowOOMError_) {
1356         sweeper_->EnsureAllTaskFinished();
1357         DumpHeapSnapshotBeforeOOM(false);
1358         StatisticHeapDetail();
1359         ThrowOutOfMemoryError(thread_, DEFAULT_REGION_SIZE, " HeapRegionAllocator::AllocateAlignedRegion");
1360     }
1361     // Update the recorded heap object size after GC when in sensitive status
1362     if (GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE) {
1363         SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
1364     }
1365 
1366     if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
1367         // post gc heap verify
1368         LOG_ECMA(DEBUG) << "post gc heap verify";
1369         Verification(this, VerifyKind::VERIFY_POST_GC).VerifyAll();
1370     }
1371 
1372 #if defined(ECMASCRIPT_SUPPORT_TRACING)
1373     auto tracing = GetEcmaVM()->GetTracing();
1374     if (tracing != nullptr) {
1375         tracing->TraceEventRecordMemory();
1376     }
1377 #endif
1378     ProcessGCListeners();
1379 
1380 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1381     if (!hasOOMDump_ && (g_betaVersion || g_developMode)) {
1382         ThresholdReachedDump();
1383     }
1384 #endif
1385 
1386     if (GetEcmaGCKeyStats()->CheckIfMainThread()) {
1387         GetEcmaGCKeyStats()->ProcessLongGCEvent();
1388     }
1389 
1390     if (GetEcmaVM()->IsEnableBaselineJit() || GetEcmaVM()->IsEnableFastJit()) {
1391         // check whether the machine code space still has enough memory
1392         int remainSize = static_cast<int>(config_.GetDefaultMachineCodeSpaceSize()) -
1393             static_cast<int>(GetMachineCodeSpace()->GetHeapObjectSize());
1394         Jit::GetInstance()->CheckMechineCodeSpaceMemory(GetEcmaVM()->GetJSThread(), remainSize);
1395     }
1396 }
1397 
1398 void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
1399 {
1400     CollectGarbageImpl(gcType, reason);
1401 
1402     // The weak node nativeFinalizeCallback may execute JS, change the weakNodeList status,
1403     // and even lead to another GC, so it has to be invoked after this GC process.
1404     thread_->InvokeWeakNodeNativeFinalizeCallback();
1405     // PostTask for ProcessNativeDelete
1406     CleanCallback();
1407     JSFinalizationRegistry::CheckAndCall(thread_);
1408 }
1409 
1410 void BaseHeap::ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
1411     bool NonMovableObjNearOOM)
1412 { // LCOV_EXCL_START
1413     GetEcmaGCStats()->PrintGCMemoryStatistic();
1414     std::ostringstream oss;
1415     if (NonMovableObjNearOOM) {
1416         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1417             << " function name: " << functionName.c_str();
1418     } else {
1419         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1420             << functionName.c_str();
1421     }
1422     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1423     THROW_OOM_ERROR(thread, oss.str().c_str());
1424 } // LCOV_EXCL_STOP
1425 
1426 void BaseHeap::SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName)
1427 {
1428     std::ostringstream oss;
1429     oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1430         << functionName.c_str();
1431     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1432 
1433     EcmaVM *ecmaVm = thread->GetEcmaVM();
1434     ObjectFactory *factory = ecmaVm->GetFactory();
1435     JSHandle<JSObject> error = factory->GetJSError(ErrorType::OOM_ERROR, oss.str().c_str(), StackCheck::NO);
1436     thread->SetException(error.GetTaggedValue());
1437 }
1438 
1439 void BaseHeap::ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
1440     bool NonMovableObjNearOOM)
1441 { // LCOV_EXCL_START
1442     GetEcmaGCStats()->PrintGCMemoryStatistic();
1443     std::ostringstream oss;
1444     if (NonMovableObjNearOOM) {
1445         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1446             << " function name: " << functionName.c_str();
1447     } else {
1448         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
1449     }
1450     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1451     EcmaVM *ecmaVm = thread->GetEcmaVM();
1452     JSHandle<GlobalEnv> env = ecmaVm->GetGlobalEnv();
1453     JSHandle<JSObject> error = JSHandle<JSObject>::Cast(env->GetOOMErrorObject());
1454 
1455     thread->SetException(error.GetTaggedValue());
1456     ecmaVm->HandleUncatchableError();
1457 } // LCOV_EXCL_STOP
1458 
1459 void BaseHeap::FatalOutOfMemoryError(size_t size, std::string functionName)
1460 { // LCOV_EXCL_START
1461     GetEcmaGCStats()->PrintGCMemoryStatistic();
1462     LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
1463                         << " function name: " << functionName.c_str();
1464 } // LCOV_EXCL_STOP
1465 
1466 void Heap::CheckNonMovableSpaceOOM()
1467 {
1468     if (nonMovableSpace_->GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE) { // LCOV_EXCL_BR_LINE
1469         sweeper_->EnsureAllTaskFinished();
1470         DumpHeapSnapshotBeforeOOM(false);
1471         StatisticHeapDetail();
1472         ThrowOutOfMemoryError(thread_, nonMovableSpace_->GetHeapObjectSize(), "Heap::CheckNonMovableSpaceOOM", true);
1473     }
1474 }
1475 
1476 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
1477 {
1478     promotedSize_ = GetEvacuator()->GetPromotedSize();
1479     if (originalNewSpaceSize <= 0) {
1480         return;
1481     }
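    // The survival rate is the fraction of the original young generation that lived through this GC,
    // either copied within the semi space or promoted to the old space. Illustrative numbers: an 8 MB
    // young generation with 1 MB copied and 1 MB promoted gives a survival rate of (1 + 1) / 8 = 0.25.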
1482     semiSpaceCopiedSize_ = activeSemiSpace_->GetHeapObjectSize();
1483     double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
1484     double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
1485     double survivalRate = std::min(copiedRate + promotedRate, 1.0);
1486     OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
1487                                 << " survivalRate: " << survivalRate;
1488     if (!oldSpaceLimitAdjusted_) {
1489         memController_->AddSurvivalRate(survivalRate);
1490         AdjustOldSpaceLimit();
1491     } else {
1492         double averageSurvivalRate = memController_->GetAverageSurvivalRate();
1493         // 2 means half
1494         if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
1495             SetFullMarkRequestedState(true);
1496             OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
1497                 << " is less than half the average survival rates: " << averageSurvivalRate
1498                 << ". Trigger full mark next time.";
1499             // Survival rate of full mark is precise. Reset recorded survival rates.
1500             memController_->ResetRecordedSurvivalRates();
1501         }
1502         memController_->AddSurvivalRate(survivalRate);
1503     }
1504 }
1505 
1506 size_t Heap::VerifyHeapObjects(VerifyKind verifyKind) const
1507 {
1508     size_t failCount = 0;
1509     {
1510         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1511         activeSemiSpace_->IterateOverObjects(verifier);
1512     }
1513 
1514     {
1515         if (verifyKind == VerifyKind::VERIFY_EVACUATE_YOUNG ||
1516             verifyKind == VerifyKind::VERIFY_EVACUATE_OLD ||
1517             verifyKind == VerifyKind::VERIFY_EVACUATE_FULL) {
1518             inactiveSemiSpace_->EnumerateRegions([this](Region *region) {
1519                 region->IterateAllMarkedBits([this](void *addr) {
1520                     VerifyObjectVisitor::VerifyInactiveSemiSpaceMarkedObject(this, addr);
1521                 });
1522             });
1523         }
1524     }
1525 
1526     {
1527         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1528         oldSpace_->IterateOverObjects(verifier);
1529     }
1530 
1531     {
1532         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1533         appSpawnSpace_->IterateOverMarkedObjects(verifier);
1534     }
1535 
1536     {
1537         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1538         nonMovableSpace_->IterateOverObjects(verifier);
1539     }
1540 
1541     {
1542         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1543         hugeObjectSpace_->IterateOverObjects(verifier);
1544     }
1545     {
1546         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1547         hugeMachineCodeSpace_->IterateOverObjects(verifier);
1548     }
1549     {
1550         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1551         machineCodeSpace_->IterateOverObjects(verifier);
1552     }
1553     {
1554         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1555         snapshotSpace_->IterateOverObjects(verifier);
1556     }
1557     return failCount;
1558 }
1559 
1560 size_t Heap::VerifyOldToNewRSet(VerifyKind verifyKind) const
1561 {
1562     size_t failCount = 0;
1563     VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1564     oldSpace_->IterateOldToNewOverObjects(verifier);
1565     appSpawnSpace_->IterateOldToNewOverObjects(verifier);
1566     nonMovableSpace_->IterateOldToNewOverObjects(verifier);
1567     machineCodeSpace_->IterateOldToNewOverObjects(verifier);
1568     return failCount;
1569 }
1570 
1571 void Heap::AdjustOldSpaceLimit()
1572 {
1573     if (oldSpaceLimitAdjusted_) {
1574         return;
1575     }
1576     size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
1577     size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
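    // The candidate limit is the larger of (live old-space size + minimum growing step) and the current
    // limit scaled by the average survival rate; once the candidate exceeds the current limit, adjustment
    // stops and RecomputeLimits takes over after the next full mark.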
1578     size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
1579         static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
1580     if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
1581         GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
1582     } else {
1583         oldSpaceLimitAdjusted_ = true;
1584     }
1585 
1586     size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
1587         static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
1588     if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
1589         globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
1590     }
1591     OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
1592         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
1593 }
1594 
1595 void BaseHeap::OnAllocateEvent([[maybe_unused]] EcmaVM *ecmaVm, [[maybe_unused]] TaggedObject* address,
1596                                [[maybe_unused]] size_t size)
1597 {
1598 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1599     HeapProfilerInterface *profiler = ecmaVm->GetHeapProfile();
1600     if (profiler != nullptr) {
1601         base::BlockHookScope blockScope;
1602         profiler->AllocationEvent(address, size);
1603     }
1604 #endif
1605 }
1606 
1607 void Heap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] bool isFullGC)
1608 {
1609 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(ENABLE_DUMP_IN_FAULTLOG)
1610     AppFreezeFilterCallback appfreezeCallback = Runtime::GetInstance()->GetAppFreezeFilterCallback();
1611     if (appfreezeCallback != nullptr && !appfreezeCallback(getprocpid())) {
1612         LOG_ECMA(INFO) << "Heap::DumpHeapSnapshotBeforeOOM, no dump quota.";
1613         return;
1614     }
1615 #endif
1616 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
1617 #if defined(ENABLE_DUMP_IN_FAULTLOG)
1618     if (ecmaVm_->GetHeapProfile() != nullptr) {
1619         LOG_ECMA(ERROR) << "Heap::DumpHeapSnapshotBeforeOOM, a heap profiler already exists, skip dump";
1620         return;
1621     }
1622     // Filter appfreeze when dump.
1623     LOG_ECMA(INFO) << " Heap::DumpHeapSnapshotBeforeOOM, isFullGC = " << isFullGC;
1624     base::BlockHookScope blockScope;
1625     HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
1626 #ifdef ENABLE_HISYSEVENT
1627     GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetHeapLimitSize(), GetLiveObjectSize());
1628     hasOOMDump_ = true;
1629 #endif
1630     // The VM should always allocate young space successfully. A real OOM occurs in the non-young spaces.
1631     DumpSnapShotOption dumpOption;
1632     dumpOption.dumpFormat = DumpFormat::BINARY;
1633     dumpOption.isVmMode = true;
1634     dumpOption.isPrivate = false;
1635     dumpOption.captureNumericValue = false;
1636     dumpOption.isFullGC = isFullGC;
1637     dumpOption.isSimplify = true;
1638     dumpOption.isSync = true;
1639     dumpOption.isBeforeFill = false;
1640     dumpOption.isDumpOOM = true;
1641     heapProfile->DumpHeapSnapshotForOOM(dumpOption);
1642     HeapProfilerInterface::Destroy(ecmaVm_);
1643 #endif // ENABLE_DUMP_IN_FAULTLOG
1644 #endif // ECMASCRIPT_SUPPORT_SNAPSHOT
1645 }
1646 
1647 void Heap::AdjustSpaceSizeForAppSpawn()
1648 {
1649     SetHeapMode(HeapMode::SPAWN);
1650     size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
1651     activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
1652     auto committedSize = appSpawnSpace_->GetCommittedSize();
1653     appSpawnSpace_->SetInitialCapacity(committedSize);
1654     appSpawnSpace_->SetMaximumCapacity(committedSize);
1655     oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
1656     oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
1657 }
1658 
1659 void Heap::AddAllocationInspectorToAllSpaces(AllocationInspector *inspector)
1660 {
1661     ASSERT(inspector != nullptr);
1662     // activeSemiSpace_/inactiveSemiSpace_:
1663     // only add an inspector to activeSemiSpace_; while sweeping for GC, the inspector also needs to be swept.
1664     activeSemiSpace_->AddAllocationInspector(inspector);
1665     // oldSpace_/compressSpace_:
1666     // only add an inspector to oldSpace_; while sweeping for GC, the inspector also needs to be swept.
1667     oldSpace_->AddAllocationInspector(inspector);
1668     // readOnlySpace_ does not need an allocation inspector.
1669     // appSpawnSpace_ does not need an allocation inspector.
1670     nonMovableSpace_->AddAllocationInspector(inspector);
1671     machineCodeSpace_->AddAllocationInspector(inspector);
1672     hugeObjectSpace_->AddAllocationInspector(inspector);
1673     hugeMachineCodeSpace_->AddAllocationInspector(inspector);
1674 }
1675 
1676 void Heap::ClearAllocationInspectorFromAllSpaces()
1677 {
1678     activeSemiSpace_->ClearAllocationInspector();
1679     oldSpace_->ClearAllocationInspector();
1680     nonMovableSpace_->ClearAllocationInspector();
1681     machineCodeSpace_->ClearAllocationInspector();
1682     hugeObjectSpace_->ClearAllocationInspector();
1683     hugeMachineCodeSpace_->ClearAllocationInspector();
1684 }
1685 
1686 void Heap::RecomputeLimits()
1687 {
1688     double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
1689     double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
1690     size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1691         hugeMachineCodeSpace_->GetHeapObjectSize();
1692     size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
1693 
1694     double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
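    // The new limits roughly scale the current live size by growingFactor, clamped between the MIN_*
    // constants and the remaining capacity. Illustrative numbers: 40 MB of live old-space objects with
    // a growing factor of 1.5 yield a ~60 MB old space limit, provided it fits under the maximum.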
1695     size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
1696     size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
1697         maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
1698     size_t maxGlobalSize = config_.GetMaxHeapSize() - newSpaceCapacity;
1699     size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
1700                                                                      maxGlobalSize, newSpaceCapacity, growingFactor);
1701     globalSpaceAllocLimit_ = newGlobalSpaceLimit;
1702     oldSpace_->SetInitialCapacity(newOldSpaceLimit);
1703     globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(GetGlobalNativeSize(), MIN_HEAP_SIZE,
1704                                                                   MAX_GLOBAL_NATIVE_LIMIT, newSpaceCapacity,
1705                                                                   growingFactor);
1706     globalSpaceNativeLimit_ = std::max(globalSpaceNativeLimit_, GetGlobalNativeSize()
1707                                         + config_.GetMinNativeLimitGrowingStep());
1708     OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
1709         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
1710         << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
1711     if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize() &&
1712         (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) { // 2: means half
1713         OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
1714                                     << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
1715                                     << " Committed Size: " << oldSpace_->GetCommittedSize();
1716         SetFullMarkRequestedState(true);
1717     }
1718 }
1719 
1720 bool Heap::CheckAndTriggerOldGC(size_t size)
1721 {
1722     bool isFullMarking = IsConcurrentFullMark() && GetJSThread()->IsMarking();
1723     bool isNativeSizeLargeTrigger = isFullMarking ? false : GlobalNativeSizeLargerThanLimit();
1724     if (isFullMarking && oldSpace_->GetOvershootSize() == 0) {
1725         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1726     }
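    // While a concurrent full mark is running, prefer growing the overshoot over collecting, so mutators
    // can keep allocating until marking finishes; the growth is capped by GetOldSpaceMaxOvershootSize().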
1727     if ((isNativeSizeLargeTrigger || OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) ||
1728         GetHeapObjectSize() > globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize()) &&
1729         !NeedStopCollection()) {
1730         if (isFullMarking && oldSpace_->GetOvershootSize() < config_.GetOldSpaceMaxOvershootSize()) {
1731             oldSpace_->IncreaseOvershootSize(config_.GetOldSpaceStepOvershootSize());
1732             return false;
1733         }
1734         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
1735         if (!oldGCRequested_) {
1736             return true;
1737         }
1738     }
1739     return false;
1740 }
1741 
1742 bool Heap::CheckAndTriggerHintGC(MemoryReduceDegree degree, GCReason reason)
1743 {
1744     if (InSensitiveStatus()) {
1745         return false;
1746     }
1747     LOG_GC(INFO) << "HintGC degree: " << static_cast<int>(degree) << " reason: " << GCStats::GCReasonToString(reason);
1748     switch (degree) {
1749         case MemoryReduceDegree::LOW: {
1750             if (idleGCTrigger_->HintGCInLowDegree<Heap>(this)) {
1751                 if (CheckCanTriggerConcurrentMarking()) {
1752                     markType_ = MarkType::MARK_FULL;
1753                     TriggerConcurrentMarking();
1754                     LOG_GC(INFO) << " MemoryReduceDegree::LOW TriggerConcurrentMark.";
1755                     return true;
1756                 }
1757             }
1758             if (idleGCTrigger_->HintGCInLowDegree<SharedHeap>(sHeap_)) {
1759                 if (sHeap_->CheckCanTriggerConcurrentMarking(thread_)) {
1760                     LOG_GC(INFO) << " MemoryReduceDegree::LOW TriggerSharedConcurrentMark.";
1761                     sHeap_->TriggerConcurrentMarking<TriggerGCType::SHARED_GC, GCReason::HINT_GC>(thread_);
1762                     return true;
1763                 }
1764             }
1765             break;
1766         }
1767         case MemoryReduceDegree::MIDDLE: {
1768             if (idleGCTrigger_->HintGCInMiddleDegree<Heap>(this)) {
1769                 CollectGarbage(TriggerGCType::FULL_GC, reason);
1770                 return true;
1771             }
1772             if (idleGCTrigger_->HintGCInMiddleDegree<SharedHeap>(sHeap_)) {
1773                 sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::HINT_GC>(thread_);
1774                 return true;
1775             }
1776             break;
1777         }
1778         case MemoryReduceDegree::HIGH: {
1779             bool result = false;
1780             if (idleGCTrigger_->HintGCInHighDegree<Heap>(this)) {
1781                 CollectGarbage(TriggerGCType::FULL_GC, reason);
1782                 result = true;
1783             }
1784             if (idleGCTrigger_->HintGCInHighDegree<SharedHeap>(sHeap_)) {
1785                 sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::HINT_GC>(thread_);
1786                 result = true;
1787             }
1788             return result;
1789         }
1790         default: // LCOV_EXCL_BR_LINE
1791             LOG_GC(INFO) << "HintGC invalid degree value: " << static_cast<int>(degree);
1792             break;
1793     }
1794     return false;
1795 }
1796 
1797 bool Heap::CheckOngoingConcurrentMarking()
1798 {
1799     if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark() &&
1800         concurrentMarker_->IsTriggeredConcurrentMark()) {
1801         TRACE_GC(GCStats::Scope::ScopeId::WaitConcurrentMarkFinished, GetEcmaVM()->GetEcmaGCStats());
1802         if (thread_->IsMarking()) {
1803             ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
1804             MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, WaitConcurrentMarkingFinished);
1805             GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
1806             WaitConcurrentMarkingFinished();
1807         }
1808         WaitRunningTaskFinished();
1809         memController_->RecordAfterConcurrentMark(markType_, concurrentMarker_);
1810         return true;
1811     }
1812     return false;
1813 }
1814 
1815 void Heap::ClearIdleTask()
1816 {
1817     SetIdleTask(IdleTaskType::NO_TASK);
1818     idleTaskFinishTime_ = incrementalMarker_->GetCurrentTimeInMs();
1819 }
1820 
1821 void Heap::TryTriggerIdleCollection()
1822 {
1823     if (idleTask_ != IdleTaskType::NO_TASK || !GetJSThread()->IsReadyToConcurrentMark() || !enableIdleGC_) {
1824         return;
1825     }
1826     if (thread_->IsMarkFinished() && concurrentMarker_->IsTriggeredConcurrentMark()) {
1827         SetIdleTask(IdleTaskType::FINISH_MARKING);
1828         EnableNotifyIdle();
1829         CalculateIdleDuration();
1830         return;
1831     }
1832 
1833     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1834     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1835     double newSpaceAllocToLimitDuration = (static_cast<double>(activeSemiSpace_->GetInitialCapacity()) -
1836                                            static_cast<double>(activeSemiSpace_->GetCommittedSize())) /
1837                                            newSpaceAllocSpeed;
1838     double newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1839     double newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
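    // newSpaceRemainSize predicts how many bytes could still be allocated once marking finished:
    // (time until the allocation limit is hit - time needed to mark) * allocation speed. If fewer than
    // two regions would remain, marking can barely finish in time, so schedule a young GC at idle.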
1840     // 2 means double
1841     if (newSpaceRemainSize < 2 * DEFAULT_REGION_SIZE) {
1842         SetIdleTask(IdleTaskType::YOUNG_GC);
1843         SetMarkType(MarkType::MARK_YOUNG);
1844         EnableNotifyIdle();
1845         CalculateIdleDuration();
1846         return;
1847     }
1848 }
1849 
1850 void Heap::CalculateIdleDuration()
1851 {
1852     size_t updateReferenceSpeed = 0;
1853     // clear native object duration
1854     size_t clearNativeObjSpeed = 0;
1855     if (markType_ == MarkType::MARK_YOUNG) {
1856         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_UPDATE_REFERENCE_SPEED);
1857         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_CLEAR_NATIVE_OBJ_SPEED);
1858     } else if (markType_ == MarkType::MARK_FULL) {
1859         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::UPDATE_REFERENCE_SPEED);
1860         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_CLEAR_NATIVE_OBJ_SPEED);
1861     }
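    // Each phase below is estimated as (work size) / (recorded speed); a speed of 0 means no sample
    // has been recorded yet, so that phase is skipped in the prediction.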
1862 
1863     // update reference duration
1864     idlePredictDuration_ = 0.0f;
1865     if (updateReferenceSpeed != 0) {
1866         idlePredictDuration_ += (float)GetHeapObjectSize() / updateReferenceSpeed;
1867     }
1868 
1869     if (clearNativeObjSpeed != 0) {
1870         idlePredictDuration_ += (float)GetNativePointerListSize() / clearNativeObjSpeed;
1871     }
1872 
1873     // sweep and evacuate duration
1874     size_t youngEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_EVACUATE_SPACE_SPEED);
1875     double survivalRate = GetEcmaGCStats()->GetAvgSurvivalRate();
1876     if (markType_ == MarkType::MARK_YOUNG && youngEvacuateSpeed != 0) {
1877         idlePredictDuration_ += activeSemiSpace_->GetHeapObjectSize() * survivalRate / youngEvacuateSpeed;
1878     } else if (markType_ == MarkType::MARK_FULL) {
1879         size_t sweepSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::SWEEP_SPEED);
1880         size_t oldEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_EVACUATE_SPACE_SPEED);
1881         if (sweepSpeed != 0) {
1882             idlePredictDuration_ += (float)GetHeapObjectSize() / sweepSpeed;
1883         }
1884         if (oldEvacuateSpeed != 0) {
1885             size_t collectRegionSetSize = GetEcmaGCStats()->GetRecordData(
1886                 RecordData::COLLECT_REGION_SET_SIZE);
1887             idlePredictDuration_ += (survivalRate * activeSemiSpace_->GetHeapObjectSize() + collectRegionSetSize) /
1888                                     oldEvacuateSpeed;
1889         }
1890     }
1891 
1892     // Idle YoungGC mark duration
1893     size_t markSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::MARK_SPEED);
1894     if (idleTask_ == IdleTaskType::YOUNG_GC && markSpeed != 0) {
1895         idlePredictDuration_ += (float)activeSemiSpace_->GetHeapObjectSize() / markSpeed;
1896     }
1897     OPTIONAL_LOG(ecmaVm_, INFO) << "Predict idle gc pause: " << idlePredictDuration_ << "ms";
1898 }
1899 
1900 void Heap::TryTriggerIncrementalMarking()
1901 {
1902     if (!GetJSThread()->IsReadyToConcurrentMark() || idleTask_ != IdleTaskType::NO_TASK || !enableIdleGC_) {
1903         return;
1904     }
1905     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1906     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1907         hugeMachineCodeSpace_->GetHeapObjectSize();
1908     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1909     double oldSpaceIncrementalMarkSpeed = incrementalMarker_->GetAverageIncrementalMarkingSpeed();
1910     double oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1911     double oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceIncrementalMarkSpeed;
1912 
1913     double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1914     // marking should finish before the allocation limit is reached
1915     if ((oldSpaceRemainSize < DEFAULT_REGION_SIZE) || GetHeapObjectSize() >= globalSpaceAllocLimit_) {
1916         // The amount allocated during incremental marking should stay below the limit;
1917         // otherwise trigger concurrent mark instead.
1918         size_t allocateSize = oldSpaceAllocSpeed * oldSpaceMarkDuration;
1919         if (allocateSize < ALLOCATE_SIZE_LIMIT) {
1920             EnableNotifyIdle();
1921             SetIdleTask(IdleTaskType::INCREMENTAL_MARK);
1922         }
1923     }
1924 }
1925 
1926 bool Heap::CheckCanTriggerConcurrentMarking()
1927 {
1928     return concurrentMarker_->IsEnabled() && thread_->IsReadyToConcurrentMark() &&
1929         !incrementalMarker_->IsTriggeredIncrementalMark() &&
1930         (idleTask_ == IdleTaskType::NO_TASK || idleTask_ == IdleTaskType::YOUNG_GC);
1931 }
1932 
1933 void Heap::TryTriggerConcurrentMarking()
1934 {
1935     // When concurrent marking is enabled, we attempt to trigger it here.
1936     // When the size of the old space or global space reaches its limit, isFullMarkNeeded is set to true.
1937     // If the predicted duration of the current full mark would not let the new and old spaces reach their limits,
1938     // a full mark is triggered.
1939     // Likewise, if the size of the new space reaches its capacity and the predicted duration of the current
1940     // young mark would not let the new space reach its limit, a young mark can be triggered.
1941     // If full mark takes too long, a compress full GC is requested once the spaces reach their limits.
1942     // If the global space grows beyond half the max heap size, we switch to full mark and trigger a partial GC.
1943     if (!CheckCanTriggerConcurrentMarking()) {
1944         return;
1945     }
1946     if (fullMarkRequested_) {
1947         markType_ = MarkType::MARK_FULL;
1948         OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
1949         TriggerConcurrentMarking();
1950         return;
1951     }
1952     if (InSensitiveStatus() && !ObjectExceedHighSensitiveThresholdForCM()) {
1953         return;
1954     }
1955     if (IsJustFinishStartup() && !ObjectExceedJustFinishStartupThresholdForCM()) {
1956         return;
1957     }
1958 
1959     double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
1960            oldSpaceAllocToLimitDuration = 0;
1961     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1962     double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
1963     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1964         hugeMachineCodeSpace_->GetHeapObjectSize();
1965     size_t globalHeapObjectSize = GetHeapObjectSize();
1966     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1967     if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
1968         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1969             GlobalNativeSizeLargerThanLimit()) {
1970             markType_ = MarkType::MARK_FULL;
1971             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
1972             TriggerConcurrentMarking();
1973             return;
1974         }
1975     } else {
1976         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1977             GlobalNativeSizeLargerThanLimit()) {
1978             markType_ = MarkType::MARK_FULL;
1979             TriggerConcurrentMarking();
1980             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1981             return;
1982         }
1983         oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1984         oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
1985         // oldSpaceRemainSize is the predicted size that can still be allocated after the full concurrent mark.
1986         double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1987         if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
1988             markType_ = MarkType::MARK_FULL;
1989             TriggerConcurrentMarking();
1990             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1991             return;
1992         }
1993     }
1994 
1995     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1996     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1997     if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
1998         if (activeSemiSpace_->GetCommittedSize() >= config_.GetSemiSpaceTriggerConcurrentMark()) {
1999             markType_ = MarkType::MARK_YOUNG;
2000             TriggerConcurrentMarking();
2001             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark, fullGCRequested: " << fullGCRequested_;
2002         }
2003         return;
2004     }
2005     size_t semiSpaceCapacity = activeSemiSpace_->GetInitialCapacity() + activeSemiSpace_->GetOvershootSize();
2006     size_t semiSpaceCommittedSize = activeSemiSpace_->GetCommittedSize();
2007     bool triggerMark = semiSpaceCapacity <= semiSpaceCommittedSize;
2008     if (!triggerMark) {
2009         newSpaceAllocToLimitDuration = (semiSpaceCapacity - semiSpaceCommittedSize) / newSpaceAllocSpeed;
2010         newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
2011         // newSpaceRemainSize is the predicted size that can still be allocated after the semi concurrent mark.
2012         newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
2013         triggerMark = newSpaceRemainSize < DEFAULT_REGION_SIZE;
2014     }
2015 
2016     if (triggerMark) {
2017         markType_ = MarkType::MARK_YOUNG;
2018         TriggerConcurrentMarking();
2019         OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
2020         return;
2021     }
2022 }
2023 
2024 void Heap::TryTriggerFullMarkOrGCByNativeSize()
2025 {
2026     // In a high sensitive scene where the native size exceeds the limit, trigger old GC directly
2027     if (InSensitiveStatus() && GlobalNativeSizeLargerToTriggerGC()) {
2028         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
2029     } else if (GlobalNativeSizeLargerThanLimit()) {
2030         if (concurrentMarker_->IsEnabled()) {
2031             SetFullMarkRequestedState(true);
2032             TryTriggerConcurrentMarking();
2033         } else {
2034             CheckAndTriggerOldGC();
2035         }
2036     }
2037 }
2038 
2039 bool Heap::TryTriggerFullMarkBySharedLimit()
2040 {
2041     bool keepFullMarkRequest = false;
2042     if (concurrentMarker_->IsEnabled()) {
2043         if (!CheckCanTriggerConcurrentMarking()) {
2044             return keepFullMarkRequest;
2045         }
2046         markType_ = MarkType::MARK_FULL;
2047         if (ConcurrentMarker::TryIncreaseTaskCounts()) {
2048             concurrentMarker_->Mark();
2049         } else {
2050             // the full mark request needs to be retried later.
2051             keepFullMarkRequest = true;
2052         }
2053     }
2054     return keepFullMarkRequest;
2055 }
2056 
2057 void Heap::CheckAndTriggerTaskFinishedGC()
2058 {
2059     size_t objectSizeOfTaskBegin = GetRecordObjectSize();
2060     size_t objectSizeOfTaskFinished = GetHeapObjectSize();
2061     size_t nativeSizeOfTaskBegin = GetRecordNativeSize();
2062     size_t nativeSizeOfTaskFinished = GetGlobalNativeSize();
2063     // GC is triggered when the heap size increases by more than Max(20M, 10% * SizeOfTaskBegin)
2064     bool objectSizeFlag = objectSizeOfTaskFinished > objectSizeOfTaskBegin &&
2065         objectSizeOfTaskFinished - objectSizeOfTaskBegin > std::max(TRIGGER_OLDGC_OBJECT_SIZE_LIMIT,
2066             TRIGGER_OLDGC_OBJECT_LIMIT_RATE * objectSizeOfTaskBegin);
2067     bool nativeSizeFlag = nativeSizeOfTaskFinished > nativeSizeOfTaskBegin &&
2068         nativeSizeOfTaskFinished - nativeSizeOfTaskBegin > std::max(TRIGGER_OLDGC_NATIVE_SIZE_LIMIT,
2069             TRIGGER_OLDGC_NATIVE_LIMIT_RATE * nativeSizeOfTaskBegin);
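    // Illustrative example with the constants above: a task that began with 100 MB of objects triggers
    // an old GC once the heap has grown by more than max(20 MB, 10 MB) = 20 MB, i.e. past 120 MB.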
2070     if (objectSizeFlag || nativeSizeFlag) {
2071         CollectGarbage(TriggerGCType::OLD_GC, GCReason::TRIGGER_BY_TASKPOOL);
2072         RecordOrResetObjectSize(0);
2073         RecordOrResetNativeSize(0);
2074     }
2075 }
2076 
2077 bool Heap::IsMarking() const
2078 {
2079     return thread_->IsMarking();
2080 }
2081 
2082 void Heap::TryTriggerFullMarkBySharedSize(size_t size)
2083 {
2084     newAllocatedSharedObjectSize_ += size;
2085     if (newAllocatedSharedObjectSize_ >= NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT) {
2086         if (thread_->IsMarkFinished() && GetConcurrentMarker()->IsTriggeredConcurrentMark() &&
2087             !GetOnSerializeEvent() && InSensitiveStatus()) {
2088             GetConcurrentMarker()->HandleMarkingFinished();
2089             newAllocatedSharedObjectSize_ = 0;
2090         } else if (concurrentMarker_->IsEnabled()) {
2091             SetFullMarkRequestedState(true);
2092             TryTriggerConcurrentMarking();
2093             newAllocatedSharedObjectSize_ = 0;
2094         }
2095     }
2096 }
2097 
2098 bool Heap::IsReadyToConcurrentMark() const
2099 {
2100     return thread_->IsReadyToConcurrentMark();
2101 }
2102 
2103 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
2104 {
2105     size_t size = object->GetBindingSize();
2106     if (size == 0) {
2107         return;
2108     }
2109     nativeBindingSize_ += size;
2110 }
2111 
2112 void Heap::IncreaseNativeBindingSize(size_t size)
2113 {
2114     if (size == 0) {
2115         return;
2116     }
2117     nativeBindingSize_ += size;
2118 }
2119 
2120 void Heap::DecreaseNativeBindingSize(size_t size)
2121 {
2122     ASSERT(size <= nativeBindingSize_);
2123     nativeBindingSize_ -= size;
2124 }
2125 
2126 void Heap::PrepareRecordRegionsForReclaim()
2127 {
2128     activeSemiSpace_->SetRecordRegion();
2129     oldSpace_->SetRecordRegion();
2130     snapshotSpace_->SetRecordRegion();
2131     nonMovableSpace_->SetRecordRegion();
2132     hugeObjectSpace_->SetRecordRegion();
2133     machineCodeSpace_->SetRecordRegion();
2134     hugeMachineCodeSpace_->SetRecordRegion();
2135 }
2136 
2137 void Heap::TriggerConcurrentMarking()
2138 {
2139     ASSERT(idleTask_ != IdleTaskType::INCREMENTAL_MARK);
2140     if (idleTask_ == IdleTaskType::YOUNG_GC && IsConcurrentFullMark()) {
2141         ClearIdleTask();
2142         DisableNotifyIdle();
2143     }
2144     if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
2145         concurrentMarker_->Mark();
2146     }
2147 }
2148 
2149 void Heap::WaitAllTasksFinished()
2150 {
2151     WaitRunningTaskFinished();
2152     sweeper_->EnsureAllTaskFinished();
2153     WaitClearTaskFinished();
2154     if (concurrentMarker_->IsEnabled() && thread_->IsMarking() && concurrentMarker_->IsTriggeredConcurrentMark()) {
2155         concurrentMarker_->WaitMarkingFinished();
2156     }
2157 }
2158 
2159 void Heap::WaitConcurrentMarkingFinished()
2160 {
2161     concurrentMarker_->WaitMarkingFinished();
2162 }
2163 
2164 void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
2165 {
2166     IncreaseTaskCount();
2167     Taskpool::GetCurrentTaskpool()->PostTask(
2168         std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
2169 }
2170 
2171 void Heap::ChangeGCParams(bool inBackground)
2172 {
2173     const double doubleOne = 1.0;
2174     inBackground_ = inBackground;
2175     if (inBackground) {
2176         LOG_GC(INFO) << "app is inBackground";
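        // When switching to the background, compress the heap only if it has grown noticeably since
        // the last GC, is large enough to be worth the pause, and its utilization
        // (live size / committed size) is low.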
2177         if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT &&
2178             GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2179             doubleOne * GetHeapObjectSize() / GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2180             CollectGarbage(TriggerGCType::FULL_GC, GCReason::SWITCH_BACKGROUND);
2181         }
2182         if (sHeap_->GetHeapObjectSize() - sHeap_->GetHeapAliveSizeAfterGC() > BACKGROUND_GROW_LIMIT &&
2183             sHeap_->GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2184             doubleOne * sHeap_->GetHeapObjectSize() / sHeap_->GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2185             sHeap_->CompressCollectGarbageNotWaiting<GCReason::SWITCH_BACKGROUND>(thread_);
2186         }
2187         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2188             SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2189             LOG_GC(DEBUG) << "Heap Growing Type CONSERVATIVE";
2190         }
2191         Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::BACKGROUND);
2192     } else {
2193         LOG_GC(INFO) << "app is not inBackground";
2194         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2195             SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
2196             LOG_GC(DEBUG) << "Heap Growing Type HIGH_THROUGHPUT";
2197         }
2198         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
2199         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
2200         maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
2201             Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
2202         maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
2203         Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::FOREGROUND);
2204     }
2205 }
2206 
2207 GCStats *Heap::GetEcmaGCStats()
2208 {
2209     return ecmaVm_->GetEcmaGCStats();
2210 }
2211 
2212 GCKeyStats *Heap::GetEcmaGCKeyStats()
2213 {
2214     return ecmaVm_->GetEcmaGCKeyStats();
2215 }
2216 
2217 JSObjectResizingStrategy *Heap::GetJSObjectResizingStrategy()
2218 {
2219     return ecmaVm_->GetJSObjectResizingStrategy();
2220 }
2221 
2222 void Heap::TriggerIdleCollection(int idleMicroSec)
2223 {
2224     if (idleTask_ == IdleTaskType::NO_TASK) {
2225         if (incrementalMarker_->GetCurrentTimeInMs() - idleTaskFinishTime_ > IDLE_MAINTAIN_TIME) {
2226             DisableNotifyIdle();
2227         }
2228         return;
2229     }
2230 
2231     // Incremental mark initialize and process
2232     if (idleTask_ == IdleTaskType::INCREMENTAL_MARK &&
2233         incrementalMarker_->GetIncrementalGCStates() != IncrementalGCStates::REMARK) {
2234         incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2235         if (incrementalMarker_->GetIncrementalGCStates() == IncrementalGCStates::REMARK) {
2236             CalculateIdleDuration();
2237         }
2238         return;
2239     }
2240 
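    // Run the pending task only if the offered idle slice covers the predicted pause or is at least
    // IDLE_TIME_LIMIT long; otherwise keep waiting for a longer slice.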
2241     if (idleMicroSec < idlePredictDuration_ && idleMicroSec < IDLE_TIME_LIMIT) {
2242         return;
2243     }
2244 
2245     switch (idleTask_) {
2246         case IdleTaskType::FINISH_MARKING: {
2247             if (markType_ == MarkType::MARK_FULL) {
2248                 CollectGarbage(TriggerGCType::OLD_GC, GCReason::IDLE);
2249             } else {
2250                 CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2251             }
2252             break;
2253         }
2254         case IdleTaskType::YOUNG_GC:
2255             CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2256             break;
2257         case IdleTaskType::INCREMENTAL_MARK:
2258             incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2259             break;
2260         default: // LCOV_EXCL_BR_LINE
2261             break;
2262     }
2263     ClearIdleTask();
2264 }
2265 
2266 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
2267 {
2268     if (inHighMemoryPressure) {
2269         LOG_GC(INFO) << "app is inHighMemoryPressure";
2270         SetMemGrowingType(MemGrowingType::PRESSURE);
2271     } else {
2272         LOG_GC(INFO) << "app is not inHighMemoryPressure";
2273         SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2274     }
2275 }
2276 
2277 void Heap::NotifyFinishColdStart(bool isMainThread)
2278 {
2279     if (!FinishStartupEvent()) {
2280         return;
2281     }
2282     ASSERT(!OnStartupEvent());
2283     LOG_GC(INFO) << "SmartGC: app cold start just finished";
2284 
2285     if (isMainThread && ObjectExceedJustFinishStartupThresholdForCM()) {
2286         TryTriggerConcurrentMarking();
2287     }
2288 
2289     auto startIdleMonitor = JSNApi::GetStartIdleMonitorCallback();
2290     if (startIdleMonitor != nullptr) {
2291         startIdleMonitor();
2292     }
2293 
2294     if (startupDurationInMs_ == 0) {
2295         startupDurationInMs_ = DEFAULT_STARTUP_DURATION_MS;
2296     }
2297 
2298     // restrain GC from the end of startup (default 2s) until FINISH_STARTUP_TIMEPOINT_MS (8s)
2299     uint64_t delayTimeInMs = FINISH_STARTUP_TIMEPOINT_MS - startupDurationInMs_;
2300     Taskpool::GetCurrentTaskpool()->PostDelayedTask(
2301         std::make_unique<FinishGCRestrainTask>(GetJSThread()->GetThreadId(), this),
2302         delayTimeInMs);
2303 }
2304 
2305 void Heap::NotifyFinishColdStartSoon()
2306 {
2307     if (!OnStartupEvent()) {
2308         return;
2309     }
2310 
2311     // post a delayed task to finish the cold start phase (default 2s)
2312     startupDurationInMs_ = DEFAULT_STARTUP_DURATION_MS;
2313 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
2314     startupDurationInMs_ = OHOS::system::GetUintParameter<uint64_t>("persist.ark.startupDuration",
2315                                                                     DEFAULT_STARTUP_DURATION_MS);
2316     startupDurationInMs_ = std::max(startupDurationInMs_, static_cast<uint64_t>(MIN_CONFIGURABLE_STARTUP_DURATION_MS));
2317     startupDurationInMs_ = std::min(startupDurationInMs_, static_cast<uint64_t>(MAX_CONFIGURABLE_STARTUP_DURATION_MS));
2318 #endif
2319     Taskpool::GetCurrentTaskpool()->PostDelayedTask(
2320         std::make_unique<FinishColdStartTask>(GetJSThread()->GetThreadId(), this),
2321         startupDurationInMs_);
2322 }
2323 
2324 void Heap::NotifyHighSensitive(bool isStart)
2325 {
2326     ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SmartGC: set high sensitive status: " + std::to_string(isStart));
2327     isStart ? SetSensitiveStatus(AppSensitiveStatus::ENTER_HIGH_SENSITIVE)
2328         : SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE);
2329     LOG_GC(DEBUG) << "SmartGC: set high sensitive status: " << isStart;
2330 }
2331 
2332 bool Heap::HandleExitHighSensitiveEvent()
2333 {
2334     AppSensitiveStatus status = GetSensitiveStatus();
2335     if (status == AppSensitiveStatus::EXIT_HIGH_SENSITIVE
2336         && CASSensitiveStatus(status, AppSensitiveStatus::NORMAL_SCENE) && !OnStartupEvent()) {
2337         // Reset the recorded heap object size to 0 after exiting high sensitive status
2338         SetRecordHeapObjectSizeBeforeSensitive(0);
2339         // set the overshoot size so that the GC threshold is 8MB larger than the current heap size.
2340         TryIncreaseNewSpaceOvershootByConfigSize();
2341 
2342         // fixme: IncrementalMarking and IdleCollection are currently not enabled
2343         TryTriggerIncrementalMarking();
2344         TryTriggerIdleCollection();
2345         TryTriggerConcurrentMarking();
2346         return true;
2347     }
2348     return false;
2349 }
2350 
2351 // In a high sensitive scene, the heap object size can temporarily reach MaxHeapSize - 8M; the 8M is reserved
2352 // for concurrent mark
2353 bool Heap::ObjectExceedMaxHeapSize() const
2354 {
2355     size_t configMaxHeapSize = config_.GetMaxHeapSize();
2356     size_t overshootSize = config_.GetOldSpaceStepOvershootSize();
2357     return GetHeapObjectSize() > configMaxHeapSize - overshootSize;
2358 }
2359 
2360 bool Heap::ObjectExceedHighSensitiveThresholdForCM() const
2361 {
2362     size_t recordSizeBeforeSensitive = GetRecordHeapObjectSizeBeforeSensitive();
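    // Trigger concurrent mark slightly before the sensitive-scene GC threshold itself is reached, by
    // scaling the threshold with MIN_SENSITIVE_OBJECT_SURVIVAL_RATE (assumed to be below 1).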
2363     return GetHeapObjectSize() > (recordSizeBeforeSensitive + config_.GetIncObjSizeThresholdInSensitive())
2364                                  * MIN_SENSITIVE_OBJECT_SURVIVAL_RATE;
2365 }
2366 
2367 bool Heap::ObjectExceedJustFinishStartupThresholdForGC() const
2368 {
2369     size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO;
2370     return GetHeapObjectSize() > heapObjectSizeThresholdForGC;
2371 }
2372 
2373 bool Heap::ObjectExceedJustFinishStartupThresholdForCM() const
2374 {
2375     size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO;
2376     size_t heapObjectSizeThresholdForCM = heapObjectSizeThresholdForGC
2377                                         * JUST_FINISH_STARTUP_LOCAL_CONCURRENT_MARK_RATIO;
2378     return GetHeapObjectSize() > heapObjectSizeThresholdForCM;
2379 }
2380 
2381 void Heap::TryIncreaseNewSpaceOvershootByConfigSize()
2382 {
2383     if (InGC() || !IsReadyToConcurrentMark()) {
2384         // the overshoot size is adjusted when the heap is resumed during GC, and there is
2385         // no need to reserve space for the new space if concurrent mark is already triggered
2386         return;
2387     }
2388     // a lock is needed because a conflict may occur when the main thread handles exiting the sensitive
2389     // status while a child thread handles finishing startup at the same time
2390     LockHolder lock(setNewSpaceOvershootSizeMutex_);
2391     // set the overshoot size so that the GC threshold is 8MB larger than the current heap size.
2392     int64_t initialCapacity = static_cast<int64_t>(GetNewSpace()->GetInitialCapacity());
2393     int64_t committedSize = static_cast<int64_t>(GetNewSpace()->GetCommittedSize());
2394     int64_t semiRemainSize = initialCapacity - committedSize;
2395     int64_t overshootSize =
2396         static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - semiRemainSize;
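    // Illustrative numbers: with an 8 MB overshoot config and 3 MB of remaining semi-space headroom,
    // the overshoot becomes 5 MB, leaving the next GC threshold about 8 MB above the committed size.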
2397     // the overshoot size must not be negative.
2398     GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
2399 }
2400 
2401 void Heap::TryIncreaseOvershootByConfigSize()
2402 {
2403     TryIncreaseNewSpaceOvershootByConfigSize();
2404     sHeap_->TryAdjustSpaceOvershootByConfigSize();
2405 }
2406 
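// Suppresses collection while the startup-phase thresholds have not yet been reached.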
2407 bool Heap::CheckIfNeedStopCollectionByStartup()
2408 {
2409     StartupStatus startupStatus = GetStartupStatus();
2410     switch (startupStatus) {
2411         case StartupStatus::ON_STARTUP:
2412             // During app cold start, the GC threshold is raised to the max heap size
2413             if (!ObjectExceedMaxHeapSize()) {
2414                 return true;
2415             }
2416             break;
2417         case StartupStatus::JUST_FINISH_STARTUP:
2418             // Just after app cold start finishes, the GC threshold drops to a quarter of the max heap size
2419             if (!ObjectExceedJustFinishStartupThresholdForGC()) {
2420                 return true;
2421             }
2422             break;
2423         default:
2424             break;
2425     }
2426     return false;
2427 }
2428 
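// Records the heap object size on first entering a high sensitive scene, then suppresses
// collection until allocation grows past the configured increment or nears the max heap size.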
2429 bool Heap::CheckIfNeedStopCollectionByHighSensitive()
2430 {
2431     AppSensitiveStatus sensitiveStatus = GetSensitiveStatus();
2432     if (sensitiveStatus != AppSensitiveStatus::ENTER_HIGH_SENSITIVE) {
2433         return false;
2434     }
2435 
2436     size_t objSize = GetHeapObjectSize();
2437     size_t recordSizeBeforeSensitive = GetRecordHeapObjectSizeBeforeSensitive();
2438     if (recordSizeBeforeSensitive == 0) {
2439         recordSizeBeforeSensitive = objSize;
2440         SetRecordHeapObjectSizeBeforeSensitive(recordSizeBeforeSensitive);
2441     }
2442 
2443     if (objSize < recordSizeBeforeSensitive + config_.GetIncObjSizeThresholdInSensitive()
2444         && !ObjectExceedMaxHeapSize()) {
2445         if (!IsNearGCInSensitive() &&
2446             objSize > (recordSizeBeforeSensitive + config_.GetIncObjSizeThresholdInSensitive())
2447             * MIN_SENSITIVE_OBJECT_SURVIVAL_RATE) {
2448             SetNearGCInSensitive(true);
2449         }
2450         return true;
2451     }
2452 
2453     OPTIONAL_LOG(ecmaVm_, INFO) << "SmartGC: heap obj size: " << GetHeapObjectSize()
2454         << ", exceed sensitive gc threshold";
2455     return false;
2456 }
2457 
2458 bool Heap::NeedStopCollection()
2459 {
2460     // GC is not allowed during value serialization
2461     if (onSerializeEvent_) {
2462         return true;
2463     }
2464 
2465     // Check the high sensitive status before the startup status because we still need to
2466     // record the current heap object size when high sensitive happens during startup
2467     if (CheckIfNeedStopCollectionByHighSensitive()) {
2468         return true;
2469     }
2470 
2471     if (CheckIfNeedStopCollectionByStartup()) {
2472         return true;
2473     }
2474 
2475     return false;
2476 }
2477 
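// Parallel GC worker: dispatches to the marker that matches the task phase, drains its mark
// stack, and decrements the running-task count when finished.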
2478 bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
2479 {
2480     // Synchronizes-with WorkManager::Initialize: spin until its effects are visible to this thread.
2481     ASSERT(heap_->GetWorkManager()->HasInitialized());
2482     while (!heap_->GetWorkManager()->HasInitialized());
2483     switch (taskPhase_) {
2484         case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
2485             heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
2486             break;
2487         case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
2488             heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
2489             break;
2490         case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
2491             heap_->GetConcurrentMarker()->ProcessConcurrentMarkTask(threadIndex);
2492             break;
2493         default: // LOCV_EXCL_BR_LINE
2494             LOG_GC(FATAL) << "this branch is unreachable, type: " << static_cast<int>(taskPhase_);
2495             UNREACHABLE();
2496     }
2497     heap_->ReduceTaskCount();
2498     return true;
2499 }
2500 
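// Reclaims regions freed by the last GC on a background thread.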
2501 bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
2502 {
2503     ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "AsyncClearTask::Run");
2504     heap_->ReclaimRegions(gcType_);
2505     return true;
2506 }
2507 
2508 bool Heap::FinishColdStartTask::Run([[maybe_unused]] uint32_t threadIndex)
2509 {
2510     heap_->NotifyFinishColdStart(false);
2511     return true;
2512 }
2513 
2514 bool Heap::FinishGCRestrainTask::Run([[maybe_unused]] uint32_t threadIndex)
2515 {
2516     heap_->CancelJustFinishStartupEvent();
2517     LOG_GC(INFO) << "SmartGC: app cold start finished";
2518     return true;
2519 }
2520 
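// Flushes native-pointer callbacks after GC: concurrent callbacks are posted to the task pool,
// while async callbacks are either handed to the registered async clean task callback (on the
// main thread, below the pending-size threshold) or processed inline on this thread.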
2521 void Heap::CleanCallback()
2522 {
2523     auto &concurrentCallbacks = this->GetEcmaVM()->GetConcurrentNativePointerCallbacks();
2524     if (!concurrentCallbacks.empty()) {
2525         Taskpool::GetCurrentTaskpool()->PostTask(
2526             std::make_unique<DeleteCallbackTask>(thread_->GetThreadId(), concurrentCallbacks)
2527         );
2528     }
2529     ASSERT(concurrentCallbacks.empty());
2530 
2531     AsyncNativeCallbacksPack &asyncCallbacksPack = this->GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
2532     if (asyncCallbacksPack.Empty()) {
2533         ASSERT(asyncCallbacksPack.TotallyEmpty());
2534         return;
2535     }
2536     AsyncNativeCallbacksPack *asyncCallbacks = new AsyncNativeCallbacksPack();
2537     std::swap(*asyncCallbacks, asyncCallbacksPack);
2538     NativePointerTaskCallback asyncTaskCb = thread_->GetAsyncCleanTaskCallback();
2539     if (asyncTaskCb != nullptr && thread_->IsMainThreadFast() &&
2540         pendingAsyncNativeCallbackSize_ < asyncClearNativePointerThreshold_) {
2541         IncreasePendingAsyncNativeCallbackSize(asyncCallbacks->GetTotalBindingSize());
2542         asyncCallbacks->RegisterFinishNotify([this] (size_t bindingSize) {
2543             this->DecreasePendingAsyncNativeCallbackSize(bindingSize);
2544         });
2545         asyncTaskCb(asyncCallbacks);
2546     } else {
2547         ThreadNativeScope nativeScope(thread_);
2548         asyncCallbacks->ProcessAll("ArkCompiler");
2549         delete asyncCallbacks;
2550     }
2551     ASSERT(asyncCallbacksPack.TotallyEmpty());
2552 }
2553 
2554 bool Heap::DeleteCallbackTask::Run([[maybe_unused]] uint32_t threadIndex)
2555 {
2556     for (auto iter : nativePointerCallbacks_) {
2557         if (iter.first != nullptr) {
2558             iter.first(std::get<0>(iter.second),
2559                 std::get<1>(iter.second), std::get<2>(iter.second)); // invoke with the three stored arguments
2560         }
2561     }
2562     return true;
2563 }
2564 
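// The accessors below walk the whole heap, so they first wait for concurrent sweeping to finish.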
2565 size_t Heap::GetArrayBufferSize() const
2566 {
2567     size_t result = 0;
2568     sweeper_->EnsureAllTaskFinished();
2569     this->IterateOverObjects([&result](TaggedObject *obj) {
2570         JSHClass* jsClass = obj->GetClass();
2571         result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
2572     });
2573     return result;
2574 }
2575 
2576 size_t Heap::GetLiveObjectSize() const
2577 {
2578     size_t objectSize = 0;
2579     sweeper_->EnsureAllTaskFinished();
2580     this->IterateOverObjects([&objectSize](TaggedObject *obj) {
2581         objectSize += obj->GetClass()->SizeFromJSHClass(obj);
2582     });
2583     return objectSize;
2584 }
2585 
2586 size_t Heap::GetHeapLimitSize() const
2587 {
2588     // Obtains the theoretical upper limit of space that can be allocated to the JS heap.
2589     return config_.GetMaxHeapSize();
2590 }
2591 
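// An object counts as dead if its region has been freed or the object itself has already been
// turned into a FreeObject by the sweeper.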
2592 bool BaseHeap::IsAlive(TaggedObject *object) const
2593 {
2594     if (!ContainObject(object)) {
2595         LOG_GC(ERROR) << "The region is already free";
2596         return false;
2597     }
2598 
2599     bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
2600     if (isFree) {
2601         Region *region = Region::ObjectAddressToRange(object);
2602         LOG_GC(ERROR) << "The object " << object << " in "
2603                             << region->GetSpaceTypeName()
2604                             << " is already free";
2605     }
2606     return !isFree;
2607 }
2608 
2609 bool BaseHeap::ContainObject(TaggedObject *object) const
2610 {
2611     /*
2612      * fixme: There's no absolutely safe approach to doing this, given that the region object is currently
2613      * allocated and maintained in the JS object heap. We cannot cheaply and safely tell whether a region
2614      * object calculated from an object address is still valid or alive.
2615      * This makes the containment check inaccurate, and it may introduce additional incorrect memory
2616      * access issues.
2617      * Unless we can tolerate the performance impact of iterating the region list of each space and change
2618      * the implementation to that approach, don't rely on the current implementation for an accurate result.
2619      */
2620     Region *region = Region::ObjectAddressToRange(object);
2621     return region->InHeapSpace();
2622 }
2623 
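// Post-GC bookkeeping: record the live size and fragmentation; a full GC also re-baselines the
// heap's basic loss.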
2624 void SharedHeap::UpdateHeapStatsAfterGC(TriggerGCType gcType)
2625 {
2626     heapAliveSizeAfterGC_ = GetHeapObjectSize();
2627     fragmentSizeAfterGC_ = GetCommittedSize() - GetHeapObjectSize();
2628     if (gcType == TriggerGCType::SHARED_FULL_GC) {
2629         heapBasicLoss_ = fragmentSizeAfterGC_;
2630     }
2631 }
2632 
2633 void Heap::UpdateHeapStatsAfterGC(TriggerGCType gcType)
2634 {
2635     if (gcType == TriggerGCType::YOUNG_GC) {
2636         return;
2637     }
2638     heapAliveSizeAfterGC_ = GetHeapObjectSize();
2639     heapAliveSizeExcludesYoungAfterGC_ = heapAliveSizeAfterGC_ - activeSemiSpace_->GetHeapObjectSize();
2640     fragmentSizeAfterGC_ = GetCommittedSize() - heapAliveSizeAfterGC_;
2641     if (gcType == TriggerGCType::FULL_GC) {
2642         heapBasicLoss_ = fragmentSizeAfterGC_;
2643     }
2644 }
2645 
2646 void Heap::PrintHeapInfo(TriggerGCType gcType) const
2647 {
2648     OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
2649     OPTIONAL_LOG(ecmaVm_, INFO) << "GC Reason:" << ecmaVm_->GetEcmaGCStats()->GCReasonToString()
2650                                 << ";OnStartup:" << static_cast<int>(GetStartupStatus())
2651                                 << ";OnHighSensitive:" << static_cast<int>(GetSensitiveStatus())
2652                                 << ";ConcurrentMark Status:" << static_cast<int>(thread_->GetMarkStatus());
2653     OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
2654                                 << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsConcurrentFullMark();
2655     OPTIONAL_LOG(ecmaVm_, INFO) << "), ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize() << "/"
2656                  << activeSemiSpace_->GetInitialCapacity() << "), NonMovable(" << nonMovableSpace_->GetHeapObjectSize()
2657                  << "/" << nonMovableSpace_->GetCommittedSize() << "/" << nonMovableSpace_->GetInitialCapacity()
2658                  << "), Old(" << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize() << "/"
2659                  << oldSpace_->GetInitialCapacity() << "), HugeObject(" << hugeObjectSpace_->GetHeapObjectSize() << "/"
2660                  << hugeObjectSpace_->GetCommittedSize() << "/" << hugeObjectSpace_->GetInitialCapacity()
2661                  << "), ReadOnlySpace(" << readOnlySpace_->GetCommittedSize() << "/"
2662                  << readOnlySpace_->GetInitialCapacity() << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize()
2663                  << "/" << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
2664                  << "), NativeBindingSize(" << nativeBindingSize_
2665                  << "), NativeLimitSize(" << globalSpaceNativeLimit_
2666                  << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
2667 }
2668 
2669 void Heap::StatisticHeapObject(TriggerGCType gcType) const
2670 {
2671     PrintHeapInfo(gcType);
2672 #if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
2673     StatisticHeapDetail();
2674 #endif
2675 }
2676 
2677 void Heap::StatisticHeapDetail()
2678 {
2679     Prepare();
2680     static const int JS_TYPE_SUM = static_cast<int>(JSType::TYPE_LAST) + 1;
2681     int typeCount[JS_TYPE_SUM] = { 0 };
2682     static const int MIN_COUNT_THRESHOLD = 1000;
2683 
2684     nonMovableSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
2685         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
2686     });
2687     for (int i = 0; i < JS_TYPE_SUM; i++) {
2688         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
2689             LOG_ECMA(INFO) << "NonMovable space type " << JSHClass::DumpJSType(JSType(i))
2690                            << " count:" << typeCount[i];
2691         }
2692         typeCount[i] = 0;
2693     }
2694 
2695     oldSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
2696         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
2697     });
2698     for (int i = 0; i < JS_TYPE_SUM; i++) {
2699         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
2700             LOG_ECMA(INFO) << "Old space type " << JSHClass::DumpJSType(JSType(i))
2701                            << " count:" << typeCount[i];
2702         }
2703         typeCount[i] = 0;
2704     }
2705 
2706     activeSemiSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
2707         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
2708     });
2709     for (int i = 0; i < JS_TYPE_SUM; i++) {
2710         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
2711             LOG_ECMA(INFO) << "Active semi space type " << JSHClass::DumpJSType(JSType(i))
2712                            << " count:" << typeCount[i];
2713         }
2714         typeCount[i] = 0;
2715     }
2716 }
2717 
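// Rebinds every marker and collector to the given work manager.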
2718 void Heap::UpdateWorkManager(WorkManager *workManager)
2719 {
2720     concurrentMarker_->workManager_ = workManager;
2721     fullGC_->workManager_ = workManager;
2722     incrementalMarker_->workManager_ = workManager;
2723     nonMovableMarker_->workManager_ = workManager;
2724     compressGCMarker_->workManager_ = workManager;
2725     partialGC_->workManager_ = workManager;
2726 }
2727 
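// Looks up the MachineCode object covering pc, first in the regular machine-code space and then
// in the huge machine-code space.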
2728 MachineCode *Heap::GetMachineCodeObject(uintptr_t pc) const
2729 {
2730     MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
2731     MachineCode *machineCode = reinterpret_cast<MachineCode*>(machineCodeSpace->GetMachineCodeObject(pc));
2732     if (machineCode != nullptr) {
2733         return machineCode;
2734     }
2735     HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
2736     return reinterpret_cast<MachineCode*>(hugeMachineCodeSpace->GetMachineCodeObject(pc));
2737 }
2738 
2739 std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> Heap::CalCallSiteInfo(uintptr_t retAddr) const
2740 {
2741     MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
2742     MachineCode *code = nullptr;
2743     // First search the regular machine-code space for the MachineCode object that
2744     // contains the return address; fall back to the huge machine-code space below.
2745     machineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
2746         if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
2747             return;
2748         }
2749         if (MachineCode::Cast(obj)->IsInText(retAddr)) {
2750             code = MachineCode::Cast(obj);
2751             return;
2752         }
2753     });
2754     if (code == nullptr) {
2755         HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
2756         hugeMachineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
2757             if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
2758                 return;
2759             }
2760             if (MachineCode::Cast(obj)->IsInText(retAddr)) {
2761                 code = MachineCode::Cast(obj);
2762                 return;
2763             }
2764         });
2765     }
2766 
2767     if (code == nullptr ||
2768         (code->GetPayLoadSizeInBytes() ==
2769          code->GetInstructionsSize() + code->GetStackMapOrOffsetTableSize())) { // baseline code
2770         return {};
2771     }
2772     return code->CalCallSiteInfo();
2773 }
2774 
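// Registers a listener invoked when GC finishes; the returned iterator doubles as the removal
// handle. A hypothetical caller might do:
//   GCListenerId id = heap->AddGCListener(OnGCFinish, userData);
//   ...
//   heap->RemoveGCListener(id);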
2775 GCListenerId Heap::AddGCListener(FinishGCListener listener, void *data)
2776 {
2777     gcListeners_.emplace_back(listener, data);
2778     return std::prev(gcListeners_.cend());
2779 }
2780 
2781 void Heap::ProcessGCListeners()
2782 {
2783     for (auto &&[listener, data] : gcListeners_) {
2784         listener(data);
2785     }
2786 }
2787 
2788 void SharedHeap::ProcessAllGCListeners()
2789 {
2790     Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
2791         ASSERT(!thread->IsInRunningState());
2792         const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
2793     });
2794 }
2795 
2796 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
2797 uint64_t Heap::GetCurrentTickMillseconds()
2798 {
2799     return std::chrono::duration_cast<std::chrono::milliseconds>(
2800         std::chrono::steady_clock::now().time_since_epoch()).count();
2801 }
2802 
2803 void Heap::SetJsDumpThresholds(size_t thresholds) const
2804 {
2805     if (thresholds < MIN_JSDUMP_THRESHOLDS || thresholds > MAX_JSDUMP_THRESHOLDS) {
2806         LOG_GC(INFO) << "SetJsDumpThresholds thresholds is invalid: " << thresholds;
2807         return;
2808     }
2809     g_threshold = thresholds;
2810 }
2811 
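// When leak debugging is forced, or heap usage crosses the configured percentage threshold and
// the 24-hour report interval has elapsed, takes a simplified binary heap snapshot for leak
// analysis.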
2812 void Heap::ThresholdReachedDump()
2813 {
2814     size_t limitSize = GetHeapLimitSize();
2815     if (!limitSize) {
2816         LOG_GC(INFO) << "ThresholdReachedDump limitSize is invalid";
2817         return;
2818     }
2819     size_t nowPercent = GetHeapObjectSize() * DEC_TO_INT / limitSize;
2820     if (g_debugLeak || (nowPercent >= g_threshold && (g_lastHeapDumpTime == 0 ||
2821         GetCurrentTickMillseconds() - g_lastHeapDumpTime > HEAP_DUMP_REPORT_INTERVAL))) {
2822             size_t liveObjectSize = GetLiveObjectSize();
2823             size_t nowPercentRecheck = liveObjectSize * DEC_TO_INT / limitSize;
2824             LOG_GC(INFO) << "ThresholdReachedDump nowPercentRecheck is " << nowPercentRecheck;
2825             if (nowPercentRecheck < g_threshold) {
2826                 return;
2827             }
2828             g_lastHeapDumpTime = GetCurrentTickMillseconds();
2829             base::BlockHookScope blockScope;
2830             HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
2831             AppFreezeFilterCallback appfreezeCallback = Runtime::GetInstance()->GetAppFreezeFilterCallback();
2832             if (appfreezeCallback != nullptr && appfreezeCallback(getprocpid())) {
2833                 LOG_ECMA(INFO) << "ThresholdReachedDump and avoid freeze success.";
2834             } else {
2835                 LOG_ECMA(WARN) << "ThresholdReachedDump but avoid freeze failed.";
2836             }
2837             GetEcmaGCKeyStats()->SendSysEventBeforeDump("thresholdReachedDump",
2838                                                         GetHeapLimitSize(), GetLiveObjectSize());
2839             DumpSnapShotOption dumpOption;
2840             dumpOption.dumpFormat = DumpFormat::BINARY;
2841             dumpOption.isVmMode = true;
2842             dumpOption.isPrivate = false;
2843             dumpOption.captureNumericValue = false;
2844             dumpOption.isFullGC = false;
2845             dumpOption.isSimplify = true;
2846             dumpOption.isSync = false;
2847             dumpOption.isBeforeFill = false;
2848             dumpOption.isDumpOOM = true; // reuse the OOM dump path to produce a binary dump
2849             heapProfile->DumpHeapSnapshotForOOM(dumpOption);
2850             hasOOMDump_ = false;
2851             HeapProfilerInterface::Destroy(ecmaVm_);
2852         }
2853 }
2854 #endif
2855 
2856 void Heap::RemoveGCListener(GCListenerId listenerId)
2857 {
2858     gcListeners_.erase(listenerId);
2859 }
2860 
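// Running-task bookkeeping for parallel GC workers: tasks bump the count when scheduled, and
// ReduceTaskCount wakes threads blocked in WaitRunningTaskFinished once the last task completes.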
2861 void BaseHeap::IncreaseTaskCount()
2862 {
2863     LockHolder holder(waitTaskFinishedMutex_);
2864     runningTaskCount_++;
2865 }
2866 
2867 void BaseHeap::WaitRunningTaskFinished()
2868 {
2869     LockHolder holder(waitTaskFinishedMutex_);
2870     while (runningTaskCount_ > 0) {
2871         waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
2872     }
2873 }
2874 
2875 bool BaseHeap::CheckCanDistributeTask()
2876 {
2877     LockHolder holder(waitTaskFinishedMutex_);
2878     return runningTaskCount_ < maxMarkTaskCount_;
2879 }
2880 
2881 void BaseHeap::ReduceTaskCount()
2882 {
2883     LockHolder holder(waitTaskFinishedMutex_);
2884     runningTaskCount_--;
2885     if (runningTaskCount_ == 0) {
2886         waitTaskFinishedCV_.SignalAll();
2887     }
2888 }
2889 
2890 void BaseHeap::WaitClearTaskFinished()
2891 {
2892     LockHolder holder(waitClearTaskFinishedMutex_);
2893     while (!clearTaskFinished_) {
2894         waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
2895     }
2896 }
2897 }  // namespace panda::ecmascript
2898