• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022-2024 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <chrono>
17 #include <thread>
18 
19 #include "ecmascript/base/block_hook_scope.h"
20 #include "ecmascript/checkpoint/thread_state_transition.h"
21 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
22 #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
23 #endif
24 
25 #include "ecmascript/mem/incremental_marker.h"
26 #include "ecmascript/mem/partial_gc.h"
27 #include "ecmascript/mem/parallel_evacuator.h"
28 #include "ecmascript/mem/parallel_marker-inl.h"
29 #include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
30 #include "ecmascript/mem/shared_heap/shared_gc_marker-inl.h"
31 #include "ecmascript/mem/shared_heap/shared_gc.h"
32 #include "ecmascript/mem/shared_heap/shared_full_gc.h"
33 #include "ecmascript/mem/shared_heap/shared_concurrent_marker.h"
34 #include "ecmascript/mem/stw_young_gc.h"
35 #include "ecmascript/mem/verification.h"
36 #include "ecmascript/runtime_call_id.h"
37 #include "ecmascript/jit/jit.h"
38 #include "ecmascript/ohos/ohos_params.h"
39 #if !WIN_OR_MAC_OR_IOS_PLATFORM
40 #include "ecmascript/dfx/hprof/heap_profiler_interface.h"
41 #include "ecmascript/dfx/hprof/heap_profiler.h"
42 #endif
43 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
44 #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
45 #endif
46 #include "ecmascript/dfx/tracing/tracing.h"
47 #if defined(ENABLE_DUMP_IN_FAULTLOG)
48 #include "syspara/parameter.h"
49 #endif
50 
51 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
52 #include "parameters.h"
53 #include "hisysevent.h"
54 static constexpr uint32_t DEC_TO_INT = 100;
55 static size_t g_threshold = OHOS::system::GetUintParameter<size_t>("persist.dfx.leak.threshold", 85);
56 static uint64_t g_lastHeapDumpTime = 0;
57 static bool g_debugLeak = OHOS::system::GetBoolParameter("debug.dfx.tags.enableleak", false);
58 static constexpr uint64_t HEAP_DUMP_REPORT_INTERVAL = 24 * 3600 * 1000;
59 static bool g_betaVersion = OHOS::system::GetParameter("const.logsystem.versiontype", "unknown") == "beta";
60 static bool g_developMode = (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "enable") ||
61                             (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "true");
62 #endif
63 
64 namespace panda::ecmascript {
65 SharedHeap *SharedHeap::instance_ = nullptr;
66 
CreateNewInstance()67 void SharedHeap::CreateNewInstance()
68 {
69     ASSERT(instance_ == nullptr);
70     size_t heapShared = 0;
71 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
72     heapShared = OHOS::system::GetUintParameter<size_t>("persist.ark.heap.sharedsize", 0) * 1_MB;
73 #endif
74     EcmaParamConfiguration config(EcmaParamConfiguration::HeapType::SHARED_HEAP,
75         MemMapAllocator::GetInstance()->GetCapacity(), heapShared);
76     instance_ = new SharedHeap(config);
77 }
78 
GetInstance()79 SharedHeap *SharedHeap::GetInstance()
80 {
81     ASSERT(instance_ != nullptr);
82     return instance_;
83 }
84 
DestroyInstance()85 void SharedHeap::DestroyInstance()
86 {
87     ASSERT(instance_ != nullptr);
88     instance_->Destroy();
89     delete instance_;
90     instance_ = nullptr;
91 }
92 
ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType,GCReason gcReason,JSThread * thread)93 void SharedHeap::ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread)
94 {
95     ASSERT(!dThread_->IsRunning());
96     SuspendAllScope scope(thread);
97     SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
98     RecursionScope recurScope(this, HeapType::SHARED_HEAP);
99     GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
100     if (UNLIKELY(ShouldVerifyHeap())) {
101         // pre gc heap verify
102         LOG_ECMA(DEBUG) << "pre gc shared heap verify";
103         sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
104         SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
105     }
106     switch (gcType) {
107         case TriggerGCType::SHARED_GC: {
108             sharedGC_->RunPhases();
109             break;
110         }
111         case TriggerGCType::SHARED_FULL_GC: {
112             sharedFullGC_->RunPhases();
113             break;
114         }
115         default:
116             LOG_ECMA(FATAL) << "this branch is unreachable";
117             UNREACHABLE();
118             break;
119     }
120     if (UNLIKELY(ShouldVerifyHeap())) {
121         // pre gc heap verify
122         LOG_ECMA(DEBUG) << "after gc shared heap verify";
123         SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
124     }
125     CollectGarbageFinish(false, gcType);
126 }
127 
CheckAndTriggerSharedGC(JSThread * thread)128 bool SharedHeap::CheckAndTriggerSharedGC(JSThread *thread)
129 {
130     if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
131         return false;
132     }
133     if ((OldSpaceExceedLimit() || GetHeapObjectSize() > globalSpaceAllocLimit_) &&
134         !NeedStopCollection()) {
135         CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
136         return true;
137     }
138     return false;
139 }
140 
CheckHugeAndTriggerSharedGC(JSThread * thread,size_t size)141 bool SharedHeap::CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size)
142 {
143     if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
144         return false;
145     }
146     if ((sHugeObjectSpace_->CommittedSizeExceed(size) || GetHeapObjectSize() > globalSpaceAllocLimit_) &&
147         !NeedStopCollection()) {
148         CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
149         return true;
150     }
151     return false;
152 }
153 
CollectGarbageNearOOM(JSThread * thread)154 void SharedHeap::CollectGarbageNearOOM(JSThread *thread)
155 {
156     auto fragmentationSize = sOldSpace_->GetCommittedSize() - sOldSpace_->GetHeapObjectSize();
157     if (fragmentationSize >= fragmentationLimitForSharedFullGC_) {
158         CollectGarbage<TriggerGCType::SHARED_FULL_GC,  GCReason::ALLOCATION_FAILED>(thread);
159     } else {
160         CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
161     }
162 }
163 // Shared gc trigger
AdjustGlobalSpaceAllocLimit()164 void SharedHeap::AdjustGlobalSpaceAllocLimit()
165 {
166     globalSpaceAllocLimit_ = std::max(GetHeapObjectSize() * growingFactor_,
167                                       config_.GetDefaultGlobalAllocLimit() * 2); // 2: double
168     globalSpaceAllocLimit_ = std::min(std::min(globalSpaceAllocLimit_, GetCommittedSize() + growingStep_),
169                                       config_.GetMaxHeapSize());
170     globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
171                                                           TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);
172     constexpr double OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT = 1.1;
173     size_t markLimitByIncrement = static_cast<size_t>(GetHeapObjectSize() * OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT);
174     globalSpaceConcurrentMarkLimit_ = std::max(globalSpaceConcurrentMarkLimit_, markLimitByIncrement);
175     LOG_ECMA_IF(optionalLogEnabled_, INFO) << "Shared gc adjust global space alloc limit to: "
176         << globalSpaceAllocLimit_;
177 }
178 
ObjectExceedMaxHeapSize() const179 bool SharedHeap::ObjectExceedMaxHeapSize() const
180 {
181     return OldSpaceExceedLimit() || sHugeObjectSpace_->CommittedSizeExceed();
182 }
183 
StartConcurrentMarking(TriggerGCType gcType,GCReason gcReason)184 void SharedHeap::StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason)
185 {
186     ASSERT(JSThread::GetCurrent() == dThread_);
187     sConcurrentMarker_->Mark(gcType, gcReason);
188 }
189 
CheckCanTriggerConcurrentMarking(JSThread * thread)190 bool SharedHeap::CheckCanTriggerConcurrentMarking(JSThread *thread)
191 {
192     return thread->IsReadyToSharedConcurrentMark() &&
193            sConcurrentMarker_ != nullptr && sConcurrentMarker_->IsEnabled();
194 }
195 
Initialize(NativeAreaAllocator * nativeAreaAllocator,HeapRegionAllocator * heapRegionAllocator,const JSRuntimeOptions & option,DaemonThread * dThread)196 void SharedHeap::Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
197     const JSRuntimeOptions &option, DaemonThread *dThread)
198 {
199     sGCStats_ = new SharedGCStats(this, option.EnableGCTracer());
200     nativeAreaAllocator_ = nativeAreaAllocator;
201     heapRegionAllocator_ = heapRegionAllocator;
202     shouldVerifyHeap_ = option.EnableHeapVerify();
203     parallelGC_ = option.EnableParallelGC();
204     optionalLogEnabled_ = option.EnableOptionalLog();
205     size_t maxHeapSize = config_.GetMaxHeapSize();
206     size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
207     sNonMovableSpace_ = new SharedNonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
208 
209     size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
210     size_t oldSpaceCapacity = (maxHeapSize - nonmovableSpaceCapacity - readOnlySpaceCapacity) / 2; // 2: half
211     globalSpaceAllocLimit_ = config_.GetDefaultGlobalAllocLimit();
212     globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
213                                                           TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);
214 
215     sOldSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
216     sCompressSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
217     sReadOnlySpace_ = new SharedReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
218     sHugeObjectSpace_ = new SharedHugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
219     sAppSpawnSpace_ = new SharedAppSpawnSpace(this, oldSpaceCapacity);
220     growingFactor_ = config_.GetSharedHeapLimitGrowingFactor();
221     growingStep_ = config_.GetSharedHeapLimitGrowingStep();
222     incNativeSizeTriggerSharedCM_= config_.GetStepNativeSizeInc();
223     incNativeSizeTriggerSharedGC_ = config_.GetMaxNativeSizeInc();
224     fragmentationLimitForSharedFullGC_ = config_.GetFragmentationLimitForSharedFullGC();
225     dThread_ = dThread;
226 }
227 
Destroy()228 void SharedHeap::Destroy()
229 {
230     if (sWorkManager_ != nullptr) {
231         delete sWorkManager_;
232         sWorkManager_ = nullptr;
233     }
234     if (sOldSpace_ != nullptr) {
235         sOldSpace_->Reset();
236         delete sOldSpace_;
237         sOldSpace_ = nullptr;
238     }
239     if (sCompressSpace_ != nullptr) {
240         sCompressSpace_->Reset();
241         delete sCompressSpace_;
242         sCompressSpace_ = nullptr;
243     }
244     if (sNonMovableSpace_ != nullptr) {
245         sNonMovableSpace_->Reset();
246         delete sNonMovableSpace_;
247         sNonMovableSpace_ = nullptr;
248     }
249     if (sHugeObjectSpace_ != nullptr) {
250         sHugeObjectSpace_->Destroy();
251         delete sHugeObjectSpace_;
252         sHugeObjectSpace_ = nullptr;
253     }
254     if (sReadOnlySpace_ != nullptr) {
255         sReadOnlySpace_->ClearReadOnly();
256         sReadOnlySpace_->Destroy();
257         delete sReadOnlySpace_;
258         sReadOnlySpace_ = nullptr;
259     }
260     if (sAppSpawnSpace_ != nullptr) {
261         sAppSpawnSpace_->Reset();
262         delete sAppSpawnSpace_;
263         sAppSpawnSpace_ = nullptr;
264     }
265     if (sharedGC_ != nullptr) {
266         delete sharedGC_;
267         sharedGC_ = nullptr;
268     }
269     if (sharedFullGC_ != nullptr) {
270         delete sharedFullGC_;
271         sharedFullGC_ = nullptr;
272     }
273 
274     nativeAreaAllocator_ = nullptr;
275     heapRegionAllocator_ = nullptr;
276 
277     if (sSweeper_ != nullptr) {
278         delete sSweeper_;
279         sSweeper_ = nullptr;
280     }
281     if (sConcurrentMarker_ != nullptr) {
282         delete sConcurrentMarker_;
283         sConcurrentMarker_ = nullptr;
284     }
285     if (sharedGCMarker_ != nullptr) {
286         delete sharedGCMarker_;
287         sharedGCMarker_ = nullptr;
288     }
289     if (sharedGCMovableMarker_ != nullptr) {
290         delete sharedGCMovableMarker_;
291         sharedGCMovableMarker_ = nullptr;
292     }
293     dThread_ = nullptr;
294 }
295 
PostInitialization(const GlobalEnvConstants * globalEnvConstants,const JSRuntimeOptions & option)296 void SharedHeap::PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option)
297 {
298     globalEnvConstants_ = globalEnvConstants;
299     uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
300     maxMarkTaskCount_ = totalThreadNum - 1;
301     sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
302     sharedGCMarker_ = new SharedGCMarker(sWorkManager_);
303     sharedGCMovableMarker_ = new SharedGCMovableMarker(sWorkManager_, this);
304     sConcurrentMarker_ = new SharedConcurrentMarker(option.EnableSharedConcurrentMark() ?
305         EnableConcurrentMarkType::ENABLE : EnableConcurrentMarkType::CONFIG_DISABLE);
306     sSweeper_ = new SharedConcurrentSweeper(this, option.EnableConcurrentSweep() ?
307         EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
308     sharedGC_ = new SharedGC(this);
309     sharedFullGC_ = new SharedFullGC(this);
310 }
311 
PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase)312 void SharedHeap::PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase)
313 {
314     IncreaseTaskCount();
315     Taskpool::GetCurrentTaskpool()->PostTask(std::make_unique<ParallelMarkTask>(dThread_->GetThreadId(),
316                                                                                 this, sharedTaskPhase));
317 }
318 
Run(uint32_t threadIndex)319 bool SharedHeap::ParallelMarkTask::Run(uint32_t threadIndex)
320 {
321     // Synchronizes-with. Ensure that WorkManager::Initialize must be seen by MarkerThreads.
322     while (!sHeap_->GetWorkManager()->HasInitialized());
323     switch (taskPhase_) {
324         case SharedParallelMarkPhase::SHARED_MARK_TASK:
325             sHeap_->GetSharedGCMarker()->ProcessMarkStack(threadIndex);
326             break;
327         case SharedParallelMarkPhase::SHARED_COMPRESS_TASK:
328             sHeap_->GetSharedGCMovableMarker()->ProcessMarkStack(threadIndex);
329             break;
330         default:
331             break;
332     }
333     sHeap_->ReduceTaskCount();
334     return true;
335 }
336 
Run(uint32_t threadIndex)337 bool SharedHeap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
338 {
339     sHeap_->ReclaimRegions(gcType_);
340     return true;
341 }
342 
NotifyGCCompleted()343 void SharedHeap::NotifyGCCompleted()
344 {
345     ASSERT(JSThread::GetCurrent() == dThread_);
346     LockHolder lock(waitGCFinishedMutex_);
347     gcFinished_ = true;
348     waitGCFinishedCV_.SignalAll();
349 }
350 
WaitGCFinished(JSThread * thread)351 void SharedHeap::WaitGCFinished(JSThread *thread)
352 {
353     ASSERT(thread->GetThreadId() != dThread_->GetThreadId());
354     ASSERT(thread->IsInRunningState());
355     ThreadSuspensionScope scope(thread);
356     ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::WaitGCFinished");
357     LockHolder lock(waitGCFinishedMutex_);
358     while (!gcFinished_) {
359         waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
360     }
361 }
362 
WaitGCFinishedAfterAllJSThreadEliminated()363 void SharedHeap::WaitGCFinishedAfterAllJSThreadEliminated()
364 {
365     ASSERT(Runtime::GetInstance()->vmCount_ == 0);
366     LockHolder lock(waitGCFinishedMutex_);
367     while (!gcFinished_) {
368         waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
369     }
370 }
371 
DaemonCollectGarbage(TriggerGCType gcType,GCReason gcReason)372 void SharedHeap::DaemonCollectGarbage([[maybe_unused]]TriggerGCType gcType, [[maybe_unused]]GCReason gcReason)
373 {
374     RecursionScope recurScope(this, HeapType::SHARED_HEAP);
375     ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
376     ASSERT(JSThread::GetCurrent() == dThread_);
377     {
378         ThreadManagedScope runningScope(dThread_);
379         SuspendAllScope scope(dThread_);
380         SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
381         gcType_ = gcType;
382         GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
383         if (UNLIKELY(ShouldVerifyHeap())) {
384             // pre gc heap verify
385             LOG_ECMA(DEBUG) << "pre gc shared heap verify";
386             sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
387             SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
388         }
389         switch (gcType) {
390             case TriggerGCType::SHARED_GC: {
391                 sharedGC_->RunPhases();
392                 break;
393             }
394             case TriggerGCType::SHARED_FULL_GC: {
395                 sharedFullGC_->RunPhases();
396                 break;
397             }
398             default:
399                 LOG_ECMA(FATAL) << "this branch is unreachable";
400                 UNREACHABLE();
401                 break;
402         }
403 
404         if (UNLIKELY(ShouldVerifyHeap())) {
405             // after gc heap verify
406             LOG_ECMA(DEBUG) << "after gc shared heap verify";
407             SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
408         }
409         CollectGarbageFinish(true, gcType);
410     }
411     // Don't process weak node nativeFinalizeCallback here. These callbacks would be called after localGC.
412 }
413 
WaitAllTasksFinished(JSThread * thread)414 void SharedHeap::WaitAllTasksFinished(JSThread *thread)
415 {
416     WaitGCFinished(thread);
417     sSweeper_->WaitAllTaskFinished();
418     WaitClearTaskFinished();
419 }
420 
WaitAllTasksFinishedAfterAllJSThreadEliminated()421 void SharedHeap::WaitAllTasksFinishedAfterAllJSThreadEliminated()
422 {
423     WaitGCFinishedAfterAllJSThreadEliminated();
424     sSweeper_->WaitAllTaskFinished();
425     WaitClearTaskFinished();
426 }
427 
CheckOngoingConcurrentMarking()428 bool SharedHeap::CheckOngoingConcurrentMarking()
429 {
430     if (sConcurrentMarker_->IsEnabled() && !dThread_->IsReadyToConcurrentMark() &&
431         sConcurrentMarker_->IsTriggeredConcurrentMark()) {
432         // This is only called in SharedGC to decide whether to remark, so do not need to wait marking finish here
433         return true;
434     }
435     return false;
436 }
437 
Prepare(bool inTriggerGCThread)438 void SharedHeap::Prepare(bool inTriggerGCThread)
439 {
440     WaitRunningTaskFinished();
441     if (inTriggerGCThread) {
442         sSweeper_->EnsureAllTaskFinished();
443     } else {
444         sSweeper_->WaitAllTaskFinished();
445     }
446     WaitClearTaskFinished();
447 }
448 
SharedGCScope()449 SharedHeap::SharedGCScope::SharedGCScope()
450 {
451     Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
452         std::shared_ptr<pgo::PGOProfiler> pgoProfiler =  thread->GetEcmaVM()->GetPGOProfiler();
453         if (pgoProfiler != nullptr) {
454             pgoProfiler->SuspendByGC();
455         }
456 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
457         thread->SetGcState(true);
458 #endif
459     });
460 }
461 
~SharedGCScope()462 SharedHeap::SharedGCScope::~SharedGCScope()
463 {
464     Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
465         ASSERT(!thread->IsInRunningState());
466         const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
467         std::shared_ptr<pgo::PGOProfiler> pgoProfiler =  thread->GetEcmaVM()->GetPGOProfiler();
468         if (pgoProfiler != nullptr) {
469             pgoProfiler->ResumeByGC();
470         }
471 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
472         thread->SetGcState(false);
473 #endif
474     });
475 }
476 
PrepareRecordRegionsForReclaim()477 void SharedHeap::PrepareRecordRegionsForReclaim()
478 {
479     sOldSpace_->SetRecordRegion();
480     sNonMovableSpace_->SetRecordRegion();
481     sHugeObjectSpace_->SetRecordRegion();
482 }
483 
Reclaim(TriggerGCType gcType)484 void SharedHeap::Reclaim(TriggerGCType gcType)
485 {
486     PrepareRecordRegionsForReclaim();
487     sHugeObjectSpace_->ReclaimHugeRegion();
488 
489     if (parallelGC_) {
490         clearTaskFinished_ = false;
491         Taskpool::GetCurrentTaskpool()->PostTask(
492             std::make_unique<AsyncClearTask>(dThread_->GetThreadId(), this, gcType));
493     } else {
494         ReclaimRegions(gcType);
495     }
496 }
497 
ReclaimRegions(TriggerGCType gcType)498 void SharedHeap::ReclaimRegions(TriggerGCType gcType)
499 {
500     if (gcType == TriggerGCType::SHARED_FULL_GC) {
501         sCompressSpace_->Reset();
502     }
503     sSweeper_->WaitAllTaskFinished();
504     EnumerateOldSpaceRegionsWithRecord([] (Region *region) {
505         region->ClearMarkGCBitset();
506         region->ResetAliveObject();
507     });
508     if (!clearTaskFinished_) {
509         LockHolder holder(waitClearTaskFinishedMutex_);
510         clearTaskFinished_ = true;
511         waitClearTaskFinishedCV_.SignalAll();
512     }
513 }
514 
DisableParallelGC(JSThread * thread)515 void SharedHeap::DisableParallelGC(JSThread *thread)
516 {
517     WaitAllTasksFinished(thread);
518     dThread_->WaitFinished();
519     parallelGC_ = false;
520     maxMarkTaskCount_ = 0;
521     sSweeper_->ConfigConcurrentSweep(false);
522     sConcurrentMarker_->ConfigConcurrentMark(false);
523 }
524 
EnableParallelGC(JSRuntimeOptions & option)525 void SharedHeap::EnableParallelGC(JSRuntimeOptions &option)
526 {
527     uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
528     maxMarkTaskCount_ = totalThreadNum - 1;
529     parallelGC_ = option.EnableParallelGC();
530     if (auto workThreadNum = sWorkManager_->GetTotalThreadNum();
531         workThreadNum != totalThreadNum + 1) {
532         LOG_ECMA_MEM(ERROR) << "TheadNum mismatch, totalThreadNum(sWorkerManager): " << workThreadNum << ", "
533                             << "totalThreadNum(taskpool): " << (totalThreadNum + 1);
534         delete sWorkManager_;
535         sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
536         UpdateWorkManager(sWorkManager_);
537     }
538     sConcurrentMarker_->ConfigConcurrentMark(option.EnableSharedConcurrentMark());
539     sSweeper_->ConfigConcurrentSweep(option.EnableConcurrentSweep());
540 }
541 
UpdateWorkManager(SharedGCWorkManager * sWorkManager)542 void SharedHeap::UpdateWorkManager(SharedGCWorkManager *sWorkManager)
543 {
544     sConcurrentMarker_->ResetWorkManager(sWorkManager);
545     sharedGCMarker_->ResetWorkManager(sWorkManager);
546     sharedGCMovableMarker_->ResetWorkManager(sWorkManager);
547     sharedGC_->ResetWorkManager(sWorkManager);
548     sharedFullGC_->ResetWorkManager(sWorkManager);
549 }
550 
TryTriggerLocalConcurrentMarking()551 void SharedHeap::TryTriggerLocalConcurrentMarking()
552 {
553     if (localFullMarkTriggered_) {
554         return;
555     }
556     if (reinterpret_cast<std::atomic<bool>*>(&localFullMarkTriggered_)->exchange(true, std::memory_order_relaxed)
557             != false) {
558         return;
559     }
560     ASSERT(localFullMarkTriggered_ == true);
561     Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
562         thread->SetFullMarkRequest();
563     });
564 }
565 
VerifyHeapObjects(VerifyKind verifyKind) const566 size_t SharedHeap::VerifyHeapObjects(VerifyKind verifyKind) const
567 {
568     size_t failCount = 0;
569     {
570         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
571         sOldSpace_->IterateOverObjects(verifier);
572     }
573     {
574         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
575         sNonMovableSpace_->IterateOverObjects(verifier);
576     }
577     {
578         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
579         sHugeObjectSpace_->IterateOverObjects(verifier);
580     }
581     {
582         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
583         sAppSpawnSpace_->IterateOverMarkedObjects(verifier);
584     }
585     return failCount;
586 }
587 
IsReadyToConcurrentMark() const588 bool SharedHeap::IsReadyToConcurrentMark() const
589 {
590     return dThread_->IsReadyToConcurrentMark();
591 }
592 
ObjectExceedJustFinishStartupThresholdForGC() const593 bool SharedHeap::ObjectExceedJustFinishStartupThresholdForGC() const
594 {
595     size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_SHARED_THRESHOLD_RATIO;
596     return ObjectExceedMaxHeapSize() || GetHeapObjectSize() > heapObjectSizeThresholdForGC;
597 }
598 
ObjectExceedJustFinishStartupThresholdForCM() const599 bool SharedHeap::ObjectExceedJustFinishStartupThresholdForCM() const
600 {
601     size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_SHARED_THRESHOLD_RATIO;
602     size_t heapObjectSizeThresholdForCM = heapObjectSizeThresholdForGC
603                                         * JUST_FINISH_STARTUP_SHARED_CONCURRENT_MARK_RATIO;
604     return ObjectExceedMaxHeapSize() || GetHeapObjectSize() > heapObjectSizeThresholdForCM;
605 }
606 
CheckIfNeedStopCollectionByStartup()607 bool SharedHeap::CheckIfNeedStopCollectionByStartup()
608 {
609     StartupStatus startupStatus = GetStartupStatus();
610     switch (startupStatus) {
611         case StartupStatus::ON_STARTUP:
612             if (!ObjectExceedMaxHeapSize()) {
613                 return true;
614             }
615             break;
616         case StartupStatus::JUST_FINISH_STARTUP:
617             if (!ObjectExceedJustFinishStartupThresholdForGC()) {
618                 return true;
619             }
620             break;
621         default:
622             break;
623     }
624     return false;
625 }
626 
NeedStopCollection()627 bool SharedHeap::NeedStopCollection()
628 {
629     if (CheckIfNeedStopCollectionByStartup()) {
630         return true;
631     }
632 
633     if (!InSensitiveStatus()) {
634         return false;
635     }
636 
637     if (!ObjectExceedMaxHeapSize()) {
638         return true;
639     }
640     return false;
641 }
642 
CompactHeapBeforeFork(JSThread * thread)643 void SharedHeap::CompactHeapBeforeFork(JSThread *thread)
644 {
645     ThreadManagedScope managedScope(thread);
646     WaitGCFinished(thread);
647     sharedFullGC_->SetForAppSpawn(true);
648     CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
649     sharedFullGC_->SetForAppSpawn(false);
650 }
651 
MoveOldSpaceToAppspawn()652 void SharedHeap::MoveOldSpaceToAppspawn()
653 {
654     auto committedSize = sOldSpace_->GetCommittedSize();
655     sAppSpawnSpace_->SetInitialCapacity(committedSize);
656     sAppSpawnSpace_->SetMaximumCapacity(committedSize);
657     sOldSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity() - committedSize);
658     sOldSpace_->SetMaximumCapacity(sOldSpace_->GetMaximumCapacity() - committedSize);
659 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
660     sAppSpawnSpace_->SwapAllocationCounter(sOldSpace_);
661 #endif
662     sOldSpace_->EnumerateRegions([&](Region *region) {
663         region->SetRegionSpaceFlag(RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE);
664         // Region in SharedHeap do not need PageTag threadId.
665         PageTag(region, region->GetCapacity(), PageTagType::HEAP, region->GetSpaceTypeName());
666         sAppSpawnSpace_->AddRegion(region);
667         sAppSpawnSpace_->IncreaseLiveObjectSize(region->AliveObject());
668     });
669     sOldSpace_->GetRegionList().Clear();
670     sOldSpace_->Reset();
671 }
672 
ReclaimForAppSpawn()673 void SharedHeap::ReclaimForAppSpawn()
674 {
675     sSweeper_->WaitAllTaskFinished();
676     sHugeObjectSpace_->ReclaimHugeRegion();
677     sCompressSpace_->Reset();
678     MoveOldSpaceToAppspawn();
679     auto cb = [] (Region *region) {
680         region->ClearMarkGCBitset();
681         region->ResetAliveObject();
682     };
683     sNonMovableSpace_->EnumerateRegions(cb);
684     sHugeObjectSpace_->EnumerateRegions(cb);
685 }
686 
DumpHeapSnapshotBeforeOOM(bool isFullGC,JSThread * thread)687 void SharedHeap::DumpHeapSnapshotBeforeOOM([[maybe_unused]]bool isFullGC, [[maybe_unused]]JSThread *thread)
688 {
689 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
690 #if defined(ENABLE_DUMP_IN_FAULTLOG)
691     EcmaVM *vm = thread->GetEcmaVM();
692     if (vm->GetHeapProfile() != nullptr) {
693         LOG_ECMA(ERROR) << "SharedHeap::DumpHeapSnapshotBeforeOOM, HeapProfile is nullptr";
694         return;
695     }
696     // Filter appfreeze when dump.
697     LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, isFullGC = " << isFullGC;
698     base::BlockHookScope blockScope;
699     HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(vm);
700     if (appfreezeCallback_ != nullptr && appfreezeCallback_(getprocpid())) {
701         LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, appfreezeCallback_ success. ";
702     }
703     vm->GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetEcmaParamConfiguration().GetMaxHeapSize(),
704                                                     GetHeapObjectSize());
705     DumpSnapShotOption dumpOption;
706     dumpOption.dumpFormat = DumpFormat::BINARY;
707     dumpOption.isVmMode = true;
708     dumpOption.isPrivate = false;
709     dumpOption.captureNumericValue = false;
710     dumpOption.isFullGC = isFullGC;
711     dumpOption.isSimplify = true;
712     dumpOption.isSync = true;
713     dumpOption.isBeforeFill = false;
714     dumpOption.isDumpOOM = true;
715     heapProfile->DumpHeapSnapshot(dumpOption);
716     HeapProfilerInterface::Destroy(vm);
717 #endif // ENABLE_DUMP_IN_FAULTLOG
718 #endif // ECMASCRIPT_SUPPORT_SNAPSHOT
719 }
720 
Heap(EcmaVM * ecmaVm)721 Heap::Heap(EcmaVM *ecmaVm)
722     : BaseHeap(ecmaVm->GetEcmaParamConfiguration()),
723       ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()), sHeap_(SharedHeap::GetInstance()) {}
724 
Initialize()725 void Heap::Initialize()
726 {
727     enablePageTagThreadId_ = ecmaVm_->GetJSOptions().EnablePageTagThreadId();
728     memController_ = new MemController(this);
729     nativeAreaAllocator_ = ecmaVm_->GetNativeAreaAllocator();
730     heapRegionAllocator_ = ecmaVm_->GetHeapRegionAllocator();
731     size_t maxHeapSize = config_.GetMaxHeapSize();
732     size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
733     size_t maxSemiSpaceCapacity = config_.GetMaxSemiSpaceSize();
734     size_t edenSpaceCapacity = 2_MB;
735     edenSpace_ = new EdenSpace(this, edenSpaceCapacity, edenSpaceCapacity);
736     edenSpace_->Restart();
737     activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
738     activeSemiSpace_->Restart();
739     activeSemiSpace_->SetWaterLine();
740 
741     auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
742     auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
743     thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
744     sOldTlab_ = new ThreadLocalAllocationBuffer(this);
745     thread_->ReSetSOldSpaceAllocationAddress(sOldTlab_->GetTopAddress(), sOldTlab_->GetEndAddress());
746     sNonMovableTlab_ = new ThreadLocalAllocationBuffer(this);
747     thread_->ReSetSNonMovableSpaceAllocationAddress(sNonMovableTlab_->GetTopAddress(),
748                                                     sNonMovableTlab_->GetEndAddress());
749     inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
750 
751     // whether should verify heap duration gc
752     shouldVerifyHeap_ = ecmaVm_->GetJSOptions().EnableHeapVerify();
753     // not set up from space
754 
755     size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
756     readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
757     appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
758     size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
759     if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
760         nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
761     }
762     nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
763     nonMovableSpace_->Initialize();
764     size_t snapshotSpaceCapacity = config_.GetDefaultSnapshotSpaceSize();
765     snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
766     size_t machineCodeSpaceCapacity = config_.GetDefaultMachineCodeSpaceSize();
767     machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);
768 
769     size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
770         machineCodeSpaceCapacity + readOnlySpaceCapacity;
771     if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) { // LOCV_EXCL_BR_LINE
772         LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
773     }
774     size_t oldSpaceCapacity = maxHeapSize - capacities;
775     globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
776     globalSpaceNativeLimit_ = INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT;
777     oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
778     compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
779     oldSpace_->Initialize();
780 
781     hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
782     hugeMachineCodeSpace_ = new HugeMachineCodeSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
783     maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
784     maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
785         maxEvacuateTaskCount_ - 1);
786 
787     LOG_GC(DEBUG) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
788                  << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
789                  << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
790                  << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
791                  << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
792                  << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
793                  << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
794                  << ", gcThreadNum = " << maxMarkTaskCount_;
795     parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
796     bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
797     markType_ = MarkType::MARK_YOUNG;
798 #if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
799     concurrentMarkerEnabled = false;
800 #endif
801     workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
802     stwYoungGC_ = new STWYoungGC(this, parallelGC_);
803     fullGC_ = new FullGC(this);
804 
805     partialGC_ = new PartialGC(this);
806     sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
807         EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
808     concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
809         EnableConcurrentMarkType::CONFIG_DISABLE);
810     nonMovableMarker_ = new NonMovableMarker(this);
811     semiGCMarker_ = new SemiGCMarker(this);
812     compressGCMarker_ = new CompressGCMarker(this);
813     evacuator_ = new ParallelEvacuator(this);
814     incrementalMarker_ = new IncrementalMarker(this);
815     gcListeners_.reserve(16U);
816     nativeSizeTriggerGCThreshold_ = config_.GetMaxNativeSizeInc();
817     incNativeSizeTriggerGC_ = config_.GetStepNativeSizeInc();
818     nativeSizeOvershoot_ = config_.GetNativeSizeOvershoot();
819     idleGCTrigger_ = new IdleGCTrigger(this, sHeap_, thread_, GetEcmaVM()->GetJSOptions().EnableOptionalLog());
820     asyncClearNativePointerThreshold_ = config_.GetAsyncClearNativePointerThreshold();
821 }
822 
ResetTlab()823 void Heap::ResetTlab()
824 {
825     sOldTlab_->Reset();
826     sNonMovableTlab_->Reset();
827 }
828 
FillBumpPointerForTlab()829 void Heap::FillBumpPointerForTlab()
830 {
831     sOldTlab_->FillBumpPointer();
832     sNonMovableTlab_->FillBumpPointer();
833 }
834 
ProcessSharedGCMarkingLocalBuffer()835 void Heap::ProcessSharedGCMarkingLocalBuffer()
836 {
837     if (sharedGCData_.sharedConcurrentMarkingLocalBuffer_ != nullptr) {
838         ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
839         sHeap_->GetWorkManager()->PushLocalBufferToGlobal(sharedGCData_.sharedConcurrentMarkingLocalBuffer_);
840         ASSERT(sharedGCData_.sharedConcurrentMarkingLocalBuffer_ == nullptr);
841     }
842 }
843 
ProcessSharedGCRSetWorkList()844 void Heap::ProcessSharedGCRSetWorkList()
845 {
846     if (sharedGCData_.rSetWorkListHandler_ != nullptr) {
847         ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
848         ASSERT(this == sharedGCData_.rSetWorkListHandler_->GetHeap());
849         sHeap_->GetSharedGCMarker()->ProcessThenMergeBackRSetFromBoundJSThread(sharedGCData_.rSetWorkListHandler_);
850         // The current thread may end earlier than the deamon thread.
851         // To ensure the accuracy of the state range, set true is executed on js thread and deamon thread.
852         // Reentrant does not cause exceptions because all the values are set to false.
853         thread_->SetProcessingLocalToSharedRset(false);
854         ASSERT(sharedGCData_.rSetWorkListHandler_ == nullptr);
855     }
856 }
857 
GetGlobalConst() const858 const GlobalEnvConstants *Heap::GetGlobalConst() const
859 {
860     return thread_->GlobalConstants();
861 }
862 
Destroy()863 void Heap::Destroy()
864 {
865     ProcessSharedGCRSetWorkList();
866     ProcessSharedGCMarkingLocalBuffer();
867     if (sOldTlab_ != nullptr) {
868         sOldTlab_->Reset();
869         delete sOldTlab_;
870         sOldTlab_ = nullptr;
871     }
872     if (sNonMovableTlab_!= nullptr) {
873         sNonMovableTlab_->Reset();
874         delete sNonMovableTlab_;
875         sNonMovableTlab_= nullptr;
876     }
877     if (workManager_ != nullptr) {
878         delete workManager_;
879         workManager_ = nullptr;
880     }
881     if (edenSpace_ != nullptr) {
882         edenSpace_->Destroy();
883         delete edenSpace_;
884         edenSpace_ = nullptr;
885     }
886     if (activeSemiSpace_ != nullptr) {
887         activeSemiSpace_->Destroy();
888         delete activeSemiSpace_;
889         activeSemiSpace_ = nullptr;
890     }
891     if (inactiveSemiSpace_ != nullptr) {
892         inactiveSemiSpace_->Destroy();
893         delete inactiveSemiSpace_;
894         inactiveSemiSpace_ = nullptr;
895     }
896     if (oldSpace_ != nullptr) {
897         oldSpace_->Reset();
898         delete oldSpace_;
899         oldSpace_ = nullptr;
900     }
901     if (compressSpace_ != nullptr) {
902         compressSpace_->Destroy();
903         delete compressSpace_;
904         compressSpace_ = nullptr;
905     }
906     if (nonMovableSpace_ != nullptr) {
907         nonMovableSpace_->Reset();
908         delete nonMovableSpace_;
909         nonMovableSpace_ = nullptr;
910     }
911     if (snapshotSpace_ != nullptr) {
912         snapshotSpace_->Destroy();
913         delete snapshotSpace_;
914         snapshotSpace_ = nullptr;
915     }
916     if (machineCodeSpace_ != nullptr) {
917         machineCodeSpace_->Reset();
918         delete machineCodeSpace_;
919         machineCodeSpace_ = nullptr;
920     }
921     if (hugeObjectSpace_ != nullptr) {
922         hugeObjectSpace_->Destroy();
923         delete hugeObjectSpace_;
924         hugeObjectSpace_ = nullptr;
925     }
926     if (hugeMachineCodeSpace_ != nullptr) {
927         hugeMachineCodeSpace_->Destroy();
928         delete hugeMachineCodeSpace_;
929         hugeMachineCodeSpace_ = nullptr;
930     }
931     if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
932         readOnlySpace_->ClearReadOnly();
933         readOnlySpace_->Destroy();
934         delete readOnlySpace_;
935         readOnlySpace_ = nullptr;
936     }
937     if (appSpawnSpace_ != nullptr) {
938         appSpawnSpace_->Reset();
939         delete appSpawnSpace_;
940         appSpawnSpace_ = nullptr;
941     }
942     if (stwYoungGC_ != nullptr) {
943         delete stwYoungGC_;
944         stwYoungGC_ = nullptr;
945     }
946     if (partialGC_ != nullptr) {
947         delete partialGC_;
948         partialGC_ = nullptr;
949     }
950     if (fullGC_ != nullptr) {
951         delete fullGC_;
952         fullGC_ = nullptr;
953     }
954 
955     nativeAreaAllocator_ = nullptr;
956     heapRegionAllocator_ = nullptr;
957 
958     if (memController_ != nullptr) {
959         delete memController_;
960         memController_ = nullptr;
961     }
962     if (sweeper_ != nullptr) {
963         delete sweeper_;
964         sweeper_ = nullptr;
965     }
966     if (concurrentMarker_ != nullptr) {
967         delete concurrentMarker_;
968         concurrentMarker_ = nullptr;
969     }
970     if (incrementalMarker_ != nullptr) {
971         delete incrementalMarker_;
972         incrementalMarker_ = nullptr;
973     }
974     if (nonMovableMarker_ != nullptr) {
975         delete nonMovableMarker_;
976         nonMovableMarker_ = nullptr;
977     }
978     if (semiGCMarker_ != nullptr) {
979         delete semiGCMarker_;
980         semiGCMarker_ = nullptr;
981     }
982     if (compressGCMarker_ != nullptr) {
983         delete compressGCMarker_;
984         compressGCMarker_ = nullptr;
985     }
986     if (evacuator_ != nullptr) {
987         delete evacuator_;
988         evacuator_ = nullptr;
989     }
990     if (idleGCTrigger_ != nullptr) {
991         delete idleGCTrigger_;
992         idleGCTrigger_ = nullptr;
993     }
994 }
995 
Prepare()996 void Heap::Prepare()
997 {
998     MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, HeapPrepare);
999     WaitRunningTaskFinished();
1000     sweeper_->EnsureAllTaskFinished();
1001     WaitClearTaskFinished();
1002 }
1003 
GetHeapPrepare()1004 void Heap::GetHeapPrepare()
1005 {
1006     // Ensure local and shared heap prepared.
1007     Prepare();
1008     SharedHeap *sHeap = SharedHeap::GetInstance();
1009     sHeap->Prepare(false);
1010 }
1011 
Resume(TriggerGCType gcType)1012 void Heap::Resume(TriggerGCType gcType)
1013 {
1014     if (edenSpace_->ShouldTryEnable()) {
1015         TryEnableEdenGC();
1016     }
1017     if (enableEdenGC_) {
1018         edenSpace_->ReclaimRegions(edenSpace_->GetInitialCapacity());
1019         edenSpace_->Restart();
1020         if (IsEdenMark()) {
1021             activeSemiSpace_->SetWaterLine();
1022             return;
1023         }
1024     }
1025 
1026     activeSemiSpace_->SetWaterLine();
1027 
1028     if (mode_ != HeapMode::SPAWN &&
1029         activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC(), thread_)) {
1030         // if activeSpace capacity changes, oldSpace maximumCapacity should change, too.
1031         size_t multiple = 2;
1032         size_t oldSpaceMaxLimit = 0;
1033         if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
1034             size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
1035             oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
1036         } else {
1037             size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
1038             oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
1039         }
1040         inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
1041     }
1042 
1043     PrepareRecordRegionsForReclaim();
1044     hugeObjectSpace_->ReclaimHugeRegion();
1045     hugeMachineCodeSpace_->ReclaimHugeRegion();
1046     if (parallelGC_) {
1047         if (gcType == TriggerGCType::OLD_GC) {
1048             isCSetClearing_.store(true, std::memory_order_release);
1049         }
1050         clearTaskFinished_ = false;
1051         Taskpool::GetCurrentTaskpool()->PostTask(
1052             std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
1053     } else {
1054         ReclaimRegions(gcType);
1055     }
1056 }
1057 
ResumeForAppSpawn()1058 void Heap::ResumeForAppSpawn()
1059 {
1060     sweeper_->WaitAllTaskFinished();
1061     hugeObjectSpace_->ReclaimHugeRegion();
1062     hugeMachineCodeSpace_->ReclaimHugeRegion();
1063     edenSpace_->ReclaimRegions();
1064     inactiveSemiSpace_->ReclaimRegions();
1065     oldSpace_->Reset();
1066     auto cb = [] (Region *region) {
1067         region->ClearMarkGCBitset();
1068     };
1069     nonMovableSpace_->EnumerateRegions(cb);
1070     machineCodeSpace_->EnumerateRegions(cb);
1071     hugeObjectSpace_->EnumerateRegions(cb);
1072     hugeMachineCodeSpace_->EnumerateRegions(cb);
1073 }
1074 
CompactHeapBeforeFork()1075 void Heap::CompactHeapBeforeFork()
1076 {
1077     CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
1078 }
1079 
DisableParallelGC()1080 void Heap::DisableParallelGC()
1081 {
1082     WaitAllTasksFinished();
1083     parallelGC_ = false;
1084     maxEvacuateTaskCount_ = 0;
1085     maxMarkTaskCount_ = 0;
1086     stwYoungGC_->ConfigParallelGC(false);
1087     sweeper_->ConfigConcurrentSweep(false);
1088     concurrentMarker_->ConfigConcurrentMark(false);
1089     Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
1090 }
1091 
EnableParallelGC()1092 void Heap::EnableParallelGC()
1093 {
1094     parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
1095     maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
1096     if (auto totalThreadNum = workManager_->GetTotalThreadNum();
1097         totalThreadNum != maxEvacuateTaskCount_ + 1) {
1098         LOG_ECMA_MEM(WARN) << "TheadNum mismatch, totalThreadNum(workerManager): " << totalThreadNum << ", "
1099                            << "totalThreadNum(taskpool): " << (maxEvacuateTaskCount_ + 1);
1100         delete workManager_;
1101         workManager_ = new WorkManager(this, maxEvacuateTaskCount_ + 1);
1102         UpdateWorkManager(workManager_);
1103     }
1104     ASSERT(maxEvacuateTaskCount_ > 0);
1105     maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
1106                                          maxEvacuateTaskCount_ - 1);
1107     bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
1108 #if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
1109     concurrentMarkerEnabled = false;
1110 #endif
1111     stwYoungGC_->ConfigParallelGC(parallelGC_);
1112     sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
1113     concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
1114 }
1115 
SelectGCType() const1116 TriggerGCType Heap::SelectGCType() const
1117 {
1118     if (shouldThrowOOMError_) {
1119         // Force Full GC after failed Old GC to avoid OOM
1120         return FULL_GC;
1121     }
1122 
1123     // If concurrent mark is enabled, the TryTriggerConcurrentMarking decide which GC to choose.
1124     if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark()) {
1125         return YOUNG_GC;
1126     }
1127     if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
1128         GetHeapObjectSize() <= globalSpaceAllocLimit_  + oldSpace_->GetOvershootSize() &&
1129         !GlobalNativeSizeLargerThanLimit()) {
1130         return YOUNG_GC;
1131     }
1132     return OLD_GC;
1133 }
1134 
CollectGarbage(TriggerGCType gcType,GCReason reason)1135 void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
1136 {
1137     Jit::JitGCLockHolder lock(GetEcmaVM()->GetJSThread());
1138     {
1139 #if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
1140         if (UNLIKELY(!thread_->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
1141             LOG_ECMA(FATAL) << "Local GC must be in jsthread running state";
1142             UNREACHABLE();
1143         }
1144 #endif
1145         if (thread_->IsCrossThreadExecutionEnable() || GetOnSerializeEvent()) {
1146             ProcessGCListeners();
1147             return;
1148         }
1149         RecursionScope recurScope(this, HeapType::LOCAL_HEAP);
1150 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
1151         [[maybe_unused]] GcStateScope scope(thread_);
1152 #endif
1153         CHECK_NO_GC;
1154         if (UNLIKELY(ShouldVerifyHeap())) {
1155             // pre gc heap verify
1156             LOG_ECMA(DEBUG) << "pre gc heap verify";
1157             ProcessSharedGCRSetWorkList();
1158             Verification(this, VerifyKind::VERIFY_PRE_GC).VerifyAll();
1159         }
1160 
1161 #if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
1162         gcType = TriggerGCType::FULL_GC;
1163 #endif
1164         if (fullGCRequested_ && thread_->IsReadyToConcurrentMark() && gcType != TriggerGCType::FULL_GC) {
1165             gcType = TriggerGCType::FULL_GC;
1166         }
1167         if (oldGCRequested_ && gcType != TriggerGCType::FULL_GC) {
1168             gcType = TriggerGCType::OLD_GC;
1169         }
1170         oldGCRequested_ = false;
1171         oldSpace_->AdjustOvershootSize();
1172 
1173         size_t originalNewSpaceSize = IsEdenMark() ? edenSpace_->GetHeapObjectSize() :
1174                 (activeSemiSpace_->GetHeapObjectSize() + edenSpace_->GetHeapObjectSize());
1175         if (!GetJSThread()->IsReadyToConcurrentMark() && markType_ == MarkType::MARK_FULL) {
1176             GetEcmaGCStats()->SetGCReason(reason);
1177         } else {
1178             GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, reason);
1179         }
1180         memController_->StartCalculationBeforeGC();
1181         StatisticHeapObject(gcType);
1182         gcType_ = gcType;
1183         {
1184             pgo::PGODumpPauseScope pscope(GetEcmaVM()->GetPGOProfiler());
1185             switch (gcType) {
1186                 case TriggerGCType::EDEN_GC:
1187                     if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1188                         SetMarkType(MarkType::MARK_EDEN);
1189                     }
1190                     if (markType_ == MarkType::MARK_YOUNG) {
1191                         gcType_ = TriggerGCType::YOUNG_GC;
1192                     }
1193                     if (markType_ == MarkType::MARK_FULL) {
1194                         // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
1195                         gcType_ = TriggerGCType::OLD_GC;
1196                     }
1197                     partialGC_->RunPhases();
1198                     break;
1199                 case TriggerGCType::YOUNG_GC:
1200                     // Use partial GC for young generation.
1201                     if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1202                         SetMarkType(MarkType::MARK_YOUNG);
1203                     }
1204                     if (markType_ == MarkType::MARK_FULL) {
1205                         // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
1206                         gcType_ = TriggerGCType::OLD_GC;
1207                     }
1208                     partialGC_->RunPhases();
1209                     break;
1210                 case TriggerGCType::OLD_GC: {
1211                     bool fullConcurrentMarkRequested = false;
1212                     // Check whether it's needed to trigger full concurrent mark instead of trigger old gc
1213                     if (concurrentMarker_->IsEnabled() &&
1214                         (thread_->IsReadyToConcurrentMark() || markType_ == MarkType::MARK_YOUNG) &&
1215                         reason == GCReason::ALLOCATION_LIMIT) {
1216                         fullConcurrentMarkRequested = true;
1217                     }
1218                     if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
1219                         // Wait for existing concurrent marking tasks to be finished (if any),
1220                         // and reset concurrent marker's status for full mark.
1221                         bool concurrentMark = CheckOngoingConcurrentMarking();
1222                         if (concurrentMark) {
1223                             concurrentMarker_->Reset();
1224                         }
1225                     }
1226                     SetMarkType(MarkType::MARK_FULL);
1227                     if (fullConcurrentMarkRequested && idleTask_ == IdleTaskType::NO_TASK) {
1228                         LOG_ECMA(INFO)
1229                             << "Trigger old gc here may cost long time, trigger full concurrent mark instead";
1230                         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1231                         TriggerConcurrentMarking();
1232                         oldGCRequested_ = true;
1233                         ProcessGCListeners();
1234                         return;
1235                     }
1236                     partialGC_->RunPhases();
1237                     break;
1238                 }
1239                 case TriggerGCType::FULL_GC:
1240                     fullGC_->SetForAppSpawn(false);
1241                     fullGC_->RunPhases();
1242                     if (fullGCRequested_) {
1243                         fullGCRequested_ = false;
1244                     }
1245                     break;
1246                 case TriggerGCType::APPSPAWN_FULL_GC:
1247                     fullGC_->SetForAppSpawn(true);
1248                     fullGC_->RunPhasesForAppSpawn();
1249                     break;
1250                 default:
1251                     LOG_ECMA(FATAL) << "this branch is unreachable";
1252                     UNREACHABLE();
1253                     break;
1254             }
1255             ASSERT(thread_->IsPropertyCacheCleared());
1256         }
1257         UpdateHeapStatsAfterGC(gcType_);
1258         ClearIdleTask();
1259         // Adjust the old space capacity and global limit for the first partial GC with full mark.
1260         // Trigger full mark next time if the current survival rate is much less than half the average survival rates.
1261         AdjustBySurvivalRate(originalNewSpaceSize);
1262         memController_->StopCalculationAfterGC(gcType);
1263         if (gcType == TriggerGCType::FULL_GC || IsConcurrentFullMark()) {
1264             // Only when the gc type is not semiGC and after the old space sweeping has been finished,
1265             // the limits of old space and global space can be recomputed.
1266             RecomputeLimits();
1267             ResetNativeSizeAfterLastGC();
1268             OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark" << IsConcurrentFullMark()
1269                                         << " global object size " << GetHeapObjectSize()
1270                                         << " global committed size " << GetCommittedSize()
1271                                         << " global limit " << globalSpaceAllocLimit_;
1272             markType_ = MarkType::MARK_YOUNG;
1273         }
1274         if (concurrentMarker_->IsRequestDisabled()) {
1275             concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
1276         }
1277         // GC log
1278         GetEcmaGCStats()->RecordStatisticAfterGC();
1279 #ifdef ENABLE_HISYSEVENT
1280         GetEcmaGCKeyStats()->IncGCCount();
1281         if (GetEcmaGCKeyStats()->CheckIfMainThread() && GetEcmaGCKeyStats()->CheckIfKeyPauseTime()) {
1282             GetEcmaGCKeyStats()->AddGCStatsToKey();
1283         }
1284 #endif
1285         GetEcmaGCStats()->PrintGCStatistic();
1286     }
1287 
1288     if (gcType_ == TriggerGCType::OLD_GC) {
1289         // During full concurrent mark, non movable space can have 2M overshoot size temporarily, which means non
1290         // movable space max heap size can reach to 18M temporarily, but after partial old gc, the size must retract to
1291         // below 16M, Otherwise, old GC will be triggered frequently. Non-concurrent mark period, non movable space max
1292         // heap size is 16M, if exceeded, an OOM exception will be thrown, this check is to do this.
1293         CheckNonMovableSpaceOOM();
1294     }
1295     // OOMError object is not allowed to be allocated during gc process, so throw OOMError after gc
1296     if (shouldThrowOOMError_ && gcType_ == TriggerGCType::FULL_GC) {
1297         sweeper_->EnsureAllTaskFinished();
1298         oldSpace_->ResetCommittedOverSizeLimit();
1299         if (oldSpace_->CommittedSizeExceed()) {
1300             DumpHeapSnapshotBeforeOOM(false);
1301             StatisticHeapDetail();
1302             ThrowOutOfMemoryError(thread_, oldSpace_->GetMergeSize(), " OldSpace::Merge");
1303         }
1304         oldSpace_->ResetMergeSize();
1305         shouldThrowOOMError_ = false;
1306     }
1307     // Update record heap object size after gc if in sensitive status
1308     if (GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE) {
1309         SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
1310     }
1311 
1312     if (UNLIKELY(ShouldVerifyHeap())) {
1313         // verify post gc heap verify
1314         LOG_ECMA(DEBUG) << "post gc heap verify";
1315         Verification(this, VerifyKind::VERIFY_POST_GC).VerifyAll();
1316     }
1317 
1318     // Weak node nativeFinalizeCallback may execute JS and change the weakNodeList status,
1319     // even lead to another GC, so this have to invoke after this GC process.
1320     thread_->InvokeWeakNodeNativeFinalizeCallback();
1321     // PostTask for ProcessNativeDelete
1322     CleanCallBack();
1323 
1324     JSFinalizationRegistry::CheckAndCall(thread_);
1325 #if defined(ECMASCRIPT_SUPPORT_TRACING)
1326     auto tracing = GetEcmaVM()->GetTracing();
1327     if (tracing != nullptr) {
1328         tracing->TraceEventRecordMemory();
1329     }
1330 #endif
1331     ProcessGCListeners();
1332 
1333 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1334     if (!hasOOMDump_ && (g_betaVersion || g_developMode)) {
1335         ThresholdReachedDump();
1336     }
1337 #endif
1338 
1339     if (GetEcmaVM()->IsEnableBaselineJit() || GetEcmaVM()->IsEnableFastJit()) {
1340         // check machine code space if enough
1341         int remainSize = static_cast<int>(config_.GetDefaultMachineCodeSpaceSize()) -
1342             static_cast<int>(GetMachineCodeSpace()->GetHeapObjectSize());
1343         Jit::GetInstance()->CheckMechineCodeSpaceMemory(GetEcmaVM()->GetJSThread(), remainSize);
1344     }
1345 }
1346 
ThrowOutOfMemoryError(JSThread * thread,size_t size,std::string functionName,bool NonMovableObjNearOOM)1347 void BaseHeap::ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
1348     bool NonMovableObjNearOOM)
1349 {
1350     GetEcmaGCStats()->PrintGCMemoryStatistic();
1351     std::ostringstream oss;
1352     if (NonMovableObjNearOOM) {
1353         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1354             << " function name: " << functionName.c_str();
1355     } else {
1356         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1357             << functionName.c_str();
1358     }
1359     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1360     THROW_OOM_ERROR(thread, oss.str().c_str());
1361 }
1362 
SetMachineCodeOutOfMemoryError(JSThread * thread,size_t size,std::string functionName)1363 void BaseHeap::SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName)
1364 {
1365     std::ostringstream oss;
1366     oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1367         << functionName.c_str();
1368     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1369 
1370     EcmaVM *ecmaVm = thread->GetEcmaVM();
1371     ObjectFactory *factory = ecmaVm->GetFactory();
1372     JSHandle<JSObject> error = factory->GetJSError(ErrorType::OOM_ERROR, oss.str().c_str(), StackCheck::NO);
1373     thread->SetException(error.GetTaggedValue());
1374 }
1375 
SetAppFreezeFilterCallback(AppFreezeFilterCallback cb)1376 void BaseHeap::SetAppFreezeFilterCallback(AppFreezeFilterCallback cb)
1377 {
1378     if (cb != nullptr) {
1379         appfreezeCallback_ = cb;
1380     }
1381 }
1382 
ThrowOutOfMemoryErrorForDefault(JSThread * thread,size_t size,std::string functionName,bool NonMovableObjNearOOM)1383 void BaseHeap::ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
1384     bool NonMovableObjNearOOM)
1385 {
1386     GetEcmaGCStats()->PrintGCMemoryStatistic();
1387     std::ostringstream oss;
1388     if (NonMovableObjNearOOM) {
1389         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1390             << " function name: " << functionName.c_str();
1391     } else {
1392         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
1393     }
1394     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1395     EcmaVM *ecmaVm = thread->GetEcmaVM();
1396     JSHandle<GlobalEnv> env = ecmaVm->GetGlobalEnv();
1397     JSHandle<JSObject> error = JSHandle<JSObject>::Cast(env->GetOOMErrorObject());
1398 
1399     thread->SetException(error.GetTaggedValue());
1400     ecmaVm->HandleUncatchableError();
1401 }
1402 
FatalOutOfMemoryError(size_t size,std::string functionName)1403 void BaseHeap::FatalOutOfMemoryError(size_t size, std::string functionName)
1404 {
1405     GetEcmaGCStats()->PrintGCMemoryStatistic();
1406     LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
1407                         << " function name: " << functionName.c_str();
1408 }
1409 
CheckNonMovableSpaceOOM()1410 void Heap::CheckNonMovableSpaceOOM()
1411 {
1412     if (nonMovableSpace_->GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE) {
1413         sweeper_->EnsureAllTaskFinished();
1414         DumpHeapSnapshotBeforeOOM(false);
1415         StatisticHeapDetail();
1416         ThrowOutOfMemoryError(thread_, nonMovableSpace_->GetHeapObjectSize(), "Heap::CheckNonMovableSpaceOOM", true);
1417     }
1418 }
1419 
AdjustBySurvivalRate(size_t originalNewSpaceSize)1420 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
1421 {
1422     promotedSize_ = GetEvacuator()->GetPromotedSize();
1423     edenToYoungSize_ = GetEvacuator()->GetEdenToYoungSize();
1424     if (originalNewSpaceSize <= 0) {
1425         return;
1426     }
1427     semiSpaceCopiedSize_ = IsEdenMark() ? edenToYoungSize_ : activeSemiSpace_->GetHeapObjectSize();
1428     double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
1429     double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
1430     double survivalRate = std::min(copiedRate + promotedRate, 1.0);
1431     OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
1432                                 << " survivalRate: " << survivalRate;
1433     if (IsEdenMark()) {
1434         memController_->AddEdenSurvivalRate(survivalRate);
1435         return;
1436     }
1437     if (!oldSpaceLimitAdjusted_) {
1438         memController_->AddSurvivalRate(survivalRate);
1439         AdjustOldSpaceLimit();
1440     } else {
1441         double averageSurvivalRate = memController_->GetAverageSurvivalRate();
1442         // 2 means half
1443         if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
1444             SetFullMarkRequestedState(true);
1445             OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
1446                 << " is less than half the average survival rates: " << averageSurvivalRate
1447                 << ". Trigger full mark next time.";
1448             // Survival rate of full mark is precise. Reset recorded survival rates.
1449             memController_->ResetRecordedSurvivalRates();
1450         }
1451         memController_->AddSurvivalRate(survivalRate);
1452     }
1453 }
1454 
VerifyHeapObjects(VerifyKind verifyKind) const1455 size_t Heap::VerifyHeapObjects(VerifyKind verifyKind) const
1456 {
1457     size_t failCount = 0;
1458     {
1459         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1460         activeSemiSpace_->IterateOverObjects(verifier);
1461     }
1462 
1463     {
1464         if (verifyKind == VerifyKind::VERIFY_EVACUATE_YOUNG ||
1465             verifyKind == VerifyKind::VERIFY_EVACUATE_OLD ||
1466             verifyKind == VerifyKind::VERIFY_EVACUATE_FULL) {
1467                 inactiveSemiSpace_->EnumerateRegions([this](Region *region) {
1468                     region->IterateAllMarkedBits([this](void *addr) {
1469                         VerifyObjectVisitor::VerifyInactiveSemiSpaceMarkedObject(this, addr);
1470                     });
1471                 });
1472             }
1473     }
1474 
1475     {
1476         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1477         oldSpace_->IterateOverObjects(verifier);
1478     }
1479 
1480     {
1481         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1482         appSpawnSpace_->IterateOverMarkedObjects(verifier);
1483     }
1484 
1485     {
1486         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1487         nonMovableSpace_->IterateOverObjects(verifier);
1488     }
1489 
1490     {
1491         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1492         hugeObjectSpace_->IterateOverObjects(verifier);
1493     }
1494     {
1495         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1496         hugeMachineCodeSpace_->IterateOverObjects(verifier);
1497     }
1498     {
1499         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1500         machineCodeSpace_->IterateOverObjects(verifier);
1501     }
1502     {
1503         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1504         snapshotSpace_->IterateOverObjects(verifier);
1505     }
1506     return failCount;
1507 }
1508 
VerifyOldToNewRSet(VerifyKind verifyKind) const1509 size_t Heap::VerifyOldToNewRSet(VerifyKind verifyKind) const
1510 {
1511     size_t failCount = 0;
1512     VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1513     oldSpace_->IterateOldToNewOverObjects(verifier);
1514     appSpawnSpace_->IterateOldToNewOverObjects(verifier);
1515     nonMovableSpace_->IterateOldToNewOverObjects(verifier);
1516     machineCodeSpace_->IterateOldToNewOverObjects(verifier);
1517     return failCount;
1518 }
1519 
AdjustOldSpaceLimit()1520 void Heap::AdjustOldSpaceLimit()
1521 {
1522     if (oldSpaceLimitAdjusted_) {
1523         return;
1524     }
1525     size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
1526     size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
1527     size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
1528         static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
1529     if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
1530         GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
1531     } else {
1532         oldSpaceLimitAdjusted_ = true;
1533     }
1534 
1535     size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
1536         static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
1537     if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
1538         globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
1539     }
1540     OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
1541         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
1542 }
1543 
OnAllocateEvent(EcmaVM * ecmaVm,TaggedObject * address,size_t size)1544 void BaseHeap::OnAllocateEvent([[maybe_unused]] EcmaVM *ecmaVm, [[maybe_unused]] TaggedObject* address,
1545                                [[maybe_unused]] size_t size)
1546 {
1547 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1548     HeapProfilerInterface *profiler = ecmaVm->GetHeapProfile();
1549     if (profiler != nullptr) {
1550         base::BlockHookScope blockScope;
1551         profiler->AllocationEvent(address, size);
1552     }
1553 #endif
1554 }
1555 
DumpHeapSnapshotBeforeOOM(bool isFullGC)1556 void Heap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] bool isFullGC)
1557 {
1558 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
1559 #if defined(ENABLE_DUMP_IN_FAULTLOG)
1560     if (ecmaVm_->GetHeapProfile() != nullptr) {
1561         LOG_ECMA(ERROR) << "Heap::DumpHeapSnapshotBeforeOOM, HeapProfile is nullptr";
1562         return;
1563     }
1564     // Filter appfreeze when dump.
1565     LOG_ECMA(INFO) << " Heap::DumpHeapSnapshotBeforeOOM, isFullGC = " << isFullGC;
1566     base::BlockHookScope blockScope;
1567     HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
1568     if (appfreezeCallback_ != nullptr && appfreezeCallback_(getprocpid())) {
1569         LOG_ECMA(INFO) << "Heap::DumpHeapSnapshotBeforeOOM, appfreezeCallback_ success. ";
1570     }
1571 #ifdef ENABLE_HISYSEVENT
1572     GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetHeapLimitSize(), GetLiveObjectSize());
1573     hasOOMDump_ = true;
1574 #endif
1575     // Vm should always allocate young space successfully. Really OOM will occur in the non-young spaces.
1576     DumpSnapShotOption dumpOption;
1577     dumpOption.dumpFormat = DumpFormat::BINARY;
1578     dumpOption.isVmMode = true;
1579     dumpOption.isPrivate = false;
1580     dumpOption.captureNumericValue = false;
1581     dumpOption.isFullGC = isFullGC;
1582     dumpOption.isSimplify = true;
1583     dumpOption.isSync = true;
1584     dumpOption.isBeforeFill = false;
1585     dumpOption.isDumpOOM = true;
1586     heapProfile->DumpHeapSnapshot(dumpOption);
1587     HeapProfilerInterface::Destroy(ecmaVm_);
1588 #endif // ENABLE_DUMP_IN_FAULTLOG
1589 #endif // ECMASCRIPT_SUPPORT_SNAPSHOT
1590 }
1591 
OnMoveEvent(uintptr_t address,TaggedObject * forwardAddress,size_t size)1592 void Heap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
1593                        [[maybe_unused]] size_t size)
1594 {
1595 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1596     HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
1597     if (profiler != nullptr) {
1598         base::BlockHookScope blockScope;
1599         profiler->MoveEvent(address, forwardAddress, size);
1600     }
1601 #endif
1602 }
1603 
AdjustSpaceSizeForAppSpawn()1604 void Heap::AdjustSpaceSizeForAppSpawn()
1605 {
1606     SetHeapMode(HeapMode::SPAWN);
1607     size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
1608     activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
1609     auto committedSize = appSpawnSpace_->GetCommittedSize();
1610     appSpawnSpace_->SetInitialCapacity(committedSize);
1611     appSpawnSpace_->SetMaximumCapacity(committedSize);
1612     oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
1613     oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
1614 }
1615 
ShouldMoveToRoSpace(JSHClass * hclass,TaggedObject * object)1616 bool Heap::ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object)
1617 {
1618     return hclass->IsString() && !Region::ObjectAddressToRange(object)->InHugeObjectSpace();
1619 }
1620 
AddAllocationInspectorToAllSpaces(AllocationInspector * inspector)1621 void Heap::AddAllocationInspectorToAllSpaces(AllocationInspector *inspector)
1622 {
1623     ASSERT(inspector != nullptr);
1624     // activeSemiSpace_/inactiveSemiSpace_:
1625     // only add an inspector to activeSemiSpace_, and while sweeping for gc, inspector need be swept.
1626     activeSemiSpace_->AddAllocationInspector(inspector);
1627     // oldSpace_/compressSpace_:
1628     // only add an inspector to oldSpace_, and while sweeping for gc, inspector need be swept.
1629     oldSpace_->AddAllocationInspector(inspector);
1630     // readOnlySpace_ need not allocationInspector.
1631     // appSpawnSpace_ need not allocationInspector.
1632     nonMovableSpace_->AddAllocationInspector(inspector);
1633     machineCodeSpace_->AddAllocationInspector(inspector);
1634     hugeObjectSpace_->AddAllocationInspector(inspector);
1635     hugeMachineCodeSpace_->AddAllocationInspector(inspector);
1636 }
1637 
ClearAllocationInspectorFromAllSpaces()1638 void Heap::ClearAllocationInspectorFromAllSpaces()
1639 {
1640     edenSpace_->ClearAllocationInspector();
1641     activeSemiSpace_->ClearAllocationInspector();
1642     oldSpace_->ClearAllocationInspector();
1643     nonMovableSpace_->ClearAllocationInspector();
1644     machineCodeSpace_->ClearAllocationInspector();
1645     hugeObjectSpace_->ClearAllocationInspector();
1646     hugeMachineCodeSpace_->ClearAllocationInspector();
1647 }
1648 
RecomputeLimits()1649 void Heap::RecomputeLimits()
1650 {
1651     double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
1652     double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
1653     size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1654         hugeMachineCodeSpace_->GetHeapObjectSize();
1655     size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
1656 
1657     double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
1658     size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
1659     size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
1660         maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
1661     size_t maxGlobalSize = config_.GetMaxHeapSize() - newSpaceCapacity;
1662     size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
1663                                                                      maxGlobalSize, newSpaceCapacity, growingFactor);
1664     globalSpaceAllocLimit_ = newGlobalSpaceLimit;
1665     oldSpace_->SetInitialCapacity(newOldSpaceLimit);
1666     globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(GetGlobalNativeSize(), MIN_HEAP_SIZE,
1667                                                                   MAX_GLOBAL_NATIVE_LIMIT, newSpaceCapacity,
1668                                                                   growingFactor);
1669     globalSpaceNativeLimit_ = std::max(globalSpaceNativeLimit_, GetGlobalNativeSize()
1670                                         + config_.GetMinNativeLimitGrowingStep());
1671     OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
1672         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
1673         << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
1674     if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize() &&
1675         (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) { // 2: means half
1676         OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
1677                                     << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
1678                                     << " Committed Size: " << oldSpace_->GetCommittedSize();
1679         SetFullMarkRequestedState(true);
1680     }
1681 }
1682 
CheckAndTriggerOldGC(size_t size)1683 bool Heap::CheckAndTriggerOldGC(size_t size)
1684 {
1685     bool isFullMarking = IsConcurrentFullMark() && GetJSThread()->IsMarking();
1686     bool isNativeSizeLargeTrigger = isFullMarking ? false : GlobalNativeSizeLargerThanLimit();
1687     if (isFullMarking && oldSpace_->GetOvershootSize() == 0) {
1688         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1689     }
1690     if ((isNativeSizeLargeTrigger || OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) ||
1691         GetHeapObjectSize() > globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize()) &&
1692         !NeedStopCollection()) {
1693         if (isFullMarking && oldSpace_->GetOvershootSize() < config_.GetOldSpaceMaxOvershootSize()) {
1694             oldSpace_->IncreaseOvershootSize(config_.GetOldSpaceStepOvershootSize());
1695             return false;
1696         }
1697         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
1698         if (!oldGCRequested_) {
1699             return true;
1700         }
1701     }
1702     return false;
1703 }
1704 
CheckAndTriggerHintGC()1705 bool Heap::CheckAndTriggerHintGC()
1706 {
1707     if (IsInBackground()) {
1708         CollectGarbage(TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER);
1709         return true;
1710     }
1711     if (InSensitiveStatus()) {
1712         return false;
1713     }
1714     if (memController_->GetPredictedSurvivalRate() < SURVIVAL_RATE_THRESHOLD) {
1715         CollectGarbage(TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER);
1716         return true;
1717     }
1718     return false;
1719 }
1720 
CheckOngoingConcurrentMarking()1721 bool Heap::CheckOngoingConcurrentMarking()
1722 {
1723     if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark() &&
1724         concurrentMarker_->IsTriggeredConcurrentMark()) {
1725         TRACE_GC(GCStats::Scope::ScopeId::WaitConcurrentMarkFinished, GetEcmaVM()->GetEcmaGCStats());
1726         if (thread_->IsMarking()) {
1727             ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
1728             MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, WaitConcurrentMarkingFinished);
1729             GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
1730             WaitConcurrentMarkingFinished();
1731         }
1732         WaitRunningTaskFinished();
1733         memController_->RecordAfterConcurrentMark(markType_, concurrentMarker_);
1734         return true;
1735     }
1736     return false;
1737 }
1738 
ClearIdleTask()1739 void Heap::ClearIdleTask()
1740 {
1741     SetIdleTask(IdleTaskType::NO_TASK);
1742     idleTaskFinishTime_ = incrementalMarker_->GetCurrentTimeInMs();
1743 }
1744 
TryTriggerIdleCollection()1745 void Heap::TryTriggerIdleCollection()
1746 {
1747     if (idleTask_ != IdleTaskType::NO_TASK || !GetJSThread()->IsReadyToConcurrentMark() || !enableIdleGC_) {
1748         return;
1749     }
1750     if (thread_->IsMarkFinished() && concurrentMarker_->IsTriggeredConcurrentMark()) {
1751         SetIdleTask(IdleTaskType::FINISH_MARKING);
1752         EnableNotifyIdle();
1753         CalculateIdleDuration();
1754         return;
1755     }
1756 
1757     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1758     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1759     double newSpaceAllocToLimitDuration = (static_cast<double>(activeSemiSpace_->GetInitialCapacity()) -
1760                                            static_cast<double>(activeSemiSpace_->GetCommittedSize())) /
1761                                            newSpaceAllocSpeed;
1762     double newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1763     double newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
1764     // 2 means double
1765     if (newSpaceRemainSize < 2 * DEFAULT_REGION_SIZE) {
1766         SetIdleTask(IdleTaskType::YOUNG_GC);
1767         SetMarkType(MarkType::MARK_YOUNG);
1768         EnableNotifyIdle();
1769         CalculateIdleDuration();
1770         return;
1771     }
1772 }
1773 
CalculateIdleDuration()1774 void Heap::CalculateIdleDuration()
1775 {
1776     size_t updateReferenceSpeed = 0;
1777     // clear native object duration
1778     size_t clearNativeObjSpeed = 0;
1779     if (markType_ == MarkType::MARK_EDEN) {
1780         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_UPDATE_REFERENCE_SPEED);
1781         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_CLEAR_NATIVE_OBJ_SPEED);
1782     } else if (markType_ == MarkType::MARK_YOUNG) {
1783         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_UPDATE_REFERENCE_SPEED);
1784         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_CLEAR_NATIVE_OBJ_SPEED);
1785     } else if (markType_ == MarkType::MARK_FULL) {
1786         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::UPDATE_REFERENCE_SPEED);
1787         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_CLEAR_NATIVE_OBJ_SPEED);
1788     }
1789 
1790     // update reference duration
1791     idlePredictDuration_ = 0.0f;
1792     if (updateReferenceSpeed != 0) {
1793         idlePredictDuration_ += (float)GetHeapObjectSize() / updateReferenceSpeed;
1794     }
1795 
1796     if (clearNativeObjSpeed != 0) {
1797         idlePredictDuration_ += (float)GetNativePointerListSize() / clearNativeObjSpeed;
1798     }
1799 
1800     // sweep and evacuate duration
1801     size_t edenEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_EVACUATE_SPACE_SPEED);
1802     size_t youngEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_EVACUATE_SPACE_SPEED);
1803     double survivalRate = GetEcmaGCStats()->GetAvgSurvivalRate();
1804     if (markType_ == MarkType::MARK_EDEN && edenEvacuateSpeed != 0) {
1805         idlePredictDuration_ += survivalRate * edenSpace_->GetHeapObjectSize() / edenEvacuateSpeed;
1806     } else if (markType_ == MarkType::MARK_YOUNG && youngEvacuateSpeed != 0) {
1807         idlePredictDuration_ += (activeSemiSpace_->GetHeapObjectSize() + edenSpace_->GetHeapObjectSize()) *
1808             survivalRate / youngEvacuateSpeed;
1809     } else if (markType_ == MarkType::MARK_FULL) {
1810         size_t sweepSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::SWEEP_SPEED);
1811         size_t oldEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_EVACUATE_SPACE_SPEED);
1812         if (sweepSpeed != 0) {
1813             idlePredictDuration_ += (float)GetHeapObjectSize() / sweepSpeed;
1814         }
1815         if (oldEvacuateSpeed != 0) {
1816             size_t collectRegionSetSize = GetEcmaGCStats()->GetRecordData(
1817                 RecordData::COLLECT_REGION_SET_SIZE);
1818             idlePredictDuration_ += (survivalRate * activeSemiSpace_->GetHeapObjectSize() + collectRegionSetSize) /
1819                                     oldEvacuateSpeed;
1820         }
1821     }
1822 
1823     // Idle YoungGC mark duration
1824     size_t markSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::MARK_SPEED);
1825     if (idleTask_ == IdleTaskType::YOUNG_GC && markSpeed != 0) {
1826         idlePredictDuration_ += (float)activeSemiSpace_->GetHeapObjectSize() / markSpeed;
1827     }
1828     OPTIONAL_LOG(ecmaVm_, INFO) << "Predict idle gc pause: " << idlePredictDuration_ << "ms";
1829 }
1830 
TryTriggerIncrementalMarking()1831 void Heap::TryTriggerIncrementalMarking()
1832 {
1833     if (!GetJSThread()->IsReadyToConcurrentMark() || idleTask_ != IdleTaskType::NO_TASK || !enableIdleGC_) {
1834         return;
1835     }
1836     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1837     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1838         hugeMachineCodeSpace_->GetHeapObjectSize();
1839     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1840     double oldSpaceIncrementalMarkSpeed = incrementalMarker_->GetAverageIncrementalMarkingSpeed();
1841     double oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1842     double oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceIncrementalMarkSpeed;
1843 
1844     double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1845     // mark finished before allocate limit
1846     if ((oldSpaceRemainSize < DEFAULT_REGION_SIZE) || GetHeapObjectSize() >= globalSpaceAllocLimit_) {
1847         // The object allocated in incremental marking should lower than limit,
1848         // otherwise select trigger concurrent mark.
1849         size_t allocateSize = oldSpaceAllocSpeed * oldSpaceMarkDuration;
1850         if (allocateSize < ALLOCATE_SIZE_LIMIT) {
1851             EnableNotifyIdle();
1852             SetIdleTask(IdleTaskType::INCREMENTAL_MARK);
1853         }
1854     }
1855 }
1856 
CheckCanTriggerConcurrentMarking()1857 bool Heap::CheckCanTriggerConcurrentMarking()
1858 {
1859     return concurrentMarker_->IsEnabled() && thread_->IsReadyToConcurrentMark() &&
1860         !incrementalMarker_->IsTriggeredIncrementalMark() &&
1861         (idleTask_ == IdleTaskType::NO_TASK || idleTask_ == IdleTaskType::YOUNG_GC);
1862 }
1863 
TryTriggerConcurrentMarking()1864 void Heap::TryTriggerConcurrentMarking()
1865 {
1866     // When concurrent marking is enabled, concurrent marking will be attempted to trigger.
1867     // When the size of old space or global space reaches the limit, isFullMarkNeeded will be set to true.
1868     // If the predicted duration of current full mark may not result in the new and old spaces reaching their limit,
1869     // full mark will be triggered.
1870     // In the same way, if the size of the new space reaches the capacity, and the predicted duration of current
1871     // young mark may not result in the new space reaching its limit, young mark can be triggered.
1872     // If it spends much time in full mark, the compress full GC will be requested when the spaces reach the limit.
1873     // If the global space is larger than half max heap size, we will turn to use full mark and trigger partial GC.
1874     if (!CheckCanTriggerConcurrentMarking()) {
1875         return;
1876     }
1877     if (fullMarkRequested_) {
1878         markType_ = MarkType::MARK_FULL;
1879         OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
1880         TriggerConcurrentMarking();
1881         return;
1882     }
1883     if (IsJustFinishStartup() && !ObjectExceedJustFinishStartupThresholdForCM()) {
1884         return;
1885     }
1886 
1887     double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
1888            oldSpaceAllocToLimitDuration = 0;
1889     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1890     double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
1891     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1892         hugeMachineCodeSpace_->GetHeapObjectSize();
1893     size_t globalHeapObjectSize = GetHeapObjectSize();
1894     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1895     if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
1896         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1897             GlobalNativeSizeLargerThanLimit()) {
1898             markType_ = MarkType::MARK_FULL;
1899             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
1900             TriggerConcurrentMarking();
1901             return;
1902         }
1903     } else {
1904         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1905             GlobalNativeSizeLargerThanLimit()) {
1906             markType_ = MarkType::MARK_FULL;
1907             TriggerConcurrentMarking();
1908             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1909             return;
1910         }
1911         oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1912         oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
1913         // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
1914         double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1915         if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
1916             markType_ = MarkType::MARK_FULL;
1917             TriggerConcurrentMarking();
1918             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1919             return;
1920         }
1921     }
1922 
1923     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1924     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1925     if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
1926         if (activeSemiSpace_->GetCommittedSize() >= config_.GetSemiSpaceTriggerConcurrentMark()) {
1927             markType_ = MarkType::MARK_YOUNG;
1928             TriggerConcurrentMarking();
1929             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark" << fullGCRequested_;
1930         }
1931         return;
1932     }
1933     size_t semiSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
1934     size_t semiSpaceCommittedSize = activeSemiSpace_->GetCommittedSize();
1935     bool triggerMark = semiSpaceCapacity <= semiSpaceCommittedSize;
1936     if (!triggerMark) {
1937         newSpaceAllocToLimitDuration = (semiSpaceCapacity - semiSpaceCommittedSize) / newSpaceAllocSpeed;
1938         newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1939         // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
1940         newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
1941         triggerMark = newSpaceRemainSize < DEFAULT_REGION_SIZE;
1942     }
1943 
1944     if (triggerMark) {
1945         markType_ = MarkType::MARK_YOUNG;
1946         TriggerConcurrentMarking();
1947         OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
1948         return;
1949     }
1950 
1951     if (!enableEdenGC_ || IsInBackground()) {
1952         return;
1953     }
1954 
1955     double edenSurvivalRate = memController_->GetAverageEdenSurvivalRate();
1956     double survivalRate = memController_->GetAverageSurvivalRate();
1957     constexpr double expectMaxSurvivalRate = 0.4;
1958     if ((edenSurvivalRate == 0 || edenSurvivalRate >= expectMaxSurvivalRate) && survivalRate >= expectMaxSurvivalRate) {
1959         return;
1960     }
1961 
1962     double edenSpaceAllocSpeed = memController_->GetEdenSpaceAllocationThroughputPerMS();
1963     double edenSpaceConcurrentMarkSpeed = memController_->GetEdenSpaceConcurrentMarkSpeedPerMS();
1964     if (edenSpaceConcurrentMarkSpeed == 0 || edenSpaceAllocSpeed == 0) {
1965         auto &config = ecmaVm_->GetEcmaParamConfiguration();
1966         if (edenSpace_->GetCommittedSize() >= config.GetEdenSpaceTriggerConcurrentMark()) {
1967             markType_ = MarkType::MARK_EDEN;
1968             TriggerConcurrentMarking();
1969             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first eden mark " << fullGCRequested_;
1970         }
1971         return;
1972     }
1973 
1974     auto &config = ecmaVm_->GetEcmaParamConfiguration();
1975     size_t edenCommittedSize = edenSpace_->GetCommittedSize();
1976     triggerMark = edenCommittedSize >= config.GetEdenSpaceTriggerConcurrentMark();
1977     if (!triggerMark && edenSpaceAllocSpeed != 0 && edenSpaceConcurrentMarkSpeed != 0 &&
1978             edenSpace_->GetHeapObjectSize() > 0) {
1979         double edenSpaceLimit = edenSpace_->GetInitialCapacity();
1980         double edenSpaceAllocToLimitDuration = (edenSpaceLimit - edenCommittedSize) / edenSpaceAllocSpeed;
1981         double edenSpaceMarkDuration = edenSpace_->GetHeapObjectSize() / edenSpaceConcurrentMarkSpeed;
1982         double edenSpaceRemainSize = (edenSpaceAllocToLimitDuration - edenSpaceMarkDuration) * newSpaceAllocSpeed;
1983         triggerMark = edenSpaceRemainSize < DEFAULT_REGION_SIZE;
1984     }
1985 
1986     if (triggerMark) {
1987         markType_ = MarkType::MARK_EDEN;
1988         TriggerConcurrentMarking();
1989         OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger eden mark";
1990     }
1991 }
1992 
TryTriggerFullMarkOrGCByNativeSize()1993 void Heap::TryTriggerFullMarkOrGCByNativeSize()
1994 {
1995     // In high sensitive scene and native size larger than limit, trigger old gc directly
1996     if (InSensitiveStatus() && GlobalNativeSizeLargerToTriggerGC()) {
1997         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
1998     } else if (GlobalNativeSizeLargerThanLimit()) {
1999         if (concurrentMarker_->IsEnabled()) {
2000             SetFullMarkRequestedState(true);
2001             TryTriggerConcurrentMarking();
2002         } else {
2003             CheckAndTriggerOldGC();
2004         }
2005     }
2006 }
2007 
TryTriggerFullMarkBySharedLimit()2008 bool Heap::TryTriggerFullMarkBySharedLimit()
2009 {
2010     bool keepFullMarkRequest = false;
2011     if (concurrentMarker_->IsEnabled()) {
2012         if (!CheckCanTriggerConcurrentMarking()) {
2013             return keepFullMarkRequest;
2014         }
2015         markType_ = MarkType::MARK_FULL;
2016         if (ConcurrentMarker::TryIncreaseTaskCounts()) {
2017             concurrentMarker_->Mark();
2018         } else {
2019             // need retry full mark request again.
2020             keepFullMarkRequest = true;
2021         }
2022     }
2023     return keepFullMarkRequest;
2024 }
2025 
CheckAndTriggerTaskFinishedGC()2026 void Heap::CheckAndTriggerTaskFinishedGC()
2027 {
2028     size_t objectSizeOfTaskBegin = GetRecordObjectSize();
2029     size_t objectSizeOfTaskFinished = GetHeapObjectSize();
2030     size_t nativeSizeOfTaskBegin = GetRecordNativeSize();
2031     size_t nativeSizeOfTaskFinished = GetGlobalNativeSize();
2032     // GC would be triggered when heap size increase more than Max(20M, 10%*SizeOfTaskBegin)
2033     bool objectSizeFlag = objectSizeOfTaskFinished > objectSizeOfTaskBegin &&
2034         objectSizeOfTaskFinished - objectSizeOfTaskBegin > std::max(TRIGGER_OLDGC_OBJECT_SIZE_LIMIT,
2035             TRIGGER_OLDGC_OBJECT_LIMIT_RATE * objectSizeOfTaskBegin);
2036     bool nativeSizeFlag = nativeSizeOfTaskFinished > nativeSizeOfTaskBegin &&
2037         nativeSizeOfTaskFinished - nativeSizeOfTaskBegin > std::max(TRIGGER_OLDGC_NATIVE_SIZE_LIMIT,
2038             TRIGGER_OLDGC_NATIVE_LIMIT_RATE * nativeSizeOfTaskBegin);
2039     if (objectSizeFlag || nativeSizeFlag) {
2040         panda::JSNApi::TriggerGC(GetEcmaVM(), panda::ecmascript::GCReason::TRIGGER_BY_TASKPOOL,
2041             panda::JSNApi::TRIGGER_GC_TYPE::OLD_GC);
2042         RecordOrResetObjectSize(0);
2043         RecordOrResetNativeSize(0);
2044     }
2045 }
2046 
IsMarking() const2047 bool Heap::IsMarking() const
2048 {
2049     return thread_->IsMarking();
2050 }
2051 
TryTriggerFullMarkBySharedSize(size_t size)2052 void Heap::TryTriggerFullMarkBySharedSize(size_t size)
2053 {
2054     newAllocatedSharedObjectSize_ += size;
2055     if (newAllocatedSharedObjectSize_ >= NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT) {
2056         if (concurrentMarker_->IsEnabled()) {
2057             SetFullMarkRequestedState(true);
2058             TryTriggerConcurrentMarking();
2059             newAllocatedSharedObjectSize_ = 0;
2060         }
2061     }
2062 }
2063 
IsReadyToConcurrentMark() const2064 bool Heap::IsReadyToConcurrentMark() const
2065 {
2066     return thread_->IsReadyToConcurrentMark();
2067 }
2068 
IncreaseNativeBindingSize(JSNativePointer * object)2069 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
2070 {
2071     size_t size = object->GetBindingSize();
2072     if (size == 0) {
2073         return;
2074     }
2075     nativeBindingSize_ += size;
2076 }
2077 
IncreaseNativeBindingSize(size_t size)2078 void Heap::IncreaseNativeBindingSize(size_t size)
2079 {
2080     if (size == 0) {
2081         return;
2082     }
2083     nativeBindingSize_ += size;
2084 }
2085 
DecreaseNativeBindingSize(size_t size)2086 void Heap::DecreaseNativeBindingSize(size_t size)
2087 {
2088     ASSERT(size <= nativeBindingSize_);
2089     nativeBindingSize_ -= size;
2090 }
2091 
PrepareRecordRegionsForReclaim()2092 void Heap::PrepareRecordRegionsForReclaim()
2093 {
2094     activeSemiSpace_->SetRecordRegion();
2095     oldSpace_->SetRecordRegion();
2096     snapshotSpace_->SetRecordRegion();
2097     nonMovableSpace_->SetRecordRegion();
2098     hugeObjectSpace_->SetRecordRegion();
2099     machineCodeSpace_->SetRecordRegion();
2100     hugeMachineCodeSpace_->SetRecordRegion();
2101 }
2102 
TriggerConcurrentMarking()2103 void Heap::TriggerConcurrentMarking()
2104 {
2105     ASSERT(idleTask_ != IdleTaskType::INCREMENTAL_MARK);
2106     if (idleTask_ == IdleTaskType::YOUNG_GC && IsConcurrentFullMark()) {
2107         ClearIdleTask();
2108         DisableNotifyIdle();
2109     }
2110     if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
2111         concurrentMarker_->Mark();
2112     }
2113 }
2114 
WaitAllTasksFinished()2115 void Heap::WaitAllTasksFinished()
2116 {
2117     WaitRunningTaskFinished();
2118     sweeper_->EnsureAllTaskFinished();
2119     WaitClearTaskFinished();
2120     if (concurrentMarker_->IsEnabled() && thread_->IsMarking() && concurrentMarker_->IsTriggeredConcurrentMark()) {
2121         concurrentMarker_->WaitMarkingFinished();
2122     }
2123 }
2124 
WaitConcurrentMarkingFinished()2125 void Heap::WaitConcurrentMarkingFinished()
2126 {
2127     concurrentMarker_->WaitMarkingFinished();
2128 }
2129 
PostParallelGCTask(ParallelGCTaskPhase gcTask)2130 void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
2131 {
2132     IncreaseTaskCount();
2133     Taskpool::GetCurrentTaskpool()->PostTask(
2134         std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
2135 }
2136 
ChangeGCParams(bool inBackground)2137 void Heap::ChangeGCParams(bool inBackground)
2138 {
2139     const double doubleOne = 1.0;
2140     inBackground_ = inBackground;
2141     if (inBackground) {
2142         LOG_GC(INFO) << "app is inBackground";
2143         if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT &&
2144             GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2145             doubleOne * GetHeapObjectSize() / GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2146             CollectGarbage(TriggerGCType::FULL_GC, GCReason::SWITCH_BACKGROUND);
2147         }
2148         if (sHeap_->GetHeapObjectSize() - sHeap_->GetHeapAliveSizeAfterGC() > BACKGROUND_GROW_LIMIT &&
2149             sHeap_->GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2150             doubleOne * sHeap_->GetHeapObjectSize() / sHeap_->GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2151             sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::SWITCH_BACKGROUND>(thread_);
2152         }
2153         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2154             SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2155             LOG_GC(DEBUG) << "Heap Growing Type CONSERVATIVE";
2156         }
2157         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
2158         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::DISABLE);
2159         maxMarkTaskCount_ = 1;
2160         maxEvacuateTaskCount_ = 1;
2161         Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::BACKGROUND);
2162     } else {
2163         LOG_GC(INFO) << "app is not inBackground";
2164         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2165             SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
2166             LOG_GC(DEBUG) << "Heap Growing Type HIGH_THROUGHPUT";
2167         }
2168         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
2169         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
2170         maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
2171             Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
2172         maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
2173         Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::FOREGROUND);
2174     }
2175 }
2176 
GetEcmaGCStats()2177 GCStats *Heap::GetEcmaGCStats()
2178 {
2179     return ecmaVm_->GetEcmaGCStats();
2180 }
2181 
GetEcmaGCKeyStats()2182 GCKeyStats *Heap::GetEcmaGCKeyStats()
2183 {
2184     return ecmaVm_->GetEcmaGCKeyStats();
2185 }
2186 
GetJSObjectResizingStrategy()2187 JSObjectResizingStrategy *Heap::GetJSObjectResizingStrategy()
2188 {
2189     return ecmaVm_->GetJSObjectResizingStrategy();
2190 }
2191 
TriggerIdleCollection(int idleMicroSec)2192 void Heap::TriggerIdleCollection(int idleMicroSec)
2193 {
2194     if (idleTask_ == IdleTaskType::NO_TASK) {
2195         if (incrementalMarker_->GetCurrentTimeInMs() - idleTaskFinishTime_ > IDLE_MAINTAIN_TIME) {
2196             DisableNotifyIdle();
2197         }
2198         return;
2199     }
2200 
2201     // Incremental mark initialize and process
2202     if (idleTask_ == IdleTaskType::INCREMENTAL_MARK &&
2203         incrementalMarker_->GetIncrementalGCStates() != IncrementalGCStates::REMARK) {
2204         incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2205         if (incrementalMarker_->GetIncrementalGCStates() == IncrementalGCStates::REMARK) {
2206             CalculateIdleDuration();
2207         }
2208         return;
2209     }
2210 
2211     if (idleMicroSec < idlePredictDuration_ && idleMicroSec < IDLE_TIME_LIMIT) {
2212         return;
2213     }
2214 
2215     switch (idleTask_) {
2216         case IdleTaskType::FINISH_MARKING: {
2217             if (markType_ == MarkType::MARK_FULL) {
2218                 CollectGarbage(TriggerGCType::OLD_GC, GCReason::IDLE);
2219             } else {
2220                 CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2221             }
2222             break;
2223         }
2224         case IdleTaskType::YOUNG_GC:
2225             CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2226             break;
2227         case IdleTaskType::INCREMENTAL_MARK:
2228             incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2229             break;
2230         default:
2231             break;
2232     }
2233     ClearIdleTask();
2234 }
2235 
NotifyMemoryPressure(bool inHighMemoryPressure)2236 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
2237 {
2238     if (inHighMemoryPressure) {
2239         LOG_GC(INFO) << "app is inHighMemoryPressure";
2240         SetMemGrowingType(MemGrowingType::PRESSURE);
2241     } else {
2242         LOG_GC(INFO) << "app is not inHighMemoryPressure";
2243         SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2244     }
2245 }
2246 
NotifyFinishColdStart(bool isMainThread)2247 void Heap::NotifyFinishColdStart(bool isMainThread)
2248 {
2249     if (!FinishStartupEvent()) {
2250         return;
2251     }
2252     ASSERT(!OnStartupEvent());
2253     LOG_GC(INFO) << "SmartGC: app cold start just finished";
2254 
2255     if (isMainThread && ObjectExceedJustFinishStartupThresholdForCM()) {
2256         TryTriggerConcurrentMarking();
2257     }
2258 
2259     auto startIdleMonitor = JSNApi::GetStartIdleMonitorCallback();
2260     if (startIdleMonitor != nullptr) {
2261         startIdleMonitor();
2262     }
2263 
2264     if (startupDurationInMs_ == 0) {
2265         startupDurationInMs_ = DEFAULT_STARTUP_DURATION_MS;
2266     }
2267 
2268     // restrain GC from 2s to 8s
2269     uint64_t delayTimeInMs = FINISH_STARTUP_TIMEPOINT_MS - startupDurationInMs_;
2270     Taskpool::GetCurrentTaskpool()->PostDelayedTask(
2271         std::make_unique<FinishGCRestrainTask>(GetJSThread()->GetThreadId(), this),
2272         delayTimeInMs);
2273 }
2274 
NotifyFinishColdStartSoon()2275 void Heap::NotifyFinishColdStartSoon()
2276 {
2277     if (!OnStartupEvent()) {
2278         return;
2279     }
2280 
2281     // post 2s task
2282     startupDurationInMs_ = DEFAULT_STARTUP_DURATION_MS;
2283 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
2284     startupDurationInMs_ = OHOS::system::GetUintParameter<uint64_t>("persist.ark.startupDuration",
2285                                                                     DEFAULT_STARTUP_DURATION_MS);
2286     startupDurationInMs_ = std::max(startupDurationInMs_, static_cast<uint64_t>(MIN_CONFIGURABLE_STARTUP_DURATION_MS));
2287     startupDurationInMs_ = std::min(startupDurationInMs_, static_cast<uint64_t>(MAX_CONFIGURABLE_STARTUP_DURATION_MS));
2288 #endif
2289     Taskpool::GetCurrentTaskpool()->PostDelayedTask(
2290         std::make_unique<FinishColdStartTask>(GetJSThread()->GetThreadId(), this),
2291         startupDurationInMs_);
2292 }
2293 
NotifyHighSensitive(bool isStart)2294 void Heap::NotifyHighSensitive(bool isStart)
2295 {
2296     ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SmartGC: set high sensitive status: " + std::to_string(isStart));
2297     isStart ? SetSensitiveStatus(AppSensitiveStatus::ENTER_HIGH_SENSITIVE)
2298         : SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE);
2299     LOG_GC(DEBUG) << "SmartGC: set high sensitive status: " << isStart;
2300 }
2301 
HandleExitHighSensitiveEvent()2302 bool Heap::HandleExitHighSensitiveEvent()
2303 {
2304     AppSensitiveStatus status = GetSensitiveStatus();
2305     if (status == AppSensitiveStatus::EXIT_HIGH_SENSITIVE
2306         && CASSensitiveStatus(status, AppSensitiveStatus::NORMAL_SCENE)) {
2307         // Set record heap obj size 0 after exit high senstive
2308         SetRecordHeapObjectSizeBeforeSensitive(0);
2309         // set overshoot size to increase gc threashold larger 8MB than current heap size.
2310         TryIncreaseNewSpaceOvershootByConfigSize();
2311 
2312         // fixme: IncrementalMarking and IdleCollection is currently not enabled
2313         TryTriggerIncrementalMarking();
2314         TryTriggerIdleCollection();
2315         TryTriggerConcurrentMarking();
2316         return true;
2317     }
2318     return false;
2319 }
2320 
2321 // On high sensitive scene, heap object size can reach to MaxHeapSize - 8M temporarily, 8M is reserved for
2322 // concurrent mark
ObjectExceedMaxHeapSize() const2323 bool Heap::ObjectExceedMaxHeapSize() const
2324 {
2325     size_t configMaxHeapSize = config_.GetMaxHeapSize();
2326     size_t overshootSize = config_.GetOldSpaceStepOvershootSize();
2327     return GetHeapObjectSize() > configMaxHeapSize - overshootSize;
2328 }
2329 
ObjectExceedJustFinishStartupThresholdForGC() const2330 bool Heap::ObjectExceedJustFinishStartupThresholdForGC() const
2331 {
2332     size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO;
2333     return GetHeapObjectSize() > heapObjectSizeThresholdForGC;
2334 }
2335 
ObjectExceedJustFinishStartupThresholdForCM() const2336 bool Heap::ObjectExceedJustFinishStartupThresholdForCM() const
2337 {
2338     size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO;
2339     size_t heapObjectSizeThresholdForCM = heapObjectSizeThresholdForGC
2340                                         * JUST_FINISH_STARTUP_LOCAL_CONCURRENT_MARK_RATIO;
2341     return GetHeapObjectSize() > heapObjectSizeThresholdForCM;
2342 }
2343 
TryIncreaseNewSpaceOvershootByConfigSize()2344 void Heap::TryIncreaseNewSpaceOvershootByConfigSize()
2345 {
2346     if (InGC() || !IsReadyToConcurrentMark()) {
2347         // overShootSize will be adjusted when resume heap during GC and
2348         // no need to reserve space for newSpace if ConcurrentMark is already triggered
2349         return;
2350     }
2351     // need lock because conflict may occur when handle exit sensitive status by main thread
2352     // and handle finish startup by child thread happen at the same time
2353     LockHolder lock(setNewSpaceOvershootSizeMutex_);
2354     // set overshoot size to increase gc threashold larger 8MB than current heap size.
2355     int64_t initialCapacity = static_cast<int64_t>(GetNewSpace()->GetInitialCapacity());
2356     int64_t committedSize = static_cast<int64_t>(GetNewSpace()->GetCommittedSize());
2357     int64_t semiRemainSize = initialCapacity - committedSize;
2358     int64_t overshootSize =
2359         static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - semiRemainSize;
2360     // overshoot size should be larger than 0.
2361     GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
2362 }
2363 
CheckIfNeedStopCollectionByStartup()2364 bool Heap::CheckIfNeedStopCollectionByStartup()
2365 {
2366     StartupStatus startupStatus = GetStartupStatus();
2367     switch (startupStatus) {
2368         case StartupStatus::ON_STARTUP:
2369             // During app cold start, gc threshold adjust to max heap size
2370             if (!ObjectExceedMaxHeapSize()) {
2371                 return true;
2372             }
2373             break;
2374         case StartupStatus::JUST_FINISH_STARTUP:
2375             // During app cold start just finished, gc threshold adjust to a quarter of max heap size
2376             if (!ObjectExceedJustFinishStartupThresholdForGC()) {
2377                 return true;
2378             }
2379             break;
2380         default:
2381             break;
2382     }
2383     return false;
2384 }
2385 
NeedStopCollection()2386 bool Heap::NeedStopCollection()
2387 {
2388     // gc is not allowed during value serialize
2389     if (onSerializeEvent_) {
2390         return true;
2391     }
2392 
2393     if (CheckIfNeedStopCollectionByStartup()) {
2394         return true;
2395     }
2396 
2397     if (!InSensitiveStatus()) {
2398         return false;
2399     }
2400 
2401     if (GetRecordHeapObjectSizeBeforeSensitive() == 0) {
2402         SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
2403     }
2404 
2405     if (GetHeapObjectSize() < GetRecordHeapObjectSizeBeforeSensitive() + config_.GetIncObjSizeThresholdInSensitive()
2406         && !ObjectExceedMaxHeapSize()) {
2407         return true;
2408     }
2409 
2410     OPTIONAL_LOG(ecmaVm_, INFO) << "SmartGC: heap obj size: " << GetHeapObjectSize()
2411         << " exceed sensitive gc threshold, have to trigger gc";
2412     return false;
2413 }
2414 
Run(uint32_t threadIndex)2415 bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
2416 {
2417     // Synchronizes-with. Ensure that WorkManager::Initialize must be seen by MarkerThreads.
2418     ASSERT(heap_->GetWorkManager()->HasInitialized());
2419     while (!heap_->GetWorkManager()->HasInitialized());
2420     switch (taskPhase_) {
2421         case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
2422             heap_->GetSemiGCMarker()->MarkRoots(threadIndex);
2423             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
2424             break;
2425         case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
2426             heap_->GetSemiGCMarker()->ProcessSnapshotRSet(threadIndex);
2427             break;
2428         case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
2429             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
2430             break;
2431         case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
2432             heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
2433             break;
2434         case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
2435             heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
2436             break;
2437         case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
2438             heap_->GetConcurrentMarker()->ProcessConcurrentMarkTask(threadIndex);
2439             break;
2440         case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
2441             heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
2442             break;
2443         default:
2444             LOG_GC(FATAL) << "this branch is unreachable, type: " << static_cast<int>(taskPhase_);
2445             UNREACHABLE();
2446     }
2447     heap_->ReduceTaskCount();
2448     return true;
2449 }
2450 
Run(uint32_t threadIndex)2451 bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
2452 {
2453     heap_->ReclaimRegions(gcType_);
2454     return true;
2455 }
2456 
Run(uint32_t threadIndex)2457 bool Heap::FinishColdStartTask::Run([[maybe_unused]] uint32_t threadIndex)
2458 {
2459     heap_->NotifyFinishColdStart(false);
2460     return true;
2461 }
2462 
Run(uint32_t threadIndex)2463 bool Heap::FinishGCRestrainTask::Run([[maybe_unused]] uint32_t threadIndex)
2464 {
2465     heap_->CancelJustFinishStartupEvent();
2466     LOG_GC(INFO) << "SmartGC: app cold start finished";
2467     return true;
2468 }
2469 
CleanCallBack()2470 void Heap::CleanCallBack()
2471 {
2472     auto &concurrentCallbacks = this->GetEcmaVM()->GetConcurrentNativePointerCallbacks();
2473     if (!concurrentCallbacks.empty()) {
2474         Taskpool::GetCurrentTaskpool()->PostTask(
2475             std::make_unique<DeleteCallbackTask>(thread_->GetThreadId(), concurrentCallbacks)
2476         );
2477     }
2478     ASSERT(concurrentCallbacks.empty());
2479 
2480     AsyncNativeCallbacksPack &asyncCallbacksPack = this->GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
2481     if (asyncCallbacksPack.Empty()) {
2482         ASSERT(asyncCallbacksPack.TotallyEmpty());
2483         return;
2484     }
2485     AsyncNativeCallbacksPack *asyncCallbacks = new AsyncNativeCallbacksPack();
2486     std::swap(*asyncCallbacks, asyncCallbacksPack);
2487     NativePointerTaskCallback asyncTaskCb = thread_->GetAsyncCleanTaskCallback();
2488     if (asyncTaskCb != nullptr && thread_->IsMainThreadFast() &&
2489         pendingAsyncNativeCallbackSize_ < asyncClearNativePointerThreshold_) {
2490         IncreasePendingAsyncNativeCallbackSize(asyncCallbacks->GetTotalBindingSize());
2491         asyncCallbacks->RegisterFinishNotify([this] (size_t bindingSize) {
2492             this->DecreasePendingAsyncNativeCallbackSize(bindingSize);
2493         });
2494         asyncTaskCb(asyncCallbacks);
2495     } else {
2496         ThreadNativeScope nativeScope(thread_);
2497         asyncCallbacks->ProcessAll();
2498         delete asyncCallbacks;
2499     }
2500     ASSERT(asyncCallbacksPack.TotallyEmpty());
2501 }
2502 
Run(uint32_t threadIndex)2503 bool Heap::DeleteCallbackTask::Run([[maybe_unused]] uint32_t threadIndex)
2504 {
2505     for (auto iter : nativePointerCallbacks_) {
2506         if (iter.first != nullptr) {
2507             iter.first(std::get<0>(iter.second),
2508                 std::get<1>(iter.second), std::get<2>(iter.second)); // 2 is the param.
2509         }
2510     }
2511     return true;
2512 }
2513 
GetArrayBufferSize() const2514 size_t Heap::GetArrayBufferSize() const
2515 {
2516     size_t result = 0;
2517     sweeper_->EnsureAllTaskFinished();
2518     this->IterateOverObjects([&result](TaggedObject *obj) {
2519         JSHClass* jsClass = obj->GetClass();
2520         result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
2521     });
2522     return result;
2523 }
2524 
GetLiveObjectSize() const2525 size_t Heap::GetLiveObjectSize() const
2526 {
2527     size_t objectSize = 0;
2528     sweeper_->EnsureAllTaskFinished();
2529     this->IterateOverObjects([&objectSize]([[maybe_unused]] TaggedObject *obj) {
2530         objectSize += obj->GetClass()->SizeFromJSHClass(obj);
2531     });
2532     return objectSize;
2533 }
2534 
GetHeapLimitSize() const2535 size_t Heap::GetHeapLimitSize() const
2536 {
2537     // Obtains the theoretical upper limit of space that can be allocated to JS heap.
2538     return config_.GetMaxHeapSize();
2539 }
2540 
IsAlive(TaggedObject * object) const2541 bool BaseHeap::IsAlive(TaggedObject *object) const
2542 {
2543     if (!ContainObject(object)) {
2544         LOG_GC(ERROR) << "The region is already free";
2545         return false;
2546     }
2547 
2548     bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
2549     if (isFree) {
2550         Region *region = Region::ObjectAddressToRange(object);
2551         LOG_GC(ERROR) << "The object " << object << " in "
2552                             << region->GetSpaceTypeName()
2553                             << " already free";
2554     }
2555     return !isFree;
2556 }
2557 
ContainObject(TaggedObject * object) const2558 bool BaseHeap::ContainObject(TaggedObject *object) const
2559 {
2560     /*
2561      * fixme: There's no absolutely safe appraoch to doing this, given that the region object is currently
2562      * allocated and maintained in the JS object heap. We cannot safely tell whether a region object
2563      * calculated from an object address is still valid or alive in a cheap way.
2564      * This will introduce inaccurate result to verify if an object is contained in the heap, and it may
2565      * introduce additional incorrect memory access issues.
2566      * Unless we can tolerate the performance impact of iterating the region list of each space and change
2567      * the implementation to that approach, don't rely on current implementation to get accurate result.
2568      */
2569     Region *region = Region::ObjectAddressToRange(object);
2570     return region->InHeapSpace();
2571 }
2572 
PrintHeapInfo(TriggerGCType gcType) const2573 void Heap::PrintHeapInfo(TriggerGCType gcType) const
2574 {
2575     OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
2576     OPTIONAL_LOG(ecmaVm_, INFO) << "GC Reason:" << ecmaVm_->GetEcmaGCStats()->GCReasonToString()
2577                                 << ";OnStartup:" << static_cast<int>(GetStartupStatus())
2578                                 << ";OnHighSensitive:" << static_cast<int>(GetSensitiveStatus())
2579                                 << ";ConcurrentMark Status:" << static_cast<int>(thread_->GetMarkStatus());
2580     OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
2581                                 << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsConcurrentFullMark()
2582                                 << ") Eden Mark(" << IsEdenMark() << ")";
2583     OPTIONAL_LOG(ecmaVm_, INFO) << "Eden(" << edenSpace_->GetHeapObjectSize() << "/" << edenSpace_->GetInitialCapacity()
2584                  << "), ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize() << "/"
2585                  << activeSemiSpace_->GetInitialCapacity() << "), NonMovable(" << nonMovableSpace_->GetHeapObjectSize()
2586                  << "/" << nonMovableSpace_->GetCommittedSize() << "/" << nonMovableSpace_->GetInitialCapacity()
2587                  << "), Old(" << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize() << "/"
2588                  << oldSpace_->GetInitialCapacity() << "), HugeObject(" << hugeObjectSpace_->GetHeapObjectSize() << "/"
2589                  << hugeObjectSpace_->GetCommittedSize() << "/" << hugeObjectSpace_->GetInitialCapacity()
2590                  << "), ReadOnlySpace(" << readOnlySpace_->GetCommittedSize() << "/"
2591                  << readOnlySpace_->GetInitialCapacity() << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize()
2592                  << "/" << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
2593                  << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
2594 }
2595 
StatisticHeapObject(TriggerGCType gcType) const2596 void Heap::StatisticHeapObject(TriggerGCType gcType) const
2597 {
2598     PrintHeapInfo(gcType);
2599 #if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
2600     StatisticHeapDetail();
2601 #endif
2602 }
2603 
StatisticHeapDetail()2604 void Heap::StatisticHeapDetail()
2605 {
2606     Prepare();
2607     static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
2608     int typeCount[JS_TYPE_LAST] = { 0 };
2609     static const int MIN_COUNT_THRESHOLD = 1000;
2610 
2611     nonMovableSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
2612         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
2613     });
2614     for (int i = 0; i < JS_TYPE_LAST; i++) {
2615         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
2616             LOG_ECMA(INFO) << "NonMovable space type " << JSHClass::DumpJSType(JSType(i))
2617                            << " count:" << typeCount[i];
2618         }
2619         typeCount[i] = 0;
2620     }
2621 
2622     oldSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
2623         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
2624     });
2625     for (int i = 0; i < JS_TYPE_LAST; i++) {
2626         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
2627             LOG_ECMA(INFO) << "Old space type " << JSHClass::DumpJSType(JSType(i))
2628                            << " count:" << typeCount[i];
2629         }
2630         typeCount[i] = 0;
2631     }
2632 
2633     activeSemiSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
2634         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
2635     });
2636     for (int i = 0; i < JS_TYPE_LAST; i++) {
2637         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
2638             LOG_ECMA(INFO) << "Active semi space type " << JSHClass::DumpJSType(JSType(i))
2639                            << " count:" << typeCount[i];
2640         }
2641         typeCount[i] = 0;
2642     }
2643 }
2644 
UpdateWorkManager(WorkManager * workManager)2645 void Heap::UpdateWorkManager(WorkManager *workManager)
2646 {
2647     concurrentMarker_->workManager_ = workManager;
2648     fullGC_->workManager_ = workManager;
2649     stwYoungGC_->workManager_ = workManager;
2650     incrementalMarker_->workManager_ = workManager;
2651     nonMovableMarker_->workManager_ = workManager;
2652     semiGCMarker_->workManager_ = workManager;
2653     compressGCMarker_->workManager_ = workManager;
2654     partialGC_->workManager_ = workManager;
2655 }
2656 
GetMachineCodeObject(uintptr_t pc) const2657 MachineCode *Heap::GetMachineCodeObject(uintptr_t pc) const
2658 {
2659     MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
2660     MachineCode *machineCode = reinterpret_cast<MachineCode*>(machineCodeSpace->GetMachineCodeObject(pc));
2661     if (machineCode != nullptr) {
2662         return machineCode;
2663     }
2664     HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
2665     return reinterpret_cast<MachineCode*>(hugeMachineCodeSpace->GetMachineCodeObject(pc));
2666 }
2667 
CalCallSiteInfo(uintptr_t retAddr) const2668 std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> Heap::CalCallSiteInfo(uintptr_t retAddr) const
2669 {
2670     MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
2671     MachineCode *code = nullptr;
2672     // 1. find return
2673     // 2. gc
2674     machineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
2675         if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
2676             return;
2677         }
2678         if (MachineCode::Cast(obj)->IsInText(retAddr)) {
2679             code = MachineCode::Cast(obj);
2680             return;
2681         }
2682     });
2683     if (code == nullptr) {
2684         HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
2685         hugeMachineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
2686             if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
2687                 return;
2688             }
2689             if (MachineCode::Cast(obj)->IsInText(retAddr)) {
2690                 code = MachineCode::Cast(obj);
2691                 return;
2692             }
2693         });
2694     }
2695 
2696     if (code == nullptr ||
2697         (code->GetPayLoadSizeInBytes() ==
2698          code->GetInstructionsSize() + code->GetStackMapOrOffsetTableSize())) { // baseline code
2699         return {};
2700     }
2701     return code->CalCallSiteInfo(retAddr);
2702 };
2703 
AddGCListener(FinishGCListener listener,void * data)2704 GCListenerId Heap::AddGCListener(FinishGCListener listener, void *data)
2705 {
2706     gcListeners_.emplace_back(std::make_pair(listener, data));
2707     return std::prev(gcListeners_.cend());
2708 }
2709 
ProcessGCListeners()2710 void Heap::ProcessGCListeners()
2711 {
2712     for (auto &&[listener, data] : gcListeners_) {
2713         listener(data);
2714     }
2715 }
2716 
ProcessAllGCListeners()2717 void SharedHeap::ProcessAllGCListeners()
2718 {
2719     Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
2720         ASSERT(!thread->IsInRunningState());
2721         const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
2722     });
2723 }
2724 
2725 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
GetCurrentTickMillseconds()2726 uint64_t Heap::GetCurrentTickMillseconds()
2727 {
2728     return std::chrono::duration_cast<std::chrono::milliseconds>(
2729     std::chrono::steady_clock::now().time_since_epoch()).count();
2730 }
2731 
SetJsDumpThresholds(size_t thresholds) const2732 void Heap::SetJsDumpThresholds(size_t thresholds) const
2733 {
2734     if (thresholds < MIN_JSDUMP_THRESHOLDS || thresholds > MAX_JSDUMP_THRESHOLDS) {
2735         LOG_GC(INFO) << "SetJsDumpThresholds thresholds is invaild" << thresholds;
2736         return;
2737     }
2738     g_threshold = thresholds;
2739 }
2740 
ThresholdReachedDump()2741 void Heap::ThresholdReachedDump()
2742 {
2743     size_t limitSize = GetHeapLimitSize();
2744     if (!limitSize) {
2745         LOG_GC(INFO) << "ThresholdReachedDump limitSize is invaild";
2746         return;
2747     }
2748     size_t nowPrecent = GetHeapObjectSize() * DEC_TO_INT / limitSize;
2749     if (g_debugLeak || (nowPrecent >= g_threshold && (g_lastHeapDumpTime == 0 ||
2750         GetCurrentTickMillseconds() - g_lastHeapDumpTime > HEAP_DUMP_REPORT_INTERVAL))) {
2751             size_t liveObjectSize = GetLiveObjectSize();
2752             size_t nowPrecentRecheck = liveObjectSize * DEC_TO_INT / limitSize;
2753             LOG_GC(INFO) << "ThresholdReachedDump nowPrecentCheck is " << nowPrecentRecheck;
2754             if (nowPrecentRecheck < g_threshold) {
2755                 return;
2756             }
2757             g_lastHeapDumpTime = GetCurrentTickMillseconds();
2758             base::BlockHookScope blockScope;
2759             HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
2760             GetEcmaGCKeyStats()->SendSysEventBeforeDump("thresholdReachedDump",
2761                                                         GetHeapLimitSize(), GetLiveObjectSize());
2762             DumpSnapShotOption dumpOption;
2763             dumpOption.dumpFormat = DumpFormat::BINARY;
2764             dumpOption.isVmMode = true;
2765             dumpOption.isPrivate = false;
2766             dumpOption.captureNumericValue = false;
2767             dumpOption.isFullGC = false;
2768             dumpOption.isSimplify = true;
2769             dumpOption.isSync = false;
2770             dumpOption.isBeforeFill = false;
2771             dumpOption.isDumpOOM = true; // aim's to do binary dump
2772             heapProfile->DumpHeapSnapshot(dumpOption);
2773             hasOOMDump_ = false;
2774             HeapProfilerInterface::Destroy(ecmaVm_);
2775         }
2776 }
2777 #endif
2778 
RemoveGCListener(GCListenerId listenerId)2779 void Heap::RemoveGCListener(GCListenerId listenerId)
2780 {
2781     gcListeners_.erase(listenerId);
2782 }
2783 
IncreaseTaskCount()2784 void BaseHeap::IncreaseTaskCount()
2785 {
2786     LockHolder holder(waitTaskFinishedMutex_);
2787     runningTaskCount_++;
2788 }
2789 
WaitRunningTaskFinished()2790 void BaseHeap::WaitRunningTaskFinished()
2791 {
2792     LockHolder holder(waitTaskFinishedMutex_);
2793     while (runningTaskCount_ > 0) {
2794         waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
2795     }
2796 }
2797 
CheckCanDistributeTask()2798 bool BaseHeap::CheckCanDistributeTask()
2799 {
2800     LockHolder holder(waitTaskFinishedMutex_);
2801     return runningTaskCount_ < maxMarkTaskCount_;
2802 }
2803 
ReduceTaskCount()2804 void BaseHeap::ReduceTaskCount()
2805 {
2806     LockHolder holder(waitTaskFinishedMutex_);
2807     runningTaskCount_--;
2808     if (runningTaskCount_ == 0) {
2809         waitTaskFinishedCV_.SignalAll();
2810     }
2811 }
2812 
WaitClearTaskFinished()2813 void BaseHeap::WaitClearTaskFinished()
2814 {
2815     LockHolder holder(waitClearTaskFinishedMutex_);
2816     while (!clearTaskFinished_) {
2817         waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
2818     }
2819 }
2820 
ReleaseEdenAllocator()2821 void Heap::ReleaseEdenAllocator()
2822 {
2823     auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
2824     auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
2825     if (!topAddress || !endAddress) {
2826         return;
2827     }
2828     thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
2829 }
2830 
InstallEdenAllocator()2831 void Heap::InstallEdenAllocator()
2832 {
2833     if (!enableEdenGC_) {
2834         return;
2835     }
2836     auto topAddress = edenSpace_->GetAllocationTopAddress();
2837     auto endAddress = edenSpace_->GetAllocationEndAddress();
2838     if (!topAddress || !endAddress) {
2839         return;
2840     }
2841     thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
2842 }
2843 
EnableEdenGC()2844 void Heap::EnableEdenGC()
2845 {
2846     enableEdenGC_ = true;
2847     thread_->EnableEdenGCBarriers();
2848 }
2849 
TryEnableEdenGC()2850 void Heap::TryEnableEdenGC()
2851 {
2852     if (ohos::OhosParams::IsEdenGCEnable()) {
2853         EnableEdenGC();
2854     }
2855 }
2856 }  // namespace panda::ecmascript
2857