/*
 * Copyright (c) 2022-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <chrono>
#include <thread>

#include "ecmascript/base/block_hook_scope.h"
#include "ecmascript/checkpoint/thread_state_transition.h"
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
#include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
#endif

#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/partial_gc.h"
#include "ecmascript/mem/parallel_evacuator.h"
#include "ecmascript/mem/parallel_marker-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/mem/shared_heap/shared_gc_marker-inl.h"
#include "ecmascript/mem/shared_heap/shared_gc.h"
#include "ecmascript/mem/shared_heap/shared_full_gc.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_marker.h"
#include "ecmascript/mem/stw_young_gc.h"
#include "ecmascript/mem/verification.h"
#include "ecmascript/runtime_call_id.h"
#include "ecmascript/jit/jit.h"
#include "ecmascript/ohos/ohos_params.h"
#if !WIN_OR_MAC_OR_IOS_PLATFORM
#include "ecmascript/dfx/hprof/heap_profiler_interface.h"
#include "ecmascript/dfx/hprof/heap_profiler.h"
#endif
#include "ecmascript/dfx/tracing/tracing.h"
#if defined(ENABLE_DUMP_IN_FAULTLOG)
#include "syspara/parameter.h"
#endif

#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
#include "parameters.h"
#include "hisysevent.h"
static constexpr uint32_t DEC_TO_INT = 100;
static size_t g_threshold = OHOS::system::GetUintParameter<size_t>("persist.dfx.leak.threshold", 85);
static uint64_t g_lastHeapDumpTime = 0;
static bool g_debugLeak = OHOS::system::GetBoolParameter("debug.dfx.tags.enableleak", false);
static constexpr uint64_t HEAP_DUMP_REPORT_INTERVAL = 24 * 3600 * 1000;
static bool g_betaVersion = OHOS::system::GetParameter("const.logsystem.versiontype", "unknown") == "beta";
static bool g_developMode = (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "enable") ||
                            (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "true");
#endif

namespace panda::ecmascript {
SharedHeap *SharedHeap::instance_ = nullptr;

void SharedHeap::CreateNewInstance()
{
    ASSERT(instance_ == nullptr);
    size_t heapShared = 0;
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    heapShared = OHOS::system::GetUintParameter<size_t>("persist.ark.heap.sharedsize", 0) * 1_MB;
#endif
    EcmaParamConfiguration config(EcmaParamConfiguration::HeapType::SHARED_HEAP,
        MemMapAllocator::GetInstance()->GetCapacity(), heapShared);
    instance_ = new SharedHeap(config);
}

SharedHeap *SharedHeap::GetInstance()
{
    ASSERT(instance_ != nullptr);
    return instance_;
}

void SharedHeap::DestroyInstance()
{
    ASSERT(instance_ != nullptr);
    instance_->Destroy();
    delete instance_;
    instance_ = nullptr;
}
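
// Illustrative lifecycle sketch (not part of this file; the call sites are assumptions):
// the runtime creates the singleton once at bootstrap, hands it out afterwards, and
// tears it down on exit:
//
//   SharedHeap::CreateNewInstance();                 // during runtime initialization
//   SharedHeap *sHeap = SharedHeap::GetInstance();   // from any thread afterwards
//   SharedHeap::DestroyInstance();                   // during runtime shutdown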

void SharedHeap::ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread)
{
    ASSERT(!dThread_->IsRunning());
    SuspendAllScope scope(thread);
    SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
    GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
    if (UNLIKELY(ShouldVerifyHeap())) {
        // pre gc heap verify
        LOG_ECMA(DEBUG) << "pre gc shared heap verify";
        sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
        SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
    }
    switch (gcType) {
        case TriggerGCType::SHARED_GC: {
            sharedGC_->RunPhases();
            break;
        }
        case TriggerGCType::SHARED_FULL_GC: {
            sharedFullGC_->RunPhases();
            break;
        }
        default:
            LOG_ECMA(FATAL) << "this branch is unreachable";
            UNREACHABLE();
            break;
    }
    if (UNLIKELY(ShouldVerifyHeap())) {
        // post gc heap verify
        LOG_ECMA(DEBUG) << "after gc shared heap verify";
        SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
    }
    CollectGarbageFinish(false, gcType);
}

bool SharedHeap::CheckAndTriggerSharedGC(JSThread *thread)
{
    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
        return false;
    }
    if ((OldSpaceExceedLimit() || GetHeapObjectSize() > globalSpaceAllocLimit_) &&
        !NeedStopCollection()) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        return true;
    }
    return false;
}

bool SharedHeap::CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size)
{
    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
        return false;
    }
    if ((sHugeObjectSpace_->CommittedSizeExceed(size) || GetHeapObjectSize() > globalSpaceAllocLimit_) &&
        !NeedStopCollection()) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        return true;
    }
    return false;
}
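
// Both triggers above share one pattern: while a shared concurrent mark is in progress
// (or just finished), a new GC is forced only if the heap has genuinely exceeded its
// maximum size; otherwise the allocation-limit path fires, unless the runtime is in a
// sensitive state (NeedStopCollection).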

void SharedHeap::CollectGarbageNearOOM(JSThread *thread)
{
    auto fragmentationSize = sOldSpace_->GetCommittedSize() - sOldSpace_->GetHeapObjectSize();
    if (fragmentationSize >= fragmentationLimitForSharedFullGC_) {
        CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::ALLOCATION_FAILED>(thread);
    } else {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
    }
}
// Shared gc trigger
void SharedHeap::AdjustGlobalSpaceAllocLimit()
{
    globalSpaceAllocLimit_ = std::max(GetHeapObjectSize() * growingFactor_,
                                      config_.GetDefaultGlobalAllocLimit() * 2); // 2: double
    globalSpaceAllocLimit_ = std::min(std::min(globalSpaceAllocLimit_, GetCommittedSize() + growingStep_),
                                      config_.GetMaxHeapSize());
    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);
    constexpr double OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT = 1.1;
    size_t markLimitByIncrement = static_cast<size_t>(GetHeapObjectSize() * OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT);
    globalSpaceConcurrentMarkLimit_ = std::max(globalSpaceConcurrentMarkLimit_, markLimitByIncrement);
    LOG_ECMA_IF(optionalLogEnabled_, INFO) << "Shared gc adjust global space alloc limit to: "
        << globalSpaceAllocLimit_;
}
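
// Worked sketch with made-up numbers (the real values come from EcmaParamConfiguration):
// with live objects = 100 MB, growingFactor_ = 1.5, default global limit = 60 MB,
// committed = 120 MB and growingStep_ = 20 MB, the limit becomes
// min(max(100 * 1.5, 60 * 2), 120 + 20, maxHeapSize) = 140 MB. The concurrent-mark limit
// is then the larger of 140 MB * RATE and 100 MB * 1.1, so concurrent marking starts
// before the hard allocation limit is hit.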

bool SharedHeap::ObjectExceedMaxHeapSize() const
{
    return OldSpaceExceedLimit() || sHugeObjectSpace_->CommittedSizeExceed();
}

void SharedHeap::StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason)
{
    ASSERT(JSThread::GetCurrent() == dThread_);
    sConcurrentMarker_->Mark(gcType, gcReason);
}

bool SharedHeap::CheckCanTriggerConcurrentMarking(JSThread *thread)
{
    return thread->IsReadyToSharedConcurrentMark() &&
           sConcurrentMarker_ != nullptr && sConcurrentMarker_->IsEnabled();
}

void SharedHeap::Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
    const JSRuntimeOptions &option, DaemonThread *dThread)
{
    sGCStats_ = new SharedGCStats(this, option.EnableGCTracer());
    nativeAreaAllocator_ = nativeAreaAllocator;
    heapRegionAllocator_ = heapRegionAllocator;
    shouldVerifyHeap_ = option.EnableHeapVerify();
    parallelGC_ = option.EnableParallelGC();
    optionalLogEnabled_ = option.EnableOptionalLog();
    size_t maxHeapSize = config_.GetMaxHeapSize();
    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    sNonMovableSpace_ = new SharedNonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);

    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    size_t oldSpaceCapacity = (maxHeapSize - nonmovableSpaceCapacity - readOnlySpaceCapacity) / 2; // 2: half
    globalSpaceAllocLimit_ = config_.GetDefaultGlobalAllocLimit();
    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);

    sOldSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    sCompressSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    sReadOnlySpace_ = new SharedReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    sHugeObjectSpace_ = new SharedHugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    sAppSpawnSpace_ = new SharedAppSpawnSpace(this, oldSpaceCapacity);
    growingFactor_ = config_.GetSharedHeapLimitGrowingFactor();
    growingStep_ = config_.GetSharedHeapLimitGrowingStep();
    incNativeSizeTriggerSharedCM_ = config_.GetStepNativeSizeInc();
    incNativeSizeTriggerSharedGC_ = config_.GetMaxNativeSizeInc();
    fragmentationLimitForSharedFullGC_ = config_.GetFragmentationLimitForSharedFullGC();
    dThread_ = dThread;
}
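
// Note: sOldSpace_ and sCompressSpace_ are sized identically because the compress space
// serves as the evacuation target during a shared full GC, so half of the remaining
// budget (the "2: half" above) is reserved for that copy.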

void SharedHeap::Destroy()
{
    if (sWorkManager_ != nullptr) {
        delete sWorkManager_;
        sWorkManager_ = nullptr;
    }
    if (sOldSpace_ != nullptr) {
        sOldSpace_->Reset();
        delete sOldSpace_;
        sOldSpace_ = nullptr;
    }
    if (sCompressSpace_ != nullptr) {
        sCompressSpace_->Reset();
        delete sCompressSpace_;
        sCompressSpace_ = nullptr;
    }
    if (sNonMovableSpace_ != nullptr) {
        sNonMovableSpace_->Reset();
        delete sNonMovableSpace_;
        sNonMovableSpace_ = nullptr;
    }
    if (sHugeObjectSpace_ != nullptr) {
        sHugeObjectSpace_->Destroy();
        delete sHugeObjectSpace_;
        sHugeObjectSpace_ = nullptr;
    }
    if (sReadOnlySpace_ != nullptr) {
        sReadOnlySpace_->ClearReadOnly();
        sReadOnlySpace_->Destroy();
        delete sReadOnlySpace_;
        sReadOnlySpace_ = nullptr;
    }
    if (sAppSpawnSpace_ != nullptr) {
        sAppSpawnSpace_->Reset();
        delete sAppSpawnSpace_;
        sAppSpawnSpace_ = nullptr;
    }
    if (sharedGC_ != nullptr) {
        delete sharedGC_;
        sharedGC_ = nullptr;
    }
    if (sharedFullGC_ != nullptr) {
        delete sharedFullGC_;
        sharedFullGC_ = nullptr;
    }

    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (sSweeper_ != nullptr) {
        delete sSweeper_;
        sSweeper_ = nullptr;
    }
    if (sConcurrentMarker_ != nullptr) {
        delete sConcurrentMarker_;
        sConcurrentMarker_ = nullptr;
    }
    if (sharedGCMarker_ != nullptr) {
        delete sharedGCMarker_;
        sharedGCMarker_ = nullptr;
    }
    if (sharedGCMovableMarker_ != nullptr) {
        delete sharedGCMovableMarker_;
        sharedGCMovableMarker_ = nullptr;
    }
    dThread_ = nullptr;
}

void SharedHeap::PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option)
{
    globalEnvConstants_ = globalEnvConstants;
    uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = totalThreadNum - 1;
    sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
    sharedGCMarker_ = new SharedGCMarker(sWorkManager_);
    sharedGCMovableMarker_ = new SharedGCMovableMarker(sWorkManager_, this);
    sConcurrentMarker_ = new SharedConcurrentMarker(option.EnableSharedConcurrentMark() ?
        EnableConcurrentMarkType::ENABLE : EnableConcurrentMarkType::CONFIG_DISABLE);
    sSweeper_ = new SharedConcurrentSweeper(this, option.EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    sharedGC_ = new SharedGC(this);
    sharedFullGC_ = new SharedFullGC(this);
}
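
// The work manager is sized with totalThreadNum + 1 slots, which appears to account for
// the daemon GC thread working alongside the task-pool workers; EnableParallelGC below
// re-checks the same arithmetic when options change.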

void SharedHeap::PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase)
{
    IncreaseTaskCount();
    Taskpool::GetCurrentTaskpool()->PostTask(std::make_unique<ParallelMarkTask>(dThread_->GetThreadId(),
                                                                                this, sharedTaskPhase));
}

bool SharedHeap::ParallelMarkTask::Run(uint32_t threadIndex)
{
    // Spin until WorkManager::Initialize is visible to this marker thread
    // (synchronizes-with the initializing thread).
    while (!sHeap_->GetWorkManager()->HasInitialized());
    switch (taskPhase_) {
        case SharedParallelMarkPhase::SHARED_MARK_TASK:
            sHeap_->GetSharedGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case SharedParallelMarkPhase::SHARED_COMPRESS_TASK:
            sHeap_->GetSharedGCMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        default:
            break;
    }
    sHeap_->ReduceTaskCount();
    return true;
}

bool SharedHeap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    sHeap_->ReclaimRegions(gcType_);
    return true;
}

void SharedHeap::NotifyGCCompleted()
{
    ASSERT(JSThread::GetCurrent() == dThread_);
    LockHolder lock(waitGCFinishedMutex_);
    gcFinished_ = true;
    waitGCFinishedCV_.SignalAll();
}

void SharedHeap::WaitGCFinished(JSThread *thread)
{
    ASSERT(thread->GetThreadId() != dThread_->GetThreadId());
    ASSERT(thread->IsInRunningState());
    ThreadSuspensionScope scope(thread);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::WaitGCFinished");
    LockHolder lock(waitGCFinishedMutex_);
    while (!gcFinished_) {
        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
    }
}
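
// WaitGCFinished enters ThreadSuspensionScope before blocking, so a mutator that is
// merely waiting for the GC does not hold up the daemon thread's SuspendAllScope.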

void SharedHeap::WaitGCFinishedAfterAllJSThreadEliminated()
{
    ASSERT(Runtime::GetInstance()->vmCount_ == 0);
    LockHolder lock(waitGCFinishedMutex_);
    while (!gcFinished_) {
        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
    }
}

void SharedHeap::DaemonCollectGarbage([[maybe_unused]] TriggerGCType gcType, [[maybe_unused]] GCReason gcReason)
{
    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
    ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
    ASSERT(JSThread::GetCurrent() == dThread_);
    {
        ThreadManagedScope runningScope(dThread_);
        SuspendAllScope scope(dThread_);
        SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
        gcType_ = gcType;
        GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
        if (UNLIKELY(ShouldVerifyHeap())) {
            // pre gc heap verify
            LOG_ECMA(DEBUG) << "pre gc shared heap verify";
            sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
            SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
        }
        switch (gcType) {
            case TriggerGCType::SHARED_GC: {
                sharedGC_->RunPhases();
                break;
            }
            case TriggerGCType::SHARED_FULL_GC: {
                sharedFullGC_->RunPhases();
                break;
            }
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }

        if (UNLIKELY(ShouldVerifyHeap())) {
            // after gc heap verify
            LOG_ECMA(DEBUG) << "after gc shared heap verify";
            SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
        }
        CollectGarbageFinish(true, gcType);
    }
    // Don't process weak node nativeFinalizeCallback here; these callbacks will be called after local GC.
}

void SharedHeap::WaitAllTasksFinished(JSThread *thread)
{
    WaitGCFinished(thread);
    sSweeper_->WaitAllTaskFinished();
    WaitClearTaskFinished();
}

void SharedHeap::WaitAllTasksFinishedAfterAllJSThreadEliminated()
{
    WaitGCFinishedAfterAllJSThreadEliminated();
    sSweeper_->WaitAllTaskFinished();
    WaitClearTaskFinished();
}

bool SharedHeap::CheckOngoingConcurrentMarking()
{
    if (sConcurrentMarker_->IsEnabled() && !dThread_->IsReadyToConcurrentMark() &&
        sConcurrentMarker_->IsTriggeredConcurrentMark()) {
        // This is only called in SharedGC to decide whether to remark, so there is no need to
        // wait for marking to finish here.
        return true;
    }
    return false;
}

void SharedHeap::Prepare(bool inTriggerGCThread)
{
    WaitRunningTaskFinished();
    if (inTriggerGCThread) {
        sSweeper_->EnsureAllTaskFinished();
    } else {
        sSweeper_->WaitAllTaskFinished();
    }
    WaitClearTaskFinished();
}

SharedHeap::SharedGCScope::SharedGCScope()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
        if (pgoProfiler != nullptr) {
            pgoProfiler->SuspendByGC();
        }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
        thread->SetGcState(true);
#endif
    });
}

SharedHeap::SharedGCScope::~SharedGCScope()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        ASSERT(!thread->IsInRunningState());
        const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
        std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
        if (pgoProfiler != nullptr) {
            pgoProfiler->ResumeByGC();
        }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
        thread->SetGcState(false);
#endif
    });
}

void SharedHeap::PrepareRecordRegionsForReclaim()
{
    sOldSpace_->SetRecordRegion();
    sNonMovableSpace_->SetRecordRegion();
    sHugeObjectSpace_->SetRecordRegion();
}

void SharedHeap::Reclaim(TriggerGCType gcType)
{
    PrepareRecordRegionsForReclaim();
    sHugeObjectSpace_->ReclaimHugeRegion();

    if (parallelGC_) {
        clearTaskFinished_ = false;
        Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(dThread_->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}

void SharedHeap::ReclaimRegions(TriggerGCType gcType)
{
    if (gcType == TriggerGCType::SHARED_FULL_GC) {
        sCompressSpace_->Reset();
    }
    sSweeper_->WaitAllTaskFinished();
    EnumerateOldSpaceRegionsWithRecord([] (Region *region) {
        region->ClearMarkGCBitset();
        region->ResetAliveObject();
    });
    if (!clearTaskFinished_) {
        LockHolder holder(waitClearTaskFinishedMutex_);
        clearTaskFinished_ = true;
        waitClearTaskFinishedCV_.SignalAll();
    }
}
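
// clearTaskFinished_ is only reset by Reclaim before posting AsyncClearTask, so on the
// non-parallel path the guard above stays false-taken and no signal is sent; on the
// parallel path this is the single wake-up that WaitClearTaskFinished relies on.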

void SharedHeap::DisableParallelGC(JSThread *thread)
{
    WaitAllTasksFinished(thread);
    dThread_->WaitFinished();
    parallelGC_ = false;
    maxMarkTaskCount_ = 0;
    sSweeper_->ConfigConcurrentSweep(false);
    sConcurrentMarker_->ConfigConcurrentMark(false);
}

void SharedHeap::EnableParallelGC(JSRuntimeOptions &option)
{
    uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = totalThreadNum - 1;
    parallelGC_ = option.EnableParallelGC();
    if (auto workThreadNum = sWorkManager_->GetTotalThreadNum();
        workThreadNum != totalThreadNum + 1) {
        LOG_ECMA_MEM(ERROR) << "ThreadNum mismatch, totalThreadNum(sWorkerManager): " << workThreadNum << ", "
                            << "totalThreadNum(taskpool): " << (totalThreadNum + 1);
        delete sWorkManager_;
        sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
        UpdateWorkManager(sWorkManager_);
    }
    sConcurrentMarker_->ConfigConcurrentMark(option.EnableSharedConcurrentMark());
    sSweeper_->ConfigConcurrentSweep(option.EnableConcurrentSweep());
}

void SharedHeap::UpdateWorkManager(SharedGCWorkManager *sWorkManager)
{
    sConcurrentMarker_->ResetWorkManager(sWorkManager);
    sharedGCMarker_->ResetWorkManager(sWorkManager);
    sharedGCMovableMarker_->ResetWorkManager(sWorkManager);
    sharedGC_->ResetWorkManager(sWorkManager);
    sharedFullGC_->ResetWorkManager(sWorkManager);
}

void SharedHeap::TryTriggerLocalConcurrentMarking()
{
    if (localFullMarkTriggered_) {
        return;
    }
    if (reinterpret_cast<std::atomic<bool>*>(&localFullMarkTriggered_)->exchange(true, std::memory_order_relaxed)
            != false) {
        return;
    }
    ASSERT(localFullMarkTriggered_ == true);
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        thread->SetFullMarkRequest();
    });
}
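
// The unsynchronized read above is only a fast path; the atomic exchange is what
// guarantees that exactly one caller proceeds to request full marks, so two threads
// racing here cannot both iterate the thread list.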

size_t SharedHeap::VerifyHeapObjects(VerifyKind verifyKind) const
{
    size_t failCount = 0;
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sOldSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sNonMovableSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sHugeObjectSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sAppSpawnSpace_->IterateOverMarkedObjects(verifier);
    }
    return failCount;
}

bool SharedHeap::IsReadyToConcurrentMark() const
{
    return dThread_->IsReadyToConcurrentMark();
}

bool SharedHeap::NeedStopCollection()
{
    if (!InSensitiveStatus()) {
        return false;
    }

    if (!ObjectExceedMaxHeapSize()) {
        return true;
    }
    return false;
}

void SharedHeap::CompactHeapBeforeFork(JSThread *thread)
{
    ThreadManagedScope managedScope(thread);
    WaitGCFinished(thread);
    sharedFullGC_->SetForAppSpawn(true);
    CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
    sharedFullGC_->SetForAppSpawn(false);
}

void SharedHeap::MoveOldSpaceToAppspawn()
{
    auto committedSize = sOldSpace_->GetCommittedSize();
    sAppSpawnSpace_->SetInitialCapacity(committedSize);
    sAppSpawnSpace_->SetMaximumCapacity(committedSize);
    sOldSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity() - committedSize);
    sOldSpace_->SetMaximumCapacity(sOldSpace_->GetMaximumCapacity() - committedSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    sAppSpawnSpace_->SwapAllocationCounter(sOldSpace_);
#endif
    auto threadId = Runtime::GetInstance()->GetMainThread()->GetThreadId();
    sOldSpace_->EnumerateRegions([&](Region *region) {
        region->SetRegionSpaceFlag(RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE);
        PageTag(region, region->GetCapacity(), PageTagType::HEAP, region->GetSpaceTypeName(), threadId);
        sAppSpawnSpace_->AddRegion(region);
        sAppSpawnSpace_->IncreaseLiveObjectSize(region->AliveObject());
    });
    sOldSpace_->GetRegionList().Clear();
    sOldSpace_->Reset();
}
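
// The regions moved here are expected to be shared copy-on-write across app processes
// after the appspawn fork, which is why surviving old-space regions are retagged and
// handed over wholesale instead of being evacuated.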

void SharedHeap::ReclaimForAppSpawn()
{
    sSweeper_->WaitAllTaskFinished();
    sHugeObjectSpace_->ReclaimHugeRegion();
    sCompressSpace_->Reset();
    MoveOldSpaceToAppspawn();
    auto cb = [] (Region *region) {
        region->ClearMarkGCBitset();
        region->ResetAliveObject();
    };
    sNonMovableSpace_->EnumerateRegions(cb);
    sHugeObjectSpace_->EnumerateRegions(cb);
}

void SharedHeap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] bool isFullGC, [[maybe_unused]] JSThread *thread)
{
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    EcmaVM *vm = thread->GetEcmaVM();
    if (vm->GetHeapProfile() != nullptr) {
        LOG_ECMA(ERROR) << "SharedHeap::DumpHeapSnapshotBeforeOOM, HeapProfile already exists, skip dump";
        return;
    }
    // Filter appfreeze when dump.
    LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, isFullGC = " << isFullGC;
    base::BlockHookScope blockScope;
    HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(vm);
    if (appfreezeCallback_ != nullptr && appfreezeCallback_(getprocpid())) {
        LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, appfreezeCallback_ success.";
    }
    vm->GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetEcmaParamConfiguration().GetMaxHeapSize(),
                                                    GetHeapObjectSize());
    DumpSnapShotOption dumpOption;
    dumpOption.dumpFormat = DumpFormat::BINARY;
    dumpOption.isVmMode = true;
    dumpOption.isPrivate = false;
    dumpOption.captureNumericValue = false;
    dumpOption.isFullGC = isFullGC;
    dumpOption.isSimplify = true;
    dumpOption.isSync = true;
    dumpOption.isBeforeFill = false;
    dumpOption.isDumpOOM = true;
    heapProfile->DumpHeapSnapshot(dumpOption);
    HeapProfilerInterface::Destroy(vm);
#endif // ENABLE_DUMP_IN_FAULTLOG
#endif // ECMASCRIPT_SUPPORT_SNAPSHOT
}

Heap::Heap(EcmaVM *ecmaVm)
    : BaseHeap(ecmaVm->GetEcmaParamConfiguration()),
      ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()), sHeap_(SharedHeap::GetInstance()) {}

void Heap::Initialize()
{
    memController_ = new MemController(this);
    nativeAreaAllocator_ = ecmaVm_->GetNativeAreaAllocator();
    heapRegionAllocator_ = ecmaVm_->GetHeapRegionAllocator();
    size_t maxHeapSize = config_.GetMaxHeapSize();
    size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
    size_t maxSemiSpaceCapacity = config_.GetMaxSemiSpaceSize();
    size_t edenSpaceCapacity = 2_MB;
    edenSpace_ = new EdenSpace(this, edenSpaceCapacity, edenSpaceCapacity);
    edenSpace_->Restart();
    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    activeSemiSpace_->Restart();
    activeSemiSpace_->SetWaterLine();

    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
    sOldTlab_ = new ThreadLocalAllocationBuffer(this);
    thread_->ReSetSOldSpaceAllocationAddress(sOldTlab_->GetTopAddress(), sOldTlab_->GetEndAddress());
    sNonMovableTlab_ = new ThreadLocalAllocationBuffer(this);
    thread_->ReSetSNonMovableSpaceAllocationAddress(sNonMovableTlab_->GetTopAddress(),
                                                    sNonMovableTlab_->GetEndAddress());
    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);

    // whether the heap should be verified during gc
    shouldVerifyHeap_ = ecmaVm_->GetJSOptions().EnableHeapVerify();
    // not set up from space

    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
    nonMovableSpace_->Initialize();
    size_t snapshotSpaceCapacity = config_.GetDefaultSnapshotSpaceSize();
    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
    size_t machineCodeSpaceCapacity = config_.GetDefaultMachineCodeSpaceSize();
    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);

    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) { // LOCV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
    }
    size_t oldSpaceCapacity = maxHeapSize - capacities;
    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
    globalSpaceNativeLimit_ = INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT;
    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    oldSpace_->Initialize();

    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    hugeMachineCodeSpace_ = new HugeMachineCodeSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);

    LOG_GC(DEBUG) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
                  << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
                  << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
                  << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
                  << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
                  << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
                  << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
                  << ", gcThreadNum = " << maxMarkTaskCount_;
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
    markType_ = MarkType::MARK_YOUNG;
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
    stwYoungGC_ = new STWYoungGC(this, parallelGC_);
    fullGC_ = new FullGC(this);

    partialGC_ = new PartialGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
        EnableConcurrentMarkType::CONFIG_DISABLE);
    nonMovableMarker_ = new NonMovableMarker(this);
    semiGCMarker_ = new SemiGCMarker(this);
    compressGCMarker_ = new CompressGCMarker(this);
    evacuator_ = new ParallelEvacuator(this);
    incrementalMarker_ = new IncrementalMarker(this);
    gcListeners_.reserve(16U);
    nativeSizeTriggerGCThreshold_ = config_.GetMaxNativeSizeInc();
    incNativeSizeTriggerGC_ = config_.GetStepNativeSizeInc();
    nativeSizeOvershoot_ = config_.GetNativeSizeOvershoot();
    idleGCTrigger_ = new IdleGCTrigger(this, sHeap_, thread_, GetEcmaVM()->GetJSOptions().EnableOptionalLog());
    asyncClearNativePointerThreshold_ = config_.GetAsyncClearNativePointerThreshold();
}
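
// The FATAL check above guards the subtraction that sizes old space: whatever the two
// semispaces, non-movable, snapshot, machine-code and read-only spaces do not reserve
// becomes old-space capacity, so a too-small max heap size would otherwise underflow.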

void Heap::ResetTlab()
{
    sOldTlab_->Reset();
    sNonMovableTlab_->Reset();
}

void Heap::FillBumpPointerForTlab()
{
    sOldTlab_->FillBumpPointer();
    sNonMovableTlab_->FillBumpPointer();
}
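
// Presumably FillBumpPointer pads the unused tail of each TLAB with filler so the shared
// spaces remain linearly parsable while this local heap is detached from them.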

void Heap::ProcessSharedGCMarkingLocalBuffer()
{
    if (sharedGCData_.sharedConcurrentMarkingLocalBuffer_ != nullptr) {
        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
        sHeap_->GetWorkManager()->PushLocalBufferToGlobal(sharedGCData_.sharedConcurrentMarkingLocalBuffer_);
        ASSERT(sharedGCData_.sharedConcurrentMarkingLocalBuffer_ == nullptr);
    }
}

void Heap::ProcessSharedGCRSetWorkList()
{
    if (sharedGCData_.rSetWorkListHandler_ != nullptr) {
        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
        ASSERT(this == sharedGCData_.rSetWorkListHandler_->GetHeap());
        sHeap_->GetSharedGCMarker()->ProcessThenMergeBackRSetFromBoundJSThread(sharedGCData_.rSetWorkListHandler_);
        // The current thread may finish earlier than the daemon thread.
        // To keep the state range accurate, this flag is updated on both the js thread and the
        // daemon thread. Re-entry is harmless because both writes set the value to false.
        thread_->SetProcessingLocalToSharedRset(false);
        ASSERT(sharedGCData_.rSetWorkListHandler_ == nullptr);
    }
}

const GlobalEnvConstants *Heap::GetGlobalConst() const
{
    return thread_->GlobalConstants();
}

void Heap::Destroy()
{
    ProcessSharedGCRSetWorkList();
    ProcessSharedGCMarkingLocalBuffer();
    if (sOldTlab_ != nullptr) {
        sOldTlab_->Reset();
        delete sOldTlab_;
        sOldTlab_ = nullptr;
    }
    if (sNonMovableTlab_ != nullptr) {
        sNonMovableTlab_->Reset();
        delete sNonMovableTlab_;
        sNonMovableTlab_ = nullptr;
    }
    if (workManager_ != nullptr) {
        delete workManager_;
        workManager_ = nullptr;
    }
    if (edenSpace_ != nullptr) {
        edenSpace_->Destroy();
        delete edenSpace_;
        edenSpace_ = nullptr;
    }
    if (activeSemiSpace_ != nullptr) {
        activeSemiSpace_->Destroy();
        delete activeSemiSpace_;
        activeSemiSpace_ = nullptr;
    }
    if (inactiveSemiSpace_ != nullptr) {
        inactiveSemiSpace_->Destroy();
        delete inactiveSemiSpace_;
        inactiveSemiSpace_ = nullptr;
    }
    if (oldSpace_ != nullptr) {
        oldSpace_->Reset();
        delete oldSpace_;
        oldSpace_ = nullptr;
    }
    if (compressSpace_ != nullptr) {
        compressSpace_->Destroy();
        delete compressSpace_;
        compressSpace_ = nullptr;
    }
    if (nonMovableSpace_ != nullptr) {
        nonMovableSpace_->Reset();
        delete nonMovableSpace_;
        nonMovableSpace_ = nullptr;
    }
    if (snapshotSpace_ != nullptr) {
        snapshotSpace_->Destroy();
        delete snapshotSpace_;
        snapshotSpace_ = nullptr;
    }
    if (machineCodeSpace_ != nullptr) {
        machineCodeSpace_->Reset();
        delete machineCodeSpace_;
        machineCodeSpace_ = nullptr;
    }
    if (hugeObjectSpace_ != nullptr) {
        hugeObjectSpace_->Destroy();
        delete hugeObjectSpace_;
        hugeObjectSpace_ = nullptr;
    }
    if (hugeMachineCodeSpace_ != nullptr) {
        hugeMachineCodeSpace_->Destroy();
        delete hugeMachineCodeSpace_;
        hugeMachineCodeSpace_ = nullptr;
    }
    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
        readOnlySpace_->ClearReadOnly();
        readOnlySpace_->Destroy();
        delete readOnlySpace_;
        readOnlySpace_ = nullptr;
    }
    if (appSpawnSpace_ != nullptr) {
        appSpawnSpace_->Reset();
        delete appSpawnSpace_;
        appSpawnSpace_ = nullptr;
    }
    if (stwYoungGC_ != nullptr) {
        delete stwYoungGC_;
        stwYoungGC_ = nullptr;
    }
    if (partialGC_ != nullptr) {
        delete partialGC_;
        partialGC_ = nullptr;
    }
    if (fullGC_ != nullptr) {
        delete fullGC_;
        fullGC_ = nullptr;
    }

    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (memController_ != nullptr) {
        delete memController_;
        memController_ = nullptr;
    }
    if (sweeper_ != nullptr) {
        delete sweeper_;
        sweeper_ = nullptr;
    }
    if (concurrentMarker_ != nullptr) {
        delete concurrentMarker_;
        concurrentMarker_ = nullptr;
    }
    if (incrementalMarker_ != nullptr) {
        delete incrementalMarker_;
        incrementalMarker_ = nullptr;
    }
    if (nonMovableMarker_ != nullptr) {
        delete nonMovableMarker_;
        nonMovableMarker_ = nullptr;
    }
    if (semiGCMarker_ != nullptr) {
        delete semiGCMarker_;
        semiGCMarker_ = nullptr;
    }
    if (compressGCMarker_ != nullptr) {
        delete compressGCMarker_;
        compressGCMarker_ = nullptr;
    }
    if (evacuator_ != nullptr) {
        delete evacuator_;
        evacuator_ = nullptr;
    }
    if (idleGCTrigger_ != nullptr) {
        delete idleGCTrigger_;
        idleGCTrigger_ = nullptr;
    }
}

void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}

void Heap::GetHeapPrepare()
{
    // Ensure both the local and the shared heap are prepared.
    Prepare();
    SharedHeap *sHeap = SharedHeap::GetInstance();
    sHeap->Prepare(false);
}

void Heap::Resume(TriggerGCType gcType)
{
    if (edenSpace_->ShouldTryEnable()) {
        TryEnableEdenGC();
    }
    if (enableEdenGC_) {
        edenSpace_->ReclaimRegions(edenSpace_->GetInitialCapacity());
        edenSpace_->Restart();
        if (IsEdenMark()) {
            activeSemiSpace_->SetWaterLine();
            return;
        }
    }

    activeSemiSpace_->SetWaterLine();

    if (mode_ != HeapMode::SPAWN &&
        activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC(), thread_)) {
        // if activeSpace capacity changes, oldSpace maximumCapacity should change, too.
        size_t multiple = 2;
        size_t oldSpaceMaxLimit = 0;
        if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
            size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
        } else {
            size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
        }
        inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
    }

    PrepareRecordRegionsForReclaim();
    hugeObjectSpace_->ReclaimHugeRegion();
    hugeMachineCodeSpace_->ReclaimHugeRegion();
    if (parallelGC_) {
        if (gcType == TriggerGCType::OLD_GC) {
            isCSetClearing_.store(true, std::memory_order_release);
        }
        clearTaskFinished_ = false;
        Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}
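
// A semispace resize moves old-space headroom in the opposite direction by twice the
// delta (both the active and inactive semispaces change size), which is what
// "multiple = 2" above accounts for.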

void Heap::ResumeForAppSpawn()
{
    sweeper_->WaitAllTaskFinished();
    hugeObjectSpace_->ReclaimHugeRegion();
    hugeMachineCodeSpace_->ReclaimHugeRegion();
    edenSpace_->ReclaimRegions();
    inactiveSemiSpace_->ReclaimRegions();
    oldSpace_->Reset();
    auto cb = [] (Region *region) {
        region->ClearMarkGCBitset();
    };
    nonMovableSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

void Heap::CompactHeapBeforeFork()
{
    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
}

void Heap::DisableParallelGC()
{
    WaitAllTasksFinished();
    parallelGC_ = false;
    maxEvacuateTaskCount_ = 0;
    maxMarkTaskCount_ = 0;
    stwYoungGC_->ConfigParallelGC(false);
    sweeper_->ConfigConcurrentSweep(false);
    concurrentMarker_->ConfigConcurrentMark(false);
    Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
}

void Heap::EnableParallelGC()
{
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    if (auto totalThreadNum = workManager_->GetTotalThreadNum();
        totalThreadNum != maxEvacuateTaskCount_ + 1) {
        LOG_ECMA_MEM(WARN) << "ThreadNum mismatch, totalThreadNum(workerManager): " << totalThreadNum << ", "
                           << "totalThreadNum(taskpool): " << (maxEvacuateTaskCount_ + 1);
        delete workManager_;
        workManager_ = new WorkManager(this, maxEvacuateTaskCount_ + 1);
        UpdateWorkManager(workManager_);
    }
    ASSERT(maxEvacuateTaskCount_ > 0);
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
                                         maxEvacuateTaskCount_ - 1);
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    stwYoungGC_->ConfigParallelGC(parallelGC_);
    sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
    concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
}

TriggerGCType Heap::SelectGCType() const
{
    if (shouldThrowOOMError_) {
        // Force Full GC after failed Old GC to avoid OOM
        return FULL_GC;
    }

    // If concurrent mark is enabled, TryTriggerConcurrentMarking decides which GC to choose.
    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark()) {
        return YOUNG_GC;
    }
    if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
        GetHeapObjectSize() <= globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize() &&
        !GlobalNativeSizeLargerThanLimit()) {
        return YOUNG_GC;
    }
    return OLD_GC;
}
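
// In short: OLD_GC is selected only once old space exceeds its limit or capacity bound,
// the global object size passes its (overshoot-adjusted) limit, or native memory grows
// past its cap; every other case stays on the young-generation path.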
1095 
CollectGarbage(TriggerGCType gcType,GCReason reason)1096 void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
1097 {
1098     Jit::JitGCLockHolder lock(GetEcmaVM()->GetJSThread());
1099     {
1100 #if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
1101         if (UNLIKELY(!thread_->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
1102             LOG_ECMA(FATAL) << "Local GC must be in jsthread running state";
1103             UNREACHABLE();
1104         }
1105 #endif
1106         if (thread_->IsCrossThreadExecutionEnable() || GetOnSerializeEvent()) {
1107             ProcessGCListeners();
1108             return;
1109         }
1110         RecursionScope recurScope(this, HeapType::LOCAL_HEAP);
1111 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
1112         [[maybe_unused]] GcStateScope scope(thread_);
1113 #endif
1114         CHECK_NO_GC;
1115         if (UNLIKELY(ShouldVerifyHeap())) {
1116             // pre gc heap verify
1117             LOG_ECMA(DEBUG) << "pre gc heap verify";
1118             ProcessSharedGCRSetWorkList();
1119             Verification(this, VerifyKind::VERIFY_PRE_GC).VerifyAll();
1120         }
1121 
1122 #if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
1123         gcType = TriggerGCType::FULL_GC;
1124 #endif
1125         if (fullGCRequested_ && thread_->IsReadyToConcurrentMark() && gcType != TriggerGCType::FULL_GC) {
1126             gcType = TriggerGCType::FULL_GC;
1127         }
1128         if (oldGCRequested_ && gcType != TriggerGCType::FULL_GC) {
1129             gcType = TriggerGCType::OLD_GC;
1130         }
1131         oldGCRequested_ = false;
1132         oldSpace_->AdjustOvershootSize();
1133 
1134         size_t originalNewSpaceSize = IsEdenMark() ? edenSpace_->GetHeapObjectSize() :
1135                 (activeSemiSpace_->GetHeapObjectSize() + edenSpace_->GetHeapObjectSize());
1136         if (!GetJSThread()->IsReadyToConcurrentMark() && markType_ == MarkType::MARK_FULL) {
1137             GetEcmaGCStats()->SetGCReason(reason);
1138         } else {
1139             GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, reason);
1140         }
1141         memController_->StartCalculationBeforeGC();
1142         StatisticHeapObject(gcType);
1143         gcType_ = gcType;
1144         {
1145             pgo::PGODumpPauseScope pscope(GetEcmaVM()->GetPGOProfiler());
1146             switch (gcType) {
1147                 case TriggerGCType::EDEN_GC:
1148                     if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1149                         SetMarkType(MarkType::MARK_EDEN);
1150                     }
1151                     if (markType_ == MarkType::MARK_YOUNG) {
1152                         gcType_ = TriggerGCType::YOUNG_GC;
1153                     }
1154                     if (markType_ == MarkType::MARK_FULL) {
1155                         // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
1156                         gcType_ = TriggerGCType::OLD_GC;
1157                     }
1158                     partialGC_->RunPhases();
1159                     break;
1160                 case TriggerGCType::YOUNG_GC:
1161                     // Use partial GC for young generation.
1162                     if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1163                         SetMarkType(MarkType::MARK_YOUNG);
1164                     }
1165                     if (markType_ == MarkType::MARK_FULL) {
1166                         // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
1167                         gcType_ = TriggerGCType::OLD_GC;
1168                     }
1169                     partialGC_->RunPhases();
1170                     break;
1171                 case TriggerGCType::OLD_GC: {
1172                     bool fullConcurrentMarkRequested = false;
1173                     // Check whether it's needed to trigger full concurrent mark instead of trigger old gc
1174                     if (concurrentMarker_->IsEnabled() &&
1175                         (thread_->IsReadyToConcurrentMark() || markType_ == MarkType::MARK_YOUNG) &&
1176                         reason == GCReason::ALLOCATION_LIMIT) {
1177                         fullConcurrentMarkRequested = true;
1178                     }
1179                     if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
1180                         // Wait for existing concurrent marking tasks to be finished (if any),
1181                         // and reset concurrent marker's status for full mark.
1182                         bool concurrentMark = CheckOngoingConcurrentMarking();
1183                         if (concurrentMark) {
1184                             concurrentMarker_->Reset();
1185                         }
1186                     }
1187                     SetMarkType(MarkType::MARK_FULL);
1188                     if (fullConcurrentMarkRequested && idleTask_ == IdleTaskType::NO_TASK) {
1189                         LOG_ECMA(INFO)
1190                             << "Trigger old gc here may cost long time, trigger full concurrent mark instead";
1191                         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1192                         TriggerConcurrentMarking();
1193                         oldGCRequested_ = true;
1194                         ProcessGCListeners();
1195                         return;
1196                     }
1197                     partialGC_->RunPhases();
1198                     break;
1199                 }
1200                 case TriggerGCType::FULL_GC:
1201                     fullGC_->SetForAppSpawn(false);
1202                     fullGC_->RunPhases();
1203                     if (fullGCRequested_) {
1204                         fullGCRequested_ = false;
1205                     }
1206                     break;
1207                 case TriggerGCType::APPSPAWN_FULL_GC:
1208                     fullGC_->SetForAppSpawn(true);
1209                     fullGC_->RunPhasesForAppSpawn();
1210                     break;
1211                 default:
1212                     LOG_ECMA(FATAL) << "this branch is unreachable";
1213                     UNREACHABLE();
1214                     break;
1215             }
1216             ASSERT(thread_->IsPropertyCacheCleared());
1217         }
1218         UpdateHeapStatsAfterGC(gcType_);
1219         ClearIdleTask();
1220         // Adjust the old space capacity and global limit for the first partial GC with full mark.
1221         // Trigger full mark next time if the current survival rate is much less than half the average survival rates.
1222         AdjustBySurvivalRate(originalNewSpaceSize);
1223         memController_->StopCalculationAfterGC(gcType);
1224         if (gcType == TriggerGCType::FULL_GC || IsConcurrentFullMark()) {
1225             // Only when the gc type is not semiGC and after the old space sweeping has been finished,
1226             // the limits of old space and global space can be recomputed.
1227             RecomputeLimits();
1228             ResetNativeSizeAfterLastGC();
1229             OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark" << IsConcurrentFullMark()
1230                                         << " global object size " << GetHeapObjectSize()
1231                                         << " global committed size " << GetCommittedSize()
1232                                         << " global limit " << globalSpaceAllocLimit_;
1233             markType_ = MarkType::MARK_YOUNG;
1234         }
1235         if (concurrentMarker_->IsRequestDisabled()) {
1236             concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
1237         }
1238         // GC log
1239         GetEcmaGCStats()->RecordStatisticAfterGC();
1240 #ifdef ENABLE_HISYSEVENT
1241         GetEcmaGCKeyStats()->IncGCCount();
1242         if (GetEcmaGCKeyStats()->CheckIfMainThread() && GetEcmaGCKeyStats()->CheckIfKeyPauseTime()) {
1243             GetEcmaGCKeyStats()->AddGCStatsToKey();
1244         }
1245 #endif
1246         GetEcmaGCStats()->PrintGCStatistic();
1247     }
1248 
1249     if (gcType_ == TriggerGCType::OLD_GC) {
1250         // During full concurrent mark, non movable space can have 2M overshoot size temporarily, which means non
1251         // movable space max heap size can reach to 18M temporarily, but after partial old gc, the size must retract to
1252         // below 16M, Otherwise, old GC will be triggered frequently. Non-concurrent mark period, non movable space max
1253         // heap size is 16M, if exceeded, an OOM exception will be thrown, this check is to do this.
1254         CheckNonMovableSpaceOOM();
1255     }
1256     // OOMError object is not allowed to be allocated during gc process, so throw OOMError after gc
1257     if (shouldThrowOOMError_ && gcType_ == TriggerGCType::FULL_GC) {
1258         sweeper_->EnsureAllTaskFinished();
1259         oldSpace_->ResetCommittedOverSizeLimit();
1260         if (oldSpace_->CommittedSizeExceed()) {
1261             DumpHeapSnapshotBeforeOOM(false);
1262             StatisticHeapDetail();
1263             ThrowOutOfMemoryError(thread_, oldSpace_->GetMergeSize(), " OldSpace::Merge");
1264         }
1265         oldSpace_->ResetMergeSize();
1266         shouldThrowOOMError_ = false;
1267     }
1268     // Update the recorded heap object size after GC if in sensitive status.
1269     if (GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE) {
1270         SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
1271     }
1272 
1273     if (UNLIKELY(ShouldVerifyHeap())) {
1274         // Verify the heap after GC.
1275         LOG_ECMA(DEBUG) << "post gc heap verify";
1276         Verification(this, VerifyKind::VERIFY_POST_GC).VerifyAll();
1277     }
1278 
1279     // The weak node nativeFinalizeCallback may execute JS and change the weakNodeList status,
1280     // or even lead to another GC, so it has to be invoked after this GC process finishes.
1281     thread_->InvokeWeakNodeNativeFinalizeCallback();
1282     // PostTask for ProcessNativeDelete
1283     CleanCallBack();
1284 
1285     JSFinalizationRegistry::CheckAndCall(thread_);
1286 #if defined(ECMASCRIPT_SUPPORT_TRACING)
1287     auto tracing = GetEcmaVM()->GetTracing();
1288     if (tracing != nullptr) {
1289         tracing->TraceEventRecordMemory();
1290     }
1291 #endif
1292     ProcessGCListeners();
1293 
1294 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1295     if (!hasOOMDump_ && (g_betaVersion || g_developMode)) {
1296         ThresholdReachedDump();
1297     }
1298 #endif
1299 
1300     if (GetEcmaVM()->IsEnableBaselineJit() || GetEcmaVM()->IsEnableFastJit()) {
1301         // Check whether the machine code space still has enough room.
1302         int remainSize = static_cast<int>(config_.GetDefaultMachineCodeSpaceSize()) -
1303             static_cast<int>(GetMachineCodeSpace()->GetHeapObjectSize());
1304         Jit::GetInstance()->CheckMechineCodeSpaceMemory(GetEcmaVM()->GetJSThread(), remainSize);
1305     }
1306 }
1307 
1308 void BaseHeap::ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
1309     bool NonMovableObjNearOOM)
1310 {
1311     GetEcmaGCStats()->PrintGCMemoryStatistic();
1312     std::ostringstream oss;
1313     if (NonMovableObjNearOOM) {
1314         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1315             << " function name: " << functionName.c_str();
1316     } else {
1317         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1318             << functionName.c_str();
1319     }
1320     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1321     THROW_OOM_ERROR(thread, oss.str().c_str());
1322 }
1323 
1324 void BaseHeap::SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName)
1325 {
1326     std::ostringstream oss;
1327     oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1328         << functionName.c_str();
1329     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1330 
1331     EcmaVM *ecmaVm = thread->GetEcmaVM();
1332     ObjectFactory *factory = ecmaVm->GetFactory();
1333     JSHandle<JSObject> error = factory->GetJSError(ErrorType::OOM_ERROR, oss.str().c_str(), StackCheck::NO);
1334     thread->SetException(error.GetTaggedValue());
1335 }
1336 
1337 void BaseHeap::SetAppFreezeFilterCallback(AppFreezeFilterCallback cb)
1338 {
1339     if (cb != nullptr) {
1340         appfreezeCallback_ = cb;
1341     }
1342 }
1343 
1344 void BaseHeap::ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
1345     bool NonMovableObjNearOOM)
1346 {
1347     GetEcmaGCStats()->PrintGCMemoryStatistic();
1348     std::ostringstream oss;
1349     if (NonMovableObjNearOOM) {
1350         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1351             << " function name: " << functionName.c_str();
1352     } else {
1353         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
1354     }
1355     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1356     EcmaVM *ecmaVm = thread->GetEcmaVM();
1357     JSHandle<GlobalEnv> env = ecmaVm->GetGlobalEnv();
1358     JSHandle<JSObject> error = JSHandle<JSObject>::Cast(env->GetOOMErrorObject());
1359 
1360     thread->SetException(error.GetTaggedValue());
1361     ecmaVm->HandleUncatchableError();
1362 }
1363 
1364 void BaseHeap::FatalOutOfMemoryError(size_t size, std::string functionName)
1365 {
1366     GetEcmaGCStats()->PrintGCMemoryStatistic();
1367     LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
1368                         << " function name: " << functionName.c_str();
1369 }
1370 
1371 void Heap::CheckNonMovableSpaceOOM()
1372 {
1373     if (nonMovableSpace_->GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE) {
1374         sweeper_->EnsureAllTaskFinished();
1375         DumpHeapSnapshotBeforeOOM(false);
1376         StatisticHeapDetail();
1377         ThrowOutOfMemoryError(thread_, nonMovableSpace_->GetHeapObjectSize(), "Heap::CheckNonMovableSpaceOOM", true);
1378     }
1379 }
1380 
1381 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
1382 {
1383     promotedSize_ = GetEvacuator()->GetPromotedSize();
1384     edenToYoungSize_ = GetEvacuator()->GetEdenToYoungSize();
1385     if (originalNewSpaceSize <= 0) {
1386         return;
1387     }
1388     semiSpaceCopiedSize_ = IsEdenMark() ? edenToYoungSize_ : activeSemiSpace_->GetHeapObjectSize();
1389     double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
1390     double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
1391     double survivalRate = std::min(copiedRate + promotedRate, 1.0);
1392     OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
1393                                 << " survivalRate: " << survivalRate;
1394     if (IsEdenMark()) {
1395         memController_->AddEdenSurvivalRate(survivalRate);
1396         return;
1397     }
1398     if (!oldSpaceLimitAdjusted_) {
1399         memController_->AddSurvivalRate(survivalRate);
1400         AdjustOldSpaceLimit();
1401     } else {
1402         double averageSurvivalRate = memController_->GetAverageSurvivalRate();
1403         // 2 means half
1404         if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
1405             SetFullMarkRequestedState(true);
1406             OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
1407                 << " is less than half the average survival rates: " << averageSurvivalRate
1408                 << ". Trigger full mark next time.";
1409             // Survival rate of full mark is precise. Reset recorded survival rates.
1410             memController_->ResetRecordedSurvivalRates();
1411         }
1412         memController_->AddSurvivalRate(survivalRate);
1413     }
1414 }
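// Worked example of the survival-rate math above (illustrative numbers, not measurements):
// with originalNewSpaceSize = 8MB, a copied size of 1MB and a promoted size of 1MB,
//   copiedRate = 1 / 8 = 0.125, promotedRate = 1 / 8 = 0.125,
//   survivalRate = min(0.125 + 0.125, 1.0) = 0.25.
// If the recorded average survival rate is 0.6 (and 0.6 > GROW_OBJECT_SURVIVAL_RATE, assumed here),
// then 0.6 / 2 = 0.3 > 0.25, so a full mark is requested for the next GC and the recorded rates are reset.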
1415 
1416 size_t Heap::VerifyHeapObjects(VerifyKind verifyKind) const
1417 {
1418     size_t failCount = 0;
1419     {
1420         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1421         activeSemiSpace_->IterateOverObjects(verifier);
1422     }
1423 
1424     {
1425         if (verifyKind == VerifyKind::VERIFY_EVACUATE_YOUNG ||
1426             verifyKind == VerifyKind::VERIFY_EVACUATE_OLD ||
1427             verifyKind == VerifyKind::VERIFY_EVACUATE_FULL) {
1428             inactiveSemiSpace_->EnumerateRegions([this](Region *region) {
1429                 region->IterateAllMarkedBits([this](void *addr) {
1430                     VerifyObjectVisitor::VerifyInactiveSemiSpaceMarkedObject(this, addr);
1431                 });
1432             });
1433         }
1434     }
1435 
1436     {
1437         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1438         oldSpace_->IterateOverObjects(verifier);
1439     }
1440 
1441     {
1442         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1443         appSpawnSpace_->IterateOverMarkedObjects(verifier);
1444     }
1445 
1446     {
1447         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1448         nonMovableSpace_->IterateOverObjects(verifier);
1449     }
1450 
1451     {
1452         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1453         hugeObjectSpace_->IterateOverObjects(verifier);
1454     }
1455     {
1456         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1457         hugeMachineCodeSpace_->IterateOverObjects(verifier);
1458     }
1459     {
1460         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1461         machineCodeSpace_->IterateOverObjects(verifier);
1462     }
1463     {
1464         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1465         snapshotSpace_->IterateOverObjects(verifier);
1466     }
1467     return failCount;
1468 }
1469 
1470 size_t Heap::VerifyOldToNewRSet(VerifyKind verifyKind) const
1471 {
1472     size_t failCount = 0;
1473     VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1474     oldSpace_->IterateOldToNewOverObjects(verifier);
1475     appSpawnSpace_->IterateOldToNewOverObjects(verifier);
1476     nonMovableSpace_->IterateOldToNewOverObjects(verifier);
1477     machineCodeSpace_->IterateOldToNewOverObjects(verifier);
1478     return failCount;
1479 }
1480 
1481 void Heap::AdjustOldSpaceLimit()
1482 {
1483     if (oldSpaceLimitAdjusted_) {
1484         return;
1485     }
1486     size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
1487     size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
1488     size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
1489         static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
1490     if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
1491         GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
1492     } else {
1493         oldSpaceLimitAdjusted_ = true;
1494     }
1495 
1496     size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
1497         static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
1498     if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
1499         globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
1500     }
1501     OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
1502         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
1503 }
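// Sketch of the adjustment above with assumed numbers: minGrowingStep = 4MB, an old space heap object
// size of 20MB, a current initial capacity (oldSpaceAllocLimit) of 40MB and an average survival rate
// of 0.7 give newOldSpaceAllocLimit = max(20MB + 4MB, 40MB * 0.7) = 28MB. That is below the current
// 40MB limit, so the initial capacity shrinks to 28MB; once the computed limit would grow instead,
// oldSpaceLimitAdjusted_ latches to true and this adjustment stops.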
1504 
1505 void BaseHeap::OnAllocateEvent([[maybe_unused]] EcmaVM *ecmaVm, [[maybe_unused]] TaggedObject* address,
1506                                [[maybe_unused]] size_t size)
1507 {
1508 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1509     HeapProfilerInterface *profiler = ecmaVm->GetHeapProfile();
1510     if (profiler != nullptr) {
1511         base::BlockHookScope blockScope;
1512         profiler->AllocationEvent(address, size);
1513     }
1514 #endif
1515 }
1516 
1517 void Heap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] bool isFullGC)
1518 {
1519 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
1520 #if defined(ENABLE_DUMP_IN_FAULTLOG)
1521     if (ecmaVm_->GetHeapProfile() != nullptr) {
1522         LOG_ECMA(ERROR) << "Heap::DumpHeapSnapshotBeforeOOM, HeapProfile already exists, skip dump";
1523         return;
1524     }
1525     // Filter appfreeze when dump.
1526     LOG_ECMA(INFO) << " Heap::DumpHeapSnapshotBeforeOOM, isFullGC = " << isFullGC;
1527     base::BlockHookScope blockScope;
1528     HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
1529     if (appfreezeCallback_ != nullptr && appfreezeCallback_(getprocpid())) {
1530         LOG_ECMA(INFO) << "Heap::DumpHeapSnapshotBeforeOOM, appfreezeCallback_ success. ";
1531     }
1532 #ifdef ENABLE_HISYSEVENT
1533     GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetHeapLimitSize(), GetLiveObjectSize());
1534     hasOOMDump_ = true;
1535 #endif
1536     // The VM should always be able to allocate in young space; a real OOM occurs in the non-young spaces.
1537     DumpSnapShotOption dumpOption;
1538     dumpOption.dumpFormat = DumpFormat::BINARY;
1539     dumpOption.isVmMode = true;
1540     dumpOption.isPrivate = false;
1541     dumpOption.captureNumericValue = false;
1542     dumpOption.isFullGC = isFullGC;
1543     dumpOption.isSimplify = true;
1544     dumpOption.isSync = true;
1545     dumpOption.isBeforeFill = false;
1546     dumpOption.isDumpOOM = true;
1547     heapProfile->DumpHeapSnapshot(dumpOption);
1548     HeapProfilerInterface::Destroy(ecmaVm_);
1549 #endif // ENABLE_DUMP_IN_FAULTLOG
1550 #endif // ECMASCRIPT_SUPPORT_SNAPSHOT
1551 }
1552 
1553 void Heap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
1554                        [[maybe_unused]] size_t size)
1555 {
1556 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1557     HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
1558     if (profiler != nullptr) {
1559         base::BlockHookScope blockScope;
1560         profiler->MoveEvent(address, forwardAddress, size);
1561     }
1562 #endif
1563 }
1564 
1565 void Heap::AdjustSpaceSizeForAppSpawn()
1566 {
1567     SetHeapMode(HeapMode::SPAWN);
1568     size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
1569     activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
1570     auto committedSize = appSpawnSpace_->GetCommittedSize();
1571     appSpawnSpace_->SetInitialCapacity(committedSize);
1572     appSpawnSpace_->SetMaximumCapacity(committedSize);
1573     oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
1574     oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
1575 }
1576 
1577 bool Heap::ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object)
1578 {
1579     return hclass->IsString() && !Region::ObjectAddressToRange(object)->InHugeObjectSpace();
1580 }
1581 
1582 void Heap::AddAllocationInspectorToAllSpaces(AllocationInspector *inspector)
1583 {
1584     ASSERT(inspector != nullptr);
1585     // activeSemiSpace_/inactiveSemiSpace_:
1586     // only add an inspector to activeSemiSpace_; while sweeping for GC, the inspector needs to be swept too.
1587     activeSemiSpace_->AddAllocationInspector(inspector);
1588     // oldSpace_/compressSpace_:
1589     // only add an inspector to oldSpace_; while sweeping for GC, the inspector needs to be swept too.
1590     oldSpace_->AddAllocationInspector(inspector);
1591     // readOnlySpace_ does not need an allocation inspector.
1592     // appSpawnSpace_ does not need an allocation inspector.
1593     nonMovableSpace_->AddAllocationInspector(inspector);
1594     machineCodeSpace_->AddAllocationInspector(inspector);
1595     hugeObjectSpace_->AddAllocationInspector(inspector);
1596     hugeMachineCodeSpace_->AddAllocationInspector(inspector);
1597 }
1598 
1599 void Heap::ClearAllocationInspectorFromAllSpaces()
1600 {
1601     edenSpace_->ClearAllocationInspector();
1602     activeSemiSpace_->ClearAllocationInspector();
1603     oldSpace_->ClearAllocationInspector();
1604     nonMovableSpace_->ClearAllocationInspector();
1605     machineCodeSpace_->ClearAllocationInspector();
1606     hugeObjectSpace_->ClearAllocationInspector();
1607     hugeMachineCodeSpace_->ClearAllocationInspector();
1608 }
1609 
1610 void Heap::RecomputeLimits()
1611 {
1612     double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
1613     double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
1614     size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1615         hugeMachineCodeSpace_->GetHeapObjectSize();
1616     size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
1617 
1618     double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
1619     size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
1620     size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
1621         maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
1622     size_t maxGlobalSize = config_.GetMaxHeapSize() - newSpaceCapacity;
1623     size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
1624                                                                      maxGlobalSize, newSpaceCapacity, growingFactor);
1625     globalSpaceAllocLimit_ = newGlobalSpaceLimit;
1626     oldSpace_->SetInitialCapacity(newOldSpaceLimit);
1627     globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(GetGlobalNativeSize(), MIN_HEAP_SIZE,
1628                                                                   MAX_GLOBAL_NATIVE_LIMIT, newSpaceCapacity,
1629                                                                   growingFactor);
1630     OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
1631         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
1632         << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
1633     if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize() &&
1634         (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) { // 2: means half
1635         OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
1636                                     << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
1637                                     << " Committed Size: " << oldSpace_->GetCommittedSize();
1638         SetFullMarkRequestedState(true);
1639     }
1640 }
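// Example of the shrink check above (assumed values, with SHRINK_OBJECT_SURVIVAL_RATE taken as 0.8):
// an old space holding 30MB of live objects with 100MB committed gives 30MB / 0.8 = 37.5MB < 100MB,
// and if newOldSpaceLimit is 40MB then 100MB / 2 = 50MB > 40MB, i.e. most of the committed memory is
// dead or fragmented, so a full mark is requested to reclaim it.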
1641 
1642 bool Heap::CheckAndTriggerOldGC(size_t size)
1643 {
1644     bool isFullMarking = IsConcurrentFullMark() && GetJSThread()->IsMarking();
1645     bool isNativeSizeLargeTrigger = isFullMarking ? false : GlobalNativeSizeLargerThanLimit();
1646     if (isFullMarking && oldSpace_->GetOvershootSize() == 0) {
1647         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1648     }
1649     if ((isNativeSizeLargeTrigger || OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) ||
1650         GetHeapObjectSize() > globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize()) &&
1651         !NeedStopCollection()) {
1652         if (isFullMarking && oldSpace_->GetOvershootSize() < config_.GetOldSpaceMaxOvershootSize()) {
1653             oldSpace_->IncreaseOvershootSize(config_.GetOldSpaceStepOvershootSize());
1654             return false;
1655         }
1656         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
1657         if (!oldGCRequested_) {
1658             return true;
1659         }
1660     }
1661     return false;
1662 }
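// Note on the overshoot path above: while a full concurrent mark is running, the old space limit is
// relaxed step by step (by GetOldSpaceStepOvershootSize() per step, up to GetOldSpaceMaxOvershootSize())
// instead of collecting immediately, so allocation can continue until marking finishes and OLD_GC can
// reclaim with up-to-date marking information.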
1663 
1664 bool Heap::CheckAndTriggerHintGC()
1665 {
1666     if (IsInBackground()) {
1667         CollectGarbage(TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER);
1668         return true;
1669     }
1670     if (InSensitiveStatus()) {
1671         return false;
1672     }
1673     if (memController_->GetPredictedSurvivalRate() < SURVIVAL_RATE_THRESHOLD) {
1674         CollectGarbage(TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER);
1675         return true;
1676     }
1677     return false;
1678 }
1679 
1680 bool Heap::CheckOngoingConcurrentMarking()
1681 {
1682     if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark() &&
1683         concurrentMarker_->IsTriggeredConcurrentMark()) {
1684         TRACE_GC(GCStats::Scope::ScopeId::WaitConcurrentMarkFinished, GetEcmaVM()->GetEcmaGCStats());
1685         if (thread_->IsMarking()) {
1686             ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
1687             MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, WaitConcurrentMarkingFinished);
1688             GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
1689             WaitConcurrentMarkingFinished();
1690         }
1691         WaitRunningTaskFinished();
1692         memController_->RecordAfterConcurrentMark(markType_, concurrentMarker_);
1693         return true;
1694     }
1695     return false;
1696 }
1697 
1698 void Heap::ClearIdleTask()
1699 {
1700     SetIdleTask(IdleTaskType::NO_TASK);
1701     idleTaskFinishTime_ = incrementalMarker_->GetCurrentTimeInMs();
1702 }
1703 
1704 void Heap::TryTriggerIdleCollection()
1705 {
1706     if (idleTask_ != IdleTaskType::NO_TASK || !GetJSThread()->IsReadyToConcurrentMark() || !enableIdleGC_) {
1707         return;
1708     }
1709     if (thread_->IsMarkFinished() && concurrentMarker_->IsTriggeredConcurrentMark()) {
1710         SetIdleTask(IdleTaskType::FINISH_MARKING);
1711         EnableNotifyIdle();
1712         CalculateIdleDuration();
1713         return;
1714     }
1715 
1716     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1717     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1718     double newSpaceAllocToLimitDuration = (static_cast<double>(activeSemiSpace_->GetInitialCapacity()) -
1719                                            static_cast<double>(activeSemiSpace_->GetCommittedSize())) /
1720                                            newSpaceAllocSpeed;
1721     double newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1722     double newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
1723     // 2 means double
1724     if (newSpaceRemainSize < 2 * DEFAULT_REGION_SIZE) {
1725         SetIdleTask(IdleTaskType::YOUNG_GC);
1726         SetMarkType(MarkType::MARK_YOUNG);
1727         EnableNotifyIdle();
1728         CalculateIdleDuration();
1729         return;
1730     }
1731 }
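// The remain-size prediction above in numbers (illustrative speeds): with a 16MB initial capacity,
// 12MB committed and an allocation speed of 0.5MB/ms, the limit is reached in (16 - 12) / 0.5 = 8ms;
// with 6MB of live objects and a concurrent-mark speed of 1MB/ms, marking takes 6ms. The space still
// allocatable after marking is (8 - 6) * 0.5 = 1MB, and if that is below 2 * DEFAULT_REGION_SIZE a
// young GC is scheduled for the next idle period.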
1732 
1733 void Heap::CalculateIdleDuration()
1734 {
1735     size_t updateReferenceSpeed = 0;
1736     // clear native object duration
1737     size_t clearNativeObjSpeed = 0;
1738     if (markType_ == MarkType::MARK_EDEN) {
1739         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_UPDATE_REFERENCE_SPEED);
1740         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_CLEAR_NATIVE_OBJ_SPEED);
1741     } else if (markType_ == MarkType::MARK_YOUNG) {
1742         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_UPDATE_REFERENCE_SPEED);
1743         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_CLEAR_NATIVE_OBJ_SPEED);
1744     } else if (markType_ == MarkType::MARK_FULL) {
1745         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::UPDATE_REFERENCE_SPEED);
1746         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_CLEAR_NATIVE_OBJ_SPEED);
1747     }
1748 
1749     // update reference duration
1750     idlePredictDuration_ = 0.0f;
1751     if (updateReferenceSpeed != 0) {
1752         idlePredictDuration_ += (float)GetHeapObjectSize() / updateReferenceSpeed;
1753     }
1754 
1755     if (clearNativeObjSpeed != 0) {
1756         idlePredictDuration_ += (float)GetNativePointerListSize() / clearNativeObjSpeed;
1757     }
1758 
1759     // sweep and evacuate duration
1760     size_t edenEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_EVACUATE_SPACE_SPEED);
1761     size_t youngEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_EVACUATE_SPACE_SPEED);
1762     double survivalRate = GetEcmaGCStats()->GetAvgSurvivalRate();
1763     if (markType_ == MarkType::MARK_EDEN && edenEvacuateSpeed != 0) {
1764         idlePredictDuration_ += survivalRate * edenSpace_->GetHeapObjectSize() / edenEvacuateSpeed;
1765     } else if (markType_ == MarkType::MARK_YOUNG && youngEvacuateSpeed != 0) {
1766         idlePredictDuration_ += (activeSemiSpace_->GetHeapObjectSize() + edenSpace_->GetHeapObjectSize()) *
1767             survivalRate / youngEvacuateSpeed;
1768     } else if (markType_ == MarkType::MARK_FULL) {
1769         size_t sweepSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::SWEEP_SPEED);
1770         size_t oldEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_EVACUATE_SPACE_SPEED);
1771         if (sweepSpeed != 0) {
1772             idlePredictDuration_ += (float)GetHeapObjectSize() / sweepSpeed;
1773         }
1774         if (oldEvacuateSpeed != 0) {
1775             size_t collectRegionSetSize = GetEcmaGCStats()->GetRecordData(
1776                 RecordData::COLLECT_REGION_SET_SIZE);
1777             idlePredictDuration_ += (survivalRate * activeSemiSpace_->GetHeapObjectSize() + collectRegionSetSize) /
1778                                     oldEvacuateSpeed;
1779         }
1780     }
1781 
1782     // Idle YoungGC mark duration
1783     size_t markSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::MARK_SPEED);
1784     if (idleTask_ == IdleTaskType::YOUNG_GC && markSpeed != 0) {
1785         idlePredictDuration_ += (float)activeSemiSpace_->GetHeapObjectSize() / markSpeed;
1786     }
1787     OPTIONAL_LOG(ecmaVm_, INFO) << "Predict idle gc pause: " << idlePredictDuration_ << "ms";
1788 }
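// For instance (assumed speeds), with MARK_YOUNG, 40MB of heap objects and an update-reference speed
// of 20MB/ms, updating references contributes 40 / 20 = 2ms; with 8MB live in the new space, an
// average survival rate of 0.3 and a young evacuation speed of 4MB/ms, evacuation contributes
// 8 * 0.3 / 4 = 0.6ms, so idlePredictDuration_ ends up around 2.6ms plus the remaining terms.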
1789 
1790 void Heap::TryTriggerIncrementalMarking()
1791 {
1792     if (!GetJSThread()->IsReadyToConcurrentMark() || idleTask_ != IdleTaskType::NO_TASK || !enableIdleGC_) {
1793         return;
1794     }
1795     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1796     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1797         hugeMachineCodeSpace_->GetHeapObjectSize();
1798     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1799     double oldSpaceIncrementalMarkSpeed = incrementalMarker_->GetAverageIncrementalMarkingSpeed();
1800     double oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1801     double oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceIncrementalMarkSpeed;
1802 
1803     double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1804     // Marking should finish before the allocation limit is reached.
1805     if ((oldSpaceRemainSize < DEFAULT_REGION_SIZE) || GetHeapObjectSize() >= globalSpaceAllocLimit_) {
1806         // The objects allocated during incremental marking should stay below the limit;
1807         // otherwise prefer triggering concurrent mark.
1808         size_t allocateSize = oldSpaceAllocSpeed * oldSpaceMarkDuration;
1809         if (allocateSize < ALLOCATE_SIZE_LIMIT) {
1810             EnableNotifyIdle();
1811             SetIdleTask(IdleTaskType::INCREMENTAL_MARK);
1812         }
1813     }
1814 }
1815 
1816 bool Heap::CheckCanTriggerConcurrentMarking()
1817 {
1818     return concurrentMarker_->IsEnabled() && thread_->IsReadyToConcurrentMark() &&
1819         !incrementalMarker_->IsTriggeredIncrementalMark() &&
1820         (idleTask_ == IdleTaskType::NO_TASK || idleTask_ == IdleTaskType::YOUNG_GC);
1821 }
1822 
1823 void Heap::TryTriggerConcurrentMarking()
1824 {
1825     // When concurrent marking is enabled, we attempt to trigger concurrent marking.
1826     // When the size of old space or global space reaches the limit, isFullMarkNeeded is set to true.
1827     // If the predicted duration of the current full mark will not cause the new and old spaces to reach
1828     // their limits, full mark is triggered.
1829     // In the same way, if the size of the new space reaches its capacity, and the predicted duration of the
1830     // current young mark will not cause the new space to reach its limit, young mark can be triggered.
1831     // If full mark takes too long, a compress full GC is requested once the spaces reach their limits.
1832     // If the global space is larger than half the max heap size, we turn to full mark and trigger partial GC.
1833     if (!CheckCanTriggerConcurrentMarking()) {
1834         return;
1835     }
1836     if (fullMarkRequested_) {
1837         markType_ = MarkType::MARK_FULL;
1838         OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
1839         TriggerConcurrentMarking();
1840         return;
1841     }
1842     double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
1843            oldSpaceAllocToLimitDuration = 0;
1844     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1845     double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
1846     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1847         hugeMachineCodeSpace_->GetHeapObjectSize();
1848     size_t globalHeapObjectSize = GetHeapObjectSize();
1849     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1850     if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
1851         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1852             GlobalNativeSizeLargerThanLimit()) {
1853             markType_ = MarkType::MARK_FULL;
1854             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
1855             TriggerConcurrentMarking();
1856             return;
1857         }
1858     } else {
1859         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1860             GlobalNativeSizeLargerThanLimit()) {
1861             markType_ = MarkType::MARK_FULL;
1862             TriggerConcurrentMarking();
1863             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1864             return;
1865         }
1866         oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1867         oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
1868         // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
1869         double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1870         if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
1871             markType_ = MarkType::MARK_FULL;
1872             TriggerConcurrentMarking();
1873             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1874             return;
1875         }
1876     }
1877 
1878     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1879     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1880     if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
1881         if (activeSemiSpace_->GetCommittedSize() >= config_.GetSemiSpaceTriggerConcurrentMark()) {
1882             markType_ = MarkType::MARK_YOUNG;
1883             TriggerConcurrentMarking();
1884             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark " << fullGCRequested_;
1885         }
1886         return;
1887     }
1888     size_t semiSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
1889     size_t semiSpaceCommittedSize = activeSemiSpace_->GetCommittedSize();
1890     bool triggerMark = semiSpaceCapacity <= semiSpaceCommittedSize;
1891     if (!triggerMark) {
1892         newSpaceAllocToLimitDuration = (semiSpaceCapacity - semiSpaceCommittedSize) / newSpaceAllocSpeed;
1893         newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1894         // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
1895         newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
1896         triggerMark = newSpaceRemainSize < DEFAULT_REGION_SIZE;
1897     }
1898 
1899     if (triggerMark) {
1900         markType_ = MarkType::MARK_YOUNG;
1901         TriggerConcurrentMarking();
1902         OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
1903         return;
1904     }
1905 
1906     if (!enableEdenGC_ || IsInBackground()) {
1907         return;
1908     }
1909 
1910     double edenSurvivalRate = memController_->GetAverageEdenSurvivalRate();
1911     double survivalRate = memController_->GetAverageSurvivalRate();
1912     constexpr double expectMaxSurvivalRate = 0.4;
1913     if ((edenSurvivalRate == 0 || edenSurvivalRate >= expectMaxSurvivalRate) && survivalRate >= expectMaxSurvivalRate) {
1914         return;
1915     }
1916 
1917     double edenSpaceAllocSpeed = memController_->GetEdenSpaceAllocationThroughputPerMS();
1918     double edenSpaceConcurrentMarkSpeed = memController_->GetEdenSpaceConcurrentMarkSpeedPerMS();
1919     if (edenSpaceConcurrentMarkSpeed == 0 || edenSpaceAllocSpeed == 0) {
1920         auto &config = ecmaVm_->GetEcmaParamConfiguration();
1921         if (edenSpace_->GetCommittedSize() >= config.GetEdenSpaceTriggerConcurrentMark()) {
1922             markType_ = MarkType::MARK_EDEN;
1923             TriggerConcurrentMarking();
1924             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first eden mark " << fullGCRequested_;
1925         }
1926         return;
1927     }
1928 
1929     auto &config = ecmaVm_->GetEcmaParamConfiguration();
1930     size_t edenCommittedSize = edenSpace_->GetCommittedSize();
1931     triggerMark = edenCommittedSize >= config.GetEdenSpaceTriggerConcurrentMark();
1932     if (!triggerMark && edenSpaceAllocSpeed != 0 && edenSpaceConcurrentMarkSpeed != 0 &&
1933             edenSpace_->GetHeapObjectSize() > 0) {
1934         double edenSpaceLimit = edenSpace_->GetInitialCapacity();
1935         double edenSpaceAllocToLimitDuration = (edenSpaceLimit - edenCommittedSize) / edenSpaceAllocSpeed;
1936         double edenSpaceMarkDuration = edenSpace_->GetHeapObjectSize() / edenSpaceConcurrentMarkSpeed;
1937         double edenSpaceRemainSize = (edenSpaceAllocToLimitDuration - edenSpaceMarkDuration) * newSpaceAllocSpeed;
1938         triggerMark = edenSpaceRemainSize < DEFAULT_REGION_SIZE;
1939     }
1940 
1941     if (triggerMark) {
1942         markType_ = MarkType::MARK_EDEN;
1943         TriggerConcurrentMarking();
1944         OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger eden mark";
1945     }
1946 }
1947 
1948 void Heap::TryTriggerFullMarkOrGCByNativeSize()
1949 {
1950     // In a high-sensitive scene, if the native size exceeds the limit, trigger old GC directly.
1951     if (InSensitiveStatus() && GlobalNativeSizeLargerToTriggerGC()) {
1952         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
1953     } else if (GlobalNativeSizeLargerThanLimit()) {
1954         if (concurrentMarker_->IsEnabled()) {
1955             SetFullMarkRequestedState(true);
1956             TryTriggerConcurrentMarking();
1957         } else {
1958             CheckAndTriggerOldGC();
1959         }
1960     }
1961 }
1962 
1963 bool Heap::TryTriggerFullMarkBySharedLimit()
1964 {
1965     bool keepFullMarkRequest = false;
1966     if (concurrentMarker_->IsEnabled()) {
1967         if (!CheckCanTriggerConcurrentMarking()) {
1968             return keepFullMarkRequest;
1969         }
1970         markType_ = MarkType::MARK_FULL;
1971         if (ConcurrentMarker::TryIncreaseTaskCounts()) {
1972             concurrentMarker_->Mark();
1973         } else {
1974             // Need to retry the full mark request later.
1975             keepFullMarkRequest = true;
1976         }
1977     }
1978     return keepFullMarkRequest;
1979 }
1980 
1981 void Heap::CheckAndTriggerTaskFinishedGC()
1982 {
1983     size_t objectSizeOfTaskBegin = GetRecordObjectSize();
1984     size_t objectSizeOfTaskFinished = GetHeapObjectSize();
1985     size_t nativeSizeOfTaskBegin = GetRecordNativeSize();
1986     size_t nativeSizeOfTaskFinished = GetGlobalNativeSize();
1987     // GC is triggered when the heap size has increased by more than Max(20M, 10% * SizeOfTaskBegin).
1988     bool objectSizeFlag = objectSizeOfTaskFinished > objectSizeOfTaskBegin &&
1989         objectSizeOfTaskFinished - objectSizeOfTaskBegin > std::max(TRIGGER_OLDGC_OBJECT_SIZE_LIMIT,
1990             TRIGGER_OLDGC_OBJECT_LIMIT_RATE * objectSizeOfTaskBegin);
1991     bool nativeSizeFlag = nativeSizeOfTaskFinished > nativeSizeOfTaskBegin &&
1992         nativeSizeOfTaskFinished - nativeSizeOfTaskBegin > std::max(TRIGGER_OLDGC_NATIVE_SIZE_LIMIT,
1993             TRIGGER_OLDGC_NATIVE_LIMIT_RATE * nativeSizeOfTaskBegin);
1994     if (objectSizeFlag || nativeSizeFlag) {
1995         panda::JSNApi::TriggerGC(GetEcmaVM(), panda::ecmascript::GCReason::TRIGGER_BY_TASKPOOL,
1996             panda::JSNApi::TRIGGER_GC_TYPE::OLD_GC);
1997         RecordOrResetObjectSize(0);
1998         RecordOrResetNativeSize(0);
1999     }
2000 }
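// Example (assuming the Max(20M, 10%) constants from the comment above): a task that begins with
// 300MB of heap objects has a trigger threshold of max(20MB, 30MB) = 30MB; finishing at 340MB is a
// 40MB increase, which exceeds the threshold, so an OLD_GC is requested via JSNApi::TriggerGC and the
// recorded sizes are reset.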
2001 
2002 bool Heap::IsMarking() const
2003 {
2004     return thread_->IsMarking();
2005 }
2006 
2007 void Heap::TryTriggerFullMarkBySharedSize(size_t size)
2008 {
2009     newAllocatedSharedObjectSize_ += size;
2010     if (newAllocatedSharedObjectSize_ >= NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT) {
2011         if (concurrentMarker_->IsEnabled()) {
2012             SetFullMarkRequestedState(true);
2013             TryTriggerConcurrentMarking();
2014             newAllocatedSharedObjectSize_ = 0;
2015         }
2016     }
2017 }
2018 
2019 bool Heap::IsReadyToConcurrentMark() const
2020 {
2021     return thread_->IsReadyToConcurrentMark();
2022 }
2023 
2024 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
2025 {
2026     size_t size = object->GetBindingSize();
2027     if (size == 0) {
2028         return;
2029     }
2030     nativeBindingSize_ += size;
2031 }
2032 
2033 void Heap::IncreaseNativeBindingSize(size_t size)
2034 {
2035     if (size == 0) {
2036         return;
2037     }
2038     nativeBindingSize_ += size;
2039 }
2040 
2041 void Heap::DecreaseNativeBindingSize(size_t size)
2042 {
2043     ASSERT(size <= nativeBindingSize_);
2044     nativeBindingSize_ -= size;
2045 }
2046 
2047 void Heap::PrepareRecordRegionsForReclaim()
2048 {
2049     activeSemiSpace_->SetRecordRegion();
2050     oldSpace_->SetRecordRegion();
2051     snapshotSpace_->SetRecordRegion();
2052     nonMovableSpace_->SetRecordRegion();
2053     hugeObjectSpace_->SetRecordRegion();
2054     machineCodeSpace_->SetRecordRegion();
2055     hugeMachineCodeSpace_->SetRecordRegion();
2056 }
2057 
2058 void Heap::TriggerConcurrentMarking()
2059 {
2060     ASSERT(idleTask_ != IdleTaskType::INCREMENTAL_MARK);
2061     if (idleTask_ == IdleTaskType::YOUNG_GC && IsConcurrentFullMark()) {
2062         ClearIdleTask();
2063         DisableNotifyIdle();
2064     }
2065     if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
2066         concurrentMarker_->Mark();
2067     }
2068 }
2069 
2070 void Heap::WaitAllTasksFinished()
2071 {
2072     WaitRunningTaskFinished();
2073     sweeper_->EnsureAllTaskFinished();
2074     WaitClearTaskFinished();
2075     if (concurrentMarker_->IsEnabled() && thread_->IsMarking() && concurrentMarker_->IsTriggeredConcurrentMark()) {
2076         concurrentMarker_->WaitMarkingFinished();
2077     }
2078 }
2079 
2080 void Heap::WaitConcurrentMarkingFinished()
2081 {
2082     concurrentMarker_->WaitMarkingFinished();
2083 }
2084 
2085 void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
2086 {
2087     IncreaseTaskCount();
2088     Taskpool::GetCurrentTaskpool()->PostTask(
2089         std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
2090 }
2091 
2092 void Heap::ChangeGCParams(bool inBackground)
2093 {
2094     const double doubleOne = 1.0;
2095     inBackground_ = inBackground;
2096     if (inBackground) {
2097         LOG_GC(INFO) << "app is inBackground";
2098         if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT &&
2099             GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2100             doubleOne * GetHeapObjectSize() / GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2101             CollectGarbage(TriggerGCType::FULL_GC, GCReason::SWITCH_BACKGROUND);
2102         }
2103         if (sHeap_->GetHeapObjectSize() - sHeap_->GetHeapAliveSizeAfterGC() > BACKGROUND_GROW_LIMIT &&
2104             sHeap_->GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2105             doubleOne * sHeap_->GetHeapObjectSize() / sHeap_->GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2106             sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::SWITCH_BACKGROUND>(thread_);
2107         }
2108         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2109             SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2110             LOG_GC(DEBUG) << "Heap Growing Type CONSERVATIVE";
2111         }
2112         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
2113         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::DISABLE);
2114         maxMarkTaskCount_ = 1;
2115         maxEvacuateTaskCount_ = 1;
2116         Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::BACKGROUND);
2117     } else {
2118         LOG_GC(INFO) << "app is not inBackground";
2119         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2120             SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
2121             LOG_GC(DEBUG) << "Heap Growing Type HIGH_THROUGHPUT";
2122         }
2123         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
2124         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
2125         maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
2126             Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
2127         maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
2128         Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::FOREGROUND);
2129     }
2130 }
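// Background-trigger sketch with assumed constants (BACKGROUND_GROW_LIMIT = 2MB,
// MIN_BACKGROUNG_GC_LIMIT = 32MB, MIN_OBJECT_SURVIVAL_RATE = 0.75): a heap holding 30MB of live
// objects with 50MB committed, where heapAliveSizeAfterGC_ is 20MB, has grown by 10MB > 2MB,
// has 50MB >= 32MB committed, and survives at 30 / 50 = 0.6 <= 0.75, so a compressing FULL_GC
// runs when the app enters the background.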
2131 
2132 GCStats *Heap::GetEcmaGCStats()
2133 {
2134     return ecmaVm_->GetEcmaGCStats();
2135 }
2136 
2137 GCKeyStats *Heap::GetEcmaGCKeyStats()
2138 {
2139     return ecmaVm_->GetEcmaGCKeyStats();
2140 }
2141 
2142 JSObjectResizingStrategy *Heap::GetJSObjectResizingStrategy()
2143 {
2144     return ecmaVm_->GetJSObjectResizingStrategy();
2145 }
2146 
2147 void Heap::TriggerIdleCollection(int idleMicroSec)
2148 {
2149     if (idleTask_ == IdleTaskType::NO_TASK) {
2150         if (incrementalMarker_->GetCurrentTimeInMs() - idleTaskFinishTime_ > IDLE_MAINTAIN_TIME) {
2151             DisableNotifyIdle();
2152         }
2153         return;
2154     }
2155 
2156     // Initialize and advance the incremental mark.
2157     if (idleTask_ == IdleTaskType::INCREMENTAL_MARK &&
2158         incrementalMarker_->GetIncrementalGCStates() != IncrementalGCStates::REMARK) {
2159         incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2160         if (incrementalMarker_->GetIncrementalGCStates() == IncrementalGCStates::REMARK) {
2161             CalculateIdleDuration();
2162         }
2163         return;
2164     }
2165 
2166     if (idleMicroSec < idlePredictDuration_ && idleMicroSec < IDLE_TIME_LIMIT) {
2167         return;
2168     }
2169 
2170     switch (idleTask_) {
2171         case IdleTaskType::FINISH_MARKING: {
2172             if (markType_ == MarkType::MARK_FULL) {
2173                 CollectGarbage(TriggerGCType::OLD_GC, GCReason::IDLE);
2174             } else {
2175                 CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2176             }
2177             break;
2178         }
2179         case IdleTaskType::YOUNG_GC:
2180             CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2181             break;
2182         case IdleTaskType::INCREMENTAL_MARK:
2183             incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2184             break;
2185         default:
2186             break;
2187     }
2188     ClearIdleTask();
2189 }
2190 
2191 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
2192 {
2193     if (inHighMemoryPressure) {
2194         LOG_GC(INFO) << "app is inHighMemoryPressure";
2195         SetMemGrowingType(MemGrowingType::PRESSURE);
2196     } else {
2197         LOG_GC(INFO) << "app is not inHighMemoryPressure";
2198         SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2199     }
2200 }
2201 
2202 void Heap::NotifyFinishColdStart(bool isMainThread)
2203 {
2204     if (!FinishStartupEvent()) {
2205         return;
2206     }
2207     ASSERT(!OnStartupEvent());
2208     LOG_GC(INFO) << "SmartGC: finish app cold start";
2209 
2210     // Set the overshoot size to raise the GC threshold 8MB above the current heap size.
2211     int64_t semiRemainSize =
2212         static_cast<int64_t>(GetNewSpace()->GetInitialCapacity() - GetNewSpace()->GetCommittedSize());
2213     int64_t overshootSize =
2214         static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - semiRemainSize;
2215     // The overshoot size must not be negative.
2216     GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
2217 
2218     if (isMainThread && CheckCanTriggerConcurrentMarking()) {
2219         TryTriggerConcurrentMarking();
2220     }
2221     GetEdenSpace()->AllowTryEnable();
2222 }
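// Overshoot example for the cold-start exit above: with a 16MB new space initial capacity and 10MB
// committed, semiRemainSize = 6MB; with an 8MB step overshoot size the result is 8 - 6 = 2MB, which
// together with the remaining semispace headroom keeps the GC threshold about 8MB above the current
// heap usage. If the semispace already has 8MB or more of headroom, the overshoot clamps to 0.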
2223 
2224 void Heap::NotifyFinishColdStartSoon()
2225 {
2226     if (!OnStartupEvent()) {
2227         return;
2228     }
2229 
2230     // post 2s task
2231     Taskpool::GetCurrentTaskpool()->PostTask(
2232         std::make_unique<FinishColdStartTask>(GetJSThread()->GetThreadId(), this));
2233 }
2234 
2235 void Heap::NotifyHighSensitive(bool isStart)
2236 {
2237     ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SmartGC: set high sensitive status: " + std::to_string(isStart));
2238     isStart ? SetSensitiveStatus(AppSensitiveStatus::ENTER_HIGH_SENSITIVE)
2239         : SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE);
2240     LOG_GC(DEBUG) << "SmartGC: set high sensitive status: " << isStart;
2241 }
2242 
2243 bool Heap::HandleExitHighSensitiveEvent()
2244 {
2245     AppSensitiveStatus status = GetSensitiveStatus();
2246     if (status == AppSensitiveStatus::EXIT_HIGH_SENSITIVE
2247         && CASSensitiveStatus(status, AppSensitiveStatus::NORMAL_SCENE)) {
2248         // Reset the recorded heap object size to 0 after exiting the high-sensitive state.
2249         SetRecordHeapObjectSizeBeforeSensitive(0);
2250         // Set the overshoot size to raise the GC threshold 8MB above the current heap size.
2251         int64_t semiRemainSize =
2252             static_cast<int64_t>(GetNewSpace()->GetInitialCapacity() - GetNewSpace()->GetCommittedSize());
2253         int64_t overshootSize =
2254             static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - semiRemainSize;
2255         // The overshoot size must not be negative.
2256         GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
2257 
2258         // fixme: IncrementalMarking and IdleCollection are currently not enabled
2259         TryTriggerIncrementalMarking();
2260         TryTriggerIdleCollection();
2261         TryTriggerConcurrentMarking();
2262         return true;
2263     }
2264     return false;
2265 }
2266 
2267 // In a high-sensitive scene, the heap object size can temporarily reach MaxHeapSize - 8M; the 8M is
2268 // reserved for concurrent mark.
2269 bool Heap::ObjectExceedMaxHeapSize() const
2270 {
2271     size_t configMaxHeapSize = config_.GetMaxHeapSize();
2272     size_t overshootSize = config_.GetOldSpaceStepOvershootSize();
2273     return GetHeapObjectSize() > configMaxHeapSize - overshootSize;
2274 }
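// E.g. with an assumed 256MB max heap size and the 8MB step overshoot reserved for concurrent mark,
// this returns true once the heap object size exceeds 248MB.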
2275 
2276 bool Heap::NeedStopCollection()
2277 {
2278     // GC is not allowed during value serialization.
2279     if (onSerializeEvent_) {
2280         return true;
2281     }
2282 
2283     if (!InSensitiveStatus()) {
2284         return false;
2285     }
2286 
2287     // During app cold start, the GC threshold is adjusted to the max heap size.
2288     if (OnStartupEvent() && !ObjectExceedMaxHeapSize()) {
2289         return true;
2290     }
2291 
2292     if (GetRecordHeapObjectSizeBeforeSensitive() == 0) {
2293         SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
2294     }
2295 
2296     if (GetHeapObjectSize() < GetRecordHeapObjectSizeBeforeSensitive() + config_.GetIncObjSizeThresholdInSensitive()
2297         && !ObjectExceedMaxHeapSize()) {
2298         return true;
2299     }
2300 
2301     OPTIONAL_LOG(ecmaVm_, INFO) << "SmartGC: heap obj size: " << GetHeapObjectSize()
2302         << " exceeds the sensitive GC threshold, GC has to be triggered";
2303     return false;
2304 }
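// Sensitive-scene example: entering the high-sensitive state at 100MB of heap objects records that
// size; with an assumed 40MB in-sensitive increment threshold, collections are suppressed until the
// heap passes 140MB (or ObjectExceedMaxHeapSize() becomes true), after which GC proceeds normally.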
2305 
2306 bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
2307 {
2308     // Synchronizes-with WorkManager::Initialize: its effects must be visible to the marker threads.
2309     ASSERT(heap_->GetWorkManager()->HasInitialized());
2310     while (!heap_->GetWorkManager()->HasInitialized());
2311     switch (taskPhase_) {
2312         case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
2313             heap_->GetSemiGCMarker()->MarkRoots(threadIndex);
2314             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
2315             break;
2316         case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
2317             heap_->GetSemiGCMarker()->ProcessSnapshotRSet(threadIndex);
2318             break;
2319         case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
2320             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
2321             break;
2322         case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
2323             heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
2324             break;
2325         case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
2326             heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
2327             break;
2328         case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
2329             heap_->GetConcurrentMarker()->ProcessConcurrentMarkTask(threadIndex);
2330             break;
2331         case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
2332             heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
2333             break;
2334         default:
2335             LOG_GC(FATAL) << "this branch is unreachable, type: " << static_cast<int>(taskPhase_);
2336             UNREACHABLE();
2337     }
2338     heap_->ReduceTaskCount();
2339     return true;
2340 }
2341 
2342 bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
2343 {
2344     heap_->ReclaimRegions(gcType_);
2345     return true;
2346 }
2347 
2348 bool Heap::FinishColdStartTask::Run([[maybe_unused]] uint32_t threadIndex)
2349 {
2350     std::this_thread::sleep_for(std::chrono::microseconds(2000000));  // 2000000 means 2s
2351     heap_->NotifyFinishColdStart(false);
2352     return true;
2353 }
2354 
2355 void Heap::CleanCallBack()
2356 {
2357     auto &concurrentCallbacks = this->GetEcmaVM()->GetConcurrentNativePointerCallbacks();
2358     if (!concurrentCallbacks.empty()) {
2359         Taskpool::GetCurrentTaskpool()->PostTask(
2360             std::make_unique<DeleteCallbackTask>(thread_->GetThreadId(), concurrentCallbacks)
2361         );
2362     }
2363     ASSERT(concurrentCallbacks.empty());
2364 
2365     AsyncNativeCallbacksPack &asyncCallbacksPack = this->GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
2366     if (asyncCallbacksPack.Empty()) {
2367         ASSERT(asyncCallbacksPack.TotallyEmpty());
2368         return;
2369     }
2370     AsyncNativeCallbacksPack *asyncCallbacks = new AsyncNativeCallbacksPack();
2371     std::swap(*asyncCallbacks, asyncCallbacksPack);
2372     NativePointerTaskCallback asyncTaskCb = thread_->GetAsyncCleanTaskCallback();
2373     if (asyncTaskCb != nullptr && thread_->IsMainThreadFast() &&
2374         pendingAsyncNativeCallbackSize_ < asyncClearNativePointerThreshold_) {
2375         IncreasePendingAsyncNativeCallbackSize(asyncCallbacks->GetTotalBindingSize());
2376         asyncCallbacks->RegisterFinishNotify([this] (size_t bindingSize) {
2377             this->DecreasePendingAsyncNativeCallbackSize(bindingSize);
2378         });
2379         asyncTaskCb(asyncCallbacks);
2380     } else {
2381         ThreadNativeScope nativeScope(thread_);
2382         asyncCallbacks->ProcessAll();
2383         delete asyncCallbacks;
2384     }
2385     ASSERT(asyncCallbacksPack.TotallyEmpty());
2386 }
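// Dispatch sketch for the async path above (threshold value assumed): with
// asyncClearNativePointerThreshold_ = 8MB and 2MB of callbacks already pending, a new 1MB batch is
// handed to the async clean task and the pending counter rises to 3MB; had the pending size been at
// or above the threshold, the batch would instead run synchronously here under ThreadNativeScope.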
2387 
2388 bool Heap::DeleteCallbackTask::Run([[maybe_unused]] uint32_t threadIndex)
2389 {
2390     for (auto iter : nativePointerCallbacks_) {
2391         if (iter.first != nullptr) {
2392             iter.first(std::get<0>(iter.second),
2393                 std::get<1>(iter.second), std::get<2>(iter.second)); // 2: index of the third tuple element.
2394         }
2395     }
2396     return true;
2397 }
2398 
2399 size_t Heap::GetArrayBufferSize() const
2400 {
2401     size_t result = 0;
2402     sweeper_->EnsureAllTaskFinished();
2403     this->IterateOverObjects([&result](TaggedObject *obj) {
2404         JSHClass* jsClass = obj->GetClass();
2405         result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
2406     });
2407     return result;
2408 }
2409 
2410 size_t Heap::GetLiveObjectSize() const
2411 {
2412     size_t objectSize = 0;
2413     sweeper_->EnsureAllTaskFinished();
2414     this->IterateOverObjects([&objectSize](TaggedObject *obj) {
2415         objectSize += obj->GetClass()->SizeFromJSHClass(obj);
2416     });
2417     return objectSize;
2418 }
2419 
2420 size_t Heap::GetHeapLimitSize() const
2421 {
2422     // Obtains the theoretical upper limit of space that can be allocated to the JS heap.
2423     return config_.GetMaxHeapSize();
2424 }
2425 
2426 bool BaseHeap::IsAlive(TaggedObject *object) const
2427 {
2428     if (!ContainObject(object)) {
2429         LOG_GC(ERROR) << "The region is already free";
2430         return false;
2431     }
2432 
2433     bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
2434     if (isFree) {
2435         Region *region = Region::ObjectAddressToRange(object);
2436         LOG_GC(ERROR) << "The object " << object << " in "
2437                             << region->GetSpaceTypeName()
2438                             << " already free";
2439     }
2440     return !isFree;
2441 }
2442 
2443 bool BaseHeap::ContainObject(TaggedObject *object) const
2444 {
2445     /*
2446      * fixme: There's no absolutely safe approach to doing this, given that the region object is currently
2447      * allocated and maintained in the JS object heap. We cannot cheaply and safely tell whether a region
2448      * object calculated from an object address is still valid or alive.
2449      * This introduces inaccurate results when verifying whether an object is contained in the heap, and it
2450      * may introduce additional incorrect memory access issues.
2451      * Unless we can tolerate the performance impact of iterating the region list of each space and change
2452      * the implementation to that approach, don't rely on the current implementation for accurate results.
2453      */
2454     Region *region = Region::ObjectAddressToRange(object);
2455     return region->InHeapSpace();
2456 }
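/*
 * The accurate alternative the fixme refers to is to walk each space's region list and
 * test address containment directly, instead of trusting a Region back-pointer computed
 * from the object address. A minimal sketch under that assumption (hypothetical
 * Region/Space types, not the real API):
 *
 *   #include <cstdint>
 *   #include <vector>
 *
 *   struct ToyRegion {
 *       uintptr_t begin;
 *       uintptr_t end;
 *       bool Contains(uintptr_t addr) const { return addr >= begin && addr < end; }
 *   };
 *
 *   struct ToySpace {
 *       std::vector<ToyRegion> regions;
 *   };
 *
 *   // O(#regions), but never dereferences a pointer derived from the object address.
 *   bool ContainsAddress(const std::vector<ToySpace> &spaces, uintptr_t addr)
 *   {
 *       for (const auto &space : spaces) {
 *           for (const auto &region : space.regions) {
 *               if (region.Contains(addr)) {
 *                   return true;
 *               }
 *           }
 *       }
 *       return false;
 *   }
 */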

void Heap::PrintHeapInfo(TriggerGCType gcType) const
{
    OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
    OPTIONAL_LOG(ecmaVm_, INFO) << "GC Reason:" << ecmaVm_->GetEcmaGCStats()->GCReasonToString()
                                << ";OnStartup:" << OnStartupEvent()
                                << ";OnHighSensitive:" << static_cast<int>(GetSensitiveStatus())
                                << ";ConcurrentMark Status:" << static_cast<int>(thread_->GetMarkStatus());
    OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
                                << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsConcurrentFullMark()
                                << "), Eden Mark(" << IsEdenMark() << ")";
    OPTIONAL_LOG(ecmaVm_, INFO) << "Eden(" << edenSpace_->GetHeapObjectSize() << "/" << edenSpace_->GetInitialCapacity()
                 << "), ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize() << "/"
                 << activeSemiSpace_->GetInitialCapacity() << "), NonMovable(" << nonMovableSpace_->GetHeapObjectSize()
                 << "/" << nonMovableSpace_->GetCommittedSize() << "/" << nonMovableSpace_->GetInitialCapacity()
                 << "), Old(" << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize() << "/"
                 << oldSpace_->GetInitialCapacity() << "), HugeObject(" << hugeObjectSpace_->GetHeapObjectSize() << "/"
                 << hugeObjectSpace_->GetCommittedSize() << "/" << hugeObjectSpace_->GetInitialCapacity()
                 << "), ReadOnlySpace(" << readOnlySpace_->GetCommittedSize() << "/"
                 << readOnlySpace_->GetInitialCapacity() << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize()
                 << "/" << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
                 << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
}

void Heap::StatisticHeapObject(TriggerGCType gcType) const
{
    PrintHeapInfo(gcType);
#if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
    StatisticHeapDetail();
#endif
}

void Heap::StatisticHeapDetail()
{
    Prepare();
    static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
    int typeCount[JS_TYPE_LAST] = { 0 };
    static const int MIN_COUNT_THRESHOLD = 1000;

    nonMovableSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
        typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
    });
    for (int i = 0; i < JS_TYPE_LAST; i++) {
        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
            LOG_ECMA(INFO) << "NonMovable space type " << JSHClass::DumpJSType(JSType(i))
                           << " count:" << typeCount[i];
        }
        typeCount[i] = 0;
    }

    oldSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
        typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
    });
    for (int i = 0; i < JS_TYPE_LAST; i++) {
        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
            LOG_ECMA(INFO) << "Old space type " << JSHClass::DumpJSType(JSType(i))
                           << " count:" << typeCount[i];
        }
        typeCount[i] = 0;
    }

    activeSemiSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
        typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
    });
    for (int i = 0; i < JS_TYPE_LAST; i++) {
        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
            LOG_ECMA(INFO) << "Active semi space type " << JSHClass::DumpJSType(JSType(i))
                           << " count:" << typeCount[i];
        }
        typeCount[i] = 0;
    }
}
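/*
 * The count / report / reset sequence above repeats verbatim for the three spaces.
 * A possible factoring, sketched standalone (hypothetical visitor interface; assumes
 * every space type exposes the same IterateOverObjects shape):
 *
 *   #include <array>
 *   #include <cstdio>
 *
 *   constexpr int TYPE_LAST = 8;             // stand-in for JSType::TYPE_LAST
 *   constexpr int COUNT_THRESHOLD = 1000;    // stand-in for MIN_COUNT_THRESHOLD
 *
 *   template <typename Space>
 *   void StatisticSpace(Space &space, const char *name)
 *   {
 *       std::array<int, TYPE_LAST> typeCount{};
 *       space.IterateOverObjects([&typeCount](int type) { typeCount[type]++; });
 *       for (int i = 0; i < TYPE_LAST; i++) {
 *           if (typeCount[i] > COUNT_THRESHOLD) {
 *               std::printf("%s space type %d count:%d\n", name, i, typeCount[i]);
 *           }
 *       }
 *   }
 *
 * invoked once per space, e.g. StatisticSpace(nonMovableSpace, "NonMovable");
 */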

void Heap::UpdateWorkManager(WorkManager *workManager)
{
    concurrentMarker_->workManager_ = workManager;
    fullGC_->workManager_ = workManager;
    stwYoungGC_->workManager_ = workManager;
    incrementalMarker_->workManager_ = workManager;
    nonMovableMarker_->workManager_ = workManager;
    semiGCMarker_->workManager_ = workManager;
    compressGCMarker_->workManager_ = workManager;
    partialGC_->workManager_ = workManager;
}

MachineCode *Heap::GetMachineCodeObject(uintptr_t pc) const
{
    MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
    MachineCode *machineCode = reinterpret_cast<MachineCode*>(machineCodeSpace->GetMachineCodeObject(pc));
    if (machineCode != nullptr) {
        return machineCode;
    }
    HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
    return reinterpret_cast<MachineCode*>(hugeMachineCodeSpace->GetMachineCodeObject(pc));
}

std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> Heap::CalCallSiteInfo(uintptr_t retAddr) const
{
    MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
    MachineCode *code = nullptr;
    // Find the machine code object whose text section contains the return address,
    // searching the regular machine code space first and the huge space as a fallback.
    machineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
        if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
            return;
        }
        if (MachineCode::Cast(obj)->IsInText(retAddr)) {
            code = MachineCode::Cast(obj);
        }
    });
    if (code == nullptr) {
        HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
        hugeMachineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
            if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
                return;
            }
            if (MachineCode::Cast(obj)->IsInText(retAddr)) {
                code = MachineCode::Cast(obj);
            }
        });
    }

    if (code == nullptr ||
        (code->GetPayLoadSizeInBytes() ==
         code->GetInstructionsSize() + code->GetStackMapOrOffsetTableSize())) { // baseline code: no call site info
        return {};
    }
    return code->CalCallSiteInfo(retAddr);
}
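/*
 * IterateOverObjects offers no way to break out early, so the lambdas above simulate
 * it with a found-flag early return. The same shape, standalone (hypothetical types):
 *
 *   #include <cstdint>
 *   #include <vector>
 *
 *   struct CodeBlob {
 *       uintptr_t textBegin;
 *       uintptr_t textEnd;
 *       bool IsInText(uintptr_t pc) const { return pc >= textBegin && pc < textEnd; }
 *   };
 *
 *   const CodeBlob *FindBlob(const std::vector<CodeBlob> &blobs, uintptr_t pc)
 *   {
 *       const CodeBlob *found = nullptr;
 *       auto visit = [&found, pc](const CodeBlob &blob) {
 *           if (found != nullptr) {
 *               return;                  // already found; remaining visits are no-ops
 *           }
 *           if (blob.IsInText(pc)) {
 *               found = &blob;
 *           }
 *       };
 *       for (const auto &blob : blobs) visit(blob);   // stands in for IterateOverObjects
 *       return found;
 *   }
 */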

GCListenerId Heap::AddGCListener(FinishGCListener listener, void *data)
{
    gcListeners_.emplace_back(listener, data);
    return std::prev(gcListeners_.cend());
}

void Heap::ProcessGCListeners()
{
    for (auto &&[listener, data] : gcListeners_) {
        listener(data);
    }
}
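/*
 * Returning the iterator of the just-inserted element works as a listener id here
 * because gcListeners_ is a container with stable iterators (std::list-like): later
 * insertions and unrelated erasures never invalidate it, so RemoveGCListener below
 * can erase by id in O(1). A standalone sketch of that registry shape:
 *
 *   #include <list>
 *   #include <utility>
 *
 *   using Listener = void (*)(void *data);
 *   using Registry = std::list<std::pair<Listener, void *>>;
 *   using ListenerId = Registry::const_iterator;
 *
 *   ListenerId Add(Registry &registry, Listener listener, void *data)
 *   {
 *       registry.emplace_back(listener, data);
 *       return std::prev(registry.cend());   // stable handle to the new entry
 *   }
 *
 *   void Remove(Registry &registry, ListenerId id)
 *   {
 *       registry.erase(id);
 *   }
 *
 *   void FireAll(const Registry &registry)
 *   {
 *       for (const auto &[listener, data] : registry) {
 *           listener(data);
 *       }
 *   }
 */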

void SharedHeap::ProcessAllGCListeners()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        ASSERT(!thread->IsInRunningState());
        const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
    });
}

#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
uint64_t Heap::GetCurrentTickMillseconds()
{
    return std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
}

void Heap::SetJsDumpThresholds(size_t thresholds) const
{
    if (thresholds < MIN_JSDUMP_THRESHOLDS || thresholds > MAX_JSDUMP_THRESHOLDS) {
        LOG_GC(INFO) << "SetJsDumpThresholds thresholds is invalid: " << thresholds;
        return;
    }
    g_threshold = thresholds;
}

void Heap::ThresholdReachedDump()
{
    size_t limitSize = GetHeapLimitSize();
    if (limitSize == 0) {
        LOG_GC(INFO) << "ThresholdReachedDump limitSize is invalid";
        return;
    }
    size_t nowPercent = GetHeapObjectSize() * DEC_TO_INT / limitSize;
    if (g_debugLeak || (nowPercent >= g_threshold && (g_lastHeapDumpTime == 0 ||
        GetCurrentTickMillseconds() - g_lastHeapDumpTime > HEAP_DUMP_REPORT_INTERVAL))) {
        // Recheck against the live object size, which is more precise than the fast counter.
        size_t liveObjectSize = GetLiveObjectSize();
        size_t nowPercentRecheck = liveObjectSize * DEC_TO_INT / limitSize;
        LOG_GC(INFO) << "ThresholdReachedDump nowPercentRecheck is " << nowPercentRecheck;
        if (nowPercentRecheck < g_threshold) {
            return;
        }
        g_lastHeapDumpTime = GetCurrentTickMillseconds();
        base::BlockHookScope blockScope;
        HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
        GetEcmaGCKeyStats()->SendSysEventBeforeDump("thresholdReachedDump", limitSize, liveObjectSize);
        DumpSnapShotOption dumpOption;
        dumpOption.dumpFormat = DumpFormat::BINARY;
        dumpOption.isVmMode = true;
        dumpOption.isPrivate = false;
        dumpOption.captureNumericValue = false;
        dumpOption.isFullGC = false;
        dumpOption.isSimplify = true;
        dumpOption.isSync = false;
        dumpOption.isBeforeFill = false;
        dumpOption.isDumpOOM = true; // requests the binary OOM-style dump path
        heapProfile->DumpHeapSnapshot(dumpOption);
        hasOOMDump_ = false;
        HeapProfilerInterface::Destroy(ecmaVm_);
    }
}
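/*
 * Worked example of the gating above, using the defaults declared at the top of this
 * file (DEC_TO_INT = 100, g_threshold = 85, HEAP_DUMP_REPORT_INTERVAL = 24h in ms):
 * with a 512 MB limit and 448 MB of objects, nowPercent = 448 * 100 / 512 = 87 >= 85,
 * so a dump is attempted provided none was reported in the last 24 hours. The
 * throttle shape, standalone:
 *
 *   #include <cstddef>
 *   #include <cstdint>
 *
 *   constexpr uint64_t kReportIntervalMs = 24ULL * 3600 * 1000;
 *
 *   bool ShouldDump(uint64_t nowMs, uint64_t &lastDumpMs, std::size_t objectSize,
 *                   std::size_t limitSize, std::size_t thresholdPercent)
 *   {
 *       if (limitSize == 0) {
 *           return false;
 *       }
 *       std::size_t percent = objectSize * 100 / limitSize;
 *       if (percent < thresholdPercent) {
 *           return false;
 *       }
 *       if (lastDumpMs != 0 && nowMs - lastDumpMs <= kReportIntervalMs) {
 *           return false;                 // at most one report per interval
 *       }
 *       lastDumpMs = nowMs;
 *       return true;
 *   }
 */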
#endif

void Heap::RemoveGCListener(GCListenerId listenerId)
{
    gcListeners_.erase(listenerId);
}

void BaseHeap::IncreaseTaskCount()
{
    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_++;
}

void BaseHeap::WaitRunningTaskFinished()
{
    LockHolder holder(waitTaskFinishedMutex_);
    while (runningTaskCount_ > 0) {
        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
    }
}

bool BaseHeap::CheckCanDistributeTask()
{
    LockHolder holder(waitTaskFinishedMutex_);
    return runningTaskCount_ < maxMarkTaskCount_;
}

void BaseHeap::ReduceTaskCount()
{
    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_--;
    if (runningTaskCount_ == 0) {
        waitTaskFinishedCV_.SignalAll();
    }
}

void BaseHeap::WaitClearTaskFinished()
{
    LockHolder holder(waitClearTaskFinishedMutex_);
    while (!clearTaskFinished_) {
        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
    }
}
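/*
 * The task accounting above is a classic counted rendezvous: increments and decrements
 * happen under one mutex, and waiters sleep on a condition variable until the count
 * drains to zero. The equivalent shape in standard C++ (std::mutex /
 * std::condition_variable instead of the runtime's LockHolder wrappers):
 *
 *   #include <condition_variable>
 *   #include <mutex>
 *
 *   class TaskCounter {
 *   public:
 *       void Increase()
 *       {
 *           std::lock_guard<std::mutex> lock(mutex_);
 *           ++count_;
 *       }
 *
 *       void Reduce()
 *       {
 *           std::lock_guard<std::mutex> lock(mutex_);
 *           if (--count_ == 0) {
 *               cv_.notify_all();    // wake every thread blocked in WaitFinished()
 *           }
 *       }
 *
 *       void WaitFinished()
 *       {
 *           std::unique_lock<std::mutex> lock(mutex_);
 *           cv_.wait(lock, [this] { return count_ == 0; });
 *       }
 *
 *   private:
 *       std::mutex mutex_;
 *       std::condition_variable cv_;
 *       int count_ = 0;
 *   };
 */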

void Heap::ReleaseEdenAllocator()
{
    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    if (!topAddress || !endAddress) {
        return;
    }
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
}

void Heap::InstallEdenAllocator()
{
    if (!enableEdenGC_) {
        return;
    }
    auto topAddress = edenSpace_->GetAllocationTopAddress();
    auto endAddress = edenSpace_->GetAllocationEndAddress();
    if (!topAddress || !endAddress) {
        return;
    }
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
}

void Heap::EnableEdenGC()
{
    enableEdenGC_ = true;
    thread_->EnableEdenGCBarriers();
}

void Heap::TryEnableEdenGC()
{
    if (ohos::OhosParams::IsEdenGCEnable()) {
        EnableEdenGC();
    }
}
}  // namespace panda::ecmascript