/*
 * Copyright (c) 2022-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <chrono>
#include <thread>

#include "ecmascript/base/block_hook_scope.h"
#include "ecmascript/checkpoint/thread_state_transition.h"
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
#include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
#endif

#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/partial_gc.h"
#include "ecmascript/mem/parallel_evacuator.h"
#include "ecmascript/mem/parallel_marker-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/mem/shared_heap/shared_gc_marker-inl.h"
#include "ecmascript/mem/shared_heap/shared_gc.h"
#include "ecmascript/mem/shared_heap/shared_full_gc.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_marker.h"
#include "ecmascript/mem/stw_young_gc.h"
#include "ecmascript/mem/verification.h"
#include "ecmascript/runtime_call_id.h"
#include "ecmascript/jit/jit.h"
#include "ecmascript/ohos/ohos_params.h"
#if !WIN_OR_MAC_OR_IOS_PLATFORM
#include "ecmascript/dfx/hprof/heap_profiler_interface.h"
#include "ecmascript/dfx/hprof/heap_profiler.h"
#endif
#include "ecmascript/dfx/tracing/tracing.h"
#if defined(ENABLE_DUMP_IN_FAULTLOG)
#include "syspara/parameter.h"
#endif

#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
#include "parameters.h"
#include "hisysevent.h"
static constexpr uint32_t DEC_TO_INT = 100;
static size_t g_threshold = OHOS::system::GetUintParameter<size_t>("persist.dfx.leak.threshold", 85);
static uint64_t g_lastHeapDumpTime = 0;
static bool g_debugLeak = OHOS::system::GetBoolParameter("debug.dfx.tags.enableleak", false);
static constexpr uint64_t HEAP_DUMP_REPORT_INTERVAL = 24 * 3600 * 1000; // 24 hours in milliseconds
static bool g_betaVersion = OHOS::system::GetParameter("const.logsystem.versiontype", "unknown") == "beta";
static bool g_developMode = (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "enable") ||
                            (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "true");
#endif

namespace panda::ecmascript {
SharedHeap *SharedHeap::instance_ = nullptr;

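// SharedHeap is a process-wide singleton shared by all EcmaVM instances. On OHOS builds
// its size can be overridden via the "persist.ark.heap.sharedsize" system parameter
// (in MB, see below); otherwise the configuration default is used.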
void SharedHeap::CreateNewInstance()
{
    ASSERT(instance_ == nullptr);
    size_t heapShared = 0;
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    heapShared = OHOS::system::GetUintParameter<size_t>("persist.ark.heap.sharedsize", 0) * 1_MB;
#endif
    EcmaParamConfiguration config(EcmaParamConfiguration::HeapType::SHARED_HEAP,
        MemMapAllocator::GetInstance()->GetCapacity(), heapShared);
    instance_ = new SharedHeap(config);
}

SharedHeap *SharedHeap::GetInstance()
{
    ASSERT(instance_ != nullptr);
    return instance_;
}

void SharedHeap::DestroyInstance()
{
    ASSERT(instance_ != nullptr);
    instance_->Destroy();
    delete instance_;
    instance_ = nullptr;
}

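// Runs a shared GC synchronously on the calling JS thread while all other threads
// are suspended. Only used when the daemon GC thread is not running.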
void SharedHeap::ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread)
{
    ASSERT(!dThread_->IsRunning());
    SuspendAllScope scope(thread);
    SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
    GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
    if (UNLIKELY(ShouldVerifyHeap())) {
        // pre gc heap verify
        LOG_ECMA(DEBUG) << "pre gc shared heap verify";
        sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
        SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
    }
    switch (gcType) {
        case TriggerGCType::SHARED_GC: {
            sharedGC_->RunPhases();
            break;
        }
        case TriggerGCType::SHARED_FULL_GC: {
            sharedFullGC_->RunPhases();
            break;
        }
        default:
            LOG_ECMA(FATAL) << "this branch is unreachable";
            UNREACHABLE();
            break;
    }
    if (UNLIKELY(ShouldVerifyHeap())) {
        // post gc heap verify
        LOG_ECMA(DEBUG) << "after gc shared heap verify";
        SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
    }
    CollectGarbageFinish(false, gcType);
}

bool SharedHeap::CheckAndTriggerSharedGC(JSThread *thread)
{
    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
        return false;
    }
    if ((OldSpaceExceedLimit() || GetHeapObjectSize() > globalSpaceAllocLimit_) &&
        !NeedStopCollection()) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        return true;
    }
    return false;
}

bool SharedHeap::CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size)
{
    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
        return false;
    }
    if ((sHugeObjectSpace_->CommittedSizeExceed(size) || GetHeapObjectSize() > globalSpaceAllocLimit_) &&
        !NeedStopCollection()) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        return true;
    }
    return false;
}

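// Allocation has nearly failed: pick between a compacting shared full GC and a normal
// shared GC based on how fragmented the shared old space has become.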
void SharedHeap::CollectGarbageNearOOM(JSThread *thread)
{
    auto fragmentationSize = sOldSpace_->GetCommittedSize() - sOldSpace_->GetHeapObjectSize();
    if (fragmentationSize >= fragmentationLimitForSharedFullGC_) {
        CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::ALLOCATION_FAILED>(thread);
    } else {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
    }
}
// Shared gc trigger
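// The new global limit is max(liveSize * growingFactor, 2 * defaultGlobalAllocLimit),
// capped by both (committedSize + growingStep) and the configured max heap size.
// The concurrent-mark trigger is then the larger of rate * limit and 1.1 * liveSize,
// so marking is not immediately retriggered by small amounts of growth.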
void SharedHeap::AdjustGlobalSpaceAllocLimit()
{
    globalSpaceAllocLimit_ = std::max(GetHeapObjectSize() * growingFactor_,
                                      config_.GetDefaultGlobalAllocLimit() * 2); // 2: double
    globalSpaceAllocLimit_ = std::min(std::min(globalSpaceAllocLimit_, GetCommittedSize() + growingStep_),
                                      config_.GetMaxHeapSize());
    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);
    constexpr double OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT = 1.1;
    size_t markLimitByIncrement = static_cast<size_t>(GetHeapObjectSize() * OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT);
    globalSpaceConcurrentMarkLimit_ = std::max(globalSpaceConcurrentMarkLimit_, markLimitByIncrement);
    LOG_ECMA_IF(optionalLogEnabled_, INFO) << "Shared gc adjust global space alloc limit to: "
        << globalSpaceAllocLimit_;
}

bool SharedHeap::ObjectExceedMaxHeapSize() const
{
    return OldSpaceExceedLimit() || sHugeObjectSpace_->CommittedSizeExceed();
}

void SharedHeap::StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason)
{
    ASSERT(JSThread::GetCurrent() == dThread_);
    sConcurrentMarker_->Mark(gcType, gcReason);
}

bool SharedHeap::CheckCanTriggerConcurrentMarking(JSThread *thread)
{
    return thread->IsReadyToSharedConcurrentMark() &&
           sConcurrentMarker_ != nullptr && sConcurrentMarker_->IsEnabled();
}

void SharedHeap::Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
    const JSRuntimeOptions &option, DaemonThread *dThread)
{
    sGCStats_ = new SharedGCStats(this, option.EnableGCTracer());
    nativeAreaAllocator_ = nativeAreaAllocator;
    heapRegionAllocator_ = heapRegionAllocator;
    shouldVerifyHeap_ = option.EnableHeapVerify();
    parallelGC_ = option.EnableParallelGC();
    optionalLogEnabled_ = option.EnableOptionalLog();
    size_t maxHeapSize = config_.GetMaxHeapSize();
    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    sNonMovableSpace_ = new SharedNonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);

    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    size_t oldSpaceCapacity = (maxHeapSize - nonmovableSpaceCapacity - readOnlySpaceCapacity) / 2; // 2: half
    globalSpaceAllocLimit_ = config_.GetDefaultGlobalAllocLimit();
    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);

    sOldSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    sCompressSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    sReadOnlySpace_ = new SharedReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    sHugeObjectSpace_ = new SharedHugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    sAppSpawnSpace_ = new SharedAppSpawnSpace(this, oldSpaceCapacity);
    growingFactor_ = config_.GetSharedHeapLimitGrowingFactor();
    growingStep_ = config_.GetSharedHeapLimitGrowingStep();
    incNativeSizeTriggerSharedCM_ = config_.GetStepNativeSizeInc();
    incNativeSizeTriggerSharedGC_ = config_.GetMaxNativeSizeInc();
    fragmentationLimitForSharedFullGC_ = config_.GetFragmentationLimitForSharedFullGC();
    dThread_ = dThread;
}

void SharedHeap::Destroy()
{
    if (sWorkManager_ != nullptr) {
        delete sWorkManager_;
        sWorkManager_ = nullptr;
    }
    if (sOldSpace_ != nullptr) {
        sOldSpace_->Reset();
        delete sOldSpace_;
        sOldSpace_ = nullptr;
    }
    if (sCompressSpace_ != nullptr) {
        sCompressSpace_->Reset();
        delete sCompressSpace_;
        sCompressSpace_ = nullptr;
    }
    if (sNonMovableSpace_ != nullptr) {
        sNonMovableSpace_->Reset();
        delete sNonMovableSpace_;
        sNonMovableSpace_ = nullptr;
    }
    if (sHugeObjectSpace_ != nullptr) {
        sHugeObjectSpace_->Destroy();
        delete sHugeObjectSpace_;
        sHugeObjectSpace_ = nullptr;
    }
    if (sReadOnlySpace_ != nullptr) {
        sReadOnlySpace_->ClearReadOnly();
        sReadOnlySpace_->Destroy();
        delete sReadOnlySpace_;
        sReadOnlySpace_ = nullptr;
    }
    if (sAppSpawnSpace_ != nullptr) {
        sAppSpawnSpace_->Reset();
        delete sAppSpawnSpace_;
        sAppSpawnSpace_ = nullptr;
    }
    if (sharedGC_ != nullptr) {
        delete sharedGC_;
        sharedGC_ = nullptr;
    }
    if (sharedFullGC_ != nullptr) {
        delete sharedFullGC_;
        sharedFullGC_ = nullptr;
    }

    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (sSweeper_ != nullptr) {
        delete sSweeper_;
        sSweeper_ = nullptr;
    }
    if (sConcurrentMarker_ != nullptr) {
        delete sConcurrentMarker_;
        sConcurrentMarker_ = nullptr;
    }
    if (sharedGCMarker_ != nullptr) {
        delete sharedGCMarker_;
        sharedGCMarker_ = nullptr;
    }
    if (sharedGCMovableMarker_ != nullptr) {
        delete sharedGCMovableMarker_;
        sharedGCMovableMarker_ = nullptr;
    }
    dThread_ = nullptr;
}

void SharedHeap::PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option)
{
    globalEnvConstants_ = globalEnvConstants;
    uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
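    // Note: one task pool worker is reserved, hence totalThreadNum - 1 parallel mark
    // tasks, while the work manager gets totalThreadNum + 1 slots, presumably one per
    // task pool worker plus the daemon thread itself.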
    maxMarkTaskCount_ = totalThreadNum - 1;
    sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
    sharedGCMarker_ = new SharedGCMarker(sWorkManager_);
    sharedGCMovableMarker_ = new SharedGCMovableMarker(sWorkManager_, this);
    sConcurrentMarker_ = new SharedConcurrentMarker(option.EnableSharedConcurrentMark() ?
        EnableConcurrentMarkType::ENABLE : EnableConcurrentMarkType::CONFIG_DISABLE);
    sSweeper_ = new SharedConcurrentSweeper(this, option.EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    sharedGC_ = new SharedGC(this);
    sharedFullGC_ = new SharedFullGC(this);
}

void SharedHeap::PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase)
{
    IncreaseTaskCount();
    Taskpool::GetCurrentTaskpool()->PostTask(std::make_unique<ParallelMarkTask>(dThread_->GetThreadId(),
                                                                                this, sharedTaskPhase));
}

bool SharedHeap::ParallelMarkTask::Run(uint32_t threadIndex)
{
    // Spin until WorkManager::Initialize is visible to this marker thread (synchronizes-with).
    while (!sHeap_->GetWorkManager()->HasInitialized());
    switch (taskPhase_) {
        case SharedParallelMarkPhase::SHARED_MARK_TASK:
            sHeap_->GetSharedGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case SharedParallelMarkPhase::SHARED_COMPRESS_TASK:
            sHeap_->GetSharedGCMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        default:
            break;
    }
    sHeap_->ReduceTaskCount();
    return true;
}

bool SharedHeap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    sHeap_->ReclaimRegions(gcType_);
    return true;
}

void SharedHeap::NotifyGCCompleted()
{
    ASSERT(JSThread::GetCurrent() == dThread_);
    LockHolder lock(waitGCFinishedMutex_);
    gcFinished_ = true;
    waitGCFinishedCV_.SignalAll();
}

void SharedHeap::WaitGCFinished(JSThread *thread)
{
    ASSERT(thread->GetThreadId() != dThread_->GetThreadId());
    ASSERT(thread->IsInRunningState());
    ThreadSuspensionScope scope(thread);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::WaitGCFinished");
    LockHolder lock(waitGCFinishedMutex_);
    while (!gcFinished_) {
        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
    }
}

void SharedHeap::WaitGCFinishedAfterAllJSThreadEliminated()
{
    ASSERT(Runtime::GetInstance()->vmCount_ == 0);
    LockHolder lock(waitGCFinishedMutex_);
    while (!gcFinished_) {
        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
    }
}

void SharedHeap::DaemonCollectGarbage([[maybe_unused]] TriggerGCType gcType, [[maybe_unused]] GCReason gcReason)
{
    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
    ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
    ASSERT(JSThread::GetCurrent() == dThread_);
    {
        ThreadManagedScope runningScope(dThread_);
        SuspendAllScope scope(dThread_);
        SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
        gcType_ = gcType;
        GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
        if (UNLIKELY(ShouldVerifyHeap())) {
            // pre gc heap verify
            LOG_ECMA(DEBUG) << "pre gc shared heap verify";
            sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
            SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
        }
        switch (gcType) {
            case TriggerGCType::SHARED_GC: {
                sharedGC_->RunPhases();
                break;
            }
            case TriggerGCType::SHARED_FULL_GC: {
                sharedFullGC_->RunPhases();
                break;
            }
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }

        if (UNLIKELY(ShouldVerifyHeap())) {
            // after gc heap verify
            LOG_ECMA(DEBUG) << "after gc shared heap verify";
            SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
        }
        CollectGarbageFinish(true, gcType);
    }
    // Don't process weak node nativeFinalizeCallback here. These callbacks would be called after localGC.
}

void SharedHeap::WaitAllTasksFinished(JSThread *thread)
{
    WaitGCFinished(thread);
    sSweeper_->WaitAllTaskFinished();
    WaitClearTaskFinished();
}

void SharedHeap::WaitAllTasksFinishedAfterAllJSThreadEliminated()
{
    WaitGCFinishedAfterAllJSThreadEliminated();
    sSweeper_->WaitAllTaskFinished();
    WaitClearTaskFinished();
}

bool SharedHeap::CheckOngoingConcurrentMarking()
{
    if (sConcurrentMarker_->IsEnabled() && !dThread_->IsReadyToConcurrentMark() &&
        sConcurrentMarker_->IsTriggeredConcurrentMark()) {
        // This is only called in SharedGC to decide whether to remark, so there is no need
        // to wait for marking to finish here.
        return true;
    }
    return false;
}

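// Called at the start of a shared GC pause: drains pending marking tasks, finishes
// concurrent sweeping (EnsureAllTaskFinished presumably lets the trigger thread help
// with remaining sweep work), and waits for the async region-clearing task.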
void SharedHeap::Prepare(bool inTriggerGCThread)
{
    WaitRunningTaskFinished();
    if (inTriggerGCThread) {
        sSweeper_->EnsureAllTaskFinished();
    } else {
        sSweeper_->WaitAllTaskFinished();
    }
    WaitClearTaskFinished();
}

SharedHeap::SharedGCScope::SharedGCScope()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
        if (pgoProfiler != nullptr) {
            pgoProfiler->SuspendByGC();
        }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
        thread->SetGcState(true);
#endif
    });
}

SharedHeap::SharedGCScope::~SharedGCScope()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        ASSERT(!thread->IsInRunningState());
        const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
        std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
        if (pgoProfiler != nullptr) {
            pgoProfiler->ResumeByGC();
        }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
        thread->SetGcState(false);
#endif
    });
}

void SharedHeap::PrepareRecordRegionsForReclaim()
{
    sOldSpace_->SetRecordRegion();
    sNonMovableSpace_->SetRecordRegion();
    sHugeObjectSpace_->SetRecordRegion();
}

void SharedHeap::Reclaim(TriggerGCType gcType)
{
    PrepareRecordRegionsForReclaim();
    sHugeObjectSpace_->ReclaimHugeRegion();

    if (parallelGC_) {
        clearTaskFinished_ = false;
        Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(dThread_->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}

void SharedHeap::ReclaimRegions(TriggerGCType gcType)
{
    if (gcType == TriggerGCType::SHARED_FULL_GC) {
        sCompressSpace_->Reset();
    }
    sSweeper_->WaitAllTaskFinished();
    EnumerateOldSpaceRegionsWithRecord([] (Region *region) {
        region->ClearMarkGCBitset();
        region->ResetAliveObject();
    });
    if (!clearTaskFinished_) {
        LockHolder holder(waitClearTaskFinishedMutex_);
        clearTaskFinished_ = true;
        waitClearTaskFinishedCV_.SignalAll();
    }
}

void SharedHeap::DisableParallelGC(JSThread *thread)
{
    WaitAllTasksFinished(thread);
    dThread_->WaitFinished();
    parallelGC_ = false;
    maxMarkTaskCount_ = 0;
    sSweeper_->ConfigConcurrentSweep(false);
    sConcurrentMarker_->ConfigConcurrentMark(false);
}

void SharedHeap::EnableParallelGC(JSRuntimeOptions &option)
{
    uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = totalThreadNum - 1;
    parallelGC_ = option.EnableParallelGC();
    if (auto workThreadNum = sWorkManager_->GetTotalThreadNum();
        workThreadNum != totalThreadNum + 1) {
        LOG_ECMA_MEM(ERROR) << "ThreadNum mismatch, totalThreadNum(sWorkerManager): " << workThreadNum << ", "
                            << "totalThreadNum(taskpool): " << (totalThreadNum + 1);
        delete sWorkManager_;
        sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
        UpdateWorkManager(sWorkManager_);
    }
    sConcurrentMarker_->ConfigConcurrentMark(option.EnableSharedConcurrentMark());
    sSweeper_->ConfigConcurrentSweep(option.EnableConcurrentSweep());
}

void SharedHeap::UpdateWorkManager(SharedGCWorkManager *sWorkManager)
{
    sConcurrentMarker_->ResetWorkManager(sWorkManager);
    sharedGCMarker_->ResetWorkManager(sWorkManager);
    sharedGCMovableMarker_->ResetWorkManager(sWorkManager);
    sharedGC_->ResetWorkManager(sWorkManager);
    sharedFullGC_->ResetWorkManager(sWorkManager);
}

void SharedHeap::TryTriggerLocalConcurrentMarking()
{
    if (localFullMarkTriggered_) {
        return;
    }
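    // The check above is a racy fast path; the atomic exchange below guarantees that
    // exactly one thread wins and broadcasts the full-mark request to all JS threads.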
    if (reinterpret_cast<std::atomic<bool>*>(&localFullMarkTriggered_)->exchange(true, std::memory_order_relaxed)
            != false) {
        return;
    }
    ASSERT(localFullMarkTriggered_ == true);
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        thread->SetFullMarkRequest();
    });
}

size_t SharedHeap::VerifyHeapObjects(VerifyKind verifyKind) const
{
    size_t failCount = 0;
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sOldSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sNonMovableSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sHugeObjectSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sAppSpawnSpace_->IterateOverMarkedObjects(verifier);
    }
    return failCount;
}

bool SharedHeap::IsReadyToConcurrentMark() const
{
    return dThread_->IsReadyToConcurrentMark();
}

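// While the app is in a sensitive state (e.g. during user interaction), shared GC is
// suppressed unless live objects already exceed the hard heap limit.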
bool SharedHeap::NeedStopCollection()
{
    if (!InSensitiveStatus()) {
        return false;
    }

    if (!ObjectExceedMaxHeapSize()) {
        return true;
    }
    return false;
}

void SharedHeap::CompactHeapBeforeFork(JSThread *thread)
{
    ThreadManagedScope managedScope(thread);
    WaitGCFinished(thread);
    sharedFullGC_->SetForAppSpawn(true);
    CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
    sharedFullGC_->SetForAppSpawn(false);
}

void SharedHeap::MoveOldSpaceToAppspawn()
{
    auto committedSize = sOldSpace_->GetCommittedSize();
    sAppSpawnSpace_->SetInitialCapacity(committedSize);
    sAppSpawnSpace_->SetMaximumCapacity(committedSize);
    sOldSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity() - committedSize);
    sOldSpace_->SetMaximumCapacity(sOldSpace_->GetMaximumCapacity() - committedSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    sAppSpawnSpace_->SwapAllocationCounter(sOldSpace_);
#endif
    sOldSpace_->EnumerateRegions([&](Region *region) {
        region->SetRegionSpaceFlag(RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE);
        // Regions in SharedHeap do not need a PageTag threadId.
        PageTag(region, region->GetCapacity(), PageTagType::HEAP, region->GetSpaceTypeName());
        sAppSpawnSpace_->AddRegion(region);
        sAppSpawnSpace_->IncreaseLiveObjectSize(region->AliveObject());
    });
    sOldSpace_->GetRegionList().Clear();
    sOldSpace_->Reset();
}

void SharedHeap::ReclaimForAppSpawn()
{
    sSweeper_->WaitAllTaskFinished();
    sHugeObjectSpace_->ReclaimHugeRegion();
    sCompressSpace_->Reset();
    MoveOldSpaceToAppspawn();
    auto cb = [] (Region *region) {
        region->ClearMarkGCBitset();
        region->ResetAliveObject();
    };
    sNonMovableSpace_->EnumerateRegions(cb);
    sHugeObjectSpace_->EnumerateRegions(cb);
}

void SharedHeap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] bool isFullGC, [[maybe_unused]] JSThread *thread)
{
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    EcmaVM *vm = thread->GetEcmaVM();
    if (vm->GetHeapProfile() != nullptr) {
        LOG_ECMA(ERROR) << "SharedHeap::DumpHeapSnapshotBeforeOOM, HeapProfile is not nullptr";
        return;
    }
    // Filter appfreeze during dump.
    LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, isFullGC = " << isFullGC;
    base::BlockHookScope blockScope;
    HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(vm);
    if (appfreezeCallback_ != nullptr && appfreezeCallback_(getprocpid())) {
        LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, appfreezeCallback_ success.";
    }
    vm->GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetEcmaParamConfiguration().GetMaxHeapSize(),
                                                    GetHeapObjectSize());
    DumpSnapShotOption dumpOption;
    dumpOption.dumpFormat = DumpFormat::BINARY;
    dumpOption.isVmMode = true;
    dumpOption.isPrivate = false;
    dumpOption.captureNumericValue = false;
    dumpOption.isFullGC = isFullGC;
    dumpOption.isSimplify = true;
    dumpOption.isSync = true;
    dumpOption.isBeforeFill = false;
    dumpOption.isDumpOOM = true;
    heapProfile->DumpHeapSnapshot(dumpOption);
    HeapProfilerInterface::Destroy(vm);
#endif // ENABLE_DUMP_IN_FAULTLOG
#endif // ECMASCRIPT_SUPPORT_SNAPSHOT
}

Heap::Heap(EcmaVM *ecmaVm)
    : BaseHeap(ecmaVm->GetEcmaParamConfiguration()),
      ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()), sHeap_(SharedHeap::GetInstance()) {}

void Heap::Initialize()
{
    enablePageTagThreadId_ = ecmaVm_->GetJSOptions().EnablePageTagThreadId();
    memController_ = new MemController(this);
    nativeAreaAllocator_ = ecmaVm_->GetNativeAreaAllocator();
    heapRegionAllocator_ = ecmaVm_->GetHeapRegionAllocator();
    size_t maxHeapSize = config_.GetMaxHeapSize();
    size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
    size_t maxSemiSpaceCapacity = config_.GetMaxSemiSpaceSize();
    size_t edenSpaceCapacity = 2_MB;
    edenSpace_ = new EdenSpace(this, edenSpaceCapacity, edenSpaceCapacity);
    edenSpace_->Restart();
    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    activeSemiSpace_->Restart();
    activeSemiSpace_->SetWaterLine();

    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
    sOldTlab_ = new ThreadLocalAllocationBuffer(this);
    thread_->ReSetSOldSpaceAllocationAddress(sOldTlab_->GetTopAddress(), sOldTlab_->GetEndAddress());
    sNonMovableTlab_ = new ThreadLocalAllocationBuffer(this);
    thread_->ReSetSNonMovableSpaceAllocationAddress(sNonMovableTlab_->GetTopAddress(),
                                                    sNonMovableTlab_->GetEndAddress());
    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);

    // whether the heap should be verified during gc
    shouldVerifyHeap_ = ecmaVm_->GetJSOptions().EnableHeapVerify();
    // the from-space is not set up here

    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
    nonMovableSpace_->Initialize();
    size_t snapshotSpaceCapacity = config_.GetDefaultSnapshotSpaceSize();
    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
    size_t machineCodeSpaceCapacity = config_.GetDefaultMachineCodeSpaceSize();
    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);

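    // Old space receives whatever remains of the max heap size after the two semispaces
    // and the fixed-size auxiliary spaces below are accounted for.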
    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) { // LOCV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
    }
    size_t oldSpaceCapacity = maxHeapSize - capacities;
    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
    globalSpaceNativeLimit_ = INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT;
    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    oldSpace_->Initialize();

    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    hugeMachineCodeSpace_ = new HugeMachineCodeSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);

    LOG_GC(DEBUG) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
                  << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
                  << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
                  << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
                  << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
                  << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
                  << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
                  << ", gcThreadNum = " << maxMarkTaskCount_;
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
    markType_ = MarkType::MARK_YOUNG;
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
    stwYoungGC_ = new STWYoungGC(this, parallelGC_);
    fullGC_ = new FullGC(this);

    partialGC_ = new PartialGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
        EnableConcurrentMarkType::CONFIG_DISABLE);
    nonMovableMarker_ = new NonMovableMarker(this);
    semiGCMarker_ = new SemiGCMarker(this);
    compressGCMarker_ = new CompressGCMarker(this);
    evacuator_ = new ParallelEvacuator(this);
    incrementalMarker_ = new IncrementalMarker(this);
    gcListeners_.reserve(16U);
    nativeSizeTriggerGCThreshold_ = config_.GetMaxNativeSizeInc();
    incNativeSizeTriggerGC_ = config_.GetStepNativeSizeInc();
    nativeSizeOvershoot_ = config_.GetNativeSizeOvershoot();
    idleGCTrigger_ = new IdleGCTrigger(this, sHeap_, thread_, GetEcmaVM()->GetJSOptions().EnableOptionalLog());
    asyncClearNativePointerThreshold_ = config_.GetAsyncClearNativePointerThreshold();
}

void Heap::ResetTlab()
{
    sOldTlab_->Reset();
    sNonMovableTlab_->Reset();
}

void Heap::FillBumpPointerForTlab()
{
    sOldTlab_->FillBumpPointer();
    sNonMovableTlab_->FillBumpPointer();
}

void Heap::ProcessSharedGCMarkingLocalBuffer()
{
    if (sharedGCData_.sharedConcurrentMarkingLocalBuffer_ != nullptr) {
        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
        sHeap_->GetWorkManager()->PushLocalBufferToGlobal(sharedGCData_.sharedConcurrentMarkingLocalBuffer_);
        ASSERT(sharedGCData_.sharedConcurrentMarkingLocalBuffer_ == nullptr);
    }
}

void Heap::ProcessSharedGCRSetWorkList()
{
    if (sharedGCData_.rSetWorkListHandler_ != nullptr) {
        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
        ASSERT(this == sharedGCData_.rSetWorkListHandler_->GetHeap());
        sHeap_->GetSharedGCMarker()->ProcessThenMergeBackRSetFromBoundJSThread(sharedGCData_.rSetWorkListHandler_);
        // The current thread may finish earlier than the daemon thread. To keep the state range
        // accurate, the flag is cleared on both the js thread and the daemon thread; re-entry is
        // harmless because the value is always set to false.
        thread_->SetProcessingLocalToSharedRset(false);
        ASSERT(sharedGCData_.rSetWorkListHandler_ == nullptr);
    }
}

const GlobalEnvConstants *Heap::GetGlobalConst() const
{
    return thread_->GlobalConstants();
}

void Heap::Destroy()
{
    ProcessSharedGCRSetWorkList();
    ProcessSharedGCMarkingLocalBuffer();
    if (sOldTlab_ != nullptr) {
        sOldTlab_->Reset();
        delete sOldTlab_;
        sOldTlab_ = nullptr;
    }
    if (sNonMovableTlab_ != nullptr) {
        sNonMovableTlab_->Reset();
        delete sNonMovableTlab_;
        sNonMovableTlab_ = nullptr;
    }
    if (workManager_ != nullptr) {
        delete workManager_;
        workManager_ = nullptr;
    }
    if (edenSpace_ != nullptr) {
        edenSpace_->Destroy();
        delete edenSpace_;
        edenSpace_ = nullptr;
    }
    if (activeSemiSpace_ != nullptr) {
        activeSemiSpace_->Destroy();
        delete activeSemiSpace_;
        activeSemiSpace_ = nullptr;
    }
    if (inactiveSemiSpace_ != nullptr) {
        inactiveSemiSpace_->Destroy();
        delete inactiveSemiSpace_;
        inactiveSemiSpace_ = nullptr;
    }
    if (oldSpace_ != nullptr) {
        oldSpace_->Reset();
        delete oldSpace_;
        oldSpace_ = nullptr;
    }
    if (compressSpace_ != nullptr) {
        compressSpace_->Destroy();
        delete compressSpace_;
        compressSpace_ = nullptr;
    }
    if (nonMovableSpace_ != nullptr) {
        nonMovableSpace_->Reset();
        delete nonMovableSpace_;
        nonMovableSpace_ = nullptr;
    }
    if (snapshotSpace_ != nullptr) {
        snapshotSpace_->Destroy();
        delete snapshotSpace_;
        snapshotSpace_ = nullptr;
    }
    if (machineCodeSpace_ != nullptr) {
        machineCodeSpace_->Reset();
        delete machineCodeSpace_;
        machineCodeSpace_ = nullptr;
    }
    if (hugeObjectSpace_ != nullptr) {
        hugeObjectSpace_->Destroy();
        delete hugeObjectSpace_;
        hugeObjectSpace_ = nullptr;
    }
    if (hugeMachineCodeSpace_ != nullptr) {
        hugeMachineCodeSpace_->Destroy();
        delete hugeMachineCodeSpace_;
        hugeMachineCodeSpace_ = nullptr;
    }
    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
        readOnlySpace_->ClearReadOnly();
        readOnlySpace_->Destroy();
        delete readOnlySpace_;
        readOnlySpace_ = nullptr;
    }
    if (appSpawnSpace_ != nullptr) {
        appSpawnSpace_->Reset();
        delete appSpawnSpace_;
        appSpawnSpace_ = nullptr;
    }
    if (stwYoungGC_ != nullptr) {
        delete stwYoungGC_;
        stwYoungGC_ = nullptr;
    }
    if (partialGC_ != nullptr) {
        delete partialGC_;
        partialGC_ = nullptr;
    }
    if (fullGC_ != nullptr) {
        delete fullGC_;
        fullGC_ = nullptr;
    }

    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (memController_ != nullptr) {
        delete memController_;
        memController_ = nullptr;
    }
    if (sweeper_ != nullptr) {
        delete sweeper_;
        sweeper_ = nullptr;
    }
    if (concurrentMarker_ != nullptr) {
        delete concurrentMarker_;
        concurrentMarker_ = nullptr;
    }
    if (incrementalMarker_ != nullptr) {
        delete incrementalMarker_;
        incrementalMarker_ = nullptr;
    }
    if (nonMovableMarker_ != nullptr) {
        delete nonMovableMarker_;
        nonMovableMarker_ = nullptr;
    }
    if (semiGCMarker_ != nullptr) {
        delete semiGCMarker_;
        semiGCMarker_ = nullptr;
    }
    if (compressGCMarker_ != nullptr) {
        delete compressGCMarker_;
        compressGCMarker_ = nullptr;
    }
    if (evacuator_ != nullptr) {
        delete evacuator_;
        evacuator_ = nullptr;
    }
    if (idleGCTrigger_ != nullptr) {
        delete idleGCTrigger_;
        idleGCTrigger_ = nullptr;
    }
}

void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}

void Heap::GetHeapPrepare()
{
    // Ensure both the local and the shared heap are prepared.
    Prepare();
    SharedHeap *sHeap = SharedHeap::GetInstance();
    sHeap->Prepare(false);
}

void Heap::Resume(TriggerGCType gcType)
{
    if (edenSpace_->ShouldTryEnable()) {
        TryEnableEdenGC();
    }
    if (enableEdenGC_) {
        edenSpace_->ReclaimRegions(edenSpace_->GetInitialCapacity());
        edenSpace_->Restart();
        if (IsEdenMark()) {
            activeSemiSpace_->SetWaterLine();
            return;
        }
    }

    activeSemiSpace_->SetWaterLine();

    if (mode_ != HeapMode::SPAWN &&
        activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC(), thread_)) {
        // if activeSpace capacity changes, oldSpace maximumCapacity should change too.
        size_t multiple = 2;
        size_t oldSpaceMaxLimit = 0;
        if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
            size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
        } else {
            size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
        }
        inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
    }

    PrepareRecordRegionsForReclaim();
    hugeObjectSpace_->ReclaimHugeRegion();
    hugeMachineCodeSpace_->ReclaimHugeRegion();
    if (parallelGC_) {
        if (gcType == TriggerGCType::OLD_GC) {
            isCSetClearing_.store(true, std::memory_order_release);
        }
        clearTaskFinished_ = false;
        Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}

void Heap::ResumeForAppSpawn()
{
    sweeper_->WaitAllTaskFinished();
    hugeObjectSpace_->ReclaimHugeRegion();
    hugeMachineCodeSpace_->ReclaimHugeRegion();
    edenSpace_->ReclaimRegions();
    inactiveSemiSpace_->ReclaimRegions();
    oldSpace_->Reset();
    auto cb = [] (Region *region) {
        region->ClearMarkGCBitset();
    };
    nonMovableSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

void Heap::CompactHeapBeforeFork()
{
    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
}

void Heap::DisableParallelGC()
{
    WaitAllTasksFinished();
    parallelGC_ = false;
    maxEvacuateTaskCount_ = 0;
    maxMarkTaskCount_ = 0;
    stwYoungGC_->ConfigParallelGC(false);
    sweeper_->ConfigConcurrentSweep(false);
    concurrentMarker_->ConfigConcurrentMark(false);
    Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
}

void Heap::EnableParallelGC()
{
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    if (auto totalThreadNum = workManager_->GetTotalThreadNum();
        totalThreadNum != maxEvacuateTaskCount_ + 1) {
        LOG_ECMA_MEM(WARN) << "ThreadNum mismatch, totalThreadNum(workerManager): " << totalThreadNum << ", "
                           << "totalThreadNum(taskpool): " << (maxEvacuateTaskCount_ + 1);
        delete workManager_;
        workManager_ = new WorkManager(this, maxEvacuateTaskCount_ + 1);
        UpdateWorkManager(workManager_);
    }
    ASSERT(maxEvacuateTaskCount_ > 0);
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
                                         maxEvacuateTaskCount_ - 1);
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    stwYoungGC_->ConfigParallelGC(parallelGC_);
    sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
    concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
}

TriggerGCType Heap::SelectGCType() const
{
    if (shouldThrowOOMError_) {
        // Force Full GC after failed Old GC to avoid OOM
        return FULL_GC;
    }

    // If concurrent mark is enabled, TryTriggerConcurrentMarking decides which GC to choose.
    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark()) {
        return YOUNG_GC;
    }
    if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
        GetHeapObjectSize() <= globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize() &&
        !GlobalNativeSizeLargerThanLimit()) {
        return YOUNG_GC;
    }
    return OLD_GC;
}

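// Local GC entry point. EDEN_GC and YOUNG_GC run as partial GCs; an OLD_GC request may
// instead kick off a full concurrent mark when marking is available and the trigger was
// the allocation limit; FULL_GC and APPSPAWN_FULL_GC compact the heap via fullGC_.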
CollectGarbage(TriggerGCType gcType,GCReason reason)1097 void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
1098 {
1099     Jit::JitGCLockHolder lock(GetEcmaVM()->GetJSThread());
1100     {
1101 #if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
1102         if (UNLIKELY(!thread_->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
1103             LOG_ECMA(FATAL) << "Local GC must be in jsthread running state";
1104             UNREACHABLE();
1105         }
1106 #endif
1107         if (thread_->IsCrossThreadExecutionEnable() || GetOnSerializeEvent()) {
1108             ProcessGCListeners();
1109             return;
1110         }
1111         RecursionScope recurScope(this, HeapType::LOCAL_HEAP);
1112 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
1113         [[maybe_unused]] GcStateScope scope(thread_);
1114 #endif
1115         CHECK_NO_GC;
1116         if (UNLIKELY(ShouldVerifyHeap())) {
1117             // pre gc heap verify
1118             LOG_ECMA(DEBUG) << "pre gc heap verify";
1119             ProcessSharedGCRSetWorkList();
1120             Verification(this, VerifyKind::VERIFY_PRE_GC).VerifyAll();
1121         }
1122 
1123 #if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
1124         gcType = TriggerGCType::FULL_GC;
1125 #endif
1126         if (fullGCRequested_ && thread_->IsReadyToConcurrentMark() && gcType != TriggerGCType::FULL_GC) {
1127             gcType = TriggerGCType::FULL_GC;
1128         }
1129         if (oldGCRequested_ && gcType != TriggerGCType::FULL_GC) {
1130             gcType = TriggerGCType::OLD_GC;
1131         }
1132         oldGCRequested_ = false;
1133         oldSpace_->AdjustOvershootSize();
1134 
1135         size_t originalNewSpaceSize = IsEdenMark() ? edenSpace_->GetHeapObjectSize() :
1136                 (activeSemiSpace_->GetHeapObjectSize() + edenSpace_->GetHeapObjectSize());
1137         if (!GetJSThread()->IsReadyToConcurrentMark() && markType_ == MarkType::MARK_FULL) {
1138             GetEcmaGCStats()->SetGCReason(reason);
1139         } else {
1140             GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, reason);
1141         }
1142         memController_->StartCalculationBeforeGC();
1143         StatisticHeapObject(gcType);
1144         gcType_ = gcType;
1145         {
1146             pgo::PGODumpPauseScope pscope(GetEcmaVM()->GetPGOProfiler());
1147             switch (gcType) {
1148                 case TriggerGCType::EDEN_GC:
1149                     if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1150                         SetMarkType(MarkType::MARK_EDEN);
1151                     }
1152                     if (markType_ == MarkType::MARK_YOUNG) {
1153                         gcType_ = TriggerGCType::YOUNG_GC;
1154                     }
1155                     if (markType_ == MarkType::MARK_FULL) {
1156                         // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
1157                         gcType_ = TriggerGCType::OLD_GC;
1158                     }
1159                     partialGC_->RunPhases();
1160                     break;
1161                 case TriggerGCType::YOUNG_GC:
1162                     // Use partial GC for young generation.
1163                     if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1164                         SetMarkType(MarkType::MARK_YOUNG);
1165                     }
1166                     if (markType_ == MarkType::MARK_FULL) {
1167                         // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
1168                         gcType_ = TriggerGCType::OLD_GC;
1169                     }
1170                     partialGC_->RunPhases();
1171                     break;
1172                 case TriggerGCType::OLD_GC: {
1173                     bool fullConcurrentMarkRequested = false;
1174                     // Check whether it's needed to trigger full concurrent mark instead of trigger old gc
1175                     if (concurrentMarker_->IsEnabled() &&
1176                         (thread_->IsReadyToConcurrentMark() || markType_ == MarkType::MARK_YOUNG) &&
1177                         reason == GCReason::ALLOCATION_LIMIT) {
1178                         fullConcurrentMarkRequested = true;
1179                     }
1180                     if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
1181                         // Wait for existing concurrent marking tasks to be finished (if any),
1182                         // and reset concurrent marker's status for full mark.
1183                         bool concurrentMark = CheckOngoingConcurrentMarking();
1184                         if (concurrentMark) {
1185                             concurrentMarker_->Reset();
1186                         }
1187                     }
1188                     SetMarkType(MarkType::MARK_FULL);
1189                     if (fullConcurrentMarkRequested && idleTask_ == IdleTaskType::NO_TASK) {
1190                         LOG_ECMA(INFO)
1191                             << "Trigger old gc here may cost long time, trigger full concurrent mark instead";
1192                         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1193                         TriggerConcurrentMarking();
1194                         oldGCRequested_ = true;
1195                         ProcessGCListeners();
1196                         return;
1197                     }
1198                     partialGC_->RunPhases();
1199                     break;
1200                 }
1201                 case TriggerGCType::FULL_GC:
1202                     fullGC_->SetForAppSpawn(false);
1203                     fullGC_->RunPhases();
1204                     if (fullGCRequested_) {
1205                         fullGCRequested_ = false;
1206                     }
1207                     break;
1208                 case TriggerGCType::APPSPAWN_FULL_GC:
1209                     fullGC_->SetForAppSpawn(true);
1210                     fullGC_->RunPhasesForAppSpawn();
1211                     break;
1212                 default:
1213                     LOG_ECMA(FATAL) << "this branch is unreachable";
1214                     UNREACHABLE();
1215                     break;
1216             }
1217             ASSERT(thread_->IsPropertyCacheCleared());
1218         }
1219         UpdateHeapStatsAfterGC(gcType_);
1220         ClearIdleTask();
1221         // Adjust the old space capacity and global limit for the first partial GC with full mark.
1222         // Trigger full mark next time if the current survival rate is much less than half the average survival rates.
1223         AdjustBySurvivalRate(originalNewSpaceSize);
1224         memController_->StopCalculationAfterGC(gcType);
1225         if (gcType == TriggerGCType::FULL_GC || IsConcurrentFullMark()) {
1226             // Only when the gc type is not semiGC and after the old space sweeping has been finished,
1227             // the limits of old space and global space can be recomputed.
1228             RecomputeLimits();
1229             ResetNativeSizeAfterLastGC();
1230             OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark" << IsConcurrentFullMark()
1231                                         << " global object size " << GetHeapObjectSize()
1232                                         << " global committed size " << GetCommittedSize()
1233                                         << " global limit " << globalSpaceAllocLimit_;
1234             markType_ = MarkType::MARK_YOUNG;
1235         }
1236         if (concurrentMarker_->IsRequestDisabled()) {
1237             concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
1238         }
1239         // GC log
1240         GetEcmaGCStats()->RecordStatisticAfterGC();
1241 #ifdef ENABLE_HISYSEVENT
1242         GetEcmaGCKeyStats()->IncGCCount();
1243         if (GetEcmaGCKeyStats()->CheckIfMainThread() && GetEcmaGCKeyStats()->CheckIfKeyPauseTime()) {
1244             GetEcmaGCKeyStats()->AddGCStatsToKey();
1245         }
1246 #endif
1247         GetEcmaGCStats()->PrintGCStatistic();
1248     }
1249 
1250     if (gcType_ == TriggerGCType::OLD_GC) {
1251         // During full concurrent mark, non movable space can have 2M overshoot size temporarily, which means non
1252         // movable space max heap size can reach to 18M temporarily, but after partial old gc, the size must retract to
1253         // below 16M, Otherwise, old GC will be triggered frequently. Non-concurrent mark period, non movable space max
1254         // heap size is 16M, if exceeded, an OOM exception will be thrown, this check is to do this.
1255         CheckNonMovableSpaceOOM();
1256     }
1257     // OOMError object is not allowed to be allocated during gc process, so throw OOMError after gc
1258     if (shouldThrowOOMError_ && gcType_ == TriggerGCType::FULL_GC) {
1259         sweeper_->EnsureAllTaskFinished();
1260         oldSpace_->ResetCommittedOverSizeLimit();
1261         if (oldSpace_->CommittedSizeExceed()) {
1262             DumpHeapSnapshotBeforeOOM(false);
1263             StatisticHeapDetail();
1264             ThrowOutOfMemoryError(thread_, oldSpace_->GetMergeSize(), " OldSpace::Merge");
1265         }
1266         oldSpace_->ResetMergeSize();
1267         shouldThrowOOMError_ = false;
1268     }
1269     // Update the recorded heap object size after GC if in sensitive status.
1270     if (GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE) {
1271         SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
1272     }
1273 
1274     if (UNLIKELY(ShouldVerifyHeap())) {
1275         // post GC heap verification
1276         LOG_ECMA(DEBUG) << "post gc heap verify";
1277         Verification(this, VerifyKind::VERIFY_POST_GC).VerifyAll();
1278     }
1279 
1280     // The weak node nativeFinalizeCallback may execute JS and change the weakNodeList status,
1281     // and may even lead to another GC, so it has to be invoked after this GC process.
1282     thread_->InvokeWeakNodeNativeFinalizeCallback();
1283     // PostTask for ProcessNativeDelete
1284     CleanCallBack();
1285 
1286     JSFinalizationRegistry::CheckAndCall(thread_);
1287 #if defined(ECMASCRIPT_SUPPORT_TRACING)
1288     auto tracing = GetEcmaVM()->GetTracing();
1289     if (tracing != nullptr) {
1290         tracing->TraceEventRecordMemory();
1291     }
1292 #endif
1293     ProcessGCListeners();
1294 
1295 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1296     if (!hasOOMDump_ && (g_betaVersion || g_developMode)) {
1297         ThresholdReachedDump();
1298     }
1299 #endif
1300 
1301     if (GetEcmaVM()->IsEnableBaselineJit() || GetEcmaVM()->IsEnableFastJit()) {
1302         // check whether the machine code space still has enough room
1303         int remainSize = static_cast<int>(config_.GetDefaultMachineCodeSpaceSize()) -
1304             static_cast<int>(GetMachineCodeSpace()->GetHeapObjectSize());
1305         Jit::GetInstance()->CheckMechineCodeSpaceMemory(GetEcmaVM()->GetJSThread(), remainSize);
1306     }
1307 }
1308 
1309 void BaseHeap::ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
1310     bool NonMovableObjNearOOM)
1311 {
1312     GetEcmaGCStats()->PrintGCMemoryStatistic();
1313     std::ostringstream oss;
1314     if (NonMovableObjNearOOM) {
1315         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1316             << " function name: " << functionName.c_str();
1317     } else {
1318         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1319             << functionName.c_str();
1320     }
1321     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1322     THROW_OOM_ERROR(thread, oss.str().c_str());
1323 }
1324 
1325 void BaseHeap::SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName)
1326 {
1327     std::ostringstream oss;
1328     oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1329         << functionName.c_str();
1330     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1331 
1332     EcmaVM *ecmaVm = thread->GetEcmaVM();
1333     ObjectFactory *factory = ecmaVm->GetFactory();
1334     JSHandle<JSObject> error = factory->GetJSError(ErrorType::OOM_ERROR, oss.str().c_str(), StackCheck::NO);
1335     thread->SetException(error.GetTaggedValue());
1336 }
1337 
1338 void BaseHeap::SetAppFreezeFilterCallback(AppFreezeFilterCallback cb)
1339 {
1340     if (cb != nullptr) {
1341         appfreezeCallback_ = cb;
1342     }
1343 }
1344 
1345 void BaseHeap::ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
1346     bool NonMovableObjNearOOM)
1347 {
1348     GetEcmaGCStats()->PrintGCMemoryStatistic();
1349     std::ostringstream oss;
1350     if (NonMovableObjNearOOM) {
1351         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1352             << " function name: " << functionName.c_str();
1353     } else {
1354         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
1355     }
1356     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1357     EcmaVM *ecmaVm = thread->GetEcmaVM();
1358     JSHandle<GlobalEnv> env = ecmaVm->GetGlobalEnv();
1359     JSHandle<JSObject> error = JSHandle<JSObject>::Cast(env->GetOOMErrorObject());
1360 
1361     thread->SetException(error.GetTaggedValue());
1362     ecmaVm->HandleUncatchableError();
1363 }
1364 
1365 void BaseHeap::FatalOutOfMemoryError(size_t size, std::string functionName)
1366 {
1367     GetEcmaGCStats()->PrintGCMemoryStatistic();
1368     LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
1369                         << " function name: " << functionName.c_str();
1370 }
1371 
1372 void Heap::CheckNonMovableSpaceOOM()
1373 {
1374     if (nonMovableSpace_->GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE) {
1375         sweeper_->EnsureAllTaskFinished();
1376         DumpHeapSnapshotBeforeOOM(false);
1377         StatisticHeapDetail();
1378         ThrowOutOfMemoryError(thread_, nonMovableSpace_->GetHeapObjectSize(), "Heap::CheckNonMovableSpaceOOM", true);
1379     }
1380 }
1381 
1382 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
1383 {
1384     promotedSize_ = GetEvacuator()->GetPromotedSize();
1385     edenToYoungSize_ = GetEvacuator()->GetEdenToYoungSize();
1386     if (originalNewSpaceSize <= 0) {
1387         return;
1388     }
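    // Illustrative sketch of the computation below (the numbers are examples, not from this codebase):
    // with 8MB originally allocated in the new space, 2MB copied within the semi space and 1MB promoted,
    // survivalRate = min(2/8 + 1/8, 1.0) = 0.375.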
1389     semiSpaceCopiedSize_ = IsEdenMark() ? edenToYoungSize_ : activeSemiSpace_->GetHeapObjectSize();
1390     double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
1391     double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
1392     double survivalRate = std::min(copiedRate + promotedRate, 1.0);
1393     OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
1394                                 << " survivalRate: " << survivalRate;
1395     if (IsEdenMark()) {
1396         memController_->AddEdenSurvivalRate(survivalRate);
1397         return;
1398     }
1399     if (!oldSpaceLimitAdjusted_) {
1400         memController_->AddSurvivalRate(survivalRate);
1401         AdjustOldSpaceLimit();
1402     } else {
1403         double averageSurvivalRate = memController_->GetAverageSurvivalRate();
1404         // 2 means half
1405         if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
1406             SetFullMarkRequestedState(true);
1407             OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
1408                 << " is less than half the average survival rates: " << averageSurvivalRate
1409                 << ". Trigger full mark next time.";
1410             // Survival rate of full mark is precise. Reset recorded survival rates.
1411             memController_->ResetRecordedSurvivalRates();
1412         }
1413         memController_->AddSurvivalRate(survivalRate);
1414     }
1415 }
1416 
1417 size_t Heap::VerifyHeapObjects(VerifyKind verifyKind) const
1418 {
1419     size_t failCount = 0;
1420     {
1421         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1422         activeSemiSpace_->IterateOverObjects(verifier);
1423     }
1424 
1425     {
1426         if (verifyKind == VerifyKind::VERIFY_EVACUATE_YOUNG ||
1427             verifyKind == VerifyKind::VERIFY_EVACUATE_OLD ||
1428             verifyKind == VerifyKind::VERIFY_EVACUATE_FULL) {
1429             inactiveSemiSpace_->EnumerateRegions([this](Region *region) {
1430                 region->IterateAllMarkedBits([this](void *addr) {
1431                     VerifyObjectVisitor::VerifyInactiveSemiSpaceMarkedObject(this, addr);
1432                 });
1433             });
1434         }
1435     }
1436 
1437     {
1438         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1439         oldSpace_->IterateOverObjects(verifier);
1440     }
1441 
1442     {
1443         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1444         appSpawnSpace_->IterateOverMarkedObjects(verifier);
1445     }
1446 
1447     {
1448         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1449         nonMovableSpace_->IterateOverObjects(verifier);
1450     }
1451 
1452     {
1453         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1454         hugeObjectSpace_->IterateOverObjects(verifier);
1455     }
1456     {
1457         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1458         hugeMachineCodeSpace_->IterateOverObjects(verifier);
1459     }
1460     {
1461         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1462         machineCodeSpace_->IterateOverObjects(verifier);
1463     }
1464     {
1465         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1466         snapshotSpace_->IterateOverObjects(verifier);
1467     }
1468     return failCount;
1469 }
1470 
1471 size_t Heap::VerifyOldToNewRSet(VerifyKind verifyKind) const
1472 {
1473     size_t failCount = 0;
1474     VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1475     oldSpace_->IterateOldToNewOverObjects(verifier);
1476     appSpawnSpace_->IterateOldToNewOverObjects(verifier);
1477     nonMovableSpace_->IterateOldToNewOverObjects(verifier);
1478     machineCodeSpace_->IterateOldToNewOverObjects(verifier);
1479     return failCount;
1480 }
1481 
1482 void Heap::AdjustOldSpaceLimit()
1483 {
1484     if (oldSpaceLimitAdjusted_) {
1485         return;
1486     }
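    // Sketch of the policy below: the candidate limit is
    //   max(liveOldSize + minGrowingStep, currentLimit * averageSurvivalRate),
    // so this path can only keep or lower the limit; once the candidate would exceed the current
    // limit, oldSpaceLimitAdjusted_ is set and this early adjustment stops.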
1487     size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
1488     size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
1489     size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
1490         static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
1491     if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
1492         GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
1493     } else {
1494         oldSpaceLimitAdjusted_ = true;
1495     }
1496 
1497     size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
1498         static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
1499     if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
1500         globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
1501     }
1502     OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
1503         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
1504 }
1505 
1506 void BaseHeap::OnAllocateEvent([[maybe_unused]] EcmaVM *ecmaVm, [[maybe_unused]] TaggedObject* address,
1507                                [[maybe_unused]] size_t size)
1508 {
1509 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1510     HeapProfilerInterface *profiler = ecmaVm->GetHeapProfile();
1511     if (profiler != nullptr) {
1512         base::BlockHookScope blockScope;
1513         profiler->AllocationEvent(address, size);
1514     }
1515 #endif
1516 }
1517 
1518 void Heap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] bool isFullGC)
1519 {
1520 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
1521 #if defined(ENABLE_DUMP_IN_FAULTLOG)
1522     if (ecmaVm_->GetHeapProfile() != nullptr) {
1523         LOG_ECMA(ERROR) << "Heap::DumpHeapSnapshotBeforeOOM, HeapProfile already exists, skip dump";
1524         return;
1525     }
1526     // Filter out appfreeze detection while dumping.
1527     LOG_ECMA(INFO) << " Heap::DumpHeapSnapshotBeforeOOM, isFullGC = " << isFullGC;
1528     base::BlockHookScope blockScope;
1529     HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
1530     if (appfreezeCallback_ != nullptr && appfreezeCallback_(getprocpid())) {
1531         LOG_ECMA(INFO) << "Heap::DumpHeapSnapshotBeforeOOM, appfreezeCallback_ success. ";
1532     }
1533 #ifdef ENABLE_HISYSEVENT
1534     GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetHeapLimitSize(), GetLiveObjectSize());
1535     hasOOMDump_ = true;
1536 #endif
1537     // The VM should always be able to allocate from the young space; a real OOM occurs in the non-young spaces.
1538     DumpSnapShotOption dumpOption;
1539     dumpOption.dumpFormat = DumpFormat::BINARY;
1540     dumpOption.isVmMode = true;
1541     dumpOption.isPrivate = false;
1542     dumpOption.captureNumericValue = false;
1543     dumpOption.isFullGC = isFullGC;
1544     dumpOption.isSimplify = true;
1545     dumpOption.isSync = true;
1546     dumpOption.isBeforeFill = false;
1547     dumpOption.isDumpOOM = true;
1548     heapProfile->DumpHeapSnapshot(dumpOption);
1549     HeapProfilerInterface::Destroy(ecmaVm_);
1550 #endif // ENABLE_DUMP_IN_FAULTLOG
1551 #endif // ECMASCRIPT_SUPPORT_SNAPSHOT
1552 }
1553 
1554 void Heap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
1555                        [[maybe_unused]] size_t size)
1556 {
1557 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1558     HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
1559     if (profiler != nullptr) {
1560         base::BlockHookScope blockScope;
1561         profiler->MoveEvent(address, forwardAddress, size);
1562     }
1563 #endif
1564 }
1565 
1566 void Heap::AdjustSpaceSizeForAppSpawn()
1567 {
1568     SetHeapMode(HeapMode::SPAWN);
1569     size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
1570     activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
1571     auto committedSize = appSpawnSpace_->GetCommittedSize();
1572     appSpawnSpace_->SetInitialCapacity(committedSize);
1573     appSpawnSpace_->SetMaximumCapacity(committedSize);
1574     oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
1575     oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
1576 }
1577 
1578 bool Heap::ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object)
1579 {
1580     return hclass->IsString() && !Region::ObjectAddressToRange(object)->InHugeObjectSpace();
1581 }
1582 
1583 void Heap::AddAllocationInspectorToAllSpaces(AllocationInspector *inspector)
1584 {
1585     ASSERT(inspector != nullptr);
1586     // activeSemiSpace_/inactiveSemiSpace_:
1587     // only add an inspector to activeSemiSpace_; while sweeping for GC, the inspector also needs to be swept.
1588     activeSemiSpace_->AddAllocationInspector(inspector);
1589     // oldSpace_/compressSpace_:
1590     // only add an inspector to oldSpace_; while sweeping for GC, the inspector also needs to be swept.
1591     oldSpace_->AddAllocationInspector(inspector);
1592     // readOnlySpace_ does not need an allocation inspector.
1593     // appSpawnSpace_ does not need an allocation inspector.
1594     nonMovableSpace_->AddAllocationInspector(inspector);
1595     machineCodeSpace_->AddAllocationInspector(inspector);
1596     hugeObjectSpace_->AddAllocationInspector(inspector);
1597     hugeMachineCodeSpace_->AddAllocationInspector(inspector);
1598 }
1599 
1600 void Heap::ClearAllocationInspectorFromAllSpaces()
1601 {
1602     edenSpace_->ClearAllocationInspector();
1603     activeSemiSpace_->ClearAllocationInspector();
1604     oldSpace_->ClearAllocationInspector();
1605     nonMovableSpace_->ClearAllocationInspector();
1606     machineCodeSpace_->ClearAllocationInspector();
1607     hugeObjectSpace_->ClearAllocationInspector();
1608     hugeMachineCodeSpace_->ClearAllocationInspector();
1609 }
1610 
1611 void Heap::RecomputeLimits()
1612 {
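    // Roughly: the growing factor weighs how fast the mutator allocates against how fast a
    // mark-compact GC reclaims, and the new limits are then derived from the current live sizes.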
1613     double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
1614     double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
1615     size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1616         hugeMachineCodeSpace_->GetHeapObjectSize();
1617     size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
1618 
1619     double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
1620     size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
1621     size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
1622         maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
1623     size_t maxGlobalSize = config_.GetMaxHeapSize() - newSpaceCapacity;
1624     size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
1625                                                                      maxGlobalSize, newSpaceCapacity, growingFactor);
1626     globalSpaceAllocLimit_ = newGlobalSpaceLimit;
1627     oldSpace_->SetInitialCapacity(newOldSpaceLimit);
1628     globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(GetGlobalNativeSize(), MIN_HEAP_SIZE,
1629                                                                   MAX_GLOBAL_NATIVE_LIMIT, newSpaceCapacity,
1630                                                                   growingFactor);
1631     globalSpaceNativeLimit_ = std::max(globalSpaceNativeLimit_, GetGlobalNativeSize()
1632                                         + config_.GetMinNativeLimitGrowingStep());
1633     OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
1634         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
1635         << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
1636     if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize() &&
1637         (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) { // 2: means half
1638         OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
1639                                     << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
1640                                     << " Committed Size: " << oldSpace_->GetCommittedSize();
1641         SetFullMarkRequestedState(true);
1642     }
1643 }
1644 
1645 bool Heap::CheckAndTriggerOldGC(size_t size)
1646 {
1647     bool isFullMarking = IsConcurrentFullMark() && GetJSThread()->IsMarking();
1648     bool isNativeSizeLargeTrigger = isFullMarking ? false : GlobalNativeSizeLargerThanLimit();
1649     if (isFullMarking && oldSpace_->GetOvershootSize() == 0) {
1650         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1651     }
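    // While a full concurrent mark is running, grow the old space limit step by step (up to the
    // max overshoot) instead of collecting, so the mark can finish before the next old GC.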
1652     if ((isNativeSizeLargeTrigger || OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) ||
1653         GetHeapObjectSize() > globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize()) &&
1654         !NeedStopCollection()) {
1655         if (isFullMarking && oldSpace_->GetOvershootSize() < config_.GetOldSpaceMaxOvershootSize()) {
1656             oldSpace_->IncreaseOvershootSize(config_.GetOldSpaceStepOvershootSize());
1657             return false;
1658         }
1659         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
1660         if (!oldGCRequested_) {
1661             return true;
1662         }
1663     }
1664     return false;
1665 }
1666 
1667 bool Heap::CheckAndTriggerHintGC()
1668 {
1669     if (IsInBackground()) {
1670         CollectGarbage(TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER);
1671         return true;
1672     }
1673     if (InSensitiveStatus()) {
1674         return false;
1675     }
1676     if (memController_->GetPredictedSurvivalRate() < SURVIVAL_RATE_THRESHOLD) {
1677         CollectGarbage(TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER);
1678         return true;
1679     }
1680     return false;
1681 }
1682 
1683 bool Heap::CheckOngoingConcurrentMarking()
1684 {
1685     if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark() &&
1686         concurrentMarker_->IsTriggeredConcurrentMark()) {
1687         TRACE_GC(GCStats::Scope::ScopeId::WaitConcurrentMarkFinished, GetEcmaVM()->GetEcmaGCStats());
1688         if (thread_->IsMarking()) {
1689             ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
1690             MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, WaitConcurrentMarkingFinished);
1691             GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
1692             WaitConcurrentMarkingFinished();
1693         }
1694         WaitRunningTaskFinished();
1695         memController_->RecordAfterConcurrentMark(markType_, concurrentMarker_);
1696         return true;
1697     }
1698     return false;
1699 }
1700 
1701 void Heap::ClearIdleTask()
1702 {
1703     SetIdleTask(IdleTaskType::NO_TASK);
1704     idleTaskFinishTime_ = incrementalMarker_->GetCurrentTimeInMs();
1705 }
1706 
1707 void Heap::TryTriggerIdleCollection()
1708 {
1709     if (idleTask_ != IdleTaskType::NO_TASK || !GetJSThread()->IsReadyToConcurrentMark() || !enableIdleGC_) {
1710         return;
1711     }
1712     if (thread_->IsMarkFinished() && concurrentMarker_->IsTriggeredConcurrentMark()) {
1713         SetIdleTask(IdleTaskType::FINISH_MARKING);
1714         EnableNotifyIdle();
1715         CalculateIdleDuration();
1716         return;
1717     }
1718 
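    // Predict how much could still be allocated once a concurrent mark finished:
    //   remain = (timeToReachLimit - timeToMark) * allocSpeed.
    // If fewer than two regions' worth would remain, schedule a young GC for the next idle period.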
1719     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1720     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1721     double newSpaceAllocToLimitDuration = (static_cast<double>(activeSemiSpace_->GetInitialCapacity()) -
1722                                            static_cast<double>(activeSemiSpace_->GetCommittedSize())) /
1723                                            newSpaceAllocSpeed;
1724     double newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1725     double newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
1726     // 2 means double
1727     if (newSpaceRemainSize < 2 * DEFAULT_REGION_SIZE) {
1728         SetIdleTask(IdleTaskType::YOUNG_GC);
1729         SetMarkType(MarkType::MARK_YOUNG);
1730         EnableNotifyIdle();
1731         CalculateIdleDuration();
1732         return;
1733     }
1734 }
1735 
1736 void Heap::CalculateIdleDuration()
1737 {
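    // The predicted pause is a sum of phase estimates, each of the form workSize / measuredSpeed,
    // using the speeds recorded for the current mark type (eden / young / full).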
1738     size_t updateReferenceSpeed = 0;
1739     // clear native object duration
1740     size_t clearNativeObjSpeed = 0;
1741     if (markType_ == MarkType::MARK_EDEN) {
1742         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_UPDATE_REFERENCE_SPEED);
1743         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_CLEAR_NATIVE_OBJ_SPEED);
1744     } else if (markType_ == MarkType::MARK_YOUNG) {
1745         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_UPDATE_REFERENCE_SPEED);
1746         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_CLEAR_NATIVE_OBJ_SPEED);
1747     } else if (markType_ == MarkType::MARK_FULL) {
1748         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::UPDATE_REFERENCE_SPEED);
1749         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_CLEAR_NATIVE_OBJ_SPEED);
1750     }
1751 
1752     // update reference duration
1753     idlePredictDuration_ = 0.0f;
1754     if (updateReferenceSpeed != 0) {
1755         idlePredictDuration_ += (float)GetHeapObjectSize() / updateReferenceSpeed;
1756     }
1757 
1758     if (clearNativeObjSpeed != 0) {
1759         idlePredictDuration_ += (float)GetNativePointerListSize() / clearNativeObjSpeed;
1760     }
1761 
1762     // sweep and evacuate duration
1763     size_t edenEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_EVACUATE_SPACE_SPEED);
1764     size_t youngEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_EVACUATE_SPACE_SPEED);
1765     double survivalRate = GetEcmaGCStats()->GetAvgSurvivalRate();
1766     if (markType_ == MarkType::MARK_EDEN && edenEvacuateSpeed != 0) {
1767         idlePredictDuration_ += survivalRate * edenSpace_->GetHeapObjectSize() / edenEvacuateSpeed;
1768     } else if (markType_ == MarkType::MARK_YOUNG && youngEvacuateSpeed != 0) {
1769         idlePredictDuration_ += (activeSemiSpace_->GetHeapObjectSize() + edenSpace_->GetHeapObjectSize()) *
1770             survivalRate / youngEvacuateSpeed;
1771     } else if (markType_ == MarkType::MARK_FULL) {
1772         size_t sweepSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::SWEEP_SPEED);
1773         size_t oldEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_EVACUATE_SPACE_SPEED);
1774         if (sweepSpeed != 0) {
1775             idlePredictDuration_ += (float)GetHeapObjectSize() / sweepSpeed;
1776         }
1777         if (oldEvacuateSpeed != 0) {
1778             size_t collectRegionSetSize = GetEcmaGCStats()->GetRecordData(
1779                 RecordData::COLLECT_REGION_SET_SIZE);
1780             idlePredictDuration_ += (survivalRate * activeSemiSpace_->GetHeapObjectSize() + collectRegionSetSize) /
1781                                     oldEvacuateSpeed;
1782         }
1783     }
1784 
1785     // Idle YoungGC mark duration
1786     size_t markSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::MARK_SPEED);
1787     if (idleTask_ == IdleTaskType::YOUNG_GC && markSpeed != 0) {
1788         idlePredictDuration_ += (float)activeSemiSpace_->GetHeapObjectSize() / markSpeed;
1789     }
1790     OPTIONAL_LOG(ecmaVm_, INFO) << "Predict idle gc pause: " << idlePredictDuration_ << "ms";
1791 }
1792 
1793 void Heap::TryTriggerIncrementalMarking()
1794 {
1795     if (!GetJSThread()->IsReadyToConcurrentMark() || idleTask_ != IdleTaskType::NO_TASK || !enableIdleGC_) {
1796         return;
1797     }
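    // Same headroom prediction as for concurrent mark, but for incremental marking on the main
    // thread: only schedule it as an idle task if the bytes allocated while marking would stay
    // under ALLOCATE_SIZE_LIMIT.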
1798     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1799     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1800         hugeMachineCodeSpace_->GetHeapObjectSize();
1801     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1802     double oldSpaceIncrementalMarkSpeed = incrementalMarker_->GetAverageIncrementalMarkingSpeed();
1803     double oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1804     double oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceIncrementalMarkSpeed;
1805 
1806     double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1807     // marking should finish before the allocation limit is reached
1808     if ((oldSpaceRemainSize < DEFAULT_REGION_SIZE) || GetHeapObjectSize() >= globalSpaceAllocLimit_) {
1809         // The amount allocated during incremental marking should stay below the limit;
1810         // otherwise prefer triggering concurrent mark instead.
1811         size_t allocateSize = oldSpaceAllocSpeed * oldSpaceMarkDuration;
1812         if (allocateSize < ALLOCATE_SIZE_LIMIT) {
1813             EnableNotifyIdle();
1814             SetIdleTask(IdleTaskType::INCREMENTAL_MARK);
1815         }
1816     }
1817 }
1818 
1819 bool Heap::CheckCanTriggerConcurrentMarking()
1820 {
1821     return concurrentMarker_->IsEnabled() && thread_->IsReadyToConcurrentMark() &&
1822         !incrementalMarker_->IsTriggeredIncrementalMark() &&
1823         (idleTask_ == IdleTaskType::NO_TASK || idleTask_ == IdleTaskType::YOUNG_GC);
1824 }
1825 
1826 void Heap::TryTriggerConcurrentMarking()
1827 {
1828     // When concurrent marking is enabled, we attempt to trigger it here.
1829     // When the size of the old space or global space reaches its limit, isFullMarkNeeded is set to true.
1830     // If the predicted duration of the current full mark will not cause the new and old spaces to reach
1831     // their limits, full mark is triggered.
1832     // In the same way, if the size of the new space reaches its capacity, and the predicted duration of the
1833     // current young mark will not cause the new space to reach its limit, young mark can be triggered.
1834     // If full mark takes too long, a compress full GC is requested when the spaces reach their limits.
1835     // If the global space is larger than half the max heap size, we switch to full mark and trigger partial GC.
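    // Illustrative example (numbers are not from this codebase): with 4MB of old space headroom,
    // an allocation speed of 1MB/ms and a concurrent mark that needs 3ms, the predicted headroom
    // left after marking is (4 - 3) * 1 = 1MB; once that falls below one region, full mark triggers.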
1836     if (!CheckCanTriggerConcurrentMarking()) {
1837         return;
1838     }
1839     if (fullMarkRequested_) {
1840         markType_ = MarkType::MARK_FULL;
1841         OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
1842         TriggerConcurrentMarking();
1843         return;
1844     }
1845     double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
1846            oldSpaceAllocToLimitDuration = 0;
1847     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1848     double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
1849     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1850         hugeMachineCodeSpace_->GetHeapObjectSize();
1851     size_t globalHeapObjectSize = GetHeapObjectSize();
1852     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1853     if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
1854         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1855             GlobalNativeSizeLargerThanLimit()) {
1856             markType_ = MarkType::MARK_FULL;
1857             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
1858             TriggerConcurrentMarking();
1859             return;
1860         }
1861     } else {
1862         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1863             GlobalNativeSizeLargerThanLimit()) {
1864             markType_ = MarkType::MARK_FULL;
1865             TriggerConcurrentMarking();
1866             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1867             return;
1868         }
1869         oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1870         oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
1871         // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
1872         double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1873         if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
1874             markType_ = MarkType::MARK_FULL;
1875             TriggerConcurrentMarking();
1876             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1877             return;
1878         }
1879     }
1880 
1881     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1882     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1883     if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
1884         if (activeSemiSpace_->GetCommittedSize() >= config_.GetSemiSpaceTriggerConcurrentMark()) {
1885             markType_ = MarkType::MARK_YOUNG;
1886             TriggerConcurrentMarking();
1887             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark" << fullGCRequested_;
1888         }
1889         return;
1890     }
1891     size_t semiSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
1892     size_t semiSpaceCommittedSize = activeSemiSpace_->GetCommittedSize();
1893     bool triggerMark = semiSpaceCapacity <= semiSpaceCommittedSize;
1894     if (!triggerMark) {
1895         newSpaceAllocToLimitDuration = (semiSpaceCapacity - semiSpaceCommittedSize) / newSpaceAllocSpeed;
1896         newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1897         // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
1898         newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
1899         triggerMark = newSpaceRemainSize < DEFAULT_REGION_SIZE;
1900     }
1901 
1902     if (triggerMark) {
1903         markType_ = MarkType::MARK_YOUNG;
1904         TriggerConcurrentMarking();
1905         OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
1906         return;
1907     }
1908 
1909     if (!enableEdenGC_ || IsInBackground()) {
1910         return;
1911     }
1912 
1913     double edenSurvivalRate = memController_->GetAverageEdenSurvivalRate();
1914     double survivalRate = memController_->GetAverageSurvivalRate();
1915     constexpr double expectMaxSurvivalRate = 0.4;
1916     if ((edenSurvivalRate == 0 || edenSurvivalRate >= expectMaxSurvivalRate) && survivalRate >= expectMaxSurvivalRate) {
1917         return;
1918     }
1919 
1920     double edenSpaceAllocSpeed = memController_->GetEdenSpaceAllocationThroughputPerMS();
1921     double edenSpaceConcurrentMarkSpeed = memController_->GetEdenSpaceConcurrentMarkSpeedPerMS();
1922     if (edenSpaceConcurrentMarkSpeed == 0 || edenSpaceAllocSpeed == 0) {
1923         auto &config = ecmaVm_->GetEcmaParamConfiguration();
1924         if (edenSpace_->GetCommittedSize() >= config.GetEdenSpaceTriggerConcurrentMark()) {
1925             markType_ = MarkType::MARK_EDEN;
1926             TriggerConcurrentMarking();
1927             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first eden mark " << fullGCRequested_;
1928         }
1929         return;
1930     }
1931 
1932     auto &config = ecmaVm_->GetEcmaParamConfiguration();
1933     size_t edenCommittedSize = edenSpace_->GetCommittedSize();
1934     triggerMark = edenCommittedSize >= config.GetEdenSpaceTriggerConcurrentMark();
1935     if (!triggerMark && edenSpaceAllocSpeed != 0 && edenSpaceConcurrentMarkSpeed != 0 &&
1936             edenSpace_->GetHeapObjectSize() > 0) {
1937         double edenSpaceLimit = edenSpace_->GetInitialCapacity();
1938         double edenSpaceAllocToLimitDuration = (edenSpaceLimit - edenCommittedSize) / edenSpaceAllocSpeed;
1939         double edenSpaceMarkDuration = edenSpace_->GetHeapObjectSize() / edenSpaceConcurrentMarkSpeed;
1940         double edenSpaceRemainSize = (edenSpaceAllocToLimitDuration - edenSpaceMarkDuration) * newSpaceAllocSpeed;
1941         triggerMark = edenSpaceRemainSize < DEFAULT_REGION_SIZE;
1942     }
1943 
1944     if (triggerMark) {
1945         markType_ = MarkType::MARK_EDEN;
1946         TriggerConcurrentMarking();
1947         OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger eden mark";
1948     }
1949 }
1950 
1951 void Heap::TryTriggerFullMarkOrGCByNativeSize()
1952 {
1953     // In a high-sensitive scene, when the native size exceeds the limit, trigger old GC directly.
1954     if (InSensitiveStatus() && GlobalNativeSizeLargerToTriggerGC()) {
1955         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
1956     } else if (GlobalNativeSizeLargerThanLimit()) {
1957         if (concurrentMarker_->IsEnabled()) {
1958             SetFullMarkRequestedState(true);
1959             TryTriggerConcurrentMarking();
1960         } else {
1961             CheckAndTriggerOldGC();
1962         }
1963     }
1964 }
1965 
1966 bool Heap::TryTriggerFullMarkBySharedLimit()
1967 {
1968     bool keepFullMarkRequest = false;
1969     if (concurrentMarker_->IsEnabled()) {
1970         if (!CheckCanTriggerConcurrentMarking()) {
1971             return keepFullMarkRequest;
1972         }
1973         markType_ = MarkType::MARK_FULL;
1974         if (ConcurrentMarker::TryIncreaseTaskCounts()) {
1975             concurrentMarker_->Mark();
1976         } else {
1977             // need to retry the full mark request.
1978             keepFullMarkRequest = true;
1979         }
1980     }
1981     return keepFullMarkRequest;
1982 }
1983 
1984 void Heap::CheckAndTriggerTaskFinishedGC()
1985 {
1986     size_t objectSizeOfTaskBegin = GetRecordObjectSize();
1987     size_t objectSizeOfTaskFinished = GetHeapObjectSize();
1988     size_t nativeSizeOfTaskBegin = GetRecordNativeSize();
1989     size_t nativeSizeOfTaskFinished = GetGlobalNativeSize();
1990     // GC is triggered when the heap size increases by more than max(20M, 10% * sizeOfTaskBegin)
1991     bool objectSizeFlag = objectSizeOfTaskFinished > objectSizeOfTaskBegin &&
1992         objectSizeOfTaskFinished - objectSizeOfTaskBegin > std::max(TRIGGER_OLDGC_OBJECT_SIZE_LIMIT,
1993             TRIGGER_OLDGC_OBJECT_LIMIT_RATE * objectSizeOfTaskBegin);
1994     bool nativeSizeFlag = nativeSizeOfTaskFinished > nativeSizeOfTaskBegin &&
1995         nativeSizeOfTaskFinished - nativeSizeOfTaskBegin > std::max(TRIGGER_OLDGC_NATIVE_SIZE_LIMIT,
1996             TRIGGER_OLDGC_NATIVE_LIMIT_RATE * nativeSizeOfTaskBegin);
1997     if (objectSizeFlag || nativeSizeFlag) {
1998         panda::JSNApi::TriggerGC(GetEcmaVM(), panda::ecmascript::GCReason::TRIGGER_BY_TASKPOOL,
1999             panda::JSNApi::TRIGGER_GC_TYPE::OLD_GC);
2000         RecordOrResetObjectSize(0);
2001         RecordOrResetNativeSize(0);
2002     }
2003 }
2004 
2005 bool Heap::IsMarking() const
2006 {
2007     return thread_->IsMarking();
2008 }
2009 
2010 void Heap::TryTriggerFullMarkBySharedSize(size_t size)
2011 {
2012     newAllocatedSharedObjectSize_ += size;
2013     if (newAllocatedSharedObjectSize_ >= NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT) {
2014         if (concurrentMarker_->IsEnabled()) {
2015             SetFullMarkRequestedState(true);
2016             TryTriggerConcurrentMarking();
2017             newAllocatedSharedObjectSize_ = 0;
2018         }
2019     }
2020 }
2021 
2022 bool Heap::IsReadyToConcurrentMark() const
2023 {
2024     return thread_->IsReadyToConcurrentMark();
2025 }
2026 
2027 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
2028 {
2029     size_t size = object->GetBindingSize();
2030     if (size == 0) {
2031         return;
2032     }
2033     nativeBindingSize_ += size;
2034 }
2035 
2036 void Heap::IncreaseNativeBindingSize(size_t size)
2037 {
2038     if (size == 0) {
2039         return;
2040     }
2041     nativeBindingSize_ += size;
2042 }
2043 
2044 void Heap::DecreaseNativeBindingSize(size_t size)
2045 {
2046     ASSERT(size <= nativeBindingSize_);
2047     nativeBindingSize_ -= size;
2048 }
2049 
2050 void Heap::PrepareRecordRegionsForReclaim()
2051 {
2052     activeSemiSpace_->SetRecordRegion();
2053     oldSpace_->SetRecordRegion();
2054     snapshotSpace_->SetRecordRegion();
2055     nonMovableSpace_->SetRecordRegion();
2056     hugeObjectSpace_->SetRecordRegion();
2057     machineCodeSpace_->SetRecordRegion();
2058     hugeMachineCodeSpace_->SetRecordRegion();
2059 }
2060 
2061 void Heap::TriggerConcurrentMarking()
2062 {
2063     ASSERT(idleTask_ != IdleTaskType::INCREMENTAL_MARK);
2064     if (idleTask_ == IdleTaskType::YOUNG_GC && IsConcurrentFullMark()) {
2065         ClearIdleTask();
2066         DisableNotifyIdle();
2067     }
2068     if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
2069         concurrentMarker_->Mark();
2070     }
2071 }
2072 
2073 void Heap::WaitAllTasksFinished()
2074 {
2075     WaitRunningTaskFinished();
2076     sweeper_->EnsureAllTaskFinished();
2077     WaitClearTaskFinished();
2078     if (concurrentMarker_->IsEnabled() && thread_->IsMarking() && concurrentMarker_->IsTriggeredConcurrentMark()) {
2079         concurrentMarker_->WaitMarkingFinished();
2080     }
2081 }
2082 
2083 void Heap::WaitConcurrentMarkingFinished()
2084 {
2085     concurrentMarker_->WaitMarkingFinished();
2086 }
2087 
2088 void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
2089 {
2090     IncreaseTaskCount();
2091     Taskpool::GetCurrentTaskpool()->PostTask(
2092         std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
2093 }
2094 
2095 void Heap::ChangeGCParams(bool inBackground)
2096 {
2097     const double doubleOne = 1.0;
2098     inBackground_ = inBackground;
2099     if (inBackground) {
2100         LOG_GC(INFO) << "app is inBackground";
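        // Compact only when it pays off: the heap has grown by more than BACKGROUND_GROW_LIMIT since
        // the last GC, the committed size is at least MIN_BACKGROUNG_GC_LIMIT, and utilization
        // (live / committed) is at or below MIN_OBJECT_SURVIVAL_RATE.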
2101         if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT &&
2102             GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2103             doubleOne * GetHeapObjectSize() / GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2104             CollectGarbage(TriggerGCType::FULL_GC, GCReason::SWITCH_BACKGROUND);
2105         }
2106         if (sHeap_->GetHeapObjectSize() - sHeap_->GetHeapAliveSizeAfterGC() > BACKGROUND_GROW_LIMIT &&
2107             sHeap_->GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2108             doubleOne * sHeap_->GetHeapObjectSize() / sHeap_->GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2109             sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::SWITCH_BACKGROUND>(thread_);
2110         }
2111         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2112             SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2113             LOG_GC(DEBUG) << "Heap Growing Type CONSERVATIVE";
2114         }
2115         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
2116         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::DISABLE);
2117         maxMarkTaskCount_ = 1;
2118         maxEvacuateTaskCount_ = 1;
2119         Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::BACKGROUND);
2120     } else {
2121         LOG_GC(INFO) << "app is not inBackground";
2122         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2123             SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
2124             LOG_GC(DEBUG) << "Heap Growing Type HIGH_THROUGHPUT";
2125         }
2126         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
2127         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
2128         maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
2129             Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
2130         maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
2131         Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::FOREGROUND);
2132     }
2133 }
2134 
2135 GCStats *Heap::GetEcmaGCStats()
2136 {
2137     return ecmaVm_->GetEcmaGCStats();
2138 }
2139 
2140 GCKeyStats *Heap::GetEcmaGCKeyStats()
2141 {
2142     return ecmaVm_->GetEcmaGCKeyStats();
2143 }
2144 
2145 JSObjectResizingStrategy *Heap::GetJSObjectResizingStrategy()
2146 {
2147     return ecmaVm_->GetJSObjectResizingStrategy();
2148 }
2149 
2150 void Heap::TriggerIdleCollection(int idleMicroSec)
2151 {
2152     if (idleTask_ == IdleTaskType::NO_TASK) {
2153         if (incrementalMarker_->GetCurrentTimeInMs() - idleTaskFinishTime_ > IDLE_MAINTAIN_TIME) {
2154             DisableNotifyIdle();
2155         }
2156         return;
2157     }
2158 
2159     // Incremental mark initialize and process
2160     if (idleTask_ == IdleTaskType::INCREMENTAL_MARK &&
2161         incrementalMarker_->GetIncrementalGCStates() != IncrementalGCStates::REMARK) {
2162         incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2163         if (incrementalMarker_->GetIncrementalGCStates() == IncrementalGCStates::REMARK) {
2164             CalculateIdleDuration();
2165         }
2166         return;
2167     }
2168 
2169     if (idleMicroSec < idlePredictDuration_ && idleMicroSec < IDLE_TIME_LIMIT) {
2170         return;
2171     }
2172 
2173     switch (idleTask_) {
2174         case IdleTaskType::FINISH_MARKING: {
2175             if (markType_ == MarkType::MARK_FULL) {
2176                 CollectGarbage(TriggerGCType::OLD_GC, GCReason::IDLE);
2177             } else {
2178                 CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2179             }
2180             break;
2181         }
2182         case IdleTaskType::YOUNG_GC:
2183             CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2184             break;
2185         case IdleTaskType::INCREMENTAL_MARK:
2186             incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2187             break;
2188         default:
2189             break;
2190     }
2191     ClearIdleTask();
2192 }
2193 
2194 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
2195 {
2196     if (inHighMemoryPressure) {
2197         LOG_GC(INFO) << "app is inHighMemoryPressure";
2198         SetMemGrowingType(MemGrowingType::PRESSURE);
2199     } else {
2200         LOG_GC(INFO) << "app is not inHighMemoryPressure";
2201         SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2202     }
2203 }
2204 
2205 void Heap::NotifyFinishColdStart(bool isMainThread)
2206 {
2207     if (!FinishStartupEvent()) {
2208         return;
2209     }
2210     ASSERT(!OnStartupEvent());
2211     LOG_GC(INFO) << "SmartGC: finish app cold start";
2212 
2213     // set the overshoot size to raise the GC threshold 8MB above the current heap size.
2214     int64_t semiRemainSize =
2215         static_cast<int64_t>(GetNewSpace()->GetInitialCapacity() - GetNewSpace()->GetCommittedSize());
2216     int64_t overshootSize =
2217         static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - semiRemainSize;
2218     // overshoot size should not be negative.
2219     GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
2220 
2221     if (isMainThread && CheckCanTriggerConcurrentMarking()) {
2222         TryTriggerConcurrentMarking();
2223     }
2224     GetEdenSpace()->AllowTryEnable();
2225 }
2226 
2227 void Heap::NotifyFinishColdStartSoon()
2228 {
2229     if (!OnStartupEvent()) {
2230         return;
2231     }
2232 
2233     // post 2s task
2234     Taskpool::GetCurrentTaskpool()->PostTask(
2235         std::make_unique<FinishColdStartTask>(GetJSThread()->GetThreadId(), this));
2236 }
2237 
2238 void Heap::NotifyHighSensitive(bool isStart)
2239 {
2240     ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SmartGC: set high sensitive status: " + std::to_string(isStart));
2241     isStart ? SetSensitiveStatus(AppSensitiveStatus::ENTER_HIGH_SENSITIVE)
2242         : SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE);
2243     LOG_GC(DEBUG) << "SmartGC: set high sensitive status: " << isStart;
2244 }
2245 
2246 bool Heap::HandleExitHighSensitiveEvent()
2247 {
2248     AppSensitiveStatus status = GetSensitiveStatus();
2249     if (status == AppSensitiveStatus::EXIT_HIGH_SENSITIVE
2250         && CASSensitiveStatus(status, AppSensitiveStatus::NORMAL_SCENE)) {
2251         // Set the recorded heap object size to 0 after exiting the high-sensitive state.
2252         SetRecordHeapObjectSizeBeforeSensitive(0);
2253         // set the overshoot size to raise the GC threshold 8MB above the current heap size.
2254         int64_t semiRemainSize =
2255             static_cast<int64_t>(GetNewSpace()->GetInitialCapacity() - GetNewSpace()->GetCommittedSize());
2256         int64_t overshootSize =
2257             static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - semiRemainSize;
2258         // overshoot size should not be negative.
2259         GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
2260 
2261         // fixme: IncrementalMarking and IdleCollection are currently not enabled
2262         TryTriggerIncrementalMarking();
2263         TryTriggerIdleCollection();
2264         TryTriggerConcurrentMarking();
2265         return true;
2266     }
2267     return false;
2268 }
2269 
2270 // In a high-sensitive scene, the heap object size can temporarily reach MaxHeapSize - 8M; the 8M is reserved
2271 // for concurrent mark.
2272 bool Heap::ObjectExceedMaxHeapSize() const
2273 {
2274     size_t configMaxHeapSize = config_.GetMaxHeapSize();
2275     size_t overshootSize = config_.GetOldSpaceStepOvershootSize();
2276     return GetHeapObjectSize() > configMaxHeapSize - overshootSize;
2277 }
2278 
2279 bool Heap::NeedStopCollection()
2280 {
2281     // GC is not allowed during value serialization
2282     if (onSerializeEvent_) {
2283         return true;
2284     }
2285 
2286     if (!InSensitiveStatus()) {
2287         return false;
2288     }
2289 
2290     // During app cold start, the GC threshold is adjusted to the max heap size
2291     if (OnStartupEvent() && !ObjectExceedMaxHeapSize()) {
2292         return true;
2293     }
2294 
2295     if (GetRecordHeapObjectSizeBeforeSensitive() == 0) {
2296         SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
2297     }
2298 
2299     if (GetHeapObjectSize() < GetRecordHeapObjectSizeBeforeSensitive() + config_.GetIncObjSizeThresholdInSensitive()
2300         && !ObjectExceedMaxHeapSize()) {
2301         return true;
2302     }
2303 
2304     OPTIONAL_LOG(ecmaVm_, INFO) << "SmartGC: heap obj size: " << GetHeapObjectSize()
2305         << " exceed sensitive gc threshold, have to trigger gc";
2306     return false;
2307 }
2308 
2309 bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
2310 {
2311     // Synchronizes-with WorkManager::Initialize: ensure its effects are seen by marker threads before marking.
2312     ASSERT(heap_->GetWorkManager()->HasInitialized());
2313     while (!heap_->GetWorkManager()->HasInitialized());
2314     switch (taskPhase_) {
2315         case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
2316             heap_->GetSemiGCMarker()->MarkRoots(threadIndex);
2317             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
2318             break;
2319         case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
2320             heap_->GetSemiGCMarker()->ProcessSnapshotRSet(threadIndex);
2321             break;
2322         case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
2323             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
2324             break;
2325         case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
2326             heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
2327             break;
2328         case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
2329             heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
2330             break;
2331         case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
2332             heap_->GetConcurrentMarker()->ProcessConcurrentMarkTask(threadIndex);
2333             break;
2334         case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
2335             heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
2336             break;
2337         default:
2338             LOG_GC(FATAL) << "this branch is unreachable, type: " << static_cast<int>(taskPhase_);
2339             UNREACHABLE();
2340     }
2341     heap_->ReduceTaskCount();
2342     return true;
2343 }
2344 
2345 bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
2346 {
2347     heap_->ReclaimRegions(gcType_);
2348     return true;
2349 }
2350 
2351 bool Heap::FinishColdStartTask::Run([[maybe_unused]] uint32_t threadIndex)
2352 {
2353     std::this_thread::sleep_for(std::chrono::microseconds(2000000));  // 2000000 means 2s
2354     heap_->NotifyFinishColdStart(false);
2355     return true;
2356 }
2357 
2358 void Heap::CleanCallBack()
2359 {
2360     auto &concurrentCallbacks = this->GetEcmaVM()->GetConcurrentNativePointerCallbacks();
2361     if (!concurrentCallbacks.empty()) {
2362         Taskpool::GetCurrentTaskpool()->PostTask(
2363             std::make_unique<DeleteCallbackTask>(thread_->GetThreadId(), concurrentCallbacks)
2364         );
2365     }
2366     ASSERT(concurrentCallbacks.empty());
2367 
2368     AsyncNativeCallbacksPack &asyncCallbacksPack = this->GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
2369     if (asyncCallbacksPack.Empty()) {
2370         ASSERT(asyncCallbacksPack.TotallyEmpty());
2371         return;
2372     }
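    // Hand the pending callbacks off by swapping them into a heap-allocated pack; the async task
    // (or the synchronous fallback below) becomes responsible for running and freeing them.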
2373     AsyncNativeCallbacksPack *asyncCallbacks = new AsyncNativeCallbacksPack();
2374     std::swap(*asyncCallbacks, asyncCallbacksPack);
2375     NativePointerTaskCallback asyncTaskCb = thread_->GetAsyncCleanTaskCallback();
2376     if (asyncTaskCb != nullptr && thread_->IsMainThreadFast() &&
2377         pendingAsyncNativeCallbackSize_ < asyncClearNativePointerThreshold_) {
2378         IncreasePendingAsyncNativeCallbackSize(asyncCallbacks->GetTotalBindingSize());
2379         asyncCallbacks->RegisterFinishNotify([this] (size_t bindingSize) {
2380             this->DecreasePendingAsyncNativeCallbackSize(bindingSize);
2381         });
2382         asyncTaskCb(asyncCallbacks);
2383     } else {
2384         ThreadNativeScope nativeScope(thread_);
2385         asyncCallbacks->ProcessAll();
2386         delete asyncCallbacks;
2387     }
2388     ASSERT(asyncCallbacksPack.TotallyEmpty());
2389 }
2390 
2391 bool Heap::DeleteCallbackTask::Run([[maybe_unused]] uint32_t threadIndex)
2392 {
2393     for (auto iter : nativePointerCallbacks_) {
2394         if (iter.first != nullptr) {
2395             iter.first(std::get<0>(iter.second),
2396                 std::get<1>(iter.second), std::get<2>(iter.second)); // tuple elements 0..2 are the callback args.
2397         }
2398     }
2399     return true;
2400 }
2401 
2402 size_t Heap::GetArrayBufferSize() const
2403 {
2404     size_t result = 0;
2405     sweeper_->EnsureAllTaskFinished();
2406     this->IterateOverObjects([&result](TaggedObject *obj) {
2407         JSHClass* jsClass = obj->GetClass();
2408         result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
2409     });
2410     return result;
2411 }
2412 
2413 size_t Heap::GetLiveObjectSize() const
2414 {
2415     size_t objectSize = 0;
2416     sweeper_->EnsureAllTaskFinished();
2417     this->IterateOverObjects([&objectSize]([[maybe_unused]] TaggedObject *obj) {
2418         objectSize += obj->GetClass()->SizeFromJSHClass(obj);
2419     });
2420     return objectSize;
2421 }
2422 
2423 size_t Heap::GetHeapLimitSize() const
2424 {
2425     // Obtains the theoretical upper limit of space that can be allocated to JS heap.
2426     return config_.GetMaxHeapSize();
2427 }
2428 
2429 bool BaseHeap::IsAlive(TaggedObject *object) const
2430 {
2431     if (!ContainObject(object)) {
2432         LOG_GC(ERROR) << "The region is already free";
2433         return false;
2434     }
2435 
2436     bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
2437     if (isFree) {
2438         Region *region = Region::ObjectAddressToRange(object);
2439         LOG_GC(ERROR) << "The object " << object << " in "
2440                             << region->GetSpaceTypeName()
2441                             << " already free";
2442     }
2443     return !isFree;
2444 }
2445 
2446 bool BaseHeap::ContainObject(TaggedObject *object) const
2447 {
2448     /*
2449      * fixme: There's no absolutely safe appraoch to doing this, given that the region object is currently
2450      * allocated and maintained in the JS object heap. We cannot safely tell whether a region object
2451      * calculated from an object address is still valid or alive in a cheap way.
2452      * This will introduce inaccurate result to verify if an object is contained in the heap, and it may
2453      * introduce additional incorrect memory access issues.
2454      * Unless we can tolerate the performance impact of iterating the region list of each space and change
2455      * the implementation to that approach, don't rely on current implementation to get accurate result.
2456      */
2457     Region *region = Region::ObjectAddressToRange(object);
2458     return region->InHeapSpace();
2459 }
2460 
void Heap::PrintHeapInfo(TriggerGCType gcType) const
{
    OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
    OPTIONAL_LOG(ecmaVm_, INFO) << "GC Reason:" << ecmaVm_->GetEcmaGCStats()->GCReasonToString()
                                << ";OnStartup:" << OnStartupEvent()
                                << ";OnHighSensitive:" << static_cast<int>(GetSensitiveStatus())
                                << ";ConcurrentMark Status:" << static_cast<int>(thread_->GetMarkStatus());
    OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
                                << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsConcurrentFullMark()
                                << "), Eden Mark(" << IsEdenMark() << ")";
    OPTIONAL_LOG(ecmaVm_, INFO) << "Eden(" << edenSpace_->GetHeapObjectSize() << "/" << edenSpace_->GetInitialCapacity()
                 << "), ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize() << "/"
                 << activeSemiSpace_->GetInitialCapacity() << "), NonMovable(" << nonMovableSpace_->GetHeapObjectSize()
                 << "/" << nonMovableSpace_->GetCommittedSize() << "/" << nonMovableSpace_->GetInitialCapacity()
                 << "), Old(" << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize() << "/"
                 << oldSpace_->GetInitialCapacity() << "), HugeObject(" << hugeObjectSpace_->GetHeapObjectSize() << "/"
                 << hugeObjectSpace_->GetCommittedSize() << "/" << hugeObjectSpace_->GetInitialCapacity()
                 << "), ReadOnlySpace(" << readOnlySpace_->GetCommittedSize() << "/"
                 << readOnlySpace_->GetInitialCapacity() << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize()
                 << "/" << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
                 << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
}

void Heap::StatisticHeapObject(TriggerGCType gcType) const
{
    PrintHeapInfo(gcType);
#if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
    StatisticHeapDetail();
#endif
}

void Heap::StatisticHeapDetail()
{
    Prepare();
    static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
    static const int MIN_COUNT_THRESHOLD = 1000;
    int typeCount[JS_TYPE_LAST] = { 0 };

    // Count the live objects of each JSType in one space, log any type whose count
    // exceeds MIN_COUNT_THRESHOLD, then reset the counters for the next space.
    auto statisticSpace = [&typeCount](auto *space, const char *spaceName) {
        space->IterateOverObjects([&typeCount](TaggedObject *object) {
            typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
        });
        for (int i = 0; i < JS_TYPE_LAST; i++) {
            if (typeCount[i] > MIN_COUNT_THRESHOLD) {
                LOG_ECMA(INFO) << spaceName << " type " << JSHClass::DumpJSType(JSType(i))
                               << " count:" << typeCount[i];
            }
            typeCount[i] = 0;
        }
    };

    statisticSpace(nonMovableSpace_, "NonMovable space");
    statisticSpace(oldSpace_, "Old space");
    statisticSpace(activeSemiSpace_, "Active semi space");
}

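// Rebinds every collector and marker owned by this heap to a (re)created WorkManager,
// so that all of them share the same work queues after the swap.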
void Heap::UpdateWorkManager(WorkManager *workManager)
{
    concurrentMarker_->workManager_ = workManager;
    fullGC_->workManager_ = workManager;
    stwYoungGC_->workManager_ = workManager;
    incrementalMarker_->workManager_ = workManager;
    nonMovableMarker_->workManager_ = workManager;
    semiGCMarker_->workManager_ = workManager;
    compressGCMarker_->workManager_ = workManager;
    partialGC_->workManager_ = workManager;
}

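// Resolves a pc to the MachineCode object containing it: the regular machine-code
// space is searched first, with the huge machine-code space as a fallback.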
MachineCode *Heap::GetMachineCodeObject(uintptr_t pc) const
{
    MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
    MachineCode *machineCode = reinterpret_cast<MachineCode*>(machineCodeSpace->GetMachineCodeObject(pc));
    if (machineCode != nullptr) {
        return machineCode;
    }
    HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
    return reinterpret_cast<MachineCode*>(hugeMachineCodeSpace->GetMachineCodeObject(pc));
}

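// Maps a return address from a stack frame back to its MachineCode object and, for
// optimized code, returns the call-site metadata needed for stack walking. Baseline
// code, whose payload is exactly instructions plus stackmap/offset table, yields an
// empty tuple.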
std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> Heap::CalCallSiteInfo(uintptr_t retAddr) const
{
    MachineCode *code = nullptr;
    // Find the machine code object whose text section contains retAddr: search the
    // regular machine-code space first, then fall back to the huge machine-code space.
    auto searchCode = [&code, retAddr](TaggedObject *obj) {
        if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
            return;
        }
        if (MachineCode::Cast(obj)->IsInText(retAddr)) {
            code = MachineCode::Cast(obj);
        }
    };
    GetMachineCodeSpace()->IterateOverObjects(searchCode);
    if (code == nullptr) {
        GetHugeMachineCodeSpace()->IterateOverObjects(searchCode);
    }

    if (code == nullptr ||
        (code->GetPayLoadSizeInBytes() ==
         code->GetInstructionsSize() + code->GetStackMapOrOffsetTableSize())) { // baseline code: no call-site info
        return {};
    }
    return code->CalCallSiteInfo(retAddr);
}

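// A GCListenerId is an iterator to the registered entry, so callers can hand it back
// to RemoveGCListener() for O(1) erasure. This relies on gcListeners_ keeping
// iterators to other elements stable across insertions and erasures (e.g. a std::list).
// Hypothetical usage, assuming FinishGCListener is a plain void(void *) callback:
//   GCListenerId id = heap->AddGCListener(OnGCFinish, userData);
//   ...
//   heap->RemoveGCListener(id);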
GCListenerId Heap::AddGCListener(FinishGCListener listener, void *data)
{
    gcListeners_.emplace_back(listener, data);
    return std::prev(gcListeners_.cend());
}

void Heap::ProcessGCListeners()
{
    for (auto &&[listener, data] : gcListeners_) {
        listener(data);
    }
}

void SharedHeap::ProcessAllGCListeners()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        ASSERT(!thread->IsInRunningState());
        const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
    });
}

#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
uint64_t Heap::GetCurrentTickMillseconds()
{
    return std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
}

void Heap::SetJsDumpThresholds(size_t thresholds) const
{
    if (thresholds < MIN_JSDUMP_THRESHOLDS || thresholds > MAX_JSDUMP_THRESHOLDS) {
        LOG_GC(INFO) << "SetJsDumpThresholds thresholds is invalid: " << thresholds;
        return;
    }
    g_threshold = thresholds;
}

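// Heuristic leak detector: when heap usage exceeds g_threshold percent of the heap
// limit, take a simplified binary heap snapshot, at most once per
// HEAP_DUMP_REPORT_INTERVAL; g_debugLeak forces a dump unconditionally.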
void Heap::ThresholdReachedDump()
{
    size_t limitSize = GetHeapLimitSize();
    if (limitSize == 0) {
        LOG_GC(INFO) << "ThresholdReachedDump limitSize is invalid";
        return;
    }
    size_t nowPercent = GetHeapObjectSize() * DEC_TO_INT / limitSize;
    if (g_debugLeak || (nowPercent >= g_threshold && (g_lastHeapDumpTime == 0 ||
        GetCurrentTickMillseconds() - g_lastHeapDumpTime > HEAP_DUMP_REPORT_INTERVAL))) {
        // Recheck against the precise live-object size before committing to a dump.
        size_t liveObjectSize = GetLiveObjectSize();
        size_t nowPercentRecheck = liveObjectSize * DEC_TO_INT / limitSize;
        LOG_GC(INFO) << "ThresholdReachedDump nowPercentRecheck is " << nowPercentRecheck;
        if (nowPercentRecheck < g_threshold) {
            return;
        }
        g_lastHeapDumpTime = GetCurrentTickMillseconds();
        base::BlockHookScope blockScope;
        HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
        GetEcmaGCKeyStats()->SendSysEventBeforeDump("thresholdReachedDump",
                                                    GetHeapLimitSize(), GetLiveObjectSize());
        DumpSnapShotOption dumpOption;
        dumpOption.dumpFormat = DumpFormat::BINARY;
        dumpOption.isVmMode = true;
        dumpOption.isPrivate = false;
        dumpOption.captureNumericValue = false;
        dumpOption.isFullGC = false;
        dumpOption.isSimplify = true;
        dumpOption.isSync = false;
        dumpOption.isBeforeFill = false;
        dumpOption.isDumpOOM = true; // reuse the OOM path so the dump is written in binary form
        heapProfile->DumpHeapSnapshot(dumpOption);
        hasOOMDump_ = false;
        HeapProfilerInterface::Destroy(ecmaVm_);
    }
}
#endif

void Heap::RemoveGCListener(GCListenerId listenerId)
{
    gcListeners_.erase(listenerId);
}

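// Parallel task accounting: runningTaskCount_ is protected by waitTaskFinishedMutex_,
// and WaitRunningTaskFinished() blocks until the last task calls ReduceTaskCount(),
// which signals the condition variable when the count drops to zero.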
void BaseHeap::IncreaseTaskCount()
{
    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_++;
}

void BaseHeap::WaitRunningTaskFinished()
{
    LockHolder holder(waitTaskFinishedMutex_);
    while (runningTaskCount_ > 0) {
        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
    }
}

bool BaseHeap::CheckCanDistributeTask()
{
    LockHolder holder(waitTaskFinishedMutex_);
    return runningTaskCount_ < maxMarkTaskCount_;
}

void BaseHeap::ReduceTaskCount()
{
    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_--;
    if (runningTaskCount_ == 0) {
        waitTaskFinishedCV_.SignalAll();
    }
}

void BaseHeap::WaitClearTaskFinished()
{
    LockHolder holder(waitClearTaskFinishedMutex_);
    while (!clearTaskFinished_) {
        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
    }
}

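// The thread's inline allocation buffer (top/end addresses) can point at either eden
// or the active semispace: InstallEdenAllocator() switches it to eden when eden GC is
// enabled, and ReleaseEdenAllocator() switches it back to the active semispace.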
void Heap::ReleaseEdenAllocator()
{
    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    if (!topAddress || !endAddress) {
        return;
    }
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
}

void Heap::InstallEdenAllocator()
{
    if (!enableEdenGC_) {
        return;
    }
    auto topAddress = edenSpace_->GetAllocationTopAddress();
    auto endAddress = edenSpace_->GetAllocationEndAddress();
    if (!topAddress || !endAddress) {
        return;
    }
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
}

void Heap::EnableEdenGC()
{
    enableEdenGC_ = true;
    thread_->EnableEdenGCBarriers();
}

void Heap::TryEnableEdenGC()
{
    if (ohos::OhosParams::IsEdenGCEnable()) {
        EnableEdenGC();
    }
}
}  // namespace panda::ecmascript