/*
 * Copyright (c) 2022-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/checkpoint/thread_state_transition.h"
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
#include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
#endif

#include "common_components/taskpool/taskpool.h"
#include "ecmascript/cross_vm/unified_gc/unified_gc.h"
#include "ecmascript/cross_vm/unified_gc/unified_gc_marker.h"
#include "ecmascript/mem/idle_gc_trigger.h"
#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/partial_gc.h"
#include "ecmascript/mem/parallel_evacuator.h"
#include "ecmascript/mem/parallel_marker.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/mem/shared_heap/shared_gc_evacuator.h"
#include "ecmascript/mem/shared_heap/shared_gc_marker-inl.h"
#include "ecmascript/mem/shared_heap/shared_gc.h"
#include "ecmascript/mem/shared_heap/shared_full_gc.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_marker.h"
#include "ecmascript/mem/verification.h"
#include "ecmascript/runtime_call_id.h"
#include "ecmascript/jit/jit.h"
#if !WIN_OR_MAC_OR_IOS_PLATFORM
#include "ecmascript/dfx/hprof/heap_profiler_interface.h"
#include "ecmascript/dfx/hprof/heap_profiler.h"
#endif
#include "ecmascript/dfx/tracing/tracing.h"
#if defined(ENABLE_DUMP_IN_FAULTLOG)
#include "syspara/parameter.h"
#endif

#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
#include "parameters.h"
#include "hisysevent.h"
static constexpr uint32_t DEC_TO_INT = 100;
static size_t g_threshold = OHOS::system::GetUintParameter<size_t>("persist.dfx.leak.threshold", 85);
static uint64_t g_lastHeapDumpTime = 0;
static bool g_debugLeak = OHOS::system::GetBoolParameter("debug.dfx.tags.enableleak", false);
static constexpr uint64_t HEAP_DUMP_REPORT_INTERVAL = 24 * 3600 * 1000;
static bool g_betaVersion = OHOS::system::GetParameter("const.logsystem.versiontype", "unknown") == "beta";
static bool g_developMode = (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "enable") ||
                            (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "true");
static bool g_futVersion = OHOS::system::GetIntParameter("const.product.dfx.fans.stage", 0) == 1;
#endif

namespace panda::ecmascript {
SharedHeap *SharedHeap::instance_ = nullptr;

void SharedHeap::CreateNewInstance()
{
    ASSERT(instance_ == nullptr);
    size_t heapShared = 0;
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    heapShared = OHOS::system::GetUintParameter<size_t>("persist.ark.heap.sharedsize", 0) * 1_MB;
    if (Runtime::GetInstance()->GetEnableLargeHeap()) {
        heapShared = panda::ecmascript::MAX_SHARED_HEAP_SIZE;
    }
#endif
    EcmaParamConfiguration config(EcmaParamConfiguration::HeapType::SHARED_HEAP,
        MemMapAllocator::GetInstance()->GetCapacity(), heapShared);
    instance_ = new SharedHeap(config);
}

SharedHeap *SharedHeap::GetInstance()
{
    ASSERT(instance_ != nullptr);
    return instance_;
}

void SharedHeap::DestroyInstance()
{
    ASSERT(instance_ != nullptr);
    instance_->Destroy();
    delete instance_;
    instance_ = nullptr;
}

void SharedHeap::ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread)
{
    ASSERT(!dThread_->IsRunning());
    SuspendAllScope scope(thread);
    SharedGCScope sharedGCScope;  // SharedGCScope should be entered after SuspendAllScope.
    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
    CheckInHeapProfiler();
    GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
    if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
        // pre gc heap verify
        LOG_ECMA(DEBUG) << "pre gc shared heap verify";
        sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
        SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
    }
    switch (gcType) { // LCOV_EXCL_BR_LINE
        case TriggerGCType::SHARED_PARTIAL_GC:
        case TriggerGCType::SHARED_GC: {
            sharedGC_->RunPhases();
            break;
        }
        case TriggerGCType::SHARED_FULL_GC: {
            sharedFullGC_->RunPhases();
            break;
        }
        default: // LCOV_EXCL_BR_LINE
            LOG_ECMA(FATAL) << "this branch is unreachable";
            UNREACHABLE();
            break;
    }
    if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
        // after gc heap verify
        LOG_ECMA(DEBUG) << "after gc shared heap verify";
        SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
    }
    CollectGarbageFinish(false, gcType);
    InvokeSharedNativePointerCallbacks();
}

bool SharedHeap::CheckAndTriggerSharedGC(JSThread *thread)
{
    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
        return false;
    }
    size_t sharedGCThreshold = globalSpaceAllocLimit_ + spaceOvershoot_.load(std::memory_order_relaxed);
    if ((OldSpaceExceedLimit() || GetHeapObjectSize() > sharedGCThreshold) &&
        !NeedStopCollection()) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        return true;
    }
    return false;
}
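
// CheckAndTriggerSharedGC trigger sketch (illustrative numbers, not config defaults):
// with globalSpaceAllocLimit_ = 64MB and spaceOvershoot_ = 8MB, a shared GC starts once
// live shared objects exceed 72MB (or the shared old space exceeds its own limit),
// unless NeedStopCollection() is currently suppressing collection.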

bool SharedHeap::CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size)
{
    if (sHugeObjectSpace_->CheckOOM(size)) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        return true;
    }
    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
        return false;
    }
    if (GetHeapObjectSize() > globalSpaceAllocLimit_ && !NeedStopCollection()) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        return true;
    }
    return false;
}

void SharedHeap::CollectGarbageNearOOM(JSThread *thread)
{
    auto fragmentationSize = sOldSpace_->GetCommittedSize() - sOldSpace_->GetHeapObjectSize();
    if (fragmentationSize >= fragmentationLimitForSharedFullGC_) {
        CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::ALLOCATION_FAILED>(thread);
    } else {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
    }
}
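
// CollectGarbageNearOOM treats committed-but-dead bytes in the shared old space as
// fragmentation; once that reaches fragmentationLimitForSharedFullGC_, the compacting
// SHARED_FULL_GC pays for its extra cost, otherwise the cheaper non-moving SHARED_GC is used.
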
// Shared gc trigger
void SharedHeap::AdjustGlobalSpaceAllocLimit()
{
    globalSpaceAllocLimit_ = std::max(GetHeapObjectSize() * growingFactor_,
                                      config_.GetDefaultGlobalAllocLimit());
    globalSpaceAllocLimit_ = std::min(std::min(globalSpaceAllocLimit_, GetCommittedSize() + growingStep_),
                                      config_.GetMaxHeapSize());
    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);
    constexpr double OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT = 1.1;
    size_t markLimitByIncrement = static_cast<size_t>(GetHeapObjectSize() * OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT);
    globalSpaceConcurrentMarkLimit_ = std::max(globalSpaceConcurrentMarkLimit_, markLimitByIncrement);
    LOG_ECMA_IF(optionalLogEnabled_, INFO) << "Shared gc adjust global space alloc limit to: "
        << globalSpaceAllocLimit_;
}
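
// AdjustGlobalSpaceAllocLimit worked example (illustrative numbers only): with
// GetHeapObjectSize() = 100MB, growingFactor_ = 2, GetCommittedSize() = 120MB and
// growingStep_ = 16MB, the limit becomes min(max(200MB, defaultLimit), 136MB, maxHeapSize),
// i.e. 136MB; the concurrent-mark limit is then max(136MB * rate, 100MB * 1.1).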

bool SharedHeap::ObjectExceedMaxHeapSize() const
{
    return OldSpaceExceedLimit() || sHugeObjectSpace_->CommittedSizeExceed();
}

void SharedHeap::StartConcurrentMarking(TriggerGCType gcType, MarkReason markReason)
{
    ASSERT(JSThread::GetCurrent() == dThread_);
    GetEcmaGCStats()->SetMarkReason(markReason);
    sConcurrentMarker_->Mark(gcType);
}

bool SharedHeap::CheckCanTriggerConcurrentMarking(JSThread *thread)
{
    return thread->IsReadyToSharedConcurrentMark() &&
           sConcurrentMarker_ != nullptr && sConcurrentMarker_->IsEnabled();
}

void SharedHeap::Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
    const JSRuntimeOptions &option, DaemonThread *dThread)
{
    sGCStats_ = new SharedGCStats(this, option.EnableGCTracer());
    nativeAreaAllocator_ = nativeAreaAllocator;
    heapRegionAllocator_ = heapRegionAllocator;
    shouldVerifyHeap_ = option.EnableHeapVerify();
    parallelGC_ = option.EnableParallelGC();
    optionalLogEnabled_ = option.EnableOptionalLog();
    size_t maxHeapSize = config_.GetMaxHeapSize();
    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    sNonMovableSpace_ = new SharedNonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);

    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    size_t oldSpaceCapacity =
        AlignUp((maxHeapSize - nonmovableSpaceCapacity - readOnlySpaceCapacity) / 2, DEFAULT_REGION_SIZE); // 2: half
    globalSpaceAllocLimit_ = config_.GetDefaultGlobalAllocLimit();
    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);

    sOldSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    sCompressSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    sReadOnlySpace_ = new SharedReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    sHugeObjectSpace_ = new SharedHugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    sharedMemController_ = new SharedMemController(this);
    sAppSpawnSpace_ = new SharedAppSpawnSpace(this, oldSpaceCapacity);
    growingFactor_ = config_.GetSharedHeapLimitGrowingFactor();
    growingStep_ = config_.GetSharedHeapLimitGrowingStep();
    incNativeSizeTriggerSharedCM_ = config_.GetStepNativeSizeInc();
    incNativeSizeTriggerSharedGC_ = config_.GetMaxNativeSizeInc();
    fragmentationLimitForSharedFullGC_ = config_.GetFragmentationLimitForSharedFullGC();
    dThread_ = dThread;
}
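
// Initialize splits the budget left after the non-movable and read-only spaces in
// half (rounded up to a region boundary): one half backs the shared old space and
// the other is reserved as its compress (evacuation target) space; the huge-object
// space is capped at the same figure.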

void SharedHeap::Destroy()
{
    if (sWorkManager_ != nullptr) {
        delete sWorkManager_;
        sWorkManager_ = nullptr;
    }
    if (sOldSpace_ != nullptr) {
        sOldSpace_->Reset();
        delete sOldSpace_;
        sOldSpace_ = nullptr;
    }
    if (sCompressSpace_ != nullptr) {
        sCompressSpace_->Reset();
        delete sCompressSpace_;
        sCompressSpace_ = nullptr;
    }
    if (sNonMovableSpace_ != nullptr) {
        sNonMovableSpace_->Reset();
        delete sNonMovableSpace_;
        sNonMovableSpace_ = nullptr;
    }
    if (sHugeObjectSpace_ != nullptr) {
        sHugeObjectSpace_->Destroy();
        delete sHugeObjectSpace_;
        sHugeObjectSpace_ = nullptr;
    }
    if (sReadOnlySpace_ != nullptr) {
        sReadOnlySpace_->ClearReadOnly();
        sReadOnlySpace_->Destroy();
        delete sReadOnlySpace_;
        sReadOnlySpace_ = nullptr;
    }
    if (sAppSpawnSpace_ != nullptr) {
        sAppSpawnSpace_->Reset();
        delete sAppSpawnSpace_;
        sAppSpawnSpace_ = nullptr;
    }
    if (sharedGC_ != nullptr) {
        delete sharedGC_;
        sharedGC_ = nullptr;
    }
    if (sharedFullGC_ != nullptr) {
        delete sharedFullGC_;
        sharedFullGC_ = nullptr;
    }
    if (sEvacuator_ != nullptr) {
        delete sEvacuator_;
        sEvacuator_ = nullptr;
    }
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (sSweeper_ != nullptr) {
        delete sSweeper_;
        sSweeper_ = nullptr;
    }
    if (sConcurrentMarker_ != nullptr) {
        delete sConcurrentMarker_;
        sConcurrentMarker_ = nullptr;
    }
    if (sharedGCMarker_ != nullptr) {
        delete sharedGCMarker_;
        sharedGCMarker_ = nullptr;
    }
    if (sharedGCMovableMarker_ != nullptr) {
        delete sharedGCMovableMarker_;
        sharedGCMovableMarker_ = nullptr;
    }
    if (sharedMemController_ != nullptr) {
        delete sharedMemController_;
        sharedMemController_ = nullptr;
    }
    if (Runtime::GetInstance()->IsHybridVm() && unifiedGC_ != nullptr) {
        delete unifiedGC_;
        unifiedGC_ = nullptr;
    }

    dThread_ = nullptr;
}

void SharedHeap::PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option)
{
    globalEnvConstants_ = globalEnvConstants;
    uint32_t totalThreadNum = common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = totalThreadNum - 1;
    sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
    sharedGCMarker_ = new SharedGCMarker(sWorkManager_);
    sharedGCMovableMarker_ = new SharedGCMovableMarker(sWorkManager_, this);
    sConcurrentMarker_ = new SharedConcurrentMarker(option.EnableSharedConcurrentMark() ?
        EnableConcurrentMarkType::ENABLE : EnableConcurrentMarkType::CONFIG_DISABLE);
    sSweeper_ = new SharedConcurrentSweeper(this, option.EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    sharedGC_ = new SharedGC(this);
    sEvacuator_ = new SharedGCEvacuator(this);
    sharedFullGC_ = new SharedFullGC(this);
    if (Runtime::GetInstance()->IsHybridVm()) {
        unifiedGC_ = new UnifiedGC();
    }
}

void SharedHeap::PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase)
{
    IncreaseTaskCount();
    common::Taskpool::GetCurrentTaskpool()->PostTask(std::make_unique<ParallelMarkTask>(dThread_->GetThreadId(),
                                                                                this, sharedTaskPhase));
}

bool SharedHeap::ParallelMarkTask::Run(uint32_t threadIndex)
{
    // Synchronizes-with WorkManager::Initialize: spin until its initialization
    // is visible to this marker thread before touching the work stacks.
    while (!sHeap_->GetWorkManager()->HasInitialized());
    switch (taskPhase_) {
        case SharedParallelMarkPhase::SHARED_MARK_TASK:
            sHeap_->GetSharedGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case SharedParallelMarkPhase::SHARED_COMPRESS_TASK:
            sHeap_->GetSharedGCMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        default: // LCOV_EXCL_BR_LINE
            break;
    }
    sHeap_->ReduceTaskCount();
    return true;
}

bool SharedHeap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK, "SharedHeap::AsyncClearTask::Run", "");
    sHeap_->ReclaimRegions(gcType_);
    return true;
}

void SharedHeap::NotifyGCCompleted()
{
    ASSERT(JSThread::GetCurrent() == dThread_);
    LockHolder lock(waitGCFinishedMutex_);
    gcFinished_ = true;
    waitGCFinishedCV_.SignalAll();
}

void SharedHeap::WaitGCFinished(JSThread *thread)
{
    ASSERT(thread->GetThreadId() != dThread_->GetThreadId());
    ASSERT(thread->IsInRunningState());
    ThreadSuspensionScope scope(thread);
    ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK, "SuspendTime::WaitGCFinished", "");
    LockHolder lock(waitGCFinishedMutex_);
    while (!gcFinished_) {
        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
    }
}

void SharedHeap::WaitGCFinishedAfterAllJSThreadEliminated()
{
    ASSERT(Runtime::GetInstance()->vmCount_ == 0);
    LockHolder lock(waitGCFinishedMutex_);
    while (!gcFinished_) {
        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
    }
}

void SharedHeap::DaemonCollectGarbage([[maybe_unused]] TriggerGCType gcType, [[maybe_unused]] GCReason gcReason)
{
    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
    ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_PARTIAL_GC ||
        gcType == TriggerGCType::SHARED_FULL_GC);
    ASSERT(JSThread::GetCurrent() == dThread_);
    {
        ThreadManagedScope runningScope(dThread_);
        SuspendAllScope scope(dThread_);
        SharedGCScope sharedGCScope;  // SharedGCScope should be entered after SuspendAllScope.
        CheckInHeapProfiler();
        gcType_ = gcType;
        GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
        if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
            // pre gc heap verify
            LOG_ECMA(DEBUG) << "pre gc shared heap verify";
            sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
            SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
        }
        switch (gcType) {
            case TriggerGCType::SHARED_PARTIAL_GC:
            case TriggerGCType::SHARED_GC: {
                sharedGC_->RunPhases();
                break;
            }
            case TriggerGCType::SHARED_FULL_GC: {
                sharedFullGC_->RunPhases();
                break;
            }
            default: // LCOV_EXCL_BR_LINE
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }

        if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
            // after gc heap verify
            LOG_ECMA(DEBUG) << "after gc shared heap verify";
            SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
        }
        CollectGarbageFinish(true, gcType);
    }
    InvokeSharedNativePointerCallbacks();
    // Don't process weak-node nativeFinalizeCallback here; these callbacks will be invoked after the local GC.
}

void SharedHeap::WaitAllTasksFinished(JSThread *thread)
{
    WaitGCFinished(thread);
    sSweeper_->WaitAllTaskFinished();
    WaitClearTaskFinished();
}

void SharedHeap::WaitAllTasksFinishedAfterAllJSThreadEliminated()
{
    WaitGCFinishedAfterAllJSThreadEliminated();
    sSweeper_->WaitAllTaskFinished();
    WaitClearTaskFinished();
}

bool SharedHeap::CheckOngoingConcurrentMarking()
{
    if (sConcurrentMarker_->IsEnabled() && !dThread_->IsReadyToConcurrentMark() &&
        sConcurrentMarker_->IsTriggeredConcurrentMark()) {
        // Only called by SharedGC to decide whether to remark, so there is no need to wait for marking to finish.
        return true;
    }
    return false;
}

void SharedHeap::CheckInHeapProfiler()
{
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    Runtime::GetInstance()->GCIterateThreadList([this](JSThread *thread) {
        if (thread->GetEcmaVM()->GetHeapProfile() != nullptr) {
            inHeapProfiler_ = true;
            return;
        }
    });
#else
    inHeapProfiler_ = false;
#endif
}

void SharedHeap::Prepare(bool inTriggerGCThread)
{
    WaitRunningTaskFinished();
    if (inTriggerGCThread) {
        sSweeper_->EnsureAllTaskFinished();
    } else {
        sSweeper_->WaitAllTaskFinished();
    }
    WaitClearTaskFinished();
}

SharedHeap::SharedGCScope::SharedGCScope()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
        if (pgoProfiler != nullptr) {
            pgoProfiler->SuspendByGC();
        }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
        thread->SetGcState(true);
#endif
    });
}

SharedHeap::SharedGCScope::~SharedGCScope()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        ASSERT(!thread->IsInRunningState());
        const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
        std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
        if (pgoProfiler != nullptr) {
            pgoProfiler->ResumeByGC();
        }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
        thread->SetGcState(false);
#endif
    });
}

void SharedHeap::PrepareRecordRegionsForReclaim()
{
    sOldSpace_->SetRecordRegion();
    sNonMovableSpace_->SetRecordRegion();
    sHugeObjectSpace_->SetRecordRegion();
}

void SharedHeap::Reclaim(TriggerGCType gcType)
{
    PrepareRecordRegionsForReclaim();
    sHugeObjectSpace_->ReclaimHugeRegion();

    if (parallelGC_) {
        clearTaskFinished_ = false;
        common::Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(dThread_->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}

void SharedHeap::ReclaimRegions(TriggerGCType gcType)
{
    if (gcType == TriggerGCType::SHARED_FULL_GC) {
        sCompressSpace_->Reset();
    }
    sOldSpace_->ReclaimCSets();
    sSweeper_->WaitAllTaskFinished();
    EnumerateOldSpaceRegionsWithRecord([] (Region *region) {
        region->ClearMarkGCBitset();
        region->ClearCrossRegionRSet();
    });
    if (!clearTaskFinished_) {
        LockHolder holder(waitClearTaskFinishedMutex_);
        clearTaskFinished_ = true;
        waitClearTaskFinishedCV_.SignalAll();
    }
}

void SharedHeap::DisableParallelGC(JSThread *thread)
{
    WaitAllTasksFinished(thread);
    dThread_->WaitFinished();
    parallelGC_ = false;
    maxMarkTaskCount_ = 0;
    sSweeper_->ConfigConcurrentSweep(false);
    sConcurrentMarker_->ConfigConcurrentMark(false);
}

void SharedHeap::EnableParallelGC(JSRuntimeOptions &option)
{
    uint32_t totalThreadNum = common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = totalThreadNum - 1;
    parallelGC_ = option.EnableParallelGC();
    if (auto workThreadNum = sWorkManager_->GetTotalThreadNum();
        workThreadNum != totalThreadNum + 1) {
        LOG_ECMA_MEM(ERROR) << "ThreadNum mismatch, totalThreadNum(sWorkerManager): " << workThreadNum << ", "
                            << "totalThreadNum(taskpool): " << (totalThreadNum + 1);
        delete sWorkManager_;
        sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
        UpdateWorkManager(sWorkManager_);
    }
    sConcurrentMarker_->ConfigConcurrentMark(option.EnableSharedConcurrentMark());
    sSweeper_->ConfigConcurrentSweep(option.EnableConcurrentSweep());
}

void SharedHeap::UpdateWorkManager(SharedGCWorkManager *sWorkManager)
{
    sConcurrentMarker_->ResetWorkManager(sWorkManager);
    sharedGCMarker_->ResetWorkManager(sWorkManager);
    sharedGCMovableMarker_->ResetWorkManager(sWorkManager);
    sharedGC_->ResetWorkManager(sWorkManager);
    sharedFullGC_->ResetWorkManager(sWorkManager);
}

void SharedHeap::TryTriggerLocalConcurrentMarking()
{
    if (localFullMarkTriggered_) {
        return;
    }
    if (reinterpret_cast<std::atomic<bool>*>(&localFullMarkTriggered_)->exchange(true, std::memory_order_relaxed)
            != false) { // LCOV_EXCL_BR_LINE
        return;
    }
    ASSERT(localFullMarkTriggered_ == true);
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        thread->SetFullMarkRequest();
    });
}
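
// In TryTriggerLocalConcurrentMarking, the plain read is only a fast path; the atomic
// exchange is what guarantees a single winner when several threads race past the first
// check, so SetFullMarkRequest() is broadcast to the thread list exactly once per cycle.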

size_t SharedHeap::VerifyHeapObjects(VerifyKind verifyKind) const
{
    size_t failCount = 0;
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sOldSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sNonMovableSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sHugeObjectSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        sAppSpawnSpace_->IterateOverMarkedObjects(verifier);
    }
    return failCount;
}

void SharedHeap::CollectGarbageFinish(bool inDaemon, TriggerGCType gcType)
{
    if (inDaemon) {
        ASSERT(JSThread::GetCurrent() == dThread_);
#ifndef NDEBUG
        ASSERT(dThread_->HasLaunchedSuspendAll());
#endif
        dThread_->FinishRunningTask();
        NotifyGCCompleted();
        // forceGC_ is updated in DaemonSuspendAll and protected by Runtime::mutatorLock_,
        // so no extra lock is needed here.
        smartGCStats_.forceGC_ = false;
    }
    localFullMarkTriggered_ = false;
    // Record the alive object size and other stats after the shared gc
    UpdateHeapStatsAfterGC(gcType);
    // Adjust the shared gc trigger threshold
    AdjustGlobalSpaceAllocLimit();
    spaceOvershoot_.store(0, std::memory_order_relaxed);
    GetEcmaGCStats()->RecordStatisticAfterGC();
    GetEcmaGCStats()->PrintGCStatistic();
    ProcessAllGCListeners();
    if (shouldThrowOOMError_ || shouldForceThrowOOMError_) {
        // A local heap can defer to a full GC instead of aborting at once when only `shouldThrowOOMError_`
        // is set, because it supports a partial compress GC; SharedHeap has no such fallback.
        DumpHeapSnapshotBeforeOOM(Runtime::GetInstance()->GetMainThread(), SharedHeapOOMSource::SHARED_GC);
        LOG_GC(FATAL) << "SharedHeap OOM";
        UNREACHABLE();
    }
}

void SharedHeap::SetGCThreadQosPriority(common::PriorityMode mode)
{
    dThread_->SetQosPriority(mode);
    common::Taskpool::GetCurrentTaskpool()->SetThreadPriority(mode);
}

bool SharedHeap::IsReadyToConcurrentMark() const
{
    return dThread_->IsReadyToConcurrentMark();
}

bool SharedHeap::ObjectExceedJustFinishStartupThresholdForGC() const
{
    size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_SHARED_THRESHOLD_RATIO;
    return ObjectExceedMaxHeapSize() || GetHeapObjectSize() > heapObjectSizeThresholdForGC;
}

bool SharedHeap::ObjectExceedJustFinishStartupThresholdForCM() const
{
    size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_SHARED_THRESHOLD_RATIO;
    size_t heapObjectSizeThresholdForCM = heapObjectSizeThresholdForGC
                                        * JUST_FINISH_STARTUP_SHARED_CONCURRENT_MARK_RATIO;
    return ObjectExceedMaxHeapSize() || GetHeapObjectSize() > heapObjectSizeThresholdForCM;
}
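
// The concurrent-mark threshold is the GC threshold scaled by
// JUST_FINISH_STARTUP_SHARED_CONCURRENT_MARK_RATIO, so right after startup the
// concurrent marker can kick in before the blocking-GC limit itself is reached.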

bool SharedHeap::CheckIfNeedStopCollectionByStartup()
{
    StartupStatus startupStatus = GetStartupStatus();
    switch (startupStatus) {
        case StartupStatus::ON_STARTUP:
            if (!ObjectExceedMaxHeapSize()) {
                return true;
            }
            break;
        case StartupStatus::JUST_FINISH_STARTUP:
            if (!ObjectExceedJustFinishStartupThresholdForGC()) {
                return true;
            }
            break;
        default:
            break;
    }
    return false;
}

bool SharedHeap::NeedStopCollection()
{
    if (CheckIfNeedStopCollectionByStartup()) {
        return true;
    }

    if (!InSensitiveStatus()) {
        return false;
    }

    if (!ObjectExceedMaxHeapSize()) {
        return true;
    }
    return false;
}
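
// NeedStopCollection summary: collection is suppressed during startup (and just after
// it) and while the app is in a sensitive state, but each suppression is bounded by a
// size check, so an overgrown heap is always collected eventually.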

void SharedHeap::TryAdjustSpaceOvershootByConfigSize()
{
    if (InGC() || !IsReadyToConcurrentMark()) {
        // No need to reserve space if SharedGC or SharedConcurrentMark has already been triggered.
        return;
    }
    // Set the overshoot so the gc threshold sits 8MB (the configured step) above the current heap size.
    int64_t heapObjectSize = static_cast<int64_t>(GetHeapObjectSize());
    int64_t remainSizeBeforeGC = static_cast<int64_t>(globalSpaceAllocLimit_) - heapObjectSize;
    int64_t overshootSize = static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - remainSizeBeforeGC;
    // The overshoot size should not be negative.
    spaceOvershoot_.store(std::max(overshootSize, (int64_t)0), std::memory_order_relaxed);
}
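
// TryAdjustSpaceOvershootByConfigSize example (illustrative numbers): with
// globalSpaceAllocLimit_ = 64MB, 60MB of live objects and a step overshoot of 8MB,
// remainSizeBeforeGC is 4MB, so spaceOvershoot_ becomes 4MB and the effective trigger
// moves to 68MB, i.e. 8MB above the current heap size.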

void SharedHeap::CompactHeapBeforeFork(JSThread *thread)
{
    ThreadManagedScope managedScope(thread);
    WaitGCFinished(thread);
    sharedFullGC_->SetForAppSpawn(true);
    CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
    sharedFullGC_->SetForAppSpawn(false);
}

void SharedHeap::MoveOldSpaceToAppspawn()
{
    auto committedSize = sOldSpace_->GetCommittedSize();
    sAppSpawnSpace_->SetInitialCapacity(committedSize);
    sAppSpawnSpace_->SetMaximumCapacity(committedSize);
    sOldSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity() - committedSize);
    sOldSpace_->SetMaximumCapacity(sOldSpace_->GetMaximumCapacity() - committedSize);
    sCompressSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity());
    sCompressSpace_->SetMaximumCapacity(sOldSpace_->GetMaximumCapacity());
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    sAppSpawnSpace_->SwapAllocationCounter(sOldSpace_);
#endif
    sOldSpace_->EnumerateRegions([&](Region *region) {
        region->SetRegionSpaceFlag(RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE);
        // Regions in SharedHeap do not need a PageTag threadId.
        PageTag(region, region->GetCapacity(), PageTagType::HEAP, region->GetSpaceTypeName());
        sAppSpawnSpace_->AddRegion(region);
        sAppSpawnSpace_->IncreaseLiveObjectSize(region->AliveObject());
    });
    sOldSpace_->GetRegionList().Clear();
    sOldSpace_->Reset();
}

void SharedHeap::ReclaimForAppSpawn()
{
    sSweeper_->WaitAllTaskFinished();
    sHugeObjectSpace_->ReclaimHugeRegion();
    sCompressSpace_->Reset();
    MoveOldSpaceToAppspawn();
    auto cb = [] (Region *region) {
        region->ClearMarkGCBitset();
    };
    sNonMovableSpace_->EnumerateRegions(cb);
    sHugeObjectSpace_->EnumerateRegions(cb);
}

void SharedHeap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] JSThread *thread,
                                           [[maybe_unused]] SharedHeapOOMSource source)
{
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(ENABLE_DUMP_IN_FAULTLOG)
    AppFreezeFilterCallback appfreezeCallback = Runtime::GetInstance()->GetAppFreezeFilterCallback();
    std::string eventConfig;
    bool shouldDump = (appfreezeCallback == nullptr || appfreezeCallback(getprocpid(), true, eventConfig));
    EcmaVM *vm = thread->GetEcmaVM();
    vm->GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetEcmaParamConfiguration().GetMaxHeapSize(),
                                                    GetHeapObjectSize(), eventConfig);
    if (!shouldDump) {
        LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, no dump quota.";
        return;
    }
#endif
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    HeapProfilerInterface *heapProfile = nullptr;
    if (source == SharedHeapOOMSource::SHARED_GC) {
#ifndef NDEBUG
        // If OOM occurs during SharedGC, use the main JSThread and create a new HeapProfiler instance
        // to dump once the GC has completed.
        ASSERT(thread == Runtime::GetInstance()->GetMainThread() && JSThread::GetCurrent()->HasLaunchedSuspendAll());
#endif
        heapProfile = HeapProfilerInterface::CreateNewInstance(vm);
    } else {
        if (vm->GetHeapProfile() != nullptr) {
            LOG_ECMA(ERROR) << "SharedHeap::DumpHeapSnapshotBeforeOOM, HeapProfile already exists";
            return;
        }
        heapProfile = HeapProfilerInterface::GetInstance(vm);
    }
    // Filter appfreeze when dumping.
    LOG_ECMA(INFO) << "SharedHeap::DumpHeapSnapshotBeforeOOM, trigger oom dump";
    base::BlockHookScope blockScope;
    DumpSnapShotOption dumpOption;
    dumpOption.dumpFormat = DumpFormat::BINARY;
    dumpOption.isVmMode = true;
    dumpOption.isPrivate = false;
    dumpOption.captureNumericValue = false;
    dumpOption.isFullGC = false;
    dumpOption.isSimplify = true;
    dumpOption.isSync = true;
    dumpOption.isBeforeFill = false;
    dumpOption.isDumpOOM = true;
    if (source == SharedHeapOOMSource::SHARED_GC) {
        heapProfile->DumpHeapSnapshotForOOM(dumpOption, true);
        HeapProfilerInterface::DestroyInstance(heapProfile);
    } else {
        heapProfile->DumpHeapSnapshotForOOM(dumpOption);
        HeapProfilerInterface::Destroy(vm);
    }
#endif // ENABLE_DUMP_IN_FAULTLOG
#endif // ECMASCRIPT_SUPPORT_SNAPSHOT
}

Heap::Heap(EcmaVM *ecmaVm)
    : BaseHeap(ecmaVm->GetEcmaParamConfiguration()),
      ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()), sHeap_(SharedHeap::GetInstance()) {}

void Heap::Initialize()
{
    enablePageTagThreadId_ = ecmaVm_->GetJSOptions().EnablePageTagThreadId();
    memController_ = new MemController(this);
    nativeAreaAllocator_ = ecmaVm_->GetNativeAreaAllocator();
    heapRegionAllocator_ = ecmaVm_->GetHeapRegionAllocator();
    size_t maxHeapSize = config_.GetMaxHeapSize();
    size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
    size_t maxSemiSpaceCapacity = config_.GetMaxSemiSpaceSize();
    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    activeSemiSpace_->Restart();
    activeSemiSpace_->SetWaterLine();

    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
    sOldTlab_ = new ThreadLocalAllocationBuffer(this);
    thread_->ReSetSOldSpaceAllocationAddress(sOldTlab_->GetTopAddress(), sOldTlab_->GetEndAddress());
    sNonMovableTlab_ = new ThreadLocalAllocationBuffer(this);
    thread_->ReSetSNonMovableSpaceAllocationAddress(sNonMovableTlab_->GetTopAddress(),
                                                    sNonMovableTlab_->GetEndAddress());
    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);

    // Whether the heap should be verified during gc.
    shouldVerifyHeap_ = ecmaVm_->GetJSOptions().EnableHeapVerify();
    // The from-space is not set up here.

    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) { // LCOV_EXCL_BR_LINE
        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
    if (!g_isEnableCMCGC) {
        nonMovableSpace_->Initialize();
    }
    size_t snapshotSpaceCapacity = config_.GetDefaultSnapshotSpaceSize();
    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
    size_t machineCodeSpaceCapacity = config_.GetDefaultMachineCodeSpaceSize();
    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);

    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) { // LCOV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
    }
    size_t oldSpaceCapacity = maxHeapSize - capacities;
    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
    globalSpaceNativeLimit_ = INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT;
    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    oldSpace_->Initialize();

    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    hugeMachineCodeSpace_ = new HugeMachineCodeSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    maxEvacuateTaskCount_ = common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);

    LOG_GC(DEBUG) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
                  << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
                  << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
                  << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
                  << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
                  << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
                  << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
                  << ", gcThreadNum = " << maxMarkTaskCount_;
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
    markType_ = MarkType::MARK_YOUNG;
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    workManager_ = new WorkManager(this, common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
    fullGC_ = new FullGC(this);

    partialGC_ = new PartialGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
        EnableConcurrentMarkType::CONFIG_DISABLE);
    nonMovableMarker_ = new NonMovableMarker(this);
    compressGCMarker_ = new CompressGCMarker(this);
    if (Runtime::GetInstance()->IsHybridVm()) {
        unifiedGCMarker_ = new UnifiedGCMarker(this);
    }
    evacuator_ = new ParallelEvacuator(this);
    incrementalMarker_ = new IncrementalMarker(this);
    gcListeners_.reserve(16U);
    nativeSizeTriggerGCThreshold_ = config_.GetMaxNativeSizeInc();
    incNativeSizeTriggerGC_ = config_.GetStepNativeSizeInc();
    nativeSizeOvershoot_ = config_.GetNativeSizeOvershoot();
    asyncClearNativePointerThreshold_ = config_.GetAsyncClearNativePointerThreshold();
    idleGCTrigger_ = new IdleGCTrigger(this, sHeap_, thread_, GetEcmaVM()->GetJSOptions().EnableOptionalLog());
}
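
// Heap::Initialize capacity budget: the old space receives whatever is left of
// maxHeapSize after both semispaces (2 * minSemiSpaceCapacity) plus the non-movable,
// snapshot, machine-code and read-only spaces are carved out, and initialization
// aborts above if that remainder would fall below MIN_OLD_SPACE_LIMIT.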

void Heap::ResetLargeCapacity()
{
    size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    size_t nonMovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
        nonMovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    size_t machineCodeSpaceCapacity = config_.GetDefaultMachineCodeSpaceSize();
    size_t capacities = minSemiSpaceCapacity * 2 + nonMovableSpaceCapacity +
        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (MAX_HEAP_SIZE < capacities || MAX_HEAP_SIZE - capacities < MIN_OLD_SPACE_LIMIT) {
        LOG_ECMA_MEM(FATAL) << "Capacities is too big to reset oldspace: " << capacities;
    }
    size_t newOldCapacity = MAX_HEAP_SIZE - capacities;
    LOG_ECMA(INFO) << "Main thread heap reset old capacity size: " << newOldCapacity;
    oldSpace_->SetInitialCapacity(newOldCapacity);
    oldSpace_->SetMaximumCapacity(newOldCapacity);
    compressSpace_->SetInitialCapacity(newOldCapacity);
    compressSpace_->SetMaximumCapacity(newOldCapacity);
    hugeObjectSpace_->SetInitialCapacity(newOldCapacity);
    hugeObjectSpace_->SetMaximumCapacity(newOldCapacity);
}

void SharedHeap::ResetLargeCapacity()
{
    size_t nonMovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
    size_t capacities = nonMovableSpaceCapacity + readOnlySpaceCapacity;
    if (MAX_SHARED_HEAP_SIZE < capacities || MAX_SHARED_HEAP_SIZE - capacities < MIN_OLD_SPACE_LIMIT) {
        LOG_ECMA_MEM(FATAL) << "Shared capacities is too big to reset oldspace: " << capacities;
    }
    size_t newOldCapacity = AlignUp((MAX_SHARED_HEAP_SIZE - capacities) / 2, DEFAULT_REGION_SIZE);
    LOG_ECMA(INFO) << "Shared heap reset old capacity size: " << newOldCapacity;
    sOldSpace_->SetInitialCapacity(newOldCapacity);
    sOldSpace_->SetMaximumCapacity(newOldCapacity);
    sCompressSpace_->SetInitialCapacity(newOldCapacity);
    sCompressSpace_->SetMaximumCapacity(newOldCapacity);
    sHugeObjectSpace_->SetInitialCapacity(newOldCapacity);
    sHugeObjectSpace_->SetMaximumCapacity(newOldCapacity);
}
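
// As in SharedHeap::Initialize, the shared old space and its compress space each get
// half of the remaining budget, aligned to DEFAULT_REGION_SIZE, and the huge-object
// space is capped at the same value.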

void Heap::ResetTlab()
{
    sOldTlab_->Reset();
    sNonMovableTlab_->Reset();
}

void Heap::FillBumpPointerForTlab()
{
    sOldTlab_->FillBumpPointer();
    sNonMovableTlab_->FillBumpPointer();
}

void Heap::ProcessSharedGCMarkingLocalBuffer()
{
    if (sharedGCData_.sharedConcurrentMarkingLocalBuffer_ != nullptr) {
        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
        sHeap_->GetWorkManager()->PushLocalBufferToGlobal(sharedGCData_.sharedConcurrentMarkingLocalBuffer_);
        ASSERT(sharedGCData_.sharedConcurrentMarkingLocalBuffer_ == nullptr);
    }
}

void Heap::ProcessSharedGCRSetWorkList()
{
    if (sharedGCData_.rSetWorkListHandler_ != nullptr) {
        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
        ASSERT(this == sharedGCData_.rSetWorkListHandler_->GetHeap());
        sHeap_->GetSharedGCMarker()->ProcessThenMergeBackRSetFromBoundJSThread(sharedGCData_.rSetWorkListHandler_);
        // The current thread may finish earlier than the daemon thread. To keep the state range accurate,
        // the flag is reset on both the js thread and the daemon thread; re-entering is harmless because
        // both writes store false.
        thread_->SetProcessingLocalToSharedRset(false);
        ASSERT(sharedGCData_.rSetWorkListHandler_ == nullptr);
    }
}

const GlobalEnvConstants *Heap::GetGlobalConst() const
{
    return thread_->GlobalConstants();
}

void Heap::Destroy()
{
    ProcessSharedGCRSetWorkList();
    ProcessSharedGCMarkingLocalBuffer();
    if (sOldTlab_ != nullptr) {
        sOldTlab_->Reset();
        delete sOldTlab_;
        sOldTlab_ = nullptr;
    }
    if (sNonMovableTlab_ != nullptr) {
        sNonMovableTlab_->Reset();
        delete sNonMovableTlab_;
        sNonMovableTlab_ = nullptr;
    }
    if (workManager_ != nullptr) {
        delete workManager_;
        workManager_ = nullptr;
    }
    if (activeSemiSpace_ != nullptr) {
        activeSemiSpace_->Destroy();
        delete activeSemiSpace_;
        activeSemiSpace_ = nullptr;
    }
    if (inactiveSemiSpace_ != nullptr) {
        inactiveSemiSpace_->Destroy();
        delete inactiveSemiSpace_;
        inactiveSemiSpace_ = nullptr;
    }
    if (oldSpace_ != nullptr) {
        oldSpace_->Reset();
        delete oldSpace_;
        oldSpace_ = nullptr;
    }
    if (compressSpace_ != nullptr) {
        compressSpace_->Destroy();
        delete compressSpace_;
        compressSpace_ = nullptr;
    }
    if (nonMovableSpace_ != nullptr) {
        nonMovableSpace_->Reset();
        delete nonMovableSpace_;
        nonMovableSpace_ = nullptr;
    }
    if (snapshotSpace_ != nullptr) {
        snapshotSpace_->Destroy();
        delete snapshotSpace_;
        snapshotSpace_ = nullptr;
    }
    if (machineCodeSpace_ != nullptr) {
        machineCodeSpace_->Reset();
        delete machineCodeSpace_;
        machineCodeSpace_ = nullptr;
    }
    if (hugeObjectSpace_ != nullptr) {
        hugeObjectSpace_->Destroy();
        delete hugeObjectSpace_;
        hugeObjectSpace_ = nullptr;
    }
    if (hugeMachineCodeSpace_ != nullptr) {
        hugeMachineCodeSpace_->Destroy();
        delete hugeMachineCodeSpace_;
        hugeMachineCodeSpace_ = nullptr;
    }
    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
        readOnlySpace_->ClearReadOnly();
        readOnlySpace_->Destroy();
        delete readOnlySpace_;
        readOnlySpace_ = nullptr;
    }
    if (appSpawnSpace_ != nullptr) {
        appSpawnSpace_->Reset();
        delete appSpawnSpace_;
        appSpawnSpace_ = nullptr;
    }
    if (partialGC_ != nullptr) {
        delete partialGC_;
        partialGC_ = nullptr;
    }
    if (fullGC_ != nullptr) {
        delete fullGC_;
        fullGC_ = nullptr;
    }

    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (memController_ != nullptr) {
        delete memController_;
        memController_ = nullptr;
    }
    if (sweeper_ != nullptr) {
        delete sweeper_;
        sweeper_ = nullptr;
    }
    if (concurrentMarker_ != nullptr) {
        delete concurrentMarker_;
        concurrentMarker_ = nullptr;
    }
    if (incrementalMarker_ != nullptr) {
        delete incrementalMarker_;
        incrementalMarker_ = nullptr;
    }
    if (nonMovableMarker_ != nullptr) {
        delete nonMovableMarker_;
        nonMovableMarker_ = nullptr;
    }
    if (compressGCMarker_ != nullptr) {
        delete compressGCMarker_;
        compressGCMarker_ = nullptr;
    }
    if (Runtime::GetInstance()->IsHybridVm() && unifiedGCMarker_ != nullptr) {
        delete unifiedGCMarker_;
        unifiedGCMarker_ = nullptr;
    }
    if (evacuator_ != nullptr) {
        delete evacuator_;
        evacuator_ = nullptr;
    }
}

void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}

void Heap::GetHeapPrepare()
{
    // Ensure both the local and the shared heap are prepared.
    Prepare();
    SharedHeap *sHeap = SharedHeap::GetInstance();
    sHeap->Prepare(false);
}

void Heap::Resume(TriggerGCType gcType)
{
    activeSemiSpace_->SetWaterLine();

    if (mode_ != HeapMode::SPAWN &&
        activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC(), thread_)) {
        // If the activeSpace capacity changes, the oldSpace maximumCapacity should change too.
        size_t multiple = 2;
        size_t oldSpaceMaxLimit = 0;
        if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
            size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
        } else {
            size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
        }
        inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
    }

    PrepareRecordRegionsForReclaim();
    hugeObjectSpace_->ReclaimHugeRegion();
    hugeMachineCodeSpace_->ReclaimHugeRegion();
    if (parallelGC_) {
        if (gcType == TriggerGCType::OLD_GC) {
            isCSetClearing_.store(true, std::memory_order_release);
        }
        clearTaskFinished_ = false;
        common::Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}

void Heap::ResumeForAppSpawn()
{
    sweeper_->WaitAllTaskFinished();
    hugeObjectSpace_->ReclaimHugeRegion();
    hugeMachineCodeSpace_->ReclaimHugeRegion();
    inactiveSemiSpace_->ReclaimRegions();
    oldSpace_->Reset();
    auto cb = [] (Region *region) {
        region->ClearMarkGCBitset();
    };
    nonMovableSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

void Heap::CompactHeapBeforeFork()
{
    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
}

void Heap::DisableParallelGC()
{
    WaitAllTasksFinished();
    parallelGC_ = false;
    maxEvacuateTaskCount_ = 0;
    maxMarkTaskCount_ = 0;
    sweeper_->ConfigConcurrentSweep(false);
    concurrentMarker_->ConfigConcurrentMark(false);
    common::Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
}

void Heap::EnableParallelGC()
{
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    maxEvacuateTaskCount_ = common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    if (auto totalThreadNum = workManager_->GetTotalThreadNum();
        totalThreadNum != maxEvacuateTaskCount_ + 1) {
        LOG_ECMA_MEM(WARN) << "ThreadNum mismatch, totalThreadNum(workerManager): " << totalThreadNum << ", "
                           << "totalThreadNum(taskpool): " << (maxEvacuateTaskCount_ + 1);
        delete workManager_;
        workManager_ = new WorkManager(this, maxEvacuateTaskCount_ + 1);
        UpdateWorkManager(workManager_);
    }
    ASSERT(maxEvacuateTaskCount_ > 0);
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
                                         maxEvacuateTaskCount_ - 1);
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
    concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
}

TriggerGCType Heap::SelectGCType() const
{
    // If concurrent marking is enabled, TryTriggerConcurrentMarking decides which GC to choose.
    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark()) {
        return YOUNG_GC;
    }
    if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
        GetHeapObjectSize() <= globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize() &&
        !GlobalNativeSizeLargerThanLimit()) {
        return YOUNG_GC;
    }
    return OLD_GC;
}
1265 
1266 void Heap::CollectGarbageImpl(TriggerGCType gcType, GCReason reason)
1267 {
1268     ASSERT("CollectGarbageImpl should not be called" && !g_isEnableCMCGC);
1269     Jit::JitGCLockHolder lock(GetEcmaVM()->GetJSThread());
1270     {
1271 #if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
1272         if (UNLIKELY(!thread_->IsInRunningStateOrProfiling())) { // LCOV_EXCL_BR_LINE
1273             LOG_ECMA(FATAL) << "Local GC must be in jsthread running state";
1274             UNREACHABLE();
1275         }
1276 #endif
1277         if (thread_->IsCrossThreadExecutionEnable() || GetOnSerializeEvent()) {
1278             ProcessGCListeners();
1279             return;
1280         }
1281         RecursionScope recurScope(this, HeapType::LOCAL_HEAP);
1282 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
1283         [[maybe_unused]] GcStateScope scope(thread_);
1284 #endif
1285         CHECK_NO_GC;
1286         if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
1287             // pre gc heap verify
1288             LOG_ECMA(DEBUG) << "pre gc heap verify";
1289             ProcessSharedGCRSetWorkList();
1290             Verification(this, VerifyKind::VERIFY_PRE_GC).VerifyAll();
1291         }
1292 
1293 #if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
1294         gcType = TriggerGCType::FULL_GC;
1295 #endif
1296         if (fullGCRequested_ && thread_->IsReadyToConcurrentMark() && gcType != TriggerGCType::FULL_GC) {
1297             gcType = TriggerGCType::FULL_GC;
1298         }
1299         if (oldGCRequested_ && gcType != TriggerGCType::FULL_GC) {
1300             gcType = TriggerGCType::OLD_GC;
1301         }
1302         if (shouldThrowOOMError_) {
1303             // Force Full GC after failed Old GC to avoid OOM
1304             LOG_ECMA(INFO) << "Old space is almost OOM, attempting to trigger full gc to avoid OOM.";
1305             gcType = TriggerGCType::FULL_GC;
1306         }
1307         oldGCRequested_ = false;
1308         oldSpace_->AdjustOvershootSize();
1309 
1310         size_t originalNewSpaceSize = activeSemiSpace_->GetHeapObjectSize();
1311         if (!GetJSThread()->IsReadyToConcurrentMark() && markType_ == MarkType::MARK_FULL) {
1312             GetEcmaGCStats()->SetGCReason(reason);
1313         } else {
1314             GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, reason);
1315         }
1316         memController_->StartCalculationBeforeGC();
1317         StatisticHeapObject(gcType);
1318         gcType_ = gcType;
1319         {
1320             pgo::PGODumpPauseScope pscope(GetEcmaVM()->GetPGOProfiler());
1321             switch (gcType) {
1322                 case TriggerGCType::YOUNG_GC:
1323                     // Use partial GC for young generation.
1324                     if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1325                         SetMarkType(MarkType::MARK_YOUNG);
1326                     }
1327                     if (markType_ == MarkType::MARK_FULL) {
1328                         // gcType_ must be set correctly here; ProcessNativeReferences relies on it.
1329                         gcType_ = TriggerGCType::OLD_GC;
1330                     }
1331                     partialGC_->RunPhases();
1332                     break;
1333                 case TriggerGCType::OLD_GC: {
1334                     bool fullConcurrentMarkRequested = false;
1335                     // Check whether a full concurrent mark needs to be triggered instead of an old gc
1336                     if (concurrentMarker_->IsEnabled() &&
1337                         (thread_->IsReadyToConcurrentMark() || markType_ == MarkType::MARK_YOUNG) &&
1338                         reason == GCReason::ALLOCATION_LIMIT) {
1339                         fullConcurrentMarkRequested = true;
1340                     }
1341                     if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
1342                         // Wait for existing concurrent marking tasks to be finished (if any),
1343                         // and reset concurrent marker's status for full mark.
1344                         bool concurrentMark = CheckOngoingConcurrentMarking();
1345                         if (concurrentMark) {
1346                             concurrentMarker_->Reset();
1347                         }
1348                     }
1349                     SetMarkType(MarkType::MARK_FULL);
1350                     if (fullConcurrentMarkRequested && idleTask_ == IdleTaskType::NO_TASK) {
1351                         LOG_ECMA(INFO)
1352                             << "Triggering old gc here may take a long time; trigger full concurrent mark instead";
1353                         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1354                         TriggerConcurrentMarking(MarkReason::OLD_GC_WITHOUT_FULLMARK);
1355                         oldGCRequested_ = true;
1356                         ProcessGCListeners();
1357                         memController_->ResetCalculationWithoutGC();
1358                         return;
1359                     }
1360                     partialGC_->RunPhases();
1361                     break;
1362                 }
1363                 case TriggerGCType::FULL_GC:
1364                     fullGC_->SetForAppSpawn(false);
1365                     fullGC_->RunPhases();
1366                     if (fullGCRequested_) {
1367                         fullGCRequested_ = false;
1368                     }
1369                     break;
1370                 case TriggerGCType::APPSPAWN_FULL_GC:
1371                     fullGC_->SetForAppSpawn(true);
1372                     fullGC_->RunPhasesForAppSpawn();
1373                     break;
1374                 default: // LCOV_EXCL_BR_LINE
1375                     LOG_ECMA(FATAL) << "this branch is unreachable";
1376                     UNREACHABLE();
1377                     break;
1378             }
1379             ASSERT(thread_->IsPropertyCacheCleared());
1380         }
1381         UpdateHeapStatsAfterGC(gcType_);
1382         ClearIdleTask();
1383         // Adjust the old space capacity and global limit for the first partial GC with full mark.
1384         // Trigger full mark next time if the current survival rate is much less than half the average survival rates.
1385         AdjustBySurvivalRate(originalNewSpaceSize);
1386         memController_->StopCalculationAfterGC(gcType);
1387         if (gcType == TriggerGCType::FULL_GC || IsConcurrentFullMark()) {
1388             // The limits of old space and global space can be recomputed only when the gc type
1389             // is not semi GC and the old space sweeping has finished.
1390             RecomputeLimits();
1391             ResetNativeSizeAfterLastGC();
1392             OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark " << IsConcurrentFullMark()
1393                                         << " global object size " << GetHeapObjectSize()
1394                                         << " global committed size " << GetCommittedSize()
1395                                         << " global limit " << globalSpaceAllocLimit_;
1396             markType_ = MarkType::MARK_YOUNG;
1397         }
1398         if (concurrentMarker_->IsRequestDisabled()) {
1399             concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
1400         }
1401         // GC log
1402         GetEcmaGCStats()->RecordStatisticAfterGC();
1403 #ifdef ENABLE_HISYSEVENT
1404         GetEcmaGCKeyStats()->IncGCCount();
1405         if (GetEcmaGCKeyStats()->CheckIfMainThread() && GetEcmaGCKeyStats()->CheckIfKeyPauseTime()) {
1406             GetEcmaGCKeyStats()->AddGCStatsToKey();
1407         }
1408 #endif
1409         GetEcmaGCStats()->PrintGCStatistic();
1410     }
1411 
1412     if (gcType_ == TriggerGCType::OLD_GC) {
1413         // During full concurrent mark, the non movable space can temporarily have a 2M overshoot, which means its
1414         // max heap size can temporarily reach 18M; after the partial old gc the size must retract to below 16M,
1415         // otherwise old GC would be triggered frequently. Outside the concurrent mark period, the non movable
1416         // space max heap size is 16M, and if it is exceeded an OOM exception is thrown; this check enforces that.
1417         CheckNonMovableSpaceOOM();
1418     }
1419     // An OOMError object is not allowed to be allocated during the gc process, so throw it after gc
1420     if (shouldThrowOOMError_ && gcType_ == TriggerGCType::FULL_GC) {
1421         oldSpace_->ResetCommittedOverSizeLimit();
1422         if (oldSpace_->CommittedSizeExceed()) { // LCOV_EXCL_BR_LINE
1423             sweeper_->EnsureAllTaskFinished();
1424             DumpHeapSnapshotBeforeOOM();
1425             StatisticHeapDetail();
1426             ThrowOutOfMemoryError(thread_, oldSpace_->GetMergeSize(), " OldSpace::Merge");
1427         }
1428         oldSpace_->ResetMergeSize();
1429         shouldThrowOOMError_ = false;
1430     }
1431     // Region allocation failed during GC, MUST throw OOM here
1432     if (shouldForceThrowOOMError_) {
1433         sweeper_->EnsureAllTaskFinished();
1434         DumpHeapSnapshotBeforeOOM();
1435         StatisticHeapDetail();
1436         ThrowOutOfMemoryError(thread_, DEFAULT_REGION_SIZE, " HeapRegionAllocator::AllocateAlignedRegion");
1437     }
1438     // Update the recorded heap object size after gc if in sensitive status
1439     if (GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE) {
1440         SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
1441     }
1442 
1443     if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
1444         // post gc heap verify
1445         LOG_ECMA(DEBUG) << "post gc heap verify";
1446         Verification(this, VerifyKind::VERIFY_POST_GC).VerifyAll();
1447     }
1448 
1449 #if defined(ECMASCRIPT_SUPPORT_TRACING)
1450     auto tracing = GetEcmaVM()->GetTracing();
1451     if (tracing != nullptr) {
1452         tracing->TraceEventRecordMemory();
1453     }
1454 #endif
1455     ProcessGCListeners();
1456 
1457 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1458     if (!hasOOMDump_ && (g_betaVersion || g_developMode)) {
1459         ThresholdReachedDump();
1460     }
1461 #endif
1462 
1463     if (GetEcmaGCKeyStats()->CheckIfMainThread()) {
1464         GetEcmaGCKeyStats()->ProcessLongGCEvent();
1465     }
1466 
1467     if (GetEcmaVM()->IsEnableBaselineJit() || GetEcmaVM()->IsEnableFastJit()) {
1468         // check whether machine code space is sufficient
1469         int remainSize = static_cast<int>(config_.GetDefaultMachineCodeSpaceSize()) -
1470             static_cast<int>(GetMachineCodeSpace()->GetHeapObjectSize());
1471         Jit::GetInstance()->CheckMechineCodeSpaceMemory(GetEcmaVM()->GetJSThread(), remainSize);
1472     }
1473 }
1474 
1475 void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
1476 {
1477     if (g_isEnableCMCGC) {
1478         common::GCReason cmcReason = common::GC_REASON_USER;
1479         bool async = true;
1480         if (gcType == TriggerGCType::FULL_GC || gcType == TriggerGCType::SHARED_FULL_GC ||
1481             gcType == TriggerGCType::APPSPAWN_FULL_GC || gcType == TriggerGCType::APPSPAWN_SHARED_FULL_GC ||
1482             reason == GCReason::ALLOCATION_FAILED) {
1483             cmcReason = common::GC_REASON_BACKUP;
1484             async = false;
1485         }
1486         common::BaseRuntime::RequestGC(cmcReason, async, common::GC_TYPE_FULL);
1487         return;
1488     }
1489     CollectGarbageImpl(gcType, reason);
1490     ProcessGCCallback();
1491 }
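// Note on the CMC GC path above (a summary of the mapping, not additional logic):
// any FULL_GC-family trigger type or an ALLOCATION_FAILED reason is forwarded as a
// synchronous backup GC, while every other trigger becomes an asynchronous
// user-requested full GC via common::BaseRuntime::RequestGC.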
1492 
1493 void Heap::ProcessGCCallback()
1494 {
1495     // A weak node's nativeFinalizeCallback may execute JS and change the weakNodeList status,
1496     // and may even lead to another GC, so it has to be invoked after this GC process.
1497     if (g_isEnableCMCGC) {
1498         thread_->InvokeWeakNodeFreeGlobalCallBack();
1499     }
1500     thread_->InvokeWeakNodeNativeFinalizeCallback();
1501     // PostTask for ProcessNativeDelete
1502     CleanCallback();
1503     JSFinalizationRegistry::CheckAndCall(thread_);
1504     // clear env cache
1505     thread_->ClearCache();
1506 }
1507 
1508 void BaseHeap::ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
1509     bool NonMovableObjNearOOM)
1510 { // LCOV_EXCL_START
1511     GetEcmaGCStats()->PrintGCMemoryStatistic();
1512     std::ostringstream oss;
1513     if (NonMovableObjNearOOM) {
1514         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1515             << " function name: " << functionName.c_str();
1516     } else {
1517         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1518             << functionName.c_str();
1519     }
1520     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1521     THROW_OOM_ERROR(thread, oss.str().c_str());
1522 } // LCOV_EXCL_STOP
1523 
1524 void BaseHeap::SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName)
1525 {
1526     std::ostringstream oss;
1527     oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1528         << functionName.c_str();
1529     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1530 
1531     EcmaVM *ecmaVm = thread->GetEcmaVM();
1532     ObjectFactory *factory = ecmaVm->GetFactory();
1533     JSHandle<JSObject> error = factory->GetJSError(ErrorType::OOM_ERROR, oss.str().c_str(), StackCheck::NO);
1534     thread->SetException(error.GetTaggedValue());
1535 }
1536 
1537 void BaseHeap::ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
1538     bool NonMovableObjNearOOM)
1539 { // LCOV_EXCL_START
1540     GetEcmaGCStats()->PrintGCMemoryStatistic();
1541     std::ostringstream oss;
1542     if (NonMovableObjNearOOM) {
1543         oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1544             << " function name: " << functionName.c_str();
1545     } else {
1546         oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
1547     }
1548     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1549     EcmaVM *ecmaVm = thread->GetEcmaVM();
1550     JSHandle<GlobalEnv> env = ecmaVm->GetGlobalEnv();
1551     JSHandle<JSObject> error = JSHandle<JSObject>::Cast(env->GetOOMErrorObject());
1552 
1553     thread->SetException(error.GetTaggedValue());
1554     ecmaVm->HandleUncatchableError();
1555 } // LCOV_EXCL_STOP
1556 
1557 void BaseHeap::FatalOutOfMemoryError(size_t size, std::string functionName)
1558 { // LCOV_EXCL_START
1559     GetEcmaGCStats()->PrintGCMemoryStatistic();
1560     LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
1561                         << " function name: " << functionName.c_str();
1562 } // LCOV_EXCL_STOP
1563 
1564 void Heap::CheckNonMovableSpaceOOM()
1565 {
1566     if (nonMovableSpace_->GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE) { // LCOV_EXCL_BR_LINE
1567         sweeper_->EnsureAllTaskFinished();
1568         DumpHeapSnapshotBeforeOOM();
1569         StatisticHeapDetail();
1570         ThrowOutOfMemoryError(thread_, nonMovableSpace_->GetHeapObjectSize(), "Heap::CheckNonMovableSpaceOOM", true);
1571     }
1572 }
1573 
1574 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
1575 {
1576     promotedSize_ = GetEvacuator()->GetPromotedSize();
1577     if (originalNewSpaceSize <= 0) {
1578         return;
1579     }
1580     semiSpaceCopiedSize_ = activeSemiSpace_->GetHeapObjectSize();
1581     double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
1582     double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
1583     double survivalRate = std::min(copiedRate + promotedRate, 1.0);
1584     OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
1585                                 << " survivalRate: " << survivalRate;
1586     if (!oldSpaceLimitAdjusted_) {
1587         memController_->AddSurvivalRate(survivalRate);
1588         AdjustOldSpaceLimit();
1589     } else {
1590         double averageSurvivalRate = memController_->GetAverageSurvivalRate();
1591         // 2 means half
1592         if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
1593             SetFullMarkRequestedState(true);
1594             OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
1595                 << " is less than half the average survival rates: " << averageSurvivalRate
1596                 << ". Trigger full mark next time.";
1597             // Survival rate of full mark is precise. Reset recorded survival rates.
1598             memController_->ResetRecordedSurvivalRates();
1599         }
1600         memController_->AddSurvivalRate(survivalRate);
1601     }
1602 }
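// Worked example for AdjustBySurvivalRate (illustrative numbers): with
// originalNewSpaceSize = 8 MB, semiSpaceCopiedSize_ = 1 MB and promotedSize_ = 1 MB,
// survivalRate = 1/8 + 1/8 = 0.25. If the average of the recorded survival rates is
// 0.7 (and, by assumption, GROW_OBJECT_SURVIVAL_RATE is below 0.7), then
// 0.7 / 2 = 0.35 > 0.25, so a full mark is requested for the next GC and the
// recorded survival rates are reset.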
1603 
1604 size_t Heap::VerifyHeapObjects(VerifyKind verifyKind) const
1605 {
1606     size_t failCount = 0;
1607     {
1608         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1609         activeSemiSpace_->IterateOverObjects(verifier);
1610     }
1611 
1612     {
1613         if (verifyKind == VerifyKind::VERIFY_EVACUATE_YOUNG ||
1614             verifyKind == VerifyKind::VERIFY_EVACUATE_OLD ||
1615             verifyKind == VerifyKind::VERIFY_EVACUATE_FULL) {
1616             inactiveSemiSpace_->EnumerateRegions([this](Region *region) {
1617                 region->IterateAllMarkedBits([this](void *addr) {
1618                     VerifyObjectVisitor::VerifyInactiveSemiSpaceMarkedObject(this, addr);
1619                 });
1620             });
1621         }
1622     }
1623 
1624     {
1625         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1626         oldSpace_->IterateOverObjects(verifier);
1627     }
1628 
1629     {
1630         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1631         appSpawnSpace_->IterateOverMarkedObjects(verifier);
1632     }
1633 
1634     {
1635         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1636         nonMovableSpace_->IterateOverObjects(verifier);
1637     }
1638 
1639     {
1640         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1641         hugeObjectSpace_->IterateOverObjects(verifier);
1642     }
1643     {
1644         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1645         hugeMachineCodeSpace_->IterateOverObjects(verifier);
1646     }
1647     {
1648         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1649         machineCodeSpace_->IterateOverObjects(verifier);
1650     }
1651     {
1652         VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1653         snapshotSpace_->IterateOverObjects(verifier);
1654     }
1655     return failCount;
1656 }
1657 
1658 size_t Heap::VerifyOldToNewRSet(VerifyKind verifyKind) const
1659 {
1660     size_t failCount = 0;
1661     VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1662     oldSpace_->IterateOldToNewOverObjects(verifier);
1663     appSpawnSpace_->IterateOldToNewOverObjects(verifier);
1664     nonMovableSpace_->IterateOldToNewOverObjects(verifier);
1665     machineCodeSpace_->IterateOldToNewOverObjects(verifier);
1666     return failCount;
1667 }
1668 
1669 void Heap::AdjustOldSpaceLimit()
1670 {
1671     if (oldSpaceLimitAdjusted_) {
1672         return;
1673     }
1674     size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
1675     size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
1676     size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
1677         static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
1678     if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
1679         GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
1680     } else {
1681         oldSpaceLimitAdjusted_ = true;
1682     }
1683 
1684     size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
1685         static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
1686     if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
1687         globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
1688     }
1689     OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
1690         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
1691 }
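// Worked example for AdjustOldSpaceLimit (illustrative numbers): with
// minGrowingStep = 4 MB, an initial old-space limit of 64 MB, 30 MB of live
// old-space objects and an average survival rate of 0.6, the candidate limit is
// max(30 + 4, 64 * 0.6) = 38.4 MB. Since 38.4 MB <= 64 MB, the initial capacity
// is lowered; once the candidate exceeds the current limit, oldSpaceLimitAdjusted_
// is set and this one-shot adjustment stops.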
1692 
1693 void BaseHeap::OnAllocateEvent([[maybe_unused]] EcmaVM *ecmaVm, [[maybe_unused]] TaggedObject* address,
1694                                [[maybe_unused]] size_t size)
1695 {
1696 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1697     HeapProfilerInterface *profiler = ecmaVm->GetHeapProfile();
1698     if (profiler != nullptr) {
1699         base::BlockHookScope blockScope;
1700         profiler->AllocationEvent(address, size);
1701     }
1702 #endif
1703 }
1704 
1705 void Heap::DumpHeapSnapshotBeforeOOM()
1706 {
1707 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(ENABLE_DUMP_IN_FAULTLOG)
1708     AppFreezeFilterCallback appfreezeCallback = Runtime::GetInstance()->GetAppFreezeFilterCallback();
1709     std::string eventConfig;
1710     bool shouldDump = (appfreezeCallback == nullptr || appfreezeCallback(getprocpid(), true, eventConfig));
1711     GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetHeapLimitSize(), GetLiveObjectSize(), eventConfig);
1712     if (!shouldDump) {
1713         LOG_ECMA(INFO) << "Heap::DumpHeapSnapshotBeforeOOM, no dump quota.";
1714         return;
1715     }
1716 #endif
1717 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
1718 #if defined(ENABLE_DUMP_IN_FAULTLOG)
1719     if (ecmaVm_->GetHeapProfile() != nullptr) {
1720         LOG_ECMA(ERROR) << "Heap::DumpHeapSnapshotBeforeOOM, a heap profiler already exists, skip dump";
1721         return;
1722     }
1723     // Filter appfreeze when dump.
1724     LOG_ECMA(INFO) << " Heap::DumpHeapSnapshotBeforeOOM, trigger oom dump";
1725     base::BlockHookScope blockScope;
1726     HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
1727     hasOOMDump_ = true;
1728     // The vm should always allocate young space successfully. A real OOM occurs in the non-young spaces.
1729     DumpSnapShotOption dumpOption;
1730     dumpOption.dumpFormat = DumpFormat::BINARY;
1731     dumpOption.isVmMode = true;
1732     dumpOption.isPrivate = false;
1733     dumpOption.captureNumericValue = false;
1734     dumpOption.isFullGC = false;
1735     dumpOption.isSimplify = true;
1736     dumpOption.isSync = true;
1737     dumpOption.isBeforeFill = false;
1738     dumpOption.isDumpOOM = true;
1739     heapProfile->DumpHeapSnapshotForOOM(dumpOption);
1740     HeapProfilerInterface::Destroy(ecmaVm_);
1741 #endif // ENABLE_DUMP_IN_FAULTLOG
1742 #endif // ECMASCRIPT_SUPPORT_SNAPSHOT
1743 }
1744 
1745 void Heap::AdjustSpaceSizeForAppSpawn()
1746 {
1747     SetHeapMode(HeapMode::SPAWN);
1748     size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
1749     activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
1750     auto committedSize = appSpawnSpace_->GetCommittedSize();
1751     appSpawnSpace_->SetInitialCapacity(committedSize);
1752     appSpawnSpace_->SetMaximumCapacity(committedSize);
1753     oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
1754     oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
1755 }
1756 
1757 void Heap::AddAllocationInspectorToAllSpaces(AllocationInspector *inspector)
1758 {
1759     ASSERT(inspector != nullptr);
1760     // activeSemiSpace_/inactiveSemiSpace_:
1761     // only add an inspector to activeSemiSpace_; while sweeping for gc, the inspector needs to be swept as well.
1762     activeSemiSpace_->AddAllocationInspector(inspector);
1763     // oldSpace_/compressSpace_:
1764     // only add an inspector to oldSpace_; while sweeping for gc, the inspector needs to be swept as well.
1765     oldSpace_->AddAllocationInspector(inspector);
1766     // readOnlySpace_ does not need an allocation inspector.
1767     // appSpawnSpace_ does not need an allocation inspector.
1768     nonMovableSpace_->AddAllocationInspector(inspector);
1769     machineCodeSpace_->AddAllocationInspector(inspector);
1770     hugeObjectSpace_->AddAllocationInspector(inspector);
1771     hugeMachineCodeSpace_->AddAllocationInspector(inspector);
1772 }
1773 
1774 void Heap::ClearAllocationInspectorFromAllSpaces()
1775 {
1776     activeSemiSpace_->ClearAllocationInspector();
1777     oldSpace_->ClearAllocationInspector();
1778     nonMovableSpace_->ClearAllocationInspector();
1779     machineCodeSpace_->ClearAllocationInspector();
1780     hugeObjectSpace_->ClearAllocationInspector();
1781     hugeMachineCodeSpace_->ClearAllocationInspector();
1782 }
1783 
1784 void Heap::RecomputeLimits()
1785 {
1786     double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
1787     double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
1788     size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1789         hugeMachineCodeSpace_->GetHeapObjectSize();
1790     size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
1791 
1792     double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
1793     size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
1794     size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
1795         maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
1796     size_t maxGlobalSize = config_.GetMaxHeapSize() - newSpaceCapacity;
1797     size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
1798                                                                      maxGlobalSize, newSpaceCapacity, growingFactor);
1799     globalSpaceAllocLimit_ = newGlobalSpaceLimit;
1800     oldSpace_->SetInitialCapacity(newOldSpaceLimit);
1801     globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(GetGlobalNativeSize(), MIN_HEAP_SIZE,
1802                                                                   MAX_GLOBAL_NATIVE_LIMIT, newSpaceCapacity,
1803                                                                   growingFactor);
1804     globalSpaceNativeLimit_ = std::max(globalSpaceNativeLimit_, GetGlobalNativeSize()
1805                                         + config_.GetMinNativeLimitGrowingStep());
1806     OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
1807         << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
1808         << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
1809     if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize() &&
1810         (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) { // 2: means half
1811         OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is much lower than the committed size"
1812                                     << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
1813                                     << " Committed Size: " << oldSpace_->GetCommittedSize();
1814         SetFullMarkRequestedState(true);
1815     }
1816 }
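// Sketch of the recomputation above (the behavior of CalculateAllocLimit is
// assumed from its usage here): the new limits scale the current live sizes by
// growingFactor and are clamped between the MIN_* floor and the max capacity,
// e.g. ~40 MB of live old space with growingFactor = 1.5 yields a limit near
// 60 MB if that fits under maxOldSpaceCapacity. The final branch requests a full
// mark when the committed old space is more than (1 / SHRINK_OBJECT_SURVIVAL_RATE)
// times the live size and half the committed size still exceeds the new limit,
// i.e. the old space looks badly fragmented.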
1817 
1818 bool Heap::CheckAndTriggerOldGC(size_t size)
1819 {
1820     bool isFullMarking = IsConcurrentFullMark() && GetJSThread()->IsMarking();
1821     bool isNativeSizeLargeTrigger = isFullMarking ? false : GlobalNativeSizeLargerThanLimit();
1822     if (isFullMarking && oldSpace_->GetOvershootSize() == 0) {
1823         oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1824     }
1825     if ((isNativeSizeLargeTrigger || OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) ||
1826         GetHeapObjectSize() > globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize()) &&
1827         !NeedStopCollection()) {
1828         if (isFullMarking && oldSpace_->GetOvershootSize() < config_.GetOldSpaceMaxOvershootSize()) {
1829             oldSpace_->IncreaseOvershootSize(config_.GetOldSpaceStepOvershootSize());
1830             return false;
1831         }
1832         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
1833         if (!oldGCRequested_) {
1834             return true;
1835         }
1836     }
1837     return false;
1838 }
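// During an ongoing full concurrent mark the old space is allowed to overshoot
// its limit in GetOldSpaceStepOvershootSize() increments (capped by
// GetOldSpaceMaxOvershootSize()) instead of starting a blocking old GC, so
// mutator allocation can continue until marking finishes.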
1839 
1840 bool Heap::CheckAndTriggerHintGC(MemoryReduceDegree degree, GCReason reason)
1841 {
1842     if (InSensitiveStatus()) {
1843         return false;
1844     }
1845     if (g_isEnableCMCGC) {
1846         common::MemoryReduceDegree cmcDegree = common::MemoryReduceDegree::LOW;
1847         if (degree == MemoryReduceDegree::HIGH) {
1848             cmcDegree = common::MemoryReduceDegree::HIGH;
1849         }
1850         return common::BaseRuntime::CheckAndTriggerHintGC(cmcDegree);
1851     }
1852     LOG_GC(INFO) << "HintGC degree: " << static_cast<int>(degree) << " reason: " << GCStats::GCReasonToString(reason);
1853     switch (degree) {
1854         case MemoryReduceDegree::LOW: {
1855             if (idleGCTrigger_->HintGCInLowDegree<Heap>(this)) {
1856                 if (CheckCanTriggerConcurrentMarking()) {
1857                     markType_ = MarkType::MARK_FULL;
1858                     TriggerConcurrentMarking(MarkReason::HINT_GC);
1859                     LOG_GC(INFO) << " MemoryReduceDegree::LOW TriggerConcurrentMark.";
1860                     return true;
1861                 }
1862             }
1863             if (idleGCTrigger_->HintGCInLowDegree<SharedHeap>(sHeap_)) {
1864                 if (sHeap_->CheckCanTriggerConcurrentMarking(thread_)) {
1865                     LOG_GC(INFO) << " MemoryReduceDegree::LOW TriggerSharedConcurrentMark.";
1866                     sHeap_->TriggerConcurrentMarking<TriggerGCType::SHARED_GC, MarkReason::HINT_GC>(thread_);
1867                     return true;
1868                 }
1869             }
1870             break;
1871         }
1872         case MemoryReduceDegree::MIDDLE: {
1873             if (idleGCTrigger_->HintGCInMiddleDegree<Heap>(this)) {
1874                 CollectGarbage(TriggerGCType::FULL_GC, reason);
1875                 return true;
1876             }
1877             if (idleGCTrigger_->HintGCInMiddleDegree<SharedHeap>(sHeap_)) {
1878                 sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::HINT_GC>(thread_);
1879                 return true;
1880             }
1881             break;
1882         }
1883         case MemoryReduceDegree::HIGH: {
1884             bool result = false;
1885             if (idleGCTrigger_->HintGCInHighDegree<Heap>(this)) {
1886                 CollectGarbage(TriggerGCType::FULL_GC, reason);
1887                 result = true;
1888             }
1889             if (idleGCTrigger_->HintGCInHighDegree<SharedHeap>(sHeap_)) {
1890                 sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::HINT_GC>(thread_);
1891                 result = true;
1892             }
1893             return result;
1894         }
1895         default: // LCOV_EXCL_BR_LINE
1896             LOG_GC(INFO) << "HintGC invalid degree value: " << static_cast<int>(degree);
1897             break;
1898     }
1899     return false;
1900 }
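// Usage example (hypothetical call): CheckAndTriggerHintGC(MemoryReduceDegree::MIDDLE,
// GCReason::HINT_GC) runs a local full GC and/or a shared full GC only if the
// corresponding idleGCTrigger_ heuristics agree; LOW degree merely tries to start
// local or shared concurrent marking.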
1901 
1902 bool Heap::CheckOngoingConcurrentMarkingImpl(ThreadType threadType, int threadIndex,
1903                                              [[maybe_unused]] const char* traceName)
1904 {
1905     if (!concurrentMarker_->IsEnabled() || !concurrentMarker_->IsTriggeredConcurrentMark() ||
1906         thread_->IsReadyToConcurrentMark()) {
1907         return false;
1908     }
1909     TRACE_GC(GCStats::Scope::ScopeId::WaitConcurrentMarkFinished, GetEcmaVM()->GetEcmaGCStats());
1910     if (thread_->IsMarking()) {
1911         ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK, traceName, "");
1912         if (threadType == ThreadType::JS_THREAD) {
1913             MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, WaitConcurrentMarkingFinished);
1914             GetNonMovableMarker()->ProcessMarkStack(threadIndex);
1915             WaitConcurrentMarkingFinished();
1916         } else if (threadType == ThreadType::DAEMON_THREAD) {
1917             CHECK_DAEMON_THREAD();
1918             GetNonMovableMarker()->ProcessMarkStack(threadIndex);
1919             WaitConcurrentMarkingFinished();
1920         }
1921     }
1922     WaitRunningTaskFinished();
1923     memController_->RecordAfterConcurrentMark(markType_, concurrentMarker_);
1924     return true;
1925 }
1926 
1927 void Heap::ClearIdleTask()
1928 {
1929     SetIdleTask(IdleTaskType::NO_TASK);
1930     idleTaskFinishTime_ = incrementalMarker_->GetCurrentTimeInMs();
1931 }
1932 
1933 void Heap::TryTriggerIdleCollection()
1934 {
1935     if (idleTask_ != IdleTaskType::NO_TASK || !GetJSThread()->IsReadyToConcurrentMark() || !enableIdleGC_) {
1936         return;
1937     }
1938     if (thread_->IsMarkFinished() && concurrentMarker_->IsTriggeredConcurrentMark()) {
1939         SetIdleTask(IdleTaskType::FINISH_MARKING);
1940         EnableNotifyIdle();
1941         CalculateIdleDuration();
1942         return;
1943     }
1944 
1945     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1946     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1947     double newSpaceAllocToLimitDuration = (static_cast<double>(activeSemiSpace_->GetInitialCapacity()) -
1948                                            static_cast<double>(activeSemiSpace_->GetCommittedSize())) /
1949                                            newSpaceAllocSpeed;
1950     double newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1951     double newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
1952     // 2 means double
1953     if (newSpaceRemainSize < 2 * DEFAULT_REGION_SIZE) {
1954         SetIdleTask(IdleTaskType::YOUNG_GC);
1955         SetMarkType(MarkType::MARK_YOUNG);
1956         EnableNotifyIdle();
1957         CalculateIdleDuration();
1958         return;
1959     }
1960 }
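// Prediction example for the young-GC branch above (illustrative numbers,
// assuming DEFAULT_REGION_SIZE = 256 KB): with an allocation speed of 10 KB/ms
// and 4 MB left before the semi-space limit, the limit is reached in ~400 ms;
// if the predicted mark duration is 380 ms, the space still allocatable after
// marking is (400 - 380) ms * 10 KB/ms = 200 KB, which is below
// 2 * DEFAULT_REGION_SIZE, so an idle young GC is scheduled.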
1961 
1962 void Heap::CalculateIdleDuration()
1963 {
1964     size_t updateReferenceSpeed = 0;
1965     // clear native object duration
1966     size_t clearNativeObjSpeed = 0;
1967     if (markType_ == MarkType::MARK_YOUNG) {
1968         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_UPDATE_REFERENCE_SPEED);
1969         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_CLEAR_NATIVE_OBJ_SPEED);
1970     } else if (markType_ == MarkType::MARK_FULL) {
1971         updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::UPDATE_REFERENCE_SPEED);
1972         clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_CLEAR_NATIVE_OBJ_SPEED);
1973     }
1974 
1975     // update reference duration
1976     idlePredictDuration_ = 0.0f;
1977     if (updateReferenceSpeed != 0) {
1978         idlePredictDuration_ += (float)GetHeapObjectSize() / updateReferenceSpeed;
1979     }
1980 
1981     if (clearNativeObjSpeed != 0) {
1982         idlePredictDuration_ += (float)GetNativePointerListSize() / clearNativeObjSpeed;
1983     }
1984 
1985     // sweep and evacuate duration
1986     size_t youngEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_EVACUATE_SPACE_SPEED);
1987     double survivalRate = GetEcmaGCStats()->GetAvgSurvivalRate();
1988     if (markType_ == MarkType::MARK_YOUNG && youngEvacuateSpeed != 0) {
1989         idlePredictDuration_ += activeSemiSpace_->GetHeapObjectSize() * survivalRate / youngEvacuateSpeed;
1990     } else if (markType_ == MarkType::MARK_FULL) {
1991         size_t sweepSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::SWEEP_SPEED);
1992         size_t oldEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_EVACUATE_SPACE_SPEED);
1993         if (sweepSpeed != 0) {
1994             idlePredictDuration_ += (float)GetHeapObjectSize() / sweepSpeed;
1995         }
1996         if (oldEvacuateSpeed != 0) {
1997             size_t collectRegionSetSize = GetEcmaGCStats()->GetRecordData(
1998                 RecordData::COLLECT_REGION_SET_SIZE);
1999             idlePredictDuration_ += (survivalRate * activeSemiSpace_->GetHeapObjectSize() + collectRegionSetSize) /
2000                                     oldEvacuateSpeed;
2001         }
2002     }
2003 
2004     // Idle YoungGC mark duration
2005     size_t markSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::MARK_SPEED);
2006     if (idleTask_ == IdleTaskType::YOUNG_GC && markSpeed != 0) {
2007         idlePredictDuration_ += (float)activeSemiSpace_->GetHeapObjectSize() / markSpeed;
2008     }
2009     OPTIONAL_LOG(ecmaVm_, INFO) << "Predict idle gc pause: " << idlePredictDuration_ << "ms";
2010 }
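// The predicted pause is a sum of size / recorded-speed terms; e.g. for an idle
// young GC (an illustrative decomposition of the code above):
//   heapObjectSize / updateReferenceSpeed
// + nativePointerListSize / clearNativeObjSpeed
// + semiSpaceObjectSize * survivalRate / youngEvacuateSpeed
// + semiSpaceObjectSize / markSpeed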
2011 
2012 void Heap::TryTriggerIncrementalMarking()
2013 {
2014     if (!GetJSThread()->IsReadyToConcurrentMark() || idleTask_ != IdleTaskType::NO_TASK || !enableIdleGC_) {
2015         return;
2016     }
2017     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
2018     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
2019         hugeMachineCodeSpace_->GetHeapObjectSize();
2020     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
2021     double oldSpaceIncrementalMarkSpeed = incrementalMarker_->GetAverageIncrementalMarkingSpeed();
2022     double oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
2023     double oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceIncrementalMarkSpeed;
2024 
2025     double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
2026     // marking should finish before the allocation limit is reached
2027     if ((oldSpaceRemainSize < DEFAULT_REGION_SIZE) || GetHeapObjectSize() >= globalSpaceAllocLimit_) {
2028         // The bytes allocated during incremental marking should stay below the limit,
2029         // otherwise choose to trigger concurrent mark instead.
2030         size_t allocateSize = oldSpaceAllocSpeed * oldSpaceMarkDuration;
2031         if (allocateSize < ALLOCATE_SIZE_LIMIT) {
2032             EnableNotifyIdle();
2033             SetIdleTask(IdleTaskType::INCREMENTAL_MARK);
2034         }
2035     }
2036 }
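// Same prediction scheme as TryTriggerIdleCollection, applied to the old space:
// incremental marking is chosen only when the bytes expected to be allocated
// while marking (oldSpaceAllocSpeed * oldSpaceMarkDuration) stay below
// ALLOCATE_SIZE_LIMIT; otherwise concurrent marking is preferred.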
2037 
2038 bool Heap::CheckCanTriggerConcurrentMarking()
2039 {
2040     return concurrentMarker_->IsEnabled() && thread_->IsReadyToConcurrentMark() &&
2041         !incrementalMarker_->IsTriggeredIncrementalMark() &&
2042         (idleTask_ == IdleTaskType::NO_TASK || idleTask_ == IdleTaskType::YOUNG_GC);
2043 }
2044 
2045 void Heap::TryTriggerConcurrentMarking(MarkReason markReason)
2046 {
2047     if (g_isEnableCMCGC) {
2048         return;
2049     }
2050     // When concurrent marking is enabled, we attempt to trigger it here.
2051     // When the size of old space or global space reaches the limit, isFullMarkNeeded is set to true.
2052     // If the predicted duration of the current full mark will not let the new and old spaces reach their limits,
2053     // a full mark is triggered.
2054     // In the same way, if the size of the new space reaches capacity, and the predicted duration of the current
2055     // young mark will not let the new space reach its limit, a young mark can be triggered.
2056     // If much time is spent in full mark, a compress full GC is requested when the spaces reach the limit.
2057     // If the global space is larger than half the max heap size, we turn to full mark and trigger partial GC.
2058     if (!CheckCanTriggerConcurrentMarking()) {
2059         return;
2060     }
2061     if (fullMarkRequested_) {
2062         markType_ = MarkType::MARK_FULL;
2063         OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
2064         TriggerConcurrentMarking(markReason);
2065         return;
2066     }
2067     if (InSensitiveStatus() && !ObjectExceedHighSensitiveThresholdForCM()) {
2068         return;
2069     }
2070     if (IsJustFinishStartup() && !ObjectExceedJustFinishStartupThresholdForCM()) {
2071         return;
2072     }
2073 
2074     double oldSpaceMarkDuration = 0;
2075     double newSpaceMarkDuration = 0;
2076     double newSpaceRemainSize = 0;
2077     double newSpaceAllocToLimitDuration = 0;
2078     double oldSpaceAllocToLimitDuration = 0;
2079     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
2080     double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
2081     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
2082         hugeMachineCodeSpace_->GetHeapObjectSize();
2083     size_t globalHeapObjectSize = GetHeapObjectSize();
2084     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
2085     if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
2086         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
2087             GlobalNativeSizeLargerThanLimit()) {
2088             markType_ = MarkType::MARK_FULL;
2089             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
2090             TriggerConcurrentMarking(markReason);
2091             return;
2092         }
2093     } else {
2094         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
2095             GlobalNativeSizeLargerThanLimit()) {
2096             markType_ = MarkType::MARK_FULL;
2097             TriggerConcurrentMarking(markReason);
2098             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
2099             return;
2100         }
2101         oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
2102         oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
2103         // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
2104         double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
2105         if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
2106             markType_ = MarkType::MARK_FULL;
2107             TriggerConcurrentMarking(markReason);
2108             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
2109             return;
2110         }
2111     }
2112 
2113     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
2114     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
2115     if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
2116         if (activeSemiSpace_->GetCommittedSize() >= config_.GetSemiSpaceTriggerConcurrentMark()) {
2117             markType_ = MarkType::MARK_YOUNG;
2118             TriggerConcurrentMarking(markReason);
2119             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark, fullGCRequested: " << fullGCRequested_;
2120         }
2121         return;
2122     }
2123     size_t semiSpaceCapacity = activeSemiSpace_->GetInitialCapacity() + activeSemiSpace_->GetOvershootSize();
2124     size_t semiSpaceCommittedSize = activeSemiSpace_->GetCommittedSize();
2125     bool triggerMark = semiSpaceCapacity <= semiSpaceCommittedSize;
2126     if (!triggerMark) {
2127         newSpaceAllocToLimitDuration = (semiSpaceCapacity - semiSpaceCommittedSize) / newSpaceAllocSpeed;
2128         newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
2129         // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
2130         newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
2131         triggerMark = newSpaceRemainSize < DEFAULT_REGION_SIZE;
2132     }
2133 
2134     if (triggerMark) {
2135         markType_ = MarkType::MARK_YOUNG;
2136         TriggerConcurrentMarking(markReason);
2137         OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
2138         return;
2139     }
2140 }
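// Worked example for the full-mark prediction above (illustrative numbers,
// assuming DEFAULT_REGION_SIZE = 256 KB): with oldSpaceAllocLimit = 64 MB,
// 56 MB live and an allocation speed of 20 KB/ms, the limit is ~400 ms away;
// if concurrent marking is predicted to take 390 ms, the post-mark headroom is
// 10 ms * 20 KB/ms = 200 KB < DEFAULT_REGION_SIZE, so a full concurrent mark is
// triggered now instead of risking a blocking old GC at the allocation site.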
2141 
2142 void Heap::TryTriggerFullMarkOrGCByNativeSize()
2143 {
2144     // In a high sensitive scene with native size larger than the limit, trigger old gc directly
2145     if (InSensitiveStatus() && GlobalNativeSizeLargerToTriggerGC()) {
2146         CollectGarbage(TriggerGCType::OLD_GC, GCReason::NATIVE_LIMIT);
2147     } else if (GlobalNativeSizeLargerThanLimit()) {
2148         if (concurrentMarker_->IsEnabled()) {
2149             SetFullMarkRequestedState(true);
2150             TryTriggerConcurrentMarking(MarkReason::NATIVE_LIMIT);
2151         } else {
2152             CheckAndTriggerOldGC();
2153         }
2154     }
2155 }
2156 
2157 bool Heap::TryTriggerFullMarkBySharedLimit()
2158 {
2159     bool keepFullMarkRequest = false;
2160     if (concurrentMarker_->IsEnabled()) {
2161         if (!CheckCanTriggerConcurrentMarking()) {
2162             return keepFullMarkRequest;
2163         }
2164         markType_ = MarkType::MARK_FULL;
2165         if (ConcurrentMarker::TryIncreaseTaskCounts()) {
2166             GetEcmaGCStats()->SetMarkReason(MarkReason::SHARED_LIMIT);
2167             concurrentMarker_->Mark();
2168         } else {
2169             // need to retry the full mark request later.
2170             keepFullMarkRequest = true;
2171         }
2172     }
2173     return keepFullMarkRequest;
2174 }
2175 
2176 void Heap::CheckAndTriggerTaskFinishedGC()
2177 {
2178     if (g_isEnableCMCGC) {
2179         return;
2180     }
2181     size_t objectSizeOfTaskBegin = GetRecordObjectSize();
2182     size_t objectSizeOfTaskFinished = GetHeapObjectSize();
2183     size_t nativeSizeOfTaskBegin = GetRecordNativeSize();
2184     size_t nativeSizeOfTaskFinished = GetGlobalNativeSize();
2185     // GC is triggered when the heap size increases by more than max(20M, 10% * SizeOfTaskBegin)
2186     bool objectSizeFlag = objectSizeOfTaskFinished > objectSizeOfTaskBegin &&
2187         objectSizeOfTaskFinished - objectSizeOfTaskBegin > std::max(TRIGGER_OLDGC_OBJECT_SIZE_LIMIT,
2188             TRIGGER_OLDGC_OBJECT_LIMIT_RATE * objectSizeOfTaskBegin);
2189     bool nativeSizeFlag = nativeSizeOfTaskFinished > nativeSizeOfTaskBegin &&
2190         nativeSizeOfTaskFinished - nativeSizeOfTaskBegin > std::max(TRIGGER_OLDGC_NATIVE_SIZE_LIMIT,
2191             TRIGGER_OLDGC_NATIVE_LIMIT_RATE * nativeSizeOfTaskBegin);
2192     if (objectSizeFlag || nativeSizeFlag) {
2193         CollectGarbage(TriggerGCType::OLD_GC, GCReason::TRIGGER_BY_TASKPOOL);
2194         RecordOrResetObjectSize(0);
2195         RecordOrResetNativeSize(0);
2196     }
2197 }
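// Worked example (illustrative numbers, assuming TRIGGER_OLDGC_OBJECT_SIZE_LIMIT
// = 20 MB and TRIGGER_OLDGC_OBJECT_LIMIT_RATE = 0.1): a task that began with
// 100 MB of heap objects triggers an old GC only once the heap exceeds
// 100 MB + max(20 MB, 10 MB) = 120 MB; the native-size check is analogous.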
2198 
2199 bool Heap::IsMarking() const
2200 {
2201     return thread_->IsMarking();
2202 }
2203 
2204 void Heap::TryTriggerFullMarkBySharedSize(size_t size)
2205 {
2206     newAllocatedSharedObjectSize_ += size;
2207     if (newAllocatedSharedObjectSize_ >= NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT) {
2208         if (thread_->IsMarkFinished() && GetConcurrentMarker()->IsTriggeredConcurrentMark() &&
2209             !GetOnSerializeEvent() && InSensitiveStatus()) {
2210             GetConcurrentMarker()->HandleMarkingFinished(GCReason::SHARED_LIMIT);
2211             newAllocatedSharedObjectSize_ = 0;
2212         } else if (concurrentMarker_->IsEnabled()) {
2213             SetFullMarkRequestedState(true);
2214             TryTriggerConcurrentMarking(MarkReason::SHARED_LIMIT);
2215             newAllocatedSharedObjectSize_ = 0;
2216         }
2217     }
2218 }
2219 
2220 bool Heap::IsReadyToConcurrentMark() const
2221 {
2222     return thread_->IsReadyToConcurrentMark();
2223 }
2224 
2225 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
2226 {
2227     size_t size = object->GetBindingSize();
2228     IncreaseNativeBindingSize(size);
2229 }
2230 
2231 void Heap::IncreaseNativeBindingSize(size_t size)
2232 {
2233     if (size == 0) {
2234         return;
2235     }
2236     nativeBindingSize_ += size;
2237 }
2238 
2239 void Heap::DecreaseNativeBindingSize(size_t size)
2240 {
2241     ASSERT(size <= nativeBindingSize_);
2242     nativeBindingSize_ -= size;
2243 }
2244 
2245 void Heap::PrepareRecordRegionsForReclaim()
2246 {
2247     activeSemiSpace_->SetRecordRegion();
2248     oldSpace_->SetRecordRegion();
2249     snapshotSpace_->SetRecordRegion();
2250     nonMovableSpace_->SetRecordRegion();
2251     hugeObjectSpace_->SetRecordRegion();
2252     machineCodeSpace_->SetRecordRegion();
2253     hugeMachineCodeSpace_->SetRecordRegion();
2254 }
2255 
2256 void Heap::TriggerConcurrentMarking(MarkReason markReason)
2257 {
2258     ASSERT(idleTask_ != IdleTaskType::INCREMENTAL_MARK);
2259     if (idleTask_ == IdleTaskType::YOUNG_GC && IsConcurrentFullMark()) {
2260         ClearIdleTask();
2261         DisableNotifyIdle();
2262     }
2263     if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
2264         GetEcmaGCStats()->SetMarkReason(markReason);
2265         concurrentMarker_->Mark();
2266     }
2267 }
2268 
2269 void Heap::WaitAllTasksFinished()
2270 {
2271     WaitRunningTaskFinished();
2272     sweeper_->EnsureAllTaskFinished();
2273     WaitClearTaskFinished();
2274     if (concurrentMarker_->IsEnabled() && thread_->IsMarking() && concurrentMarker_->IsTriggeredConcurrentMark()) {
2275         concurrentMarker_->WaitMarkingFinished();
2276     }
2277 }
2278 
2279 void Heap::WaitConcurrentMarkingFinished()
2280 {
2281     concurrentMarker_->WaitMarkingFinished();
2282 }
2283 
2284 void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
2285 {
2286     IncreaseTaskCount();
2287     common::Taskpool::GetCurrentTaskpool()->PostTask(
2288         std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
2289 }
2290 
2291 void Heap::ChangeGCParams(bool inBackground)
2292 {
2293     const double doubleOne = 1.0;
2294     inBackground_ = inBackground;
2295     if (g_isEnableCMCGC) {
2296         common::BaseRuntime::ChangeGCParams(inBackground);
2297         return;
2298     }
2299     if (inBackground) {
2300         LOG_GC(INFO) << "app is inBackground";
2301         if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT &&
2302             GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2303             doubleOne * GetHeapObjectSize() / GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2304             CollectGarbage(TriggerGCType::FULL_GC, GCReason::SWITCH_BACKGROUND);
2305         }
2306         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2307             SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2308             LOG_GC(DEBUG) << "Heap Growing Type CONSERVATIVE";
2309         }
2310         common::Taskpool::GetCurrentTaskpool()->SetThreadPriority(common::PriorityMode::BACKGROUND);
2311     } else {
2312         LOG_GC(INFO) << "app is not inBackground";
2313         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2314             SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
2315             LOG_GC(DEBUG) << "Heap Growing Type HIGH_THROUGHPUT";
2316         }
2317         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
2318         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
2319         maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
2320             common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
2321         maxEvacuateTaskCount_ = common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
2322         common::Taskpool::GetCurrentTaskpool()->SetThreadPriority(common::PriorityMode::FOREGROUND);
2323     }
2324 }
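// Background example (illustrative numbers, assuming MIN_OBJECT_SURVIVAL_RATE
// = 0.75): an app moving to the background that has grown past
// BACKGROUND_GROW_LIMIT since the last GC, has at least MIN_BACKGROUNG_GC_LIMIT
// committed, and keeps only 50% of its committed memory live (0.5 <= 0.75)
// receives a compacting full GC before the task pool drops to background priority.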
2325 
2326 GCStats *Heap::GetEcmaGCStats()
2327 {
2328     return ecmaVm_->GetEcmaGCStats();
2329 }
2330 
2331 GCKeyStats *Heap::GetEcmaGCKeyStats()
2332 {
2333     return ecmaVm_->GetEcmaGCKeyStats();
2334 }
2335 
2336 JSObjectResizingStrategy *Heap::GetJSObjectResizingStrategy()
2337 {
2338     return ecmaVm_->GetJSObjectResizingStrategy();
2339 }
2340 
2341 void Heap::TriggerIdleCollection(int idleMicroSec)
2342 {
2343     if (idleTask_ == IdleTaskType::NO_TASK) {
2344         if (incrementalMarker_->GetCurrentTimeInMs() - idleTaskFinishTime_ > IDLE_MAINTAIN_TIME) {
2345             DisableNotifyIdle();
2346         }
2347         return;
2348     }
2349 
2350     // Incremental mark initialization and processing
2351     if (idleTask_ == IdleTaskType::INCREMENTAL_MARK &&
2352         incrementalMarker_->GetIncrementalGCStates() != IncrementalGCStates::REMARK) {
2353         incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2354         if (incrementalMarker_->GetIncrementalGCStates() == IncrementalGCStates::REMARK) {
2355             CalculateIdleDuration();
2356         }
2357         return;
2358     }
2359 
2360     if (idleMicroSec < idlePredictDuration_ && idleMicroSec < IDLE_TIME_LIMIT) {
2361         return;
2362     }
2363 
2364     switch (idleTask_) {
2365         case IdleTaskType::FINISH_MARKING: {
2366             if (markType_ == MarkType::MARK_FULL) {
2367                 CollectGarbage(TriggerGCType::OLD_GC, GCReason::IDLE);
2368             } else {
2369                 CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2370             }
2371             break;
2372         }
2373         case IdleTaskType::YOUNG_GC:
2374             CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2375             break;
2376         case IdleTaskType::INCREMENTAL_MARK:
2377             incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2378             break;
2379         default: // LCOV_EXCL_BR_LINE
2380             break;
2381     }
2382     ClearIdleTask();
2383 }
2384 
2385 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
2386 {
2387     if (inHighMemoryPressure) {
2388         LOG_GC(INFO) << "app is inHighMemoryPressure";
2389         SetMemGrowingType(MemGrowingType::PRESSURE);
2390     } else {
2391         LOG_GC(INFO) << "app is not inHighMemoryPressure";
2392         SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2393     }
2394 }
2395 
2396 void Heap::NotifyFinishColdStart(bool isMainThread)
2397 {
2398     if (!FinishStartupEvent()) {
2399         LOG_GC(WARN) << "SmartGC: app cold start last status is not ON_STARTUP, just return";
2400         return;
2401     }
2402     ASSERT(!OnStartupEvent());
2403     LOG_GC(INFO) << "SmartGC: app cold start just finished";
2404 
2405     if (isMainThread && ObjectExceedJustFinishStartupThresholdForCM()) {
2406         TryTriggerConcurrentMarking(MarkReason::OTHER);
2407     }
2408 
2409     auto startIdleMonitor = JSNApi::GetStartIdleMonitorCallback();
2410     if (startIdleMonitor != nullptr) {
2411         startIdleMonitor();
2412     }
2413 
2414     if (startupDurationInMs_ == 0) {
2415         startupDurationInMs_ = DEFAULT_STARTUP_DURATION_MS;
2416     }
2417 
2418     // restrain GC from 2s to 8s
2419     uint64_t delayTimeInMs = FINISH_STARTUP_TIMEPOINT_MS - startupDurationInMs_;
2420     common::Taskpool::GetCurrentTaskpool()->PostDelayedTask(
2421         std::make_unique<FinishGCRestrainTask>(GetJSThread()->GetThreadId(), this),
2422         delayTimeInMs);
2423     ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK,
2424         "SmartGC: app startup just finished, FinishGCRestrainTask create", "");
2425 }
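
// Worked example of the delay arithmetic above (values taken from the "2s to 8s"
// comment; treat them as the assumed defaults): with startupDurationInMs_ = 2000
// and FINISH_STARTUP_TIMEPOINT_MS = 8000, the FinishGCRestrainTask fires
// 8000 - 2000 = 6000 ms after cold start finishes, so GC stays restrained for the
// whole 2s-8s window after launch.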

void Heap::NotifyFinishColdStartSoon()
{
    if (!OnStartupEvent()) {
        return;
    }

    // post a task delayed by the (configurable) startup duration, 2s by default
    startupDurationInMs_ = DEFAULT_STARTUP_DURATION_MS;
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    startupDurationInMs_ = OHOS::system::GetUintParameter<uint64_t>("persist.ark.startupDuration",
                                                                    DEFAULT_STARTUP_DURATION_MS);
    startupDurationInMs_ = std::max(startupDurationInMs_, static_cast<uint64_t>(MIN_CONFIGURABLE_STARTUP_DURATION_MS));
    startupDurationInMs_ = std::min(startupDurationInMs_, static_cast<uint64_t>(MAX_CONFIGURABLE_STARTUP_DURATION_MS));
#endif
    common::Taskpool::GetCurrentTaskpool()->PostDelayedTask(
        std::make_unique<FinishColdStartTask>(GetJSThread()->GetThreadId(), this),
        startupDurationInMs_);
}
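
// The max/min pair above is the standard clamp idiom; with C++17 <algorithm> it
// could equivalently be written as (sketch, not a drop-in edit to this file):
//
//   startupDurationInMs_ = std::clamp<uint64_t>(startupDurationInMs_,
//       MIN_CONFIGURABLE_STARTUP_DURATION_MS, MAX_CONFIGURABLE_STARTUP_DURATION_MS);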

void Heap::NotifyWarmStartup()
{
    ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK, "SmartGC: warm startup GC restrain start", "");
    // warm startup uses the same GC restrain policy as cold startup
    LOG_GC(INFO) << "SmartGC: warm startup use the same GC restrain policy as cold startup";
    NotifyPostFork();
    NotifyFinishColdStartSoon();
}

void Heap::NotifyHighSensitive(bool isStart)
{
    ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK,
        ("SmartGC: set high sensitive status: " + std::to_string(isStart)).c_str(), "");
    isStart ? SetSensitiveStatus(AppSensitiveStatus::ENTER_HIGH_SENSITIVE)
        : SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE);
    LOG_GC(DEBUG) << "SmartGC: set high sensitive status: " << isStart;

    if (g_isEnableCMCGC) {
        common::BaseRuntime::NotifyHighSensitive(isStart);
    }
}

bool Heap::HandleExitHighSensitiveEvent()
{
    AppSensitiveStatus status = GetSensitiveStatus();
    if (status == AppSensitiveStatus::EXIT_HIGH_SENSITIVE
        && CASSensitiveStatus(status, AppSensitiveStatus::NORMAL_SCENE) && !OnStartupEvent()) {
        // Reset the recorded heap object size to 0 after exiting the high sensitive scene
        SetRecordHeapObjectSizeBeforeSensitive(0);
        // set overshoot size to raise the GC threshold 8MB above the current heap size
        TryIncreaseNewSpaceOvershootByConfigSize();

        // fixme: IncrementalMarking and IdleCollection are currently not enabled
        TryTriggerIncrementalMarking();
        TryTriggerIdleCollection();
        TryTriggerConcurrentMarking(MarkReason::EXIT_HIGH_SENSITIVE);
        return true;
    }
    return false;
}

// In a high sensitive scene, the heap object size may temporarily reach MaxHeapSize - 8MB;
// the remaining 8MB is reserved for concurrent mark
bool Heap::ObjectExceedMaxHeapSize() const
{
    size_t configMaxHeapSize = config_.GetMaxHeapSize();
    size_t overshootSize = config_.GetOldSpaceStepOvershootSize();
    return GetHeapObjectSize() > configMaxHeapSize - overshootSize;
}

bool Heap::ObjectExceedHighSensitiveThresholdForCM() const
{
    size_t recordSizeBeforeSensitive = GetRecordHeapObjectSizeBeforeSensitive();
    return GetHeapObjectSize() > (recordSizeBeforeSensitive + config_.GetIncObjSizeThresholdInSensitive())
                                 * MIN_SENSITIVE_OBJECT_SURVIVAL_RATE;
}

bool Heap::ObjectExceedJustFinishStartupThresholdForGC() const
{
    size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO;
    return GetHeapObjectSize() > heapObjectSizeThresholdForGC;
}

bool Heap::ObjectExceedJustFinishStartupThresholdForCM() const
{
    size_t heapObjectSizeThresholdForGC = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO;
    size_t heapObjectSizeThresholdForCM = heapObjectSizeThresholdForGC
                                        * JUST_FINISH_STARTUP_LOCAL_CONCURRENT_MARK_RATIO;
    return GetHeapObjectSize() > heapObjectSizeThresholdForCM;
}
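
// Worked example of the two startup thresholds (heap sizes hypothetical; the
// "quarter of max heap size" figure comes from the comment in
// CheckIfNeedStopCollectionByStartup below): with MaxHeapSize = 512MB and
// JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO = 0.25, the GC threshold is 128MB;
// the concurrent-mark threshold is that value scaled again by
// JUST_FINISH_STARTUP_LOCAL_CONCURRENT_MARK_RATIO, so marking can begin before
// the GC threshold itself is hit.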

void Heap::TryIncreaseNewSpaceOvershootByConfigSize()
{
    if (InGC() || !IsReadyToConcurrentMark()) {
        // overShootSize will be adjusted when the heap is resumed during GC, and there is
        // no need to reserve space for newSpace if ConcurrentMark is already triggered
        return;
    }
    // Lock needed: the main thread handling the exit-sensitive status and a child thread
    // handling finish-startup may run here at the same time
    LockHolder lock(setNewSpaceOvershootSizeMutex_);
    // set overshoot size to raise the GC threshold 8MB above the current heap size
    int64_t initialCapacity = static_cast<int64_t>(GetNewSpace()->GetInitialCapacity());
    int64_t committedSize = static_cast<int64_t>(GetNewSpace()->GetCommittedSize());
    int64_t semiRemainSize = initialCapacity - committedSize;
    int64_t overshootSize =
        static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - semiRemainSize;
    // overshoot size must not be negative
    GetNewSpace()->SetOverShootSize(std::max(overshootSize, static_cast<int64_t>(0)));
}
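
// Worked example of the overshoot computation (capacities hypothetical, the 8MB
// step comes from the comment above): with initialCapacity = 16MB and
// committedSize = 12MB, semiRemainSize = 4MB, so overshootSize = 8MB - 4MB = 4MB;
// if the new space already has more than 8MB of headroom the result is negative
// and is clamped to 0.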

void Heap::TryIncreaseOvershootByConfigSize()
{
    TryIncreaseNewSpaceOvershootByConfigSize();
    sHeap_->TryAdjustSpaceOvershootByConfigSize();
}

bool Heap::CheckIfNeedStopCollectionByStartup()
{
    StartupStatus startupStatus = GetStartupStatus();
    switch (startupStatus) {
        case StartupStatus::ON_STARTUP:
            // During app cold start, the GC threshold is raised to the max heap size
            if (!ObjectExceedMaxHeapSize()) {
                return true;
            }
            break;
        case StartupStatus::JUST_FINISH_STARTUP:
            // Just after app cold start finishes, the GC threshold is a quarter of the max heap size
            if (!ObjectExceedJustFinishStartupThresholdForGC()) {
                return true;
            }
            break;
        default:
            break;
    }
    return false;
}

bool Heap::CheckIfNeedStopCollectionByHighSensitive()
{
    AppSensitiveStatus sensitiveStatus = GetSensitiveStatus();
    if (sensitiveStatus != AppSensitiveStatus::ENTER_HIGH_SENSITIVE) {
        return false;
    }

    size_t objSize = GetHeapObjectSize();
    size_t recordSizeBeforeSensitive = GetRecordHeapObjectSizeBeforeSensitive();
    if (recordSizeBeforeSensitive == 0) {
        recordSizeBeforeSensitive = objSize;
        SetRecordHeapObjectSizeBeforeSensitive(recordSizeBeforeSensitive);
    }

    if (objSize < recordSizeBeforeSensitive + config_.GetIncObjSizeThresholdInSensitive()
        && !ObjectExceedMaxHeapSize()) {
        if (!IsNearGCInSensitive() &&
            objSize > (recordSizeBeforeSensitive + config_.GetIncObjSizeThresholdInSensitive())
            * MIN_SENSITIVE_OBJECT_SURVIVAL_RATE) {
            SetNearGCInSensitive(true);
        }
        return true;
    }

    OPTIONAL_LOG(ecmaVm_, INFO) << "SmartGC: heap obj size: " << GetHeapObjectSize()
        << ", exceed sensitive gc threshold";
    return false;
}
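
// Worked example of the sensitive window (all sizes hypothetical): suppose the
// heap held 100MB when the high sensitive scene was entered and
// GetIncObjSizeThresholdInSensitive() is 40MB. GC stays suppressed while the heap
// is under 140MB (and under the max heap size); once it passes
// 140MB * MIN_SENSITIVE_OBJECT_SURVIVAL_RATE, the near-GC flag is set so the
// upcoming collection can be anticipated.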

bool Heap::NeedStopCollection()
{
    // GC is not allowed during value serialization
    if (onSerializeEvent_) {
        return true;
    }

    // Check high sensitive before startup, because the current heap object size still
    // needs to be recorded when a high sensitive scene begins during the startup window
    if (CheckIfNeedStopCollectionByHighSensitive()) {
        return true;
    }

    if (CheckIfNeedStopCollectionByStartup()) {
        return true;
    }

    return false;
}

bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
{
    // Synchronizes-with WorkManager::Initialize: spin until its effects are visible
    // to the marker threads.
    ASSERT(heap_->GetWorkManager()->HasInitialized());
    while (!heap_->GetWorkManager()->HasInitialized());
    switch (taskPhase_) {
        case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetConcurrentMarker()->ProcessConcurrentMarkTask(threadIndex);
            break;
        case ParallelGCTaskPhase::UNIFIED_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetUnifiedGCMarker()->ProcessMarkStack(threadIndex);
            break;
        default: // LCOV_EXCL_BR_LINE
            LOG_GC(FATAL) << "this branch is unreachable, type: " << static_cast<int>(taskPhase_);
            UNREACHABLE();
    }
    heap_->ReduceTaskCount();
    return true;
}

bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK, "AsyncClearTask::Run", "");
    heap_->ReclaimRegions(gcType_);
    return true;
}

bool Heap::FinishColdStartTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    heap_->NotifyFinishColdStart(false);
    return true;
}

bool Heap::FinishGCRestrainTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    heap_->CancelJustFinishStartupEvent();
    LOG_GC(INFO) << "SmartGC: app cold start finished";
    return true;
}

void Heap::CleanCallback()
{
    auto &concurrentCallbacks = this->GetEcmaVM()->GetConcurrentNativePointerCallbacks();
    if (!concurrentCallbacks.empty()) {
        common::Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<DeleteCallbackTask>(thread_->GetThreadId(), concurrentCallbacks)
        );
    }
    ASSERT(concurrentCallbacks.empty());

    AsyncNativeCallbacksPack &asyncCallbacksPack = this->GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
    if (asyncCallbacksPack.Empty()) {
        ASSERT(asyncCallbacksPack.TotallyEmpty());
        return;
    }
    AsyncNativeCallbacksPack *asyncCallbacks = new AsyncNativeCallbacksPack();
    std::swap(*asyncCallbacks, asyncCallbacksPack);
    NativePointerTaskCallback asyncTaskCb = thread_->GetAsyncCleanTaskCallback();
    size_t currentSize = 0;
    if (g_isEnableCMCGC) {
        currentSize = asyncCallbacks->GetTotalBindingSize();
    }
    if (asyncTaskCb != nullptr && thread_->IsMainThreadFast() &&
        (pendingAsyncNativeCallbackSize_ + currentSize) < asyncClearNativePointerThreshold_) {
        IncreasePendingAsyncNativeCallbackSize(asyncCallbacks->GetTotalBindingSize());
        asyncCallbacks->RegisterFinishNotify([this] (size_t bindingSize) {
            this->DecreasePendingAsyncNativeCallbackSize(bindingSize);
        });
        asyncTaskCb(asyncCallbacks);
    } else {
        ThreadNativeScope nativeScope(thread_);
        asyncCallbacks->ProcessAll("ArkCompiler");
        delete asyncCallbacks;
    }
    ASSERT(asyncCallbacksPack.TotallyEmpty());
}
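
// Dispatch summary for CleanCallback above (descriptive comment, not new
// behavior): concurrent callbacks always go to the task pool; async callbacks
// take the registered main-thread callback only while the pending binding size
// stays below asyncClearNativePointerThreshold_, otherwise they are drained
// synchronously under a ThreadNativeScope, so back-pressure bounds the amount of
// deferred native cleanup.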

bool Heap::DeleteCallbackTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    for (auto iter : nativePointerCallbacks_) {
        if (iter.first != nullptr) {
            iter.first(std::get<0>(iter.second),
                std::get<1>(iter.second), std::get<2>(iter.second)); // indices 0-2 unpack the stored arguments
        }
    }
    return true;
}

size_t Heap::GetArrayBufferSize() const
{
    size_t result = 0;
    sweeper_->EnsureAllTaskFinished();
    this->IterateOverObjects([&result](TaggedObject *obj) {
        JSHClass* jsClass = obj->GetClass();
        result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
    });
    return result;
}

size_t Heap::GetLiveObjectSize() const
{
    size_t objectSize = 0;
    sweeper_->EnsureAllTaskFinished();
    this->IterateOverObjects([&objectSize](TaggedObject *obj) {
        objectSize += obj->GetSize();
    });
    return objectSize;
}

size_t Heap::GetHeapLimitSize() const
{
    // The theoretical upper limit of space that can be allocated to the JS heap.
    return config_.GetMaxHeapSize();
}

bool BaseHeap::IsAlive(TaggedObject *object) const
{
    if (!ContainObject(object)) {
        LOG_GC(ERROR) << "The region is already free";
        return false;
    }

    bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
    if (isFree) {
        Region *region = Region::ObjectAddressToRange(object);
        LOG_GC(ERROR) << "The object " << object << " in "
                            << region->GetSpaceTypeName()
                            << " is already free";
    }
    return !isFree;
}

bool BaseHeap::ContainObject(TaggedObject *object) const
{
    /*
     * fixme: There is no absolutely safe approach to doing this, given that the region object is
     * currently allocated and maintained in the JS object heap. We cannot cheaply and safely tell
     * whether a region object calculated from an object address is still valid or alive.
     * This makes the containment check inaccurate, and it may introduce additional incorrect
     * memory accesses.
     * Unless we can tolerate the performance impact of iterating the region list of each space and
     * switch to that approach, do not rely on the current implementation for an accurate result.
     */
    Region *region = Region::ObjectAddressToRange(object);
    return region->InHeapSpace();
}

void SharedHeap::UpdateHeapStatsAfterGC(TriggerGCType gcType)
{
    heapAliveSizeAfterGC_ = GetHeapObjectSize();
    fragmentSizeAfterGC_ = GetCommittedSize() - GetHeapObjectSize();
    if (gcType == TriggerGCType::SHARED_FULL_GC) {
        heapBasicLoss_ = fragmentSizeAfterGC_;
    }
}

void Heap::UpdateHeapStatsAfterGC(TriggerGCType gcType)
{
    if (gcType == TriggerGCType::YOUNG_GC) {
        return;
    }
    heapAliveSizeAfterGC_ = GetHeapObjectSize();
    heapAliveSizeExcludesYoungAfterGC_ = heapAliveSizeAfterGC_ - activeSemiSpace_->GetHeapObjectSize();
    fragmentSizeAfterGC_ = GetCommittedSize() - heapAliveSizeAfterGC_;
    if (gcType == TriggerGCType::FULL_GC) {
        heapBasicLoss_ = fragmentSizeAfterGC_;
    }
}

void Heap::PrintHeapInfo(TriggerGCType gcType) const
{
    OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
    OPTIONAL_LOG(ecmaVm_, INFO) << "GC Reason:" << ecmaVm_->GetEcmaGCStats()->GCReasonToString()
                                << ";OnStartup:" << static_cast<int>(GetStartupStatus())
                                << ";OnHighSensitive:" << static_cast<int>(GetSensitiveStatus())
                                << ";ConcurrentMark Status:" << static_cast<int>(thread_->GetMarkStatus());
    OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
                                << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsConcurrentFullMark();
    OPTIONAL_LOG(ecmaVm_, INFO) << "), ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize() << "/"
                 << activeSemiSpace_->GetInitialCapacity() << "), NonMovable(" << nonMovableSpace_->GetHeapObjectSize()
                 << "/" << nonMovableSpace_->GetCommittedSize() << "/" << nonMovableSpace_->GetInitialCapacity()
                 << "), Old(" << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize() << "/"
                 << oldSpace_->GetInitialCapacity() << "), HugeObject(" << hugeObjectSpace_->GetHeapObjectSize() << "/"
                 << hugeObjectSpace_->GetCommittedSize() << "/" << hugeObjectSpace_->GetInitialCapacity()
                 << "), ReadOnlySpace(" << readOnlySpace_->GetCommittedSize() << "/"
                 << readOnlySpace_->GetInitialCapacity() << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize()
                 << "/" << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
                 << "), NativeBindingSize(" << nativeBindingSize_
                 << "), NativeLimitSize(" << globalSpaceNativeLimit_
                 << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
}

void Heap::StatisticHeapObject(TriggerGCType gcType) const
{
    PrintHeapInfo(gcType);
#if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
    StatisticHeapDetail();
#endif
}

void Heap::StatisticHeapDetail()
{
    Prepare();
    static const int JS_TYPE_SUM = static_cast<int>(JSType::TYPE_LAST) + 1;
    static const int MIN_COUNT_THRESHOLD = 1000;
    int typeCount[JS_TYPE_SUM] = { 0 };

    // Count live objects per JSType in one space, log the types above the threshold,
    // then reset the counters for the next space.
    auto statisticSpace = [&typeCount](auto *space, const char *spaceName) {
        space->IterateOverObjects([&typeCount](TaggedObject *object) {
            typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
        });
        for (int i = 0; i < JS_TYPE_SUM; i++) {
            if (typeCount[i] > MIN_COUNT_THRESHOLD) {
                LOG_ECMA(INFO) << spaceName << " type " << JSHClass::DumpJSType(JSType(i))
                               << " count:" << typeCount[i];
            }
            typeCount[i] = 0;
        }
    };
    statisticSpace(nonMovableSpace_, "NonMovable space");
    statisticSpace(oldSpace_, "Old space");
    statisticSpace(activeSemiSpace_, "Active semi space");
}

void Heap::UpdateWorkManager(WorkManager *workManager)
{
    concurrentMarker_->workManager_ = workManager;
    fullGC_->workManager_ = workManager;
    incrementalMarker_->workManager_ = workManager;
    nonMovableMarker_->workManager_ = workManager;
    compressGCMarker_->workManager_ = workManager;
    if (Runtime::GetInstance()->IsHybridVm()) {
        unifiedGCMarker_->workManager_ = workManager;
    }
    partialGC_->workManager_ = workManager;
}

MachineCode *Heap::GetMachineCodeObject(uintptr_t pc) const
{
    MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
    MachineCode *machineCode = reinterpret_cast<MachineCode*>(machineCodeSpace->GetMachineCodeObject(pc));
    if (machineCode != nullptr) {
        return machineCode;
    }
    HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
    return reinterpret_cast<MachineCode*>(hugeMachineCodeSpace->GetMachineCodeObject(pc));
}

void Heap::SetMachineCodeObject(uintptr_t start, uintptr_t end, uintptr_t address) const
{
    machineCodeSpace_->StoreMachineCodeObjectLocation(start, end, address);
}

std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> Heap::CalCallSiteInfo(uintptr_t retAddr) const
{
    MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
    MachineCode *code = nullptr;
    // Find the machine code object whose text section contains retAddr, first in the
    // regular machine code space, then in the huge machine code space.
    machineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
        if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
            return;
        }
        if (MachineCode::Cast(obj)->IsInText(retAddr)) {
            code = MachineCode::Cast(obj);
            return;
        }
    });
    if (code == nullptr) {
        HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
        hugeMachineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
            if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
                return;
            }
            if (MachineCode::Cast(obj)->IsInText(retAddr)) {
                code = MachineCode::Cast(obj);
                return;
            }
        });
    }

    if (code == nullptr ||
        (code->GetPayLoadSizeInBytes() ==
         code->GetInstructionsSize() + code->GetStackMapOrOffsetTableSize())) { // baseline code
        return {};
    }
    return code->CalCallSiteInfo();
}

GCListenerId Heap::AddGCListener(FinishGCListener listener, void *data)
{
    gcListeners_.emplace_back(listener, data);
    return std::prev(gcListeners_.cend());
}
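
// Minimal usage sketch (names hypothetical, for illustration only): the returned
// GCListenerId is an iterator into gcListeners_, so assuming the list-style
// container behind it, the id stays valid until that entry is erased and can be
// handed back to RemoveGCListener below.
//
//   static void OnGCFinish(void *data) { /* inspect heap stats, schedule work */ }
//   GCListenerId id = heap->AddGCListener(OnGCFinish, userData);
//   ...
//   heap->RemoveGCListener(id);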

void Heap::ProcessGCListeners()
{
    for (auto &&[listener, data] : gcListeners_) {
        listener(data);
    }
}

void SharedHeap::ProcessAllGCListeners()
{
    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
        ASSERT(!thread->IsInRunningState());
        const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
    });
}

#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
uint64_t Heap::GetCurrentTickMillseconds()
{
    return std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
}

void Heap::SetJsDumpThresholds(size_t thresholds) const
{
    if (thresholds < MIN_JSDUMP_THRESHOLDS || thresholds > MAX_JSDUMP_THRESHOLDS) {
        LOG_GC(INFO) << "SetJsDumpThresholds thresholds is invalid: " << thresholds;
        return;
    }
    g_threshold = thresholds;
}

void Heap::ThresholdReachedDump()
{
    size_t limitSize = GetHeapLimitSize();
    if (!limitSize) {
        LOG_GC(INFO) << "ThresholdReachedDump limitSize is invalid";
        return;
    }
    size_t nowPercent = GetHeapObjectSize() * DEC_TO_INT / limitSize;
    if (g_debugLeak || (nowPercent >= g_threshold && (g_lastHeapDumpTime == 0 ||
        GetCurrentTickMillseconds() - g_lastHeapDumpTime > HEAP_DUMP_REPORT_INTERVAL))) {
        size_t liveObjectSize = GetLiveObjectSize();
        size_t nowPercentRecheck = liveObjectSize * DEC_TO_INT / limitSize;
        LOG_GC(INFO) << "ThresholdReachedDump nowPercentRecheck is " << nowPercentRecheck;
        if (nowPercentRecheck < g_threshold) {
            return;
        }
        g_lastHeapDumpTime = GetCurrentTickMillseconds();
        base::BlockHookScope blockScope;
        HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
        AppFreezeFilterCallback appfreezeCallback = Runtime::GetInstance()->GetAppFreezeFilterCallback();
        std::string eventConfig;
        bool shouldDump = (appfreezeCallback == nullptr || appfreezeCallback(getprocpid(), true, eventConfig));
        GetEcmaGCKeyStats()->SendSysEventBeforeDump("thresholdReachedDump",
                                                    GetHeapLimitSize(), GetLiveObjectSize(), eventConfig);
        if (shouldDump) {
            LOG_ECMA(INFO) << "ThresholdReachedDump and avoid freeze success.";
        } else {
            LOG_ECMA(WARN) << "ThresholdReachedDump but avoid freeze failed.";
            return;
        }
        DumpSnapShotOption dumpOption;
        dumpOption.dumpFormat = DumpFormat::BINARY;
        dumpOption.isVmMode = true;
        dumpOption.isPrivate = false;
        dumpOption.captureNumericValue = false;
        dumpOption.isFullGC = false;
        dumpOption.isSimplify = true;
        dumpOption.isSync = false;
        dumpOption.isBeforeFill = false;
        dumpOption.isDumpOOM = true; // take the OOM path so the dump is written in binary format
        heapProfile->DumpHeapSnapshotForOOM(dumpOption);
        hasOOMDump_ = false;
        HeapProfilerInterface::Destroy(ecmaVm_);
    }
}
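
// Worked example of the trigger above (heap sizes hypothetical; the default
// threshold of 85 comes from persist.dfx.leak.threshold): with limitSize = 512MB
// and a 448MB live heap, nowPercent = 448 * 100 / 512 = 87 >= 85, so a dump is
// attempted, rate-limited to one per HEAP_DUMP_REPORT_INTERVAL (24h) unless
// g_debugLeak forces it.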
#endif

void Heap::RemoveGCListener(GCListenerId listenerId)
{
    gcListeners_.erase(listenerId);
}

void BaseHeap::IncreaseTaskCount()
{
    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_++;
}

void BaseHeap::WaitRunningTaskFinished()
{
    LockHolder holder(waitTaskFinishedMutex_);
    while (runningTaskCount_ > 0) {
        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
    }
}

bool BaseHeap::CheckCanDistributeTask()
{
    LockHolder holder(waitTaskFinishedMutex_);
    return runningTaskCount_ < maxMarkTaskCount_;
}

void BaseHeap::ReduceTaskCount()
{
    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_--;
    if (runningTaskCount_ == 0) {
        waitTaskFinishedCV_.SignalAll();
    }
}
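
// The functions above form a mutex/condition-variable task-count protocol. A
// plausible caller-side pairing (sketch only; ParallelGCTask::Run above shows the
// ReduceTaskCount side):
//
//   heap->IncreaseTaskCount();        // before posting each marker task
//   ...                               // each task ends with ReduceTaskCount(),
//                                     // which signals once the count hits 0
//   heap->WaitRunningTaskFinished();  // the GC thread blocks until all tasks drain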

void BaseHeap::WaitClearTaskFinished()
{
    LockHolder holder(waitClearTaskFinishedMutex_);
    while (!clearTaskFinished_) {
        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
    }
}
}  // namespace panda::ecmascript