1 /*
2  * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef ECMASCRIPT_MEM_HEAP_H
17 #define ECMASCRIPT_MEM_HEAP_H
18 
19 #include "common_components/taskpool/task.h"
20 #include "ecmascript/base/config.h"
21 #include "ecmascript/cross_vm/heap_hybrid.h"
22 #include "ecmascript/daemon/daemon_thread.h"
23 #include "ecmascript/frames.h"
24 #include "ecmascript/js_object_resizing_strategy.h"
25 #include "ecmascript/mem/linear_space.h"
26 #include "ecmascript/mem/machine_code.h"
27 #include "ecmascript/mem/mark_stack.h"
28 #include "ecmascript/mem/shared_heap/shared_space.h"
29 #include "ecmascript/mem/sparse_space.h"
30 #include "ecmascript/mem/visitor.h"
31 #include "ecmascript/mem/work_manager.h"
32 #include "ecmascript/napi/include/jsnapi_expo.h"
33 
34 namespace panda::test {
35 class GCTest_CallbackTask_Test;
36 class HProfTestHelper;
37 class HeapTestHelper;
38 }
39 
40 namespace panda::ecmascript {
41 class ConcurrentMarker;
42 class ConcurrentSweeper;
43 class EcmaVM;
44 class FullGC;
45 class GCStats;
46 class GCKeyStats;
47 class HeapRegionAllocator;
48 class HeapTracker;
49 #if !WIN_OR_MAC_OR_IOS_PLATFORM
50 class HeapProfilerInterface;
51 class HeapProfiler;
52 #endif
53 class IncrementalMarker;
54 class JSNativePointer;
55 class Marker;
56 class UnifiedGC;
57 class UnifiedGCMarker;
58 class MemController;
59 class IdleGCTrigger;
60 class NativeAreaAllocator;
61 class ParallelEvacuator;
62 class PartialGC;
63 class RSetWorkListHandler;
64 class SharedConcurrentMarker;
65 class SharedConcurrentSweeper;
66 class SharedGC;
67 class SharedGCEvacuator;
68 class SharedGCMarkerBase;
69 class SharedGCMarker;
70 class SharedFullGC;
71 class SharedGCMovableMarker;
72 class ThreadLocalAllocationBuffer;
73 class JSThread;
74 class DaemonThread;
75 class GlobalEnvConstants;
76 class SharedMemController;
77 class IdleGCTrigger;
78 
79 enum ThreadType : uint8_t;
80 
81 using namespace panda;
82 using IdleNotifyStatusCallback = std::function<void(bool)>;
83 using FinishGCListener = void (*)(void *);
84 using GCListenerId = std::vector<std::pair<FinishGCListener, void *>>::const_iterator;
85 using Clock = std::chrono::high_resolution_clock;
86 using AppFreezeFilterCallback =
87     std::function<bool(const int32_t pid, const bool needDecreaseQuota, std::string &eventConfig)>;
88 using BytesAndDuration = std::pair<uint64_t, double>;
89 using MemoryReduceDegree = panda::JSNApi::MemoryReduceDegree;
90 using NativePointerList = CVector<JSTaggedValue>;
91 enum class IdleTaskType : uint8_t {
92     NO_TASK,
93     YOUNG_GC,
94     FINISH_MARKING,
95     INCREMENTAL_MARK
96 };
97 
98 enum class MemGrowingType : uint8_t {
99     HIGH_THROUGHPUT,
100     CONSERVATIVE,
101     PRESSURE
102 };
103 
104 enum class HeapMode {
105     NORMAL,
106     SPAWN,
107     SHARE,
108 };
109 
110 enum AppSensitiveStatus : uint8_t {
111     NORMAL_SCENE,
112     ENTER_HIGH_SENSITIVE,
113     EXIT_HIGH_SENSITIVE,
114 };
115 
116 enum class StartupStatus : uint8_t {
117     BEFORE_STARTUP,
118     ON_STARTUP,
119     JUST_FINISH_STARTUP,
120     FINISH_STARTUP
121 };
122 
123 enum class VerifyKind {
124     VERIFY_PRE_GC,
125     VERIFY_POST_GC,
126     VERIFY_MARK_YOUNG,
127     VERIFY_EVACUATE_YOUNG,
128     VERIFY_MARK_FULL,
129     VERIFY_EVACUATE_OLD,
130     VERIFY_EVACUATE_FULL,
131     VERIFY_SHARED_RSET_POST_FULL_GC,
132     VERIFY_PRE_SHARED_GC,
133     VERIFY_POST_SHARED_GC,
134     VERIFY_SHARED_GC_MARK,
135     VERIFY_SHARED_GC_SWEEP,
136     VERIFY_END,
137 };
138 
139 enum class SharedHeapOOMSource {
140     NORMAL_ALLOCATION,
141     DESERIALIZE,
142     SHARED_GC,
143 };
144 
145 class BaseHeap {
146 public:
147     BaseHeap(const EcmaParamConfiguration &config) : config_(config) {}
148     virtual ~BaseHeap() = default;
149     NO_COPY_SEMANTIC(BaseHeap);
150     NO_MOVE_SEMANTIC(BaseHeap);
151 
152     virtual void Destroy() = 0;
153 
154     virtual bool IsMarking() const = 0;
155 
156     virtual bool IsReadyToConcurrentMark() const = 0;
157 
158     virtual bool NeedStopCollection() = 0;
159 
160     virtual void SetSensitiveStatus(AppSensitiveStatus status) = 0;
161 
162     virtual AppSensitiveStatus GetSensitiveStatus() const = 0;
163 
164     virtual bool FinishStartupEvent() = 0;
165 
166     virtual bool OnStartupEvent() const = 0;
167 
168     virtual void NotifyPostFork() = 0;
169 
170     virtual void TryTriggerIdleCollection() = 0;
171 
172     virtual void TryTriggerIncrementalMarking() = 0;
173 
174     /*
175      * Wait for existing concurrent marking tasks to be finished (if any).
176      * Return true if there's ongoing concurrent marking.
177      */
178     virtual bool CheckOngoingConcurrentMarking() = 0;
179 
180     virtual bool OldSpaceExceedCapacity(size_t size) const = 0;
181 
182     virtual bool OldSpaceExceedLimit() const = 0;
183 
184     virtual inline size_t GetCommittedSize() const = 0;
185 
186     virtual inline size_t GetHeapObjectSize() const = 0;
187 
188     virtual inline size_t GetRegionCount() const = 0;
189 
190     virtual void ChangeGCParams(bool inBackground) = 0;
191 
192     virtual const GlobalEnvConstants *GetGlobalConst() const = 0;
193 
194     virtual GCStats *GetEcmaGCStats() = 0;
195 
196     virtual bool ObjectExceedMaxHeapSize() const = 0;
197 
198     virtual void UpdateHeapStatsAfterGC(TriggerGCType gcType) = 0;
199 
200     MarkType GetMarkType() const
201     {
202         return markType_;
203     }
204 
205     void SetMarkType(MarkType markType)
206     {
207         markType_ = markType;
208     }
209 
210     bool IsYoungMark() const
211     {
212         return markType_ == MarkType::MARK_YOUNG;
213     }
214 
215     bool IsFullMark() const
216     {
217         return markType_ == MarkType::MARK_FULL;
218     }
219 
220     bool IsConcurrentFullMark() const
221     {
222         return markType_ == MarkType::MARK_FULL;
223     }
224 
225     TriggerGCType GetGCType() const
226     {
227         return gcType_;
228     }
229 
230     bool PUBLIC_API IsAlive(TaggedObject *object) const;
231 
232     bool ContainObject(TaggedObject *object) const;
233 
234     bool GetOldGCRequested()
235     {
236         return oldGCRequested_;
237     }
238 
239     EcmaParamConfiguration GetEcmaParamConfiguration() const
240     {
241         return config_;
242     }
243 
244     NativeAreaAllocator *GetNativeAreaAllocator() const
245     {
246         return nativeAreaAllocator_;
247     }
248 
249     HeapRegionAllocator *GetHeapRegionAllocator() const
250     {
251         return heapRegionAllocator_;
252     }
253 
254     void ShouldThrowOOMError(bool shouldThrow)
255     {
256         shouldThrowOOMError_ = shouldThrow;
257     }
258 
259     void ShouldForceThrowOOMError()
260     {
261         shouldForceThrowOOMError_ = true;
262     }
263 
264     void SetCanThrowOOMError(bool canThrow)
265     {
266         canThrowOOMError_ = canThrow;
267     }
268 
269     bool CanThrowOOMError()
270     {
271         return canThrowOOMError_;
272     }
273 
274     bool IsInBackground() const
275     {
276         return inBackground_;
277     }
278 
279     // ONLY used for heap verification.
280     bool IsVerifying() const
281     {
282         return isVerifying_;
283     }
284 
285     // ONLY used for heap verification.
286     void SetVerifying(bool verifying)
287     {
288         isVerifying_ = verifying;
289     }
290 
291     void SetGCState(bool inGC)
292     {
293         inGC_.store(inGC, std::memory_order_relaxed);
294     }
295 
296     bool InGC() const
297     {
298         return inGC_.load(std::memory_order_relaxed);
299     }
300 
301     void NotifyHeapAliveSizeAfterGC(size_t size)
302     {
303         heapAliveSizeAfterGC_ = size;
304     }
305 
306     size_t GetHeapAliveSizeAfterGC() const
307     {
308         return heapAliveSizeAfterGC_;
309     }
310 
311     size_t GetFragmentSizeAfterGC() const
312     {
313         return fragmentSizeAfterGC_;
314     }
315 
316     size_t GetHeapBasicLoss() const
317     {
318         return heapBasicLoss_;
319     }
320 
321     size_t GetGlobalSpaceAllocLimit() const
322     {
323         return globalSpaceAllocLimit_;
324     }
325 
326     // Whether the heap should be verified during GC.
327     bool ShouldVerifyHeap() const
328     {
329         return shouldVerifyHeap_;
330     }
331 
332     bool EnablePageTagThreadId() const
333     {
334         return enablePageTagThreadId_;
335     }
336 
337     void ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
338         bool NonMovableObjNearOOM = false);
339 
340     uint32_t GetMaxMarkTaskCount() const
341     {
342         return maxMarkTaskCount_;
343     }
344 
345     bool InSensitiveStatus() const
346     {
347         return GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE || OnStartupEvent();
348     }
349 
350     void OnAllocateEvent(EcmaVM *ecmaVm, TaggedObject* address, size_t size);
351     inline void SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
352                                             [[maybe_unused]] size_t size);
353     bool CheckCanDistributeTask();
354     void IncreaseTaskCount();
355     void ReduceTaskCount();
356     void WaitRunningTaskFinished();
357     void WaitClearTaskFinished();
358     void ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
359         bool NonMovableObjNearOOM = false);
360     void SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName);
361 
362 #ifndef NDEBUG
363     bool TriggerCollectionOnNewObjectEnabled() const
364     {
365         return triggerCollectionOnNewObject_;
366     };
367 
368     void EnableTriggerCollectionOnNewObject()
369     {
370         triggerCollectionOnNewObject_ = true;
371     }
372 
373     void DisableTriggerCollectionOnNewObject()
374     {
375         triggerCollectionOnNewObject_ = false;
376     }
377 #endif
378 
379     BASEHEAP_PUBLIC_HYBRID_EXTENSION();
380 
381 protected:
382     void FatalOutOfMemoryError(size_t size, std::string functionName);
383 
384     inline TaggedObject *FastAllocateYoungInTlabForCMC(JSThread *thread, size_t size) const;
385     inline TaggedObject *FastAllocateOldInTlabForCMC(JSThread *thread, size_t size) const;
386     inline TaggedObject *AllocateYoungForCMC(JSThread *thread, size_t size) const;
387     inline TaggedObject *AllocateOldForCMC(JSThread *thread, size_t size) const;
388 
389     enum class HeapType {
390         LOCAL_HEAP,
391         SHARED_HEAP,
392         INVALID,
393     };
394 
395     class RecursionScope {
396     public:
397         explicit RecursionScope(BaseHeap* heap, HeapType heapType) : heap_(heap), heapType_(heapType)
398         {
399             if (heap_->recursionDepth_++ != 0) {
400                 LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
401                               << ") Constructor, depth: " << heap_->recursionDepth_;
402             }
403             heap_->SetGCState(true);
404         }
405         ~RecursionScope()
406         {
407             if (--heap_->recursionDepth_ != 0) {
408                 LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
409                               << ") Destructor, depth: " << heap_->recursionDepth_;
410             }
411             heap_->SetGCState(false);
412         }
413     private:
414         BaseHeap *heap_ {nullptr};
415         HeapType heapType_ {HeapType::INVALID};
416     };
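    // Editor's note: illustrative sketch only (not part of this header). RecursionScope is presumably
    // placed at the top of a GC entry point so that re-entering garbage collection aborts loudly; the
    // enclosing method below is hypothetical:
    //
    //     void SomeHeap::CollectGarbageImpl(TriggerGCType gcType)
    //     {
    //         RecursionScope recurScope(this, HeapType::LOCAL_HEAP);  // logs FATAL if GC re-enters
    //         // ... mark / sweep / evacuate ...
    //     }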
417 
418     static constexpr double TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE = 0.75;
419 
420     const EcmaParamConfiguration config_;
421     MarkType markType_ {MarkType::MARK_YOUNG};
422     TriggerGCType gcType_ {TriggerGCType::YOUNG_GC};
423     Mutex gcCollectGarbageMutex_;
424     // Region allocators.
425     NativeAreaAllocator *nativeAreaAllocator_ {nullptr};
426     HeapRegionAllocator *heapRegionAllocator_ {nullptr};
427 
428     size_t heapAliveSizeAfterGC_ {0};
429     size_t globalSpaceAllocLimit_ {0};
430     size_t globalSpaceConcurrentMarkLimit_ {0};
431     size_t heapBasicLoss_ {1_MB};
432     size_t fragmentSizeAfterGC_ {0};
433     // parallel marker task count.
434     uint32_t runningTaskCount_ {0};
435     uint32_t maxMarkTaskCount_ {0};
436     Mutex waitTaskFinishedMutex_;
437     ConditionVariable waitTaskFinishedCV_;
438     Mutex waitClearTaskFinishedMutex_;
439     ConditionVariable waitClearTaskFinishedCV_;
440     bool clearTaskFinished_ {true};
441     bool inBackground_ {false};
442     bool shouldThrowOOMError_ {false};
443     // Differs from `shouldThrowOOMError_`: this is set when allocating a region fails during GC, which makes
444     // MemMapAllocator unlimited so that this GC can complete. After GC, if this flag is set, we MUST force-throw OOM.
445     bool shouldForceThrowOOMError_ {false};
446     bool canThrowOOMError_ {true};
447     bool oldGCRequested_ {false};
448     // ONLY used for heap verification.
449     bool shouldVerifyHeap_ {false};
450     bool isVerifying_ {false};
451     bool enablePageTagThreadId_ {false};
452     std::atomic_bool inGC_ {false};
453     int32_t recursionDepth_ {0};
454 #ifndef NDEBUG
455     bool triggerCollectionOnNewObject_ {true};
456 #endif
457 };
458 
459 class SharedHeap : public BaseHeap {
460 public:
461     SharedHeap(const EcmaParamConfiguration &config) : BaseHeap(config) {}
462     virtual ~SharedHeap() = default;
463 
464     static void CreateNewInstance();
465     static SharedHeap *GetInstance();
466     static void DestroyInstance();
467 
468     void Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
469         const JSRuntimeOptions &option, DaemonThread *dThread);
470 
471     void Destroy() override;
472 
473     void PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option);
474 
475     void EnableParallelGC(JSRuntimeOptions &option);
476 
477     void DisableParallelGC(JSThread *thread);
478 
479     void AdjustGlobalSpaceAllocLimit();
480 
481     inline void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);
482 
483     void ResetLargeCapacity();
484 
485     class ParallelMarkTask : public common::Task {
486     public:
487         ParallelMarkTask(int32_t id, SharedHeap *heap, SharedParallelMarkPhase taskPhase)
488             : common::Task(id), sHeap_(heap), taskPhase_(taskPhase) {};
489         ~ParallelMarkTask() override = default;
490         bool Run(uint32_t threadIndex) override;
491 
492         NO_COPY_SEMANTIC(ParallelMarkTask);
493         NO_MOVE_SEMANTIC(ParallelMarkTask);
494 
495     private:
496         SharedHeap *sHeap_ {nullptr};
497         SharedParallelMarkPhase taskPhase_;
498     };
499 
500     class AsyncClearTask : public common::Task {
501     public:
502         AsyncClearTask(int32_t id, SharedHeap *heap, TriggerGCType type)
503             : common::Task(id), sHeap_(heap), gcType_(type) {}
504         ~AsyncClearTask() override = default;
505         bool Run(uint32_t threadIndex) override;
506 
507         NO_COPY_SEMANTIC(AsyncClearTask);
508         NO_MOVE_SEMANTIC(AsyncClearTask);
509     private:
510         SharedHeap *sHeap_;
511         TriggerGCType gcType_;
512     };
513     bool IsMarking() const override
514     {
515         LOG_FULL(ERROR) << "SharedHeap IsMarking() not support yet";
516         return false;
517     }
518 
519     bool IsReadyToConcurrentMark() const override;
520 
521     bool NeedStopCollection() override;
522 
523     void SetSensitiveStatus(AppSensitiveStatus status) override
524     {
525         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
526         smartGCStats_.sensitiveStatus_ = status;
527         if (!InSensitiveStatus()) {
528             smartGCStats_.sensitiveStatusCV_.Signal();
529         }
530     }
531 
532     // This should be called while holding the lock of sensitiveStatusMutex_.
533     AppSensitiveStatus GetSensitiveStatus() const override
534     {
535         return smartGCStats_.sensitiveStatus_;
536     }
537 
538     StartupStatus GetStartupStatus() const
539     {
540         return smartGCStats_.startupStatus_;
541     }
542 
543     bool IsJustFinishStartup() const
544     {
545         return smartGCStats_.startupStatus_ == StartupStatus::JUST_FINISH_STARTUP;
546     }
547 
548     bool CancelJustFinishStartupEvent()
549     {
550         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
551         if (!IsJustFinishStartup()) {
552             return false;
553         }
554         smartGCStats_.startupStatus_ = StartupStatus::FINISH_STARTUP;
555         return true;
556     }
557 
558     bool FinishStartupEvent() override
559     {
560         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
561         if (!OnStartupEvent()) {
562             return false;
563         }
564         smartGCStats_.startupStatus_ = StartupStatus::JUST_FINISH_STARTUP;
565         if (!InSensitiveStatus()) {
566             smartGCStats_.sensitiveStatusCV_.Signal();
567         }
568         return true;
569     }
570 
571     // This should be called while holding the lock of sensitiveStatusMutex_.
572     bool OnStartupEvent() const override
573     {
574         return smartGCStats_.startupStatus_ == StartupStatus::ON_STARTUP;
575     }
576 
577     void NotifyPostFork() override
578     {
579         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
580         smartGCStats_.startupStatus_ = StartupStatus::ON_STARTUP;
581     }
582 
583     void WaitSensitiveStatusFinished()
584     {
585         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
586         while (InSensitiveStatus() && !smartGCStats_.forceGC_) {
587             smartGCStats_.sensitiveStatusCV_.Wait(&smartGCStats_.sensitiveStatusMutex_);
588         }
589     }
590 
591     bool ObjectExceedMaxHeapSize() const override;
592 
593     bool ObjectExceedJustFinishStartupThresholdForGC() const;
594 
595     bool ObjectExceedJustFinishStartupThresholdForCM() const;
596 
597     bool CheckIfNeedStopCollectionByStartup();
598 
599     void TryAdjustSpaceOvershootByConfigSize();
600 
601     bool CheckAndTriggerSharedGC(JSThread *thread);
602 
603     bool CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size);
604 
605     bool HasCSetRegions()
606     {
607         return sOldSpace_->GetCollectSetRegionCount() > 0;
608     }
609 
610     void TryTriggerLocalConcurrentMarking();
611 
612     // Called when all VMs are destroyed; tries to destroy the daemon thread.
613     void WaitAllTasksFinishedAfterAllJSThreadEliminated();
614 
615     void WaitAllTasksFinished(JSThread *thread);
616 
617     void StartConcurrentMarking(TriggerGCType gcType, MarkReason markReason);         // In daemon thread
618 
619     // Use JSThread instead of DaemonThread to check if IsReadyToSharedConcurrentMark, to avoid an atomic load.
620     bool CheckCanTriggerConcurrentMarking(JSThread *thread);
621 
622     void TryTriggerIdleCollection() override
623     {
624         LOG_FULL(ERROR) << "SharedHeap TryTriggerIdleCollection() not support yet";
625         return;
626     }
627 
628     void TryTriggerIncrementalMarking() override
629     {
630         LOG_FULL(ERROR) << "SharedHeap TryTriggerIncrementalMarking() not support yet";
631         return;
632     }
633 
634     void UpdateWorkManager(SharedGCWorkManager *sWorkManager);
635 
636     bool CheckOngoingConcurrentMarking() override;
637 
638     bool OldSpaceExceedCapacity(size_t size) const override
639     {
640         size_t totalSize = sOldSpace_->GetCommittedSize() + size;
641         return totalSize >= sOldSpace_->GetMaximumCapacity() + sOldSpace_->GetOutOfMemoryOvershootSize();
642     }
643 
644     bool OldSpaceExceedLimit() const override
645     {
646         return sOldSpace_->GetHeapObjectSize() >= sOldSpace_->GetInitialCapacity();
647     }
648 
649     SharedConcurrentMarker *GetConcurrentMarker() const
650     {
651         return sConcurrentMarker_;
652     }
653 
654     SharedGCEvacuator *GetSharedGCEvacuator() const
655     {
656         return sEvacuator_;
657     }
658 
659     SharedConcurrentSweeper *GetSweeper() const
660     {
661         return sSweeper_;
662     }
663 
664     bool IsParallelGCEnabled() const
665     {
666         return parallelGC_;
667     }
668 
669     SharedOldSpace *GetOldSpace() const
670     {
671         return sOldSpace_;
672     }
673 
674     SharedOldSpace *GetCompressSpace() const
675     {
676         return sCompressSpace_;
677     }
678 
679     SharedNonMovableSpace *GetNonMovableSpace() const
680     {
681         return sNonMovableSpace_;
682     }
683 
684     SharedHugeObjectSpace *GetHugeObjectSpace() const
685     {
686         return sHugeObjectSpace_;
687     }
688 
689     SharedReadOnlySpace *GetReadOnlySpace() const
690     {
691         return sReadOnlySpace_;
692     }
693 
694     SharedAppSpawnSpace *GetAppSpawnSpace() const
695     {
696         return sAppSpawnSpace_;
697     }
698 
699     void SetForceGC(bool forceGC)
700     {
701         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
702         smartGCStats_.forceGC_ = forceGC;
703         if (smartGCStats_.forceGC_) {
704             smartGCStats_.sensitiveStatusCV_.Signal();
705         }
706     }
707 
708     inline void TryTriggerConcurrentMarking(JSThread *thread);
709 
710     template<TriggerGCType gcType, MarkReason markReason>
711     void TriggerConcurrentMarking(JSThread *thread);
712 
713     template<TriggerGCType gcType, GCReason gcReason>
714     void CollectGarbage(JSThread *thread);
715 
716     template<GCReason gcReason>
717     void CompressCollectGarbageNotWaiting(JSThread *thread);
718 
719     template<TriggerGCType gcType, GCReason gcReason>
720     void PostGCTaskForTest(JSThread *thread);
721 
722     void CollectGarbageNearOOM(JSThread *thread);
723     // Only means the main body of SharedGC is finished, i.e. if parallel_gc is enabled, this flag will be set
724     // to true even if sweep_task and clear_task are still running asynchronously.
725     void NotifyGCCompleted();            // In daemon thread
726 
727     // Called when all VMs are destroyed; tries to destroy the daemon thread.
728     void WaitGCFinishedAfterAllJSThreadEliminated();
729 
730     void WaitGCFinished(JSThread *thread);
731 
732     void DaemonCollectGarbage(TriggerGCType gcType, GCReason reason);
733 
734     void SetMaxMarkTaskCount(uint32_t maxTaskCount)
735     {
736         maxMarkTaskCount_ = maxTaskCount;
737     }
738 
739     inline size_t GetCommittedSize() const override
740     {
741         size_t result = sOldSpace_->GetCommittedSize() +
742             sHugeObjectSpace_->GetCommittedSize() +
743             sNonMovableSpace_->GetCommittedSize() +
744             sReadOnlySpace_->GetCommittedSize();
745         return result;
746     }
747 
748     inline size_t GetHeapObjectSize() const override
749     {
750         size_t result = sOldSpace_->GetHeapObjectSize() +
751             sHugeObjectSpace_->GetHeapObjectSize() +
752             sNonMovableSpace_->GetHeapObjectSize() +
753             sReadOnlySpace_->GetCommittedSize();
754         return result;
755     }
756 
757     inline size_t GetRegionCount() const override
758     {
759         size_t result = sOldSpace_->GetRegionCount() +
760             sHugeObjectSpace_->GetRegionCount() +
761             sNonMovableSpace_->GetRegionCount() +
762             sReadOnlySpace_->GetRegionCount();
763         return result;
764     }
765 
766     void ResetNativeSizeAfterLastGC()
767     {
768         nativeSizeAfterLastGC_.store(0, std::memory_order_relaxed);
769     }
770 
771     void IncNativeSizeAfterLastGC(size_t size)
772     {
773         nativeSizeAfterLastGC_.fetch_add(size, std::memory_order_relaxed);
774     }
775 
776     size_t GetNativeSizeAfterLastGC() const
777     {
778         return nativeSizeAfterLastGC_.load(std::memory_order_relaxed);
779     }
780 
781     size_t GetNativeSizeTriggerSharedGC() const
782     {
783         return incNativeSizeTriggerSharedGC_;
784     }
785 
786     size_t GetNativeSizeTriggerSharedCM() const
787     {
788         return incNativeSizeTriggerSharedCM_;
789     }
790 
791     void ChangeGCParams([[maybe_unused]]bool inBackground) override
792     {
793         LOG_FULL(ERROR) << "SharedHeap ChangeGCParams() not support yet";
794         return;
795     }
796 
797     GCStats *GetEcmaGCStats() override
798     {
799         return sGCStats_;
800     }
801 
802     inline void SetGlobalEnvConstants(const GlobalEnvConstants *globalEnvConstants)
803     {
804         globalEnvConstants_ = globalEnvConstants;
805     }
806 
807     inline const GlobalEnvConstants *GetGlobalConst() const override
808     {
809         return globalEnvConstants_;
810     }
811 
812     SharedSparseSpace *GetSpaceWithType(MemSpaceType type) const
813     {
814         switch (type) {
815             case MemSpaceType::SHARED_OLD_SPACE:
816                 return sOldSpace_;
817             case MemSpaceType::SHARED_NON_MOVABLE:
818                 return sNonMovableSpace_;
819             default:
820                 LOG_ECMA(FATAL) << "this branch is unreachable";
821                 UNREACHABLE();
822                 break;
823         }
824     }
825 
826     void Prepare(bool inTriggerGCThread);
827     void Reclaim(TriggerGCType gcType);
828     void PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase);
829     void CompactHeapBeforeFork(JSThread *thread);
830     void ReclaimForAppSpawn();
831 
832     SharedGCWorkManager *GetWorkManager() const
833     {
834         return sWorkManager_;
835     }
836 
837     SharedGCMarker *GetSharedGCMarker() const
838     {
839         return sharedGCMarker_;
840     }
841 
842     SharedGCMovableMarker *GetSharedGCMovableMarker() const
843     {
844         return sharedGCMovableMarker_;
845     }
846     inline void SwapOldSpace();
847 
848     SharedMemController *GetSharedMemController() const
849     {
850         return sharedMemController_;
851     }
852 
853     void PrepareRecordRegionsForReclaim();
854 
855     template<class Callback>
856     void EnumerateOldSpaceRegions(const Callback &cb) const;
857 
858     template<class Callback>
859     void EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const;
860 
861     template<class Callback>
862     void IterateOverObjects(const Callback &cb) const;
863 
864     inline TaggedObject *AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size);
865 
866     inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass);
867 
868     inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
869 
870     inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, size_t size);
871 
872     inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass);
873 
874     inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
875 
876     inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, size_t size);
877 
878     inline TaggedObject *AllocateOldOrHugeObjectNoGC(JSThread *thread, size_t size);
879 
880     inline TaggedObject *AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
881 
882     inline TaggedObject *AllocateHugeObject(JSThread *thread, size_t size);
883 
884     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass);
885 
886     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
887 
888     inline TaggedObject *AllocateSNonMovableTlab(JSThread *thread, size_t size);
889 
890     inline TaggedObject *AllocateSOldTlab(JSThread *thread, size_t size);
891 
892     size_t VerifyHeapObjects(VerifyKind verifyKind) const;
893 
894     inline void MergeToOldSpaceSync(SharedLocalSpace *localSpace);
895 
896     void DumpHeapSnapshotBeforeOOM(JSThread *thread, SharedHeapOOMSource source);
897 
898     inline void ProcessSharedNativeDelete(const WeakRootVisitor& visitor);
899     inline void PushToSharedNativePointerList(JSNativePointer* pointer);
900     inline void IteratorNativePointerList(WeakVisitor &visitor);
901 
902     void UpdateHeapStatsAfterGC(TriggerGCType gcType) override;
903 
904     class SharedGCScope {
905     public:
906         SharedGCScope();
907         ~SharedGCScope();
908     };
909 
910     bool InHeapProfiler() const
911     {
912         return inHeapProfiler_;
913     }
914 
915     void CheckInHeapProfiler();
916     void SetGCThreadQosPriority(common::PriorityMode mode);
917 
918     SHAREDHEAP_PUBLIC_HYBRID_EXTENSION();
919 
920 private:
921     void ProcessAllGCListeners();
922     void CollectGarbageFinish(bool inDaemon, TriggerGCType gcType);
923 
924     void MoveOldSpaceToAppspawn();
925 
926     void ReclaimRegions(TriggerGCType type);
927 
928     void ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread);
929     inline TaggedObject *AllocateInSOldSpace(JSThread *thread, size_t size);
930     inline void InvokeSharedNativePointerCallbacks();
931     struct SharedHeapSmartGCStats {
932     /**
933      * For SmartGC.
934      * The daemon thread checks these statuses before trying to collect garbage, and waits until they finish.
935      * The check-and-wait sequence needs to be atomic, so a Mutex/CV is used.
936      */
937         Mutex sensitiveStatusMutex_;
938         ConditionVariable sensitiveStatusCV_;
939         AppSensitiveStatus sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
940         StartupStatus startupStatus_ {StartupStatus::BEFORE_STARTUP};
941         // If the SharedHeap is almost OOM and an allocation fails, causing a GC with GCReason::ALLOCATION_FAILED,
942         // the GC must run at once even in sensitive status.
943         bool forceGC_ {false};
944     };
945 
946     SharedHeapSmartGCStats smartGCStats_;
947 
948     static SharedHeap *instance_;
949 
950     GCStats *sGCStats_ {nullptr};
951 
952     bool localFullMarkTriggered_ {false};
953 
954     bool optionalLogEnabled_ {false};
955 
956     bool parallelGC_ {true};
957 
958     // Only means the main body of SharedGC is finished, i.e. if parallel_gc is enabled, this flag will be set
959     // to true even if sweep_task and clear_task are still running asynchronously.
960     bool gcFinished_ {true};
961     Mutex waitGCFinishedMutex_;
962     ConditionVariable waitGCFinishedCV_;
963 
964     DaemonThread *dThread_ {nullptr};
965     const GlobalEnvConstants *globalEnvConstants_ {nullptr};
966     SharedOldSpace *sOldSpace_ {nullptr};
967     SharedOldSpace *sCompressSpace_ {nullptr};
968     SharedNonMovableSpace *sNonMovableSpace_ {nullptr};
969     SharedReadOnlySpace *sReadOnlySpace_ {nullptr};
970     SharedHugeObjectSpace *sHugeObjectSpace_ {nullptr};
971     SharedAppSpawnSpace *sAppSpawnSpace_ {nullptr};
972     SharedGCWorkManager *sWorkManager_ {nullptr};
973     SharedConcurrentMarker *sConcurrentMarker_ {nullptr};
974     SharedConcurrentSweeper *sSweeper_ {nullptr};
975     SharedGC *sharedGC_ {nullptr};
976     SharedFullGC *sharedFullGC_ {nullptr};
977     SharedGCEvacuator *sEvacuator_ {nullptr};
978     SharedGCMarker *sharedGCMarker_ {nullptr};
979     SharedGCMovableMarker *sharedGCMovableMarker_ {nullptr};
980     SharedMemController *sharedMemController_ {nullptr};
981     size_t growingFactor_ {0};
982     size_t growingStep_ {0};
983     size_t incNativeSizeTriggerSharedCM_ {0};
984     size_t incNativeSizeTriggerSharedGC_ {0};
985     size_t fragmentationLimitForSharedFullGC_ {0};
986     std::atomic<size_t> spaceOvershoot_ {0};
987     std::atomic<size_t> nativeSizeAfterLastGC_ {0};
988     bool inHeapProfiler_ {false};
989     NativePointerList sharedNativePointerList_;
990     std::mutex sNativePointerListMutex_;
991     SHAREDHEAP_PRIVATE_HYBRID_EXTENSION();
992 };
993 
994 class Heap : public BaseHeap {
995 public:
996     explicit Heap(EcmaVM *ecmaVm);
997     virtual ~Heap() = default;
998     NO_COPY_SEMANTIC(Heap);
999     NO_MOVE_SEMANTIC(Heap);
1000     void Initialize();
1001     void Destroy() override;
1002     void Prepare();
1003     void GetHeapPrepare();
1004     void ResetLargeCapacity();
1005     void Resume(TriggerGCType gcType);
1006     void ResumeForAppSpawn();
1007     void CompactHeapBeforeFork();
1008     void DisableParallelGC();
1009     void EnableParallelGC();
1010 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1011     void SetJsDumpThresholds(size_t thresholds) const;
1012 #endif
1013 
1014     // fixme: Rename NewSpace to YoungSpace.
1015     // This is the active young generation space that the new objects are allocated in
1016     // or copied into (from the other semi space) during semi space GC.
1017     SemiSpace *GetNewSpace() const
1018     {
1019         return activeSemiSpace_;
1020     }
1021 
1022     /*
1023      * Return the original active space where the objects are to be evacuated during semi space GC.
1024      * This should be invoked only in the evacuation phase of semi space GC.
1025      * fixme: Get rid of this interface or make it safe considering the above implicit limitation / requirement.
1026      */
1027     SemiSpace *GetFromSpaceDuringEvacuation() const
1028     {
1029         return inactiveSemiSpace_;
1030     }
1031 
1032     OldSpace *GetOldSpace() const
1033     {
1034         return oldSpace_;
1035     }
1036 
1037     OldSpace *GetCompressSpace() const
1038     {
1039         return compressSpace_;
1040     }
1041 
1042     NonMovableSpace *GetNonMovableSpace() const
1043     {
1044         return nonMovableSpace_;
1045     }
1046 
1047     HugeObjectSpace *GetHugeObjectSpace() const
1048     {
1049         return hugeObjectSpace_;
1050     }
1051 
1052     MachineCodeSpace *GetMachineCodeSpace() const
1053     {
1054         return machineCodeSpace_;
1055     }
1056 
1057     HugeMachineCodeSpace *GetHugeMachineCodeSpace() const
1058     {
1059         return hugeMachineCodeSpace_;
1060     }
1061 
1062     SnapshotSpace *GetSnapshotSpace() const
1063     {
1064         return snapshotSpace_;
1065     }
1066 
1067     ReadOnlySpace *GetReadOnlySpace() const
1068     {
1069         return readOnlySpace_;
1070     }
1071 
1072     AppSpawnSpace *GetAppSpawnSpace() const
1073     {
1074         return appSpawnSpace_;
1075     }
1076 
1077     SparseSpace *GetSpaceWithType(MemSpaceType type) const
1078     {
1079         switch (type) {
1080             case MemSpaceType::OLD_SPACE:
1081                 return oldSpace_;
1082             case MemSpaceType::NON_MOVABLE:
1083                 return nonMovableSpace_;
1084             case MemSpaceType::MACHINE_CODE_SPACE:
1085                 return machineCodeSpace_;
1086             default:
1087                 LOG_ECMA(FATAL) << "this branch is unreachable";
1088                 UNREACHABLE();
1089                 break;
1090         }
1091     }
1092 
1093     PartialGC *GetPartialGC() const
1094     {
1095         return partialGC_;
1096     }
1097 
1098     FullGC *GetFullGC() const
1099     {
1100         return fullGC_;
1101     }
1102 
1103     ConcurrentSweeper *GetSweeper() const
1104     {
1105         return sweeper_;
1106     }
1107 
1108     ParallelEvacuator *GetEvacuator() const
1109     {
1110         return evacuator_;
1111     }
1112 
1113     ConcurrentMarker *GetConcurrentMarker() const
1114     {
1115         return concurrentMarker_;
1116     }
1117 
1118     IncrementalMarker *GetIncrementalMarker() const
1119     {
1120         return incrementalMarker_;
1121     }
1122 
1123     Marker *GetNonMovableMarker() const
1124     {
1125         return nonMovableMarker_;
1126     }
1127 
1128     Marker *GetCompressGCMarker() const
1129     {
1130         return compressGCMarker_;
1131     }
1132 
1133     EcmaVM *GetEcmaVM() const
1134     {
1135         return ecmaVm_;
1136     }
1137 
1138     JSThread *GetJSThread() const
1139     {
1140         return thread_;
1141     }
1142 
1143     WorkManager *GetWorkManager() const
1144     {
1145         return workManager_;
1146     }
1147 
1148     WorkNode *&GetMarkingObjectLocalBuffer()
1149     {
1150         return sharedGCData_.sharedConcurrentMarkingLocalBuffer_;
1151     }
1152 
1153     IdleGCTrigger *GetIdleGCTrigger() const
1154     {
1155         return idleGCTrigger_;
1156     }
1157 
1158     void SetRSetWorkListHandler(RSetWorkListHandler *handler)
1159     {
1160         ASSERT((sharedGCData_.rSetWorkListHandler_ == nullptr) != (handler == nullptr));
1161         sharedGCData_.rSetWorkListHandler_ = handler;
1162     }
1163 
1164     void ProcessSharedGCMarkingLocalBuffer();
1165 
1166     void ProcessSharedGCRSetWorkList();
1167 
1168     const GlobalEnvConstants *GetGlobalConst() const override;
1169 
1170     MemController *GetMemController() const
1171     {
1172         return memController_;
1173     }
1174 
1175     inline void RecordOrResetObjectSize(size_t objectSize)
1176     {
1177         recordObjectSize_ = objectSize;
1178     }
1179 
1180     inline size_t GetRecordObjectSize() const
1181     {
1182         return recordObjectSize_;
1183     }
1184 
1185     inline void RecordOrResetNativeSize(size_t nativeSize)
1186     {
1187         recordNativeSize_ = nativeSize;
1188     }
1189 
1190     inline size_t GetRecordNativeSize() const
1191     {
1192         return recordNativeSize_;
1193     }
1194 
1195     /*
1196      * For object allocations.
1197      */
1198 
1199     // Young
1200     inline TaggedObject *AllocateInYoungSpace(size_t size);
1201     inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass);
1202     inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass, size_t size);
1203     inline TaggedObject *AllocateYoungOrHugeObject(size_t size);
1204     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass);
1205     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size);
1206     inline TaggedObject *AllocateReadOnlyOrHugeObject(size_t size);
1207     inline uintptr_t AllocateYoungSync(size_t size);
1208     inline TaggedObject *TryAllocateYoungGeneration(JSHClass *hclass, size_t size);
1209     // Old
1210     inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass);
1211     inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass, size_t size);
1212     inline TaggedObject *AllocateOldOrHugeObject(size_t size);
1213     // Non-movable
1214     inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass);
1215     inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size);
1216     inline TaggedObject *AllocateClassClass(JSHClass *hclass, size_t size);
1217     // Huge
1218     inline TaggedObject *AllocateHugeObject(size_t size);
1219     inline TaggedObject *AllocateHugeObject(JSHClass *hclass, size_t size);
1220     // Machine code
1221     inline TaggedObject *AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc = nullptr);
1222     inline TaggedObject *AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc = nullptr);
1223     // Snapshot
1224     inline uintptr_t AllocateSnapshotSpace(size_t size);
1225 
1226     // shared non movable space tlab
1227     inline TaggedObject *AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size);
1228     // shared old space tlab
1229     inline TaggedObject *AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size);
1230 
1231     void ResetTlab();
1232     void FillBumpPointerForTlab();
1233     /*
1234      * GC triggers.
1235      */
1236     void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER);
1237     void ProcessGCCallback();
1238     bool CheckAndTriggerOldGC(size_t size = 0);
1239     bool CheckAndTriggerHintGC(MemoryReduceDegree degree, GCReason reason = GCReason::OTHER);
1240     TriggerGCType SelectGCType() const;
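    // Editor's note: illustrative usage sketch only (not part of this header), built from the GC-trigger
    // declarations above; the `heap` pointer is assumed to be obtained from the owning EcmaVM:
    //
    //     heap->CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::OTHER);
    //     heap->CheckAndTriggerOldGC();  // only triggers an old GC if the relevant thresholds are exceeded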
1241     /*
1242      * Parallel GC related configurations and utilities.
1243      */
1244 
1245     void PostParallelGCTask(ParallelGCTaskPhase taskPhase);
1246 
1247     bool IsParallelGCEnabled() const
1248     {
1249         return parallelGC_;
1250     }
1251     void ChangeGCParams(bool inBackground) override;
1252 
1253     GCStats *GetEcmaGCStats() override;
1254 
1255     GCKeyStats *GetEcmaGCKeyStats();
1256 
1257     JSObjectResizingStrategy *GetJSObjectResizingStrategy();
1258 
1259     void TriggerIdleCollection(int idleMicroSec);
1260     void NotifyMemoryPressure(bool inHighMemoryPressure);
1261 
1262     void TryTriggerConcurrentMarking(MarkReason markReason = MarkReason::OTHER);
1263     void AdjustBySurvivalRate(size_t originalNewSpaceSize);
1264     void TriggerConcurrentMarking(MarkReason markReason = MarkReason::OTHER);
1265     bool CheckCanTriggerConcurrentMarking();
1266 
1267     void TryTriggerIdleCollection() override;
1268     void TryTriggerIncrementalMarking() override;
1269     void CalculateIdleDuration();
1270     void UpdateWorkManager(WorkManager *workManager);
1271 
1272     bool CheckOngoingConcurrentMarking() override
1273     {
1274         return CheckOngoingConcurrentMarkingImpl(ThreadType::JS_THREAD, MAIN_THREAD_INDEX,
1275                                                  "Heap::CheckOngoingConcurrentMarking");
1276     }
1277     bool DaemonCheckOngoingConcurrentMarking()
1278     {
1279         return CheckOngoingConcurrentMarkingImpl(ThreadType::DAEMON_THREAD, DAEMON_THREAD_INDEX,
1280                                                  "Heap::DaemonCheckOngoingConcurrentMarking");
1281     }
1282 
1283     inline void SwapNewSpace();
1284     inline void SwapOldSpace();
1285 
1286     inline bool MoveYoungRegion(Region *region);
1287     inline bool MoveYoungRegionToOld(Region *region);
1288     inline void MergeToOldSpaceSync(LocalSpace *localSpace);
1289 
1290     template<class Callback>
1291     void EnumerateOldSpaceRegions(const Callback &cb, Region *region = nullptr) const;
1292 
1293     template<class Callback>
1294     void EnumerateNonNewSpaceRegions(const Callback &cb) const;
1295 
1296     template<class Callback>
1297     void EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const;
1298 
1299     template<class Callback>
1300     void EnumerateNewSpaceRegions(const Callback &cb) const;
1301 
1302     template<class Callback>
1303     void EnumerateSnapshotSpaceRegions(const Callback &cb) const;
1304 
1305     template<class Callback>
1306     void EnumerateNonMovableRegions(const Callback &cb) const;
1307 
1308     template<class Callback>
1309     inline void EnumerateRegions(const Callback &cb) const;
1310 
1311     inline void ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd);
1312 
1313     void WaitAllTasksFinished();
1314     void WaitConcurrentMarkingFinished();
1315 
1316     MemGrowingType GetMemGrowingType() const
1317     {
1318         return memGrowingtype_;
1319     }
1320 
1321     void SetMemGrowingType(MemGrowingType memGrowingType)
1322     {
1323         memGrowingtype_ = memGrowingType;
1324     }
1325 
1326     size_t CalculateLinearSpaceOverShoot()
1327     {
1328         return oldSpace_->GetMaximumCapacity() - oldSpace_->GetInitialCapacity();
1329     }
1330 
1331     inline size_t GetCommittedSize() const override;
1332 
1333     inline size_t GetHeapObjectSize() const override;
1334 
1335     inline void NotifyRecordMemorySize();
1336 
1337     inline size_t GetRegionCount() const override;
1338 
1339     size_t GetRegionCachedSize() const
1340     {
1341         return activeSemiSpace_->GetInitialCapacity();
1342     }
1343 
1344     size_t GetLiveObjectSize() const;
1345 
1346     inline uint32_t GetHeapObjectCount() const;
1347 
1348     size_t GetPromotedSize() const
1349     {
1350         return promotedSize_;
1351     }
1352 
1353     size_t GetArrayBufferSize() const;
1354 
1355     size_t GetHeapLimitSize() const;
1356 
1357     uint32_t GetMaxEvacuateTaskCount() const
1358     {
1359         return maxEvacuateTaskCount_;
1360     }
1361 
1362     /*
1363      * Receive callback function to control idletime.
1364      */
1365     inline void InitializeIdleStatusControl(std::function<void(bool)> callback);
1366 
1367     void DisableNotifyIdle()
1368     {
1369         if (notifyIdleStatusCallback != nullptr) {
1370             notifyIdleStatusCallback(true);
1371         }
1372     }
1373 
1374     void EnableNotifyIdle()
1375     {
1376         if (enableIdleGC_ && notifyIdleStatusCallback != nullptr) {
1377             notifyIdleStatusCallback(false);
1378         }
1379     }
1380 
1381     void SetIdleTask(IdleTaskType task)
1382     {
1383         idleTask_ = task;
1384     }
1385 
1386     void ClearIdleTask();
1387 
1388     bool IsEmptyIdleTask()
1389     {
1390         return idleTask_ == IdleTaskType::NO_TASK;
1391     }
1392 
1393     void SetOnSerializeEvent(bool isSerialize)
1394     {
1395         onSerializeEvent_ = isSerialize;
1396         if (!onSerializeEvent_ && !InSensitiveStatus()) {
1397             TryTriggerIncrementalMarking();
1398             TryTriggerIdleCollection();
1399             TryTriggerConcurrentMarking(MarkReason::EXIT_SERIALIZE);
1400         }
1401     }
1402 
1403     bool GetOnSerializeEvent() const
1404     {
1405         return onSerializeEvent_;
1406     }
1407 
1408     void NotifyFinishColdStart(bool isMainThread = true);
1409 
1410     void NotifyFinishColdStartSoon();
1411 
1412     void NotifyWarmStartup();
1413 
1414     void NotifyHighSensitive(bool isStart);
1415 
1416     bool HandleExitHighSensitiveEvent();
1417 
1418     bool ObjectExceedMaxHeapSize() const override;
1419 
1420     bool ObjectExceedHighSensitiveThresholdForCM() const;
1421 
1422     bool ObjectExceedJustFinishStartupThresholdForGC() const;
1423 
1424     bool ObjectExceedJustFinishStartupThresholdForCM() const;
1425 
1426     void TryIncreaseNewSpaceOvershootByConfigSize();
1427 
1428     void TryIncreaseOvershootByConfigSize();
1429 
1430     bool CheckIfNeedStopCollectionByStartup();
1431 
1432     bool CheckIfNeedStopCollectionByHighSensitive();
1433 
1434     bool NeedStopCollection() override;
1435 
1436     void SetSensitiveStatus(AppSensitiveStatus status) override
1437     {
1438         sHeap_->SetSensitiveStatus(status);
1439         smartGCStats_.sensitiveStatus_.store(status, std::memory_order_release);
1440     }
1441 
1442     AppSensitiveStatus GetSensitiveStatus() const override
1443     {
1444         return smartGCStats_.sensitiveStatus_.load(std::memory_order_acquire);
1445     }
1446 
1447     void SetRecordHeapObjectSizeBeforeSensitive(size_t objSize)
1448     {
1449         recordObjSizeBeforeSensitive_ = objSize;
1450     }
1451 
1452     size_t GetRecordHeapObjectSizeBeforeSensitive() const
1453     {
1454         return recordObjSizeBeforeSensitive_;
1455     }
1456 
1457     void SetNearGCInSensitive(bool flag)
1458     {
1459         nearGCInSensitive_ = flag;
1460     }
1461 
1462     bool IsNearGCInSensitive()
1463     {
1464         return nearGCInSensitive_;
1465     }
1466 
1467     bool CASSensitiveStatus(AppSensitiveStatus expect, AppSensitiveStatus status)
1468     {
1469         return smartGCStats_.sensitiveStatus_.compare_exchange_strong(expect, status, std::memory_order_seq_cst);
1470     }
1471 
1472     StartupStatus GetStartupStatus() const
1473     {
1474         ASSERT(smartGCStats_.startupStatus_.load(std::memory_order_relaxed) == sHeap_->GetStartupStatus());
1475         return smartGCStats_.startupStatus_.load(std::memory_order_relaxed);
1476     }
1477 
1478     bool IsJustFinishStartup() const
1479     {
1480         return GetStartupStatus() == StartupStatus::JUST_FINISH_STARTUP;
1481     }
1482 
1483     bool CancelJustFinishStartupEvent()
1484     {
1485         if (!IsJustFinishStartup()) {
1486             return false;
1487         }
1488         TryIncreaseOvershootByConfigSize();
1489         smartGCStats_.startupStatus_.store(StartupStatus::FINISH_STARTUP, std::memory_order_release);
1490         sHeap_->CancelJustFinishStartupEvent();
1491         return true;
1492     }
1493 
1494     bool FinishStartupEvent() override
1495     {
1496         if (!OnStartupEvent()) {
1497             LOG_GC(WARN) << "SmartGC: app cold start last status is not JUST_FINISH_STARTUP, just return false";
1498             return false;
1499         }
1500         TryIncreaseOvershootByConfigSize();
1501         smartGCStats_.startupStatus_.store(StartupStatus::JUST_FINISH_STARTUP, std::memory_order_release);
1502         sHeap_->FinishStartupEvent();
1503         return true;
1504     }
1505 
1506     bool OnStartupEvent() const override
1507     {
1508         return GetStartupStatus() == StartupStatus::ON_STARTUP;
1509     }
1510 
1511     void NotifyPostFork() override
1512     {
1513         sHeap_->NotifyPostFork();
1514         smartGCStats_.startupStatus_.store(StartupStatus::ON_STARTUP, std::memory_order_relaxed);
1515         size_t localFirst = config_.GetMaxHeapSize();
1516         size_t localSecond = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO;
1517         auto sharedHeapConfig = sHeap_->GetEcmaParamConfiguration();
1518         size_t sharedFirst = sHeap_->GetOldSpace()->GetInitialCapacity();
1519         size_t sharedSecond = sharedHeapConfig.GetMaxHeapSize()
1520                             * JUST_FINISH_STARTUP_SHARED_THRESHOLD_RATIO
1521                             * JUST_FINISH_STARTUP_SHARED_CONCURRENT_MARK_RATIO;
1522         LOG_GC(INFO) << "SmartGC: startup GC restrain, "
1523             << "phase 1 threshold: local " << localFirst / 1_MB << "MB, shared " << sharedFirst / 1_MB << "MB; "
1524             << "phase 2 threshold: local " << localSecond / 1_MB << "MB, shared " << sharedSecond / 1_MB << "MB";
1525     }
1526 
1527 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1528     void StartHeapTracking()
1529     {
1530         WaitAllTasksFinished();
1531     }
1532 
1533     void StopHeapTracking()
1534     {
1535         WaitAllTasksFinished();
1536     }
1537 #endif
1538     inline bool InHeapProfiler();
1539 
1540     inline void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);
1541 
1542     // add allocationInspector to each space
1543     void AddAllocationInspectorToAllSpaces(AllocationInspector *inspector);
1544 
1545     // clear allocationInspector from each space
1546     void ClearAllocationInspectorFromAllSpaces();
1547 
1548     /*
1549      * Functions used by heap verification.
1550      */
1551 
1552     template<class Callback>
1553     void IterateOverObjects(const Callback &cb, bool isSimplify = false) const;
1554 
1555     size_t VerifyHeapObjects(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
1556     size_t VerifyOldToNewRSet(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
1557     void StatisticHeapObject(TriggerGCType gcType) const;
1558     void StatisticHeapDetail();
1559     void PrintHeapInfo(TriggerGCType gcType) const;
1560 
1561     bool OldSpaceExceedCapacity(size_t size) const override
1562     {
1563         size_t totalSize = oldSpace_->GetCommittedSize() + hugeObjectSpace_->GetCommittedSize() + size;
1564         return totalSize >= oldSpace_->GetMaximumCapacity() + oldSpace_->GetOvershootSize() +
1565                oldSpace_->GetOutOfMemoryOvershootSize();
1566     }
1567 
1568     bool OldSpaceExceedLimit() const override
1569     {
1570         size_t totalSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
1571         return totalSize >= oldSpace_->GetInitialCapacity() + oldSpace_->GetOvershootSize();
1572     }
1573 
1574     void AdjustSpaceSizeForAppSpawn();
1575 
1576     static bool ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object);
1577 
1578     bool IsFullMarkRequested() const
1579     {
1580         return fullMarkRequested_;
1581     }
1582 
1583     void SetFullMarkRequestedState(bool fullMarkRequested)
1584     {
1585         fullMarkRequested_ = fullMarkRequested;
1586     }
1587 
1588     void SetHeapMode(HeapMode mode)
1589     {
1590         mode_ = mode;
1591     }
1592 
1593     void IncreaseNativeBindingSize(size_t size);
1594     void IncreaseNativeBindingSize(JSNativePointer *object);
1595     void DecreaseNativeBindingSize(size_t size);
ResetNativeBindingSize()1596     void ResetNativeBindingSize()
1597     {
1598         nativeBindingSize_ = 0;
1599     }
1600 
GetNativeBindingSize()1601     size_t GetNativeBindingSize() const
1602     {
1603         if (g_isEnableCMCGC) {
1604             return common::BaseRuntime::GetNotifiedNativeSize();
1605         }
1606         return nativeBindingSize_;
1607     }

    size_t GetGlobalSpaceNativeLimit() const
    {
        return globalSpaceNativeLimit_;
    }

    size_t GetNativeBindingSizeAfterLastGC() const
    {
        return nativeBindingSizeAfterLastGC_;
    }

    size_t GetGlobalNativeSize() const
    {
        return GetNativeBindingSize() + nativeAreaAllocator_->GetNativeMemoryUsage();
    }

    void ResetNativeSizeAfterLastGC()
    {
        nativeSizeAfterLastGC_ = 0;
        nativeBindingSizeAfterLastGC_ = nativeBindingSize_;
    }

    void IncNativeSizeAfterLastGC(size_t size)
    {
        nativeSizeAfterLastGC_ += size;
    }

    bool GlobalNativeSizeLargerToTriggerGC() const
    {
        auto incNativeBindingSizeAfterLastGC = nativeBindingSize_ > nativeBindingSizeAfterLastGC_ ?
            nativeBindingSize_ - nativeBindingSizeAfterLastGC_ : 0;
        return GetGlobalNativeSize() > nativeSizeTriggerGCThreshold_ &&
            nativeSizeAfterLastGC_ + incNativeBindingSizeAfterLastGC > incNativeSizeTriggerGC_;
    }
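    // Reading of the predicate above (sketch with hypothetical numbers): assume nativeSizeTriggerGCThreshold_
    // is 300 MB and incNativeSizeTriggerGC_ is 30 MB. With a total native size of 320 MB but only 10 MB of
    // native memory accumulated since the last GC (allocations plus newly notified bindings), the predicate
    // stays false; it turns true only once the post-GC accumulation also exceeds 30 MB.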

    bool GlobalNativeSizeLargerThanLimit() const
    {
        size_t overshoot = InSensitiveStatus() ? nativeSizeOvershoot_ : 0;
        return GetGlobalNativeSize() >= globalSpaceNativeLimit_ + overshoot;
    }

    bool GlobalNativeSizeLargerThanLimitForIdle() const
    {
        return GetGlobalNativeSize() >= static_cast<size_t>(globalSpaceNativeLimit_ *
            IDLE_SPACE_SIZE_LIMIT_RATE);
    }

    void TryTriggerFullMarkOrGCByNativeSize();

    void TryTriggerFullMarkBySharedSize(size_t size);

    bool TryTriggerFullMarkBySharedLimit();

    void CheckAndTriggerTaskFinishedGC();

    bool IsMarking() const override;

    bool IsReadyToConcurrentMark() const override;

    bool IsYoungGC() const
    {
        return gcType_ == TriggerGCType::YOUNG_GC;
    }

    void CheckNonMovableSpaceOOM();
    void DumpHeapSnapshotBeforeOOM();
    std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> CalCallSiteInfo(uintptr_t retAddr) const;
    MachineCode *GetMachineCodeObject(uintptr_t pc) const;
    void SetMachineCodeObject(uintptr_t start, uintptr_t end, uintptr_t address) const;

    PUBLIC_API GCListenerId AddGCListener(FinishGCListener listener, void *data);
    PUBLIC_API void RemoveGCListener(GCListenerId listenerId);
    void ProcessGCListeners();
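    // Minimal usage sketch for the listener API above (illustrative only; assumes the caller already holds a
    // Heap pointer and keeps `userData` alive until the listener is removed):
    //
    //   static void OnGCFinished(void *data)
    //   {
    //       // e.g. read GC statistics or schedule follow-up work on the embedder side
    //   }
    //   GCListenerId id = heap->AddGCListener(OnGCFinished, userData);
    //   ...
    //   heap->RemoveGCListener(id);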

    inline void ProcessNativeDelete(const WeakRootVisitor& visitor);
    inline void ProcessReferences(const WeakRootVisitor& visitor);
    inline void PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent);
    inline void RemoveFromNativePointerList(const JSNativePointer* pointer);
    inline void ClearNativePointerList();
    inline void IteratorNativePointerList(WeakVisitor &vistor);

    size_t GetNativePointerListSize() const
    {
        return nativePointerList_.size();
    }

    size_t GetHeapAliveSizeExcludesYoungAfterGC() const
    {
        return heapAliveSizeExcludesYoungAfterGC_;
    }

    void UpdateHeapStatsAfterGC(TriggerGCType gcType) override;
    HEAP_PUBLIC_HYBRID_EXTENSION();

private:
    void CollectGarbageImpl(TriggerGCType gcType, GCReason reason = GCReason::OTHER);

    static constexpr int MIN_JSDUMP_THRESHOLDS = 85;
    static constexpr int MAX_JSDUMP_THRESHOLDS = 95;
    static constexpr int IDLE_TIME_LIMIT = 10;  // if the idle time exceeds 10 ms we can do something
    static constexpr int ALLOCATE_SIZE_LIMIT = 100_KB;
    static constexpr int IDLE_MAINTAIN_TIME = 500;
    static constexpr int BACKGROUND_GROW_LIMIT = 2_MB;
    // Threshold at which HintGC will actually trigger GC.
    static constexpr double SURVIVAL_RATE_THRESHOLD = 0.5;
    static constexpr size_t NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT = DEFAULT_SHARED_HEAP_SIZE / 10; // 10 : one tenth.
    static constexpr size_t INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT = 100_MB;
    void RecomputeLimits();
    void AdjustOldSpaceLimit();
    // record lastRegion for each space, which will be used in ReclaimRegions()
    void PrepareRecordRegionsForReclaim();
    inline void ReclaimRegions(TriggerGCType gcType);
    inline size_t CalculateCommittedCacheSize();
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    uint64_t GetCurrentTickMillseconds();
    void ThresholdReachedDump();
#endif
    void CleanCallback();
    void IncreasePendingAsyncNativeCallbackSize(size_t bindingSize)
    {
        pendingAsyncNativeCallbackSize_ += bindingSize;
    }
    void DecreasePendingAsyncNativeCallbackSize(size_t bindingSize)
    {
        pendingAsyncNativeCallbackSize_ -= bindingSize;
    }
    bool CheckOngoingConcurrentMarkingImpl(ThreadType threadType, int threadIndex,
                                           [[maybe_unused]] const char* traceName);
    class ParallelGCTask : public common::Task {
    public:
        ParallelGCTask(int32_t id, Heap *heap, ParallelGCTaskPhase taskPhase)
            : common::Task(id), heap_(heap), taskPhase_(taskPhase) {}
        ~ParallelGCTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(ParallelGCTask);
        NO_MOVE_SEMANTIC(ParallelGCTask);

    private:
        Heap *heap_ {nullptr};
        ParallelGCTaskPhase taskPhase_;
    };

    class AsyncClearTask : public common::Task {
    public:
        AsyncClearTask(int32_t id, Heap *heap, TriggerGCType type)
            : common::Task(id), heap_(heap), gcType_(type) {}
        ~AsyncClearTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(AsyncClearTask);
        NO_MOVE_SEMANTIC(AsyncClearTask);
    private:
        Heap *heap_;
        TriggerGCType gcType_;
    };

    class FinishColdStartTask : public common::Task {
    public:
        FinishColdStartTask(int32_t id, Heap *heap)
            : common::Task(id), heap_(heap) {}
        ~FinishColdStartTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(FinishColdStartTask);
        NO_MOVE_SEMANTIC(FinishColdStartTask);
    private:
        Heap *heap_;
    };

    class FinishGCRestrainTask : public common::Task {
    public:
        FinishGCRestrainTask(int32_t id, Heap *heap)
            : common::Task(id), heap_(heap) {}
        ~FinishGCRestrainTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(FinishGCRestrainTask);
        NO_MOVE_SEMANTIC(FinishGCRestrainTask);
    private:
        Heap *heap_;
    };

    class DeleteCallbackTask : public common::Task {
    public:
        DeleteCallbackTask(int32_t id, std::vector<NativePointerCallbackData> &callbacks) : common::Task(id)
        {
            std::swap(callbacks, nativePointerCallbacks_);
        }
        ~DeleteCallbackTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(DeleteCallbackTask);
        NO_MOVE_SEMANTIC(DeleteCallbackTask);

    private:
        std::vector<NativePointerCallbackData> nativePointerCallbacks_ {};
    };

    struct MainLocalHeapSmartGCStats {
        /**
         * For SmartGC.
         * The main js thread checks these statuses every time it tries to collect garbage
         * (e.g. in JSThread::CheckSafePoint) and skips the GC if needed, so std::atomic is almost enough.
         */
        std::atomic<AppSensitiveStatus> sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
        std::atomic<StartupStatus> startupStatus_ {StartupStatus::BEFORE_STARTUP};
    };
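    // Sketch of how these flags are meant to be consumed (illustrative only, not the exact CheckSafePoint code):
    // the main js thread loads them with relaxed ordering and simply skips a non-urgent collection while the app
    // is in a sensitive or startup window, e.g.
    //
    //   if (smartGCStats_.sensitiveStatus_.load(std::memory_order_relaxed) != AppSensitiveStatus::NORMAL_SCENE) {
    //       return;  // postpone the GC; it will be reconsidered at a later safepoint
    //   }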

    // Some data used in SharedGC also needs to be stored in the local heap, e.g. the temporary local mark stack.
    struct SharedGCLocalStoragePackedData {
        /**
         * During SharedGC concurrent marking, the barrier pushes shared objects onto a mark stack for marking.
         * In LocalGC, non-shared objects can simply be pushed to the WorkNode for MAIN_THREAD_INDEX, but in
         * SharedGC we can only either use a global lock for DAEMON_THREAD_INDEX's WorkNode, or push to a local
         * WorkNode and push it to the global one during remark.
         * If the heap is destructed before this node is pushed to the global one, check and try to push the
         * remaining objects as well.
         */
        WorkNode *sharedConcurrentMarkingLocalBuffer_ {nullptr};
        /**
         * Records the local_to_share rset used in SharedGC concurrent marking, whose lifecycle is within one
         * SharedGC.
         * Before mutating this local heap (e.g. LocalGC::Evacuate), make sure the RSetWorkList is fully
         * processed, otherwise the SharedGC concurrent marking will visit incorrect local_to_share bits.
         * Before destroying the local heap, the RSetWorkList should be finished as well.
         */
        RSetWorkListHandler *rSetWorkListHandler_ {nullptr};
    };

    EcmaVM *ecmaVm_ {nullptr};
    JSThread *thread_ {nullptr};

    SharedHeap *sHeap_ {nullptr};
    MainLocalHeapSmartGCStats smartGCStats_;

    /*
     * Heap spaces.
     */

    /*
     * Young generation spaces where most new objects are allocated
     * (only one of the spaces is active at a time in semi-space GC).
     */
    SemiSpace *activeSemiSpace_ {nullptr};
    SemiSpace *inactiveSemiSpace_ {nullptr};

    // Old generation spaces where some long-lived objects are allocated or promoted.
    OldSpace *oldSpace_ {nullptr};
    OldSpace *compressSpace_ {nullptr};
    ReadOnlySpace *readOnlySpace_ {nullptr};
    AppSpawnSpace *appSpawnSpace_ {nullptr};
    // Spaces used for special kinds of objects.
    NonMovableSpace *nonMovableSpace_ {nullptr};
    MachineCodeSpace *machineCodeSpace_ {nullptr};
    HugeMachineCodeSpace *hugeMachineCodeSpace_ {nullptr};
    HugeObjectSpace *hugeObjectSpace_ {nullptr};
    SnapshotSpace *snapshotSpace_ {nullptr};
    // TLAB for the shared non-movable space
    ThreadLocalAllocationBuffer *sNonMovableTlab_ {nullptr};
    // TLAB for the shared old space
    ThreadLocalAllocationBuffer *sOldTlab_ {nullptr};
    /*
     * Garbage collectors collecting garbage in different scopes.
     */

    /*
     * The most frequently used partial GC, which collects garbage in the young spaces
     * and, if the GC heuristics decide it is needed, in part of the old spaces.
     */
    PartialGC *partialGC_ {nullptr};

    // Full collector which collects garbage in all valid heap spaces.
    FullGC *fullGC_ {nullptr};

    // Concurrent marker which coordinates actions of GC markers and mutators.
    ConcurrentMarker *concurrentMarker_ {nullptr};

    // Concurrent sweeper which coordinates actions of sweepers (in spaces excluding young semi spaces) and mutators.
    ConcurrentSweeper *sweeper_ {nullptr};

    // Parallel evacuator which evacuates objects from one space to another one.
    ParallelEvacuator *evacuator_ {nullptr};

    // Incremental marker which coordinates actions of GC markers in idle time.
    IncrementalMarker *incrementalMarker_ {nullptr};

    /*
     * Different kinds of markers used by different collectors.
     * Depending on the collector algorithm, some markers can do simple marking
     * while others need to handle object movement.
     */
    Marker *nonMovableMarker_ {nullptr};
    Marker *compressGCMarker_ {nullptr};

    // Work manager managing the tasks mostly generated in the GC mark phase.
    WorkManager *workManager_ {nullptr};

    SharedGCLocalStoragePackedData sharedGCData_;

    bool onSerializeEvent_ {false};
    bool parallelGC_ {true};
    bool fullGCRequested_ {false};
    bool fullMarkRequested_ {false};
    bool oldSpaceLimitAdjusted_ {false};
    bool enableIdleGC_ {false};
    std::atomic_bool isCSetClearing_ {false};
    HeapMode mode_ { HeapMode::NORMAL };

    /*
     * The memory controller providing memory statistics (on allocations and collections),
     * which is used for GC heuristics.
     */
    MemController *memController_ {nullptr};
    size_t promotedSize_ {0};
    size_t semiSpaceCopiedSize_ {0};
    size_t nativeBindingSize_ {0};
    size_t globalSpaceNativeLimit_ {0};
    size_t nativeSizeTriggerGCThreshold_ {0};
    size_t incNativeSizeTriggerGC_ {0};
    size_t nativeSizeOvershoot_ {0};
    size_t asyncClearNativePointerThreshold_ {0};
    size_t nativeSizeAfterLastGC_ {0};
    size_t heapAliveSizeExcludesYoungAfterGC_ {0};
    size_t nativeBindingSizeAfterLastGC_ {0};
    size_t newAllocatedSharedObjectSize_ {0};
    // recordObjectSize_ & recordNativeSize_:
    // Record memory usage before taskpool tasks start; used to decide whether to trigger GC after a task finishes.
    size_t recordObjectSize_ {0};
    size_t recordNativeSize_ {0};
    // Record heap object size before entering sensitive status.
    size_t recordObjSizeBeforeSensitive_ {0};
    bool nearGCInSensitive_ {false};

    size_t pendingAsyncNativeCallbackSize_ {0};
    MemGrowingType memGrowingtype_ {MemGrowingType::HIGH_THROUGHPUT};

    // Number of parallel evacuator tasks.
    uint32_t maxEvacuateTaskCount_ {0};

    uint64_t startupDurationInMs_ {0};

    Mutex setNewSpaceOvershootSizeMutex_;

    // Application status

    IdleNotifyStatusCallback notifyIdleStatusCallback {nullptr};

    IdleTaskType idleTask_ {IdleTaskType::NO_TASK};
    float idlePredictDuration_ {0.0f};
    double idleTaskFinishTime_ {0.0};

    /*
     * The listeners which are called at the end of GC.
     */
    std::vector<std::pair<FinishGCListener, void *>> gcListeners_;

    IdleGCTrigger *idleGCTrigger_ {nullptr};

    bool hasOOMDump_ {false};

    NativePointerList nativePointerList_;
    NativePointerList concurrentNativePointerList_;
    HEAP_PRIVATE_HYBRID_EXTENSION();

    friend panda::test::HProfTestHelper;
    friend panda::test::GCTest_CallbackTask_Test;
    friend panda::test::HeapTestHelper;
};
}  // namespace panda::ecmascript

#endif  // ECMASCRIPT_MEM_HEAP_H