1 /*
2  * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef ECMASCRIPT_MEM_HEAP_H
17 #define ECMASCRIPT_MEM_HEAP_H
18 
19 #include "ecmascript/base/config.h"
20 #include "ecmascript/frames.h"
21 #include "ecmascript/js_object_resizing_strategy.h"
22 #include "ecmascript/mem/linear_space.h"
23 #include "ecmascript/mem/mark_stack.h"
24 #include "ecmascript/mem/shared_heap/shared_space.h"
25 #include "ecmascript/mem/sparse_space.h"
26 #include "ecmascript/mem/work_manager.h"
27 #include "ecmascript/taskpool/taskpool.h"
28 #include "ecmascript/mem/machine_code.h"
29 #include "ecmascript/mem/idle_gc_trigger.h"
30 
31 namespace panda::test {
32 class GCTest_CallbackTask_Test;
33 class HProfTestHelper;
34 }
35 
36 namespace panda::ecmascript {
37 class ConcurrentMarker;
38 class ConcurrentSweeper;
39 class EcmaVM;
40 class FullGC;
41 class GCStats;
42 class GCKeyStats;
43 class HeapRegionAllocator;
44 class HeapTracker;
45 #if !WIN_OR_MAC_OR_IOS_PLATFORM
46 class HeapProfilerInterface;
47 class HeapProfiler;
48 #endif
49 class IncrementalMarker;
50 class JSNativePointer;
51 class Marker;
52 class MemController;
53 class NativeAreaAllocator;
54 class ParallelEvacuator;
55 class PartialGC;
56 class RSetWorkListHandler;
57 class SharedConcurrentMarker;
58 class SharedConcurrentSweeper;
59 class SharedGC;
60 class SharedGCMarkerBase;
61 class SharedGCMarker;
62 class SharedFullGC;
63 class SharedGCMovableMarker;
64 class STWYoungGC;
65 class ThreadLocalAllocationBuffer;
66 class JSThread;
67 class DaemonThread;
68 class GlobalEnvConstants;
69 class IdleGCTrigger;
70 
71 using IdleNotifyStatusCallback = std::function<void(bool)>;
72 using FinishGCListener = void (*)(void *);
73 using GCListenerId = std::vector<std::pair<FinishGCListener, void *>>::const_iterator;
74 using Clock = std::chrono::high_resolution_clock;
75 using AppFreezeFilterCallback = std::function<bool(const int32_t pid)>;
76 
77 enum class IdleTaskType : uint8_t {
78     NO_TASK,
79     YOUNG_GC,
80     FINISH_MARKING,
81     INCREMENTAL_MARK
82 };
83 
84 enum class MarkType : uint8_t {
85     MARK_EDEN,
86     MARK_YOUNG,
87     MARK_FULL
88 };
89 
90 enum class MemGrowingType : uint8_t {
91     HIGH_THROUGHPUT,
92     CONSERVATIVE,
93     PRESSURE
94 };
95 
96 enum class HeapMode {
97     NORMAL,
98     SPAWN,
99     SHARE,
100 };
101 
102 enum AppSensitiveStatus : uint8_t {
103     NORMAL_SCENE,
104     ENTER_HIGH_SENSITIVE,
105     EXIT_HIGH_SENSITIVE,
106 };
107 
108 enum class StartupStatus : uint8_t {
109     BEFORE_STARTUP,
110     ON_STARTUP,
111     JUST_FINISH_STARTUP,
112     FINISH_STARTUP
113 };
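// Startup phases progress BEFORE_STARTUP -> ON_STARTUP (set by NotifyPostFork) -> JUST_FINISH_STARTUP
// (set by FinishStartupEvent) -> FINISH_STARTUP (set by CancelJustFinishStartupEvent); GC is restrained
// while the app is still in the startup phases.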
114 
115 enum class VerifyKind {
116     VERIFY_PRE_GC,
117     VERIFY_POST_GC,
118     VERIFY_MARK_EDEN,
119     VERIFY_EVACUATE_EDEN,
120     VERIFY_MARK_YOUNG,
121     VERIFY_EVACUATE_YOUNG,
122     VERIFY_MARK_FULL,
123     VERIFY_EVACUATE_OLD,
124     VERIFY_EVACUATE_FULL,
125     VERIFY_SHARED_RSET_POST_FULL_GC,
126     VERIFY_PRE_SHARED_GC,
127     VERIFY_POST_SHARED_GC,
128     VERIFY_SHARED_GC_MARK,
129     VERIFY_SHARED_GC_SWEEP,
130     VERIFY_END,
131 };
132 
133 class BaseHeap {
134 public:
135     BaseHeap(const EcmaParamConfiguration &config) : config_(config) {}
136     virtual ~BaseHeap() = default;
137     NO_COPY_SEMANTIC(BaseHeap);
138     NO_MOVE_SEMANTIC(BaseHeap);
139 
140     virtual void Destroy() = 0;
141 
142     virtual bool IsMarking() const = 0;
143 
144     virtual bool IsReadyToConcurrentMark() const = 0;
145 
146     virtual bool NeedStopCollection() = 0;
147 
148     virtual void SetSensitiveStatus(AppSensitiveStatus status) = 0;
149 
150     virtual AppSensitiveStatus GetSensitiveStatus() const = 0;
151 
152     virtual bool FinishStartupEvent() = 0;
153 
154     virtual bool OnStartupEvent() const = 0;
155 
156     virtual void NotifyPostFork() = 0;
157 
158     virtual void TryTriggerIdleCollection() = 0;
159 
160     virtual void TryTriggerIncrementalMarking() = 0;
161 
162     /*
163      * Wait for existing concurrent marking tasks to be finished (if any).
164      * Return true if there's ongoing concurrent marking.
165      */
166     virtual bool CheckOngoingConcurrentMarking() = 0;
167 
168     virtual bool OldSpaceExceedCapacity(size_t size) const = 0;
169 
170     virtual bool OldSpaceExceedLimit() const = 0;
171 
172     virtual inline size_t GetCommittedSize() const = 0;
173 
174     virtual inline size_t GetHeapObjectSize() const = 0;
175 
176     virtual inline size_t GetRegionCount() const = 0;
177 
178     virtual void ChangeGCParams(bool inBackground) = 0;
179 
180     virtual const GlobalEnvConstants *GetGlobalConst() const = 0;
181 
182     virtual GCStats *GetEcmaGCStats() = 0;
183 
184     virtual bool ObjectExceedMaxHeapSize() const = 0;
185 
186     MarkType GetMarkType() const
187     {
188         return markType_;
189     }
190 
191     void SetMarkType(MarkType markType)
192     {
193         markType_ = markType;
194     }
195 
196     bool IsEdenMark() const
197     {
198         return markType_ == MarkType::MARK_EDEN;
199     }
200 
201     bool IsYoungMark() const
202     {
203         return markType_ == MarkType::MARK_YOUNG;
204     }
205 
206     bool IsFullMark() const
207     {
208         return markType_ == MarkType::MARK_FULL;
209     }
210 
211     bool IsConcurrentFullMark() const
212     {
213         return markType_ == MarkType::MARK_FULL;
214     }
215 
216     TriggerGCType GetGCType() const
217     {
218         return gcType_;
219     }
220 
221     bool PUBLIC_API IsAlive(TaggedObject *object) const;
222 
223     bool ContainObject(TaggedObject *object) const;
224 
225     bool GetOldGCRequested()
226     {
227         return oldGCRequested_;
228     }
229 
230     EcmaParamConfiguration GetEcmaParamConfiguration() const
231     {
232         return config_;
233     }
234 
235     NativeAreaAllocator *GetNativeAreaAllocator() const
236     {
237         return nativeAreaAllocator_;
238     }
239 
240     HeapRegionAllocator *GetHeapRegionAllocator() const
241     {
242         return heapRegionAllocator_;
243     }
244 
245     void ShouldThrowOOMError(bool shouldThrow)
246     {
247         shouldThrowOOMError_ = shouldThrow;
248     }
249 
250     void SetCanThrowOOMError(bool canThrow)
251     {
252         canThrowOOMError_ = canThrow;
253     }
254 
255     bool CanThrowOOMError()
256     {
257         return canThrowOOMError_;
258     }
259 
260     bool IsInBackground() const
261     {
262         return inBackground_;
263     }
264 
265     // ONLY used for heap verification.
266     bool IsVerifying() const
267     {
268         return isVerifying_;
269     }
270 
271     // ONLY used for heap verification.
272     void SetVerifying(bool verifying)
273     {
274         isVerifying_ = verifying;
275     }
276 
277     void SetGCState(bool inGC)
278     {
279         inGC_ = inGC;
280     }
281 
282     bool InGC() const
283     {
284         return inGC_;
285     }
286 
287     void NotifyHeapAliveSizeAfterGC(size_t size)
288     {
289         heapAliveSizeAfterGC_ = size;
290     }
291 
292     size_t GetHeapAliveSizeAfterGC() const
293     {
294         return heapAliveSizeAfterGC_;
295     }
296 
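    // Records the live object size and the fragmentation (committed size minus live size) after a non-young GC;
    // a FULL_GC or SHARED_FULL_GC additionally records that fragmentation as the baseline heapBasicLoss_.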
297     void UpdateHeapStatsAfterGC(TriggerGCType gcType)
298     {
299         if (gcType == TriggerGCType::EDEN_GC || gcType == TriggerGCType::YOUNG_GC) {
300             return;
301         }
302         heapAliveSizeAfterGC_ = GetHeapObjectSize();
303         fragmentSizeAfterGC_ = GetCommittedSize() - GetHeapObjectSize();
304         if (gcType == TriggerGCType::FULL_GC || gcType == TriggerGCType::SHARED_FULL_GC) {
305             heapBasicLoss_ = fragmentSizeAfterGC_;
306         }
307     }
308 
309     size_t GetFragmentSizeAfterGC() const
310     {
311         return fragmentSizeAfterGC_;
312     }
313 
314     size_t GetHeapBasicLoss() const
315     {
316         return heapBasicLoss_;
317     }
318 
319     size_t GetGlobalSpaceAllocLimit() const
320     {
321         return globalSpaceAllocLimit_;
322     }
323 
324     // Whether the heap should be verified during GC.
325     bool ShouldVerifyHeap() const
326     {
327         return shouldVerifyHeap_;
328     }
329 
330     bool EnablePageTagThreadId() const
331     {
332         return enablePageTagThreadId_;
333     }
334 
335     void ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
336         bool NonMovableObjNearOOM = false);
337 
338     uint32_t GetMaxMarkTaskCount() const
339     {
340         return maxMarkTaskCount_;
341     }
342 
343     bool InSensitiveStatus() const
344     {
345         return GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE || OnStartupEvent();
346     }
347 
348     void OnAllocateEvent(EcmaVM *ecmaVm, TaggedObject* address, size_t size);
349     inline void SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
350                                             [[maybe_unused]] size_t size);
351     bool CheckCanDistributeTask();
352     void IncreaseTaskCount();
353     void ReduceTaskCount();
354     void WaitRunningTaskFinished();
355     void WaitClearTaskFinished();
356     void ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
357         bool NonMovableObjNearOOM = false);
358     void SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName);
359     void SetAppFreezeFilterCallback(AppFreezeFilterCallback cb);
360 
361 protected:
362     void FatalOutOfMemoryError(size_t size, std::string functionName);
363 
364     enum class HeapType {
365         LOCAL_HEAP,
366         SHARED_HEAP,
367         INVALID,
368     };
369 
370     class RecursionScope {
371     public:
372         explicit RecursionScope(BaseHeap* heap, HeapType heapType) : heap_(heap), heapType_(heapType)
373         {
374             if (heap_->recursionDepth_++ != 0) {
375                 LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
376                               << ") Constructor, depth: " << heap_->recursionDepth_;
377             }
378             heap_->SetGCState(true);
379         }
380         ~RecursionScope()
381         {
382             if (--heap_->recursionDepth_ != 0) {
383                 LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
384                               << ") Destructor, depth: " << heap_->recursionDepth_;
385             }
386             heap_->SetGCState(false);
387         }
388     private:
389         BaseHeap *heap_ {nullptr};
390         HeapType heapType_ {HeapType::INVALID};
391     };
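    // A minimal usage sketch (illustrative only): RecursionScope is meant to be stack-allocated at the top of a
    // garbage-collection entry point so that re-entering GC on the same heap aborts instead of recursing, e.g.
    //     RecursionScope recurScope(this, HeapType::LOCAL_HEAP);
    //     // ... perform the collection; the destructor clears the GC state ...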
392 
393     static constexpr double TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE = 0.75;
394 
395     const EcmaParamConfiguration config_;
396     MarkType markType_ {MarkType::MARK_YOUNG};
397     TriggerGCType gcType_ {TriggerGCType::YOUNG_GC};
398     Mutex gcCollectGarbageMutex_;
399     // Region allocators.
400     NativeAreaAllocator *nativeAreaAllocator_ {nullptr};
401     HeapRegionAllocator *heapRegionAllocator_ {nullptr};
402 
403     size_t heapAliveSizeAfterGC_ {0};
404     size_t globalSpaceAllocLimit_ {0};
405     size_t globalSpaceConcurrentMarkLimit_ {0};
406     size_t heapBasicLoss_ {1_MB};
407     size_t fragmentSizeAfterGC_ {0};
408     // parallel marker task count.
409     uint32_t runningTaskCount_ {0};
410     uint32_t maxMarkTaskCount_ {0};
411     Mutex waitTaskFinishedMutex_;
412     ConditionVariable waitTaskFinishedCV_;
413     Mutex waitClearTaskFinishedMutex_;
414     ConditionVariable waitClearTaskFinishedCV_;
415     AppFreezeFilterCallback appfreezeCallback_ {nullptr};
416     bool clearTaskFinished_ {true};
417     bool inBackground_ {false};
418     bool shouldThrowOOMError_ {false};
419     bool canThrowOOMError_ {true};
420     bool oldGCRequested_ {false};
421     // ONLY used for heap verification.
422     bool shouldVerifyHeap_ {false};
423     bool enablePageTagThreadId_ {false};
424     bool inGC_ {false};
425     bool isVerifying_ {false};
426     int32_t recursionDepth_ {0};
427 };
428 
429 class SharedHeap : public BaseHeap {
430 public:
431     SharedHeap(const EcmaParamConfiguration &config) : BaseHeap(config) {}
432     virtual ~SharedHeap() = default;
433 
434     static void CreateNewInstance();
435     static SharedHeap *GetInstance();
436     static void DestroyInstance();
437 
438     void Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
439         const JSRuntimeOptions &option, DaemonThread *dThread);
440 
441     void Destroy() override;
442 
443     void PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option);
444 
445     void EnableParallelGC(JSRuntimeOptions &option);
446 
447     void DisableParallelGC(JSThread *thread);
448 
449     void AdjustGlobalSpaceAllocLimit();
450 
451     class ParallelMarkTask : public Task {
452     public:
453         ParallelMarkTask(int32_t id, SharedHeap *heap, SharedParallelMarkPhase taskPhase)
454             : Task(id), sHeap_(heap), taskPhase_(taskPhase) {};
455         ~ParallelMarkTask() override = default;
456         bool Run(uint32_t threadIndex) override;
457 
458         NO_COPY_SEMANTIC(ParallelMarkTask);
459         NO_MOVE_SEMANTIC(ParallelMarkTask);
460 
461     private:
462         SharedHeap *sHeap_ {nullptr};
463         SharedParallelMarkPhase taskPhase_;
464     };
465 
466     class AsyncClearTask : public Task {
467     public:
468         AsyncClearTask(int32_t id, SharedHeap *heap, TriggerGCType type)
469             : Task(id), sHeap_(heap), gcType_(type) {}
470         ~AsyncClearTask() override = default;
471         bool Run(uint32_t threadIndex) override;
472 
473         NO_COPY_SEMANTIC(AsyncClearTask);
474         NO_MOVE_SEMANTIC(AsyncClearTask);
475     private:
476         SharedHeap *sHeap_;
477         TriggerGCType gcType_;
478     };
479     bool IsMarking() const override
480     {
481         LOG_FULL(ERROR) << "SharedHeap IsMarking() not support yet";
482         return false;
483     }
484 
485     bool IsReadyToConcurrentMark() const override;
486 
487     bool NeedStopCollection() override;
488 
489     void SetSensitiveStatus(AppSensitiveStatus status) override
490     {
491         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
492         smartGCStats_.sensitiveStatus_ = status;
493         if (!InSensitiveStatus()) {
494             smartGCStats_.sensitiveStatusCV_.Signal();
495         }
496     }
497 
498     // This should be called while holding the lock of sensitiveStatusMutex_.
499     AppSensitiveStatus GetSensitiveStatus() const override
500     {
501         return smartGCStats_.sensitiveStatus_;
502     }
503 
504     StartupStatus GetStartupStatus() const
505     {
506         return smartGCStats_.startupStatus_;
507     }
508 
509     bool IsJustFinishStartup() const
510     {
511         return smartGCStats_.startupStatus_ == StartupStatus::JUST_FINISH_STARTUP;
512     }
513 
514     bool CancelJustFinishStartupEvent()
515     {
516         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
517         if (!IsJustFinishStartup()) {
518             return false;
519         }
520         smartGCStats_.startupStatus_ = StartupStatus::FINISH_STARTUP;
521         return true;
522     }
523 
524     bool FinishStartupEvent() override
525     {
526         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
527         if (!OnStartupEvent()) {
528             return false;
529         }
530         smartGCStats_.startupStatus_ = StartupStatus::JUST_FINISH_STARTUP;
531         if (!InSensitiveStatus()) {
532             smartGCStats_.sensitiveStatusCV_.Signal();
533         }
534         return true;
535     }
536 
537     // This should be called while holding the lock of sensitiveStatusMutex_.
538     bool OnStartupEvent() const override
539     {
540         return smartGCStats_.startupStatus_ == StartupStatus::ON_STARTUP;
541     }
542 
543     void NotifyPostFork() override
544     {
545         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
546         smartGCStats_.startupStatus_ = StartupStatus::ON_STARTUP;
547     }
548 
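    // Blocks the caller (typically the daemon thread) while the app is in a sensitive or startup phase,
    // unless forceGC_ has been set via SetForceGC(true).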
549     void WaitSensitiveStatusFinished()
550     {
551         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
552         while (InSensitiveStatus() && !smartGCStats_.forceGC_) {
553             smartGCStats_.sensitiveStatusCV_.Wait(&smartGCStats_.sensitiveStatusMutex_);
554         }
555     }
556 
557     bool ObjectExceedMaxHeapSize() const override;
558 
559     bool ObjectExceedJustFinishStartupThresholdForGC() const;
560 
561     bool ObjectExceedJustFinishStartupThresholdForCM() const;
562 
563     bool CheckIfNeedStopCollectionByStartup();
564 
565     bool CheckAndTriggerSharedGC(JSThread *thread);
566 
567     bool CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size);
568 
569     void TryTriggerLocalConcurrentMarking();
570 
571     // Called when all VMs are destroyed; tries to destroy the daemon thread.
572     void WaitAllTasksFinishedAfterAllJSThreadEliminated();
573 
574     void WaitAllTasksFinished(JSThread *thread);
575 
576     void StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason);         // In daemon thread
577 
578     // Use JSThread instead of DaemonThread to check if IsReadyToSharedConcurrentMark, to avoid an atomic load.
579     bool CheckCanTriggerConcurrentMarking(JSThread *thread);
580 
581     void TryTriggerIdleCollection() override
582     {
583         LOG_FULL(ERROR) << "SharedHeap TryTriggerIdleCollection() not support yet";
584         return;
585     }
586 
587     void TryTriggerIncrementalMarking() override
588     {
589         LOG_FULL(ERROR) << "SharedHeap TryTriggerIncrementalMarking() not support yet";
590         return;
591     }
592 
593     void UpdateWorkManager(SharedGCWorkManager *sWorkManager);
594 
595     bool CheckOngoingConcurrentMarking() override;
596 
597     bool OldSpaceExceedCapacity(size_t size) const override
598     {
599         size_t totalSize = sOldSpace_->GetCommittedSize() + size;
600         return totalSize >= sOldSpace_->GetMaximumCapacity() + sOldSpace_->GetOutOfMemoryOvershootSize();
601     }
602 
603     bool OldSpaceExceedLimit() const override
604     {
605         return sOldSpace_->GetHeapObjectSize() >= sOldSpace_->GetInitialCapacity();
606     }
607 
608     SharedConcurrentMarker *GetConcurrentMarker() const
609     {
610         return sConcurrentMarker_;
611     }
612 
613     SharedConcurrentSweeper *GetSweeper() const
614     {
615         return sSweeper_;
616     }
617 
618     bool IsParallelGCEnabled() const
619     {
620         return parallelGC_;
621     }
622 
623     SharedOldSpace *GetOldSpace() const
624     {
625         return sOldSpace_;
626     }
627 
628     SharedOldSpace *GetCompressSpace() const
629     {
630         return sCompressSpace_;
631     }
632 
633     SharedNonMovableSpace *GetNonMovableSpace() const
634     {
635         return sNonMovableSpace_;
636     }
637 
638     SharedHugeObjectSpace *GetHugeObjectSpace() const
639     {
640         return sHugeObjectSpace_;
641     }
642 
643     SharedReadOnlySpace *GetReadOnlySpace() const
644     {
645         return sReadOnlySpace_;
646     }
647 
648     SharedAppSpawnSpace *GetAppSpawnSpace() const
649     {
650         return sAppSpawnSpace_;
651     }
652 
653     void SetForceGC(bool forceGC)
654     {
655         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
656         smartGCStats_.forceGC_ = forceGC;
657         if (smartGCStats_.forceGC_) {
658             smartGCStats_.sensitiveStatusCV_.Signal();
659         }
660     }
661 
662     inline void TryTriggerConcurrentMarking(JSThread *thread);
663 
664     template<TriggerGCType gcType, GCReason gcReason>
665     void TriggerConcurrentMarking(JSThread *thread);
666 
667     template<TriggerGCType gcType, GCReason gcReason>
668     void CollectGarbage(JSThread *thread);
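    // Illustrative sketch of triggering a shared collection through the template entry point above
    // (the specific template arguments here are only an example):
    //     sHeap->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::ALLOCATION_FAILED>(thread);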
669 
670     template<TriggerGCType gcType, GCReason gcReason>
671     void PostGCTaskForTest(JSThread *thread);
672 
673     void CollectGarbageNearOOM(JSThread *thread);
674     // Only means the main body of SharedGC is finished, i.e. if parallel_gc is enabled, this flag may be set
675     // to true while sweep_task and clear_task are still running asynchronously.
676     void NotifyGCCompleted();            // In daemon thread
677 
678     // Called when all VMs are destroyed; tries to destroy the daemon thread.
679     void WaitGCFinishedAfterAllJSThreadEliminated();
680 
681     void WaitGCFinished(JSThread *thread);
682 
683     void DaemonCollectGarbage(TriggerGCType gcType, GCReason reason);
684 
685     void SetMaxMarkTaskCount(uint32_t maxTaskCount)
686     {
687         maxMarkTaskCount_ = maxTaskCount;
688     }
689 
690     inline size_t GetCommittedSize() const override
691     {
692         size_t result = sOldSpace_->GetCommittedSize() +
693             sHugeObjectSpace_->GetCommittedSize() +
694             sNonMovableSpace_->GetCommittedSize() +
695             sReadOnlySpace_->GetCommittedSize();
696         return result;
697     }
698 
699     inline size_t GetHeapObjectSize() const override
700     {
701         size_t result = sOldSpace_->GetHeapObjectSize() +
702             sHugeObjectSpace_->GetHeapObjectSize() +
703             sNonMovableSpace_->GetHeapObjectSize() +
704             sReadOnlySpace_->GetCommittedSize();
705         return result;
706     }
707 
708     inline size_t GetRegionCount() const override
709     {
710         size_t result = sOldSpace_->GetRegionCount() +
711             sHugeObjectSpace_->GetRegionCount() +
712             sNonMovableSpace_->GetRegionCount() +
713             sReadOnlySpace_->GetRegionCount();
714         return result;
715     }
716 
717     void ResetNativeSizeAfterLastGC()
718     {
719         nativeSizeAfterLastGC_.store(0, std::memory_order_relaxed);
720     }
721 
722     void IncNativeSizeAfterLastGC(size_t size)
723     {
724         nativeSizeAfterLastGC_.fetch_add(size, std::memory_order_relaxed);
725     }
726 
727     size_t GetNativeSizeAfterLastGC() const
728     {
729         return nativeSizeAfterLastGC_.load(std::memory_order_relaxed);
730     }
731 
732     size_t GetNativeSizeTriggerSharedGC() const
733     {
734         return incNativeSizeTriggerSharedGC_;
735     }
736 
737     size_t GetNativeSizeTriggerSharedCM() const
738     {
739         return incNativeSizeTriggerSharedCM_;
740     }
741 
742     void ChangeGCParams([[maybe_unused]]bool inBackground) override
743     {
744         LOG_FULL(ERROR) << "SharedHeap ChangeGCParams() not support yet";
745         return;
746     }
747 
748     GCStats *GetEcmaGCStats() override
749     {
750         return sGCStats_;
751     }
752 
753     inline void SetGlobalEnvConstants(const GlobalEnvConstants *globalEnvConstants)
754     {
755         globalEnvConstants_ = globalEnvConstants;
756     }
757 
758     inline const GlobalEnvConstants *GetGlobalConst() const override
759     {
760         return globalEnvConstants_;
761     }
762 
763     SharedSparseSpace *GetSpaceWithType(MemSpaceType type) const
764     {
765         switch (type) {
766             case MemSpaceType::SHARED_OLD_SPACE:
767                 return sOldSpace_;
768             case MemSpaceType::SHARED_NON_MOVABLE:
769                 return sNonMovableSpace_;
770             default:
771                 LOG_ECMA(FATAL) << "this branch is unreachable";
772                 UNREACHABLE();
773                 break;
774         }
775     }
776 
777     void Prepare(bool inTriggerGCThread);
778     void Reclaim(TriggerGCType gcType);
779     void PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase);
780     void CompactHeapBeforeFork(JSThread *thread);
781     void ReclaimForAppSpawn();
782 
783     SharedGCWorkManager *GetWorkManager() const
784     {
785         return sWorkManager_;
786     }
787 
788     SharedGCMarker *GetSharedGCMarker() const
789     {
790         return sharedGCMarker_;
791     }
792 
793     SharedGCMovableMarker *GetSharedGCMovableMarker() const
794     {
795         return sharedGCMovableMarker_;
796     }
797     inline void SwapOldSpace();
798 
799     void PrepareRecordRegionsForReclaim();
800 
801     template<class Callback>
802     void EnumerateOldSpaceRegions(const Callback &cb) const;
803 
804     template<class Callback>
805     void EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const;
806 
807     template<class Callback>
808     void IterateOverObjects(const Callback &cb) const;
809 
810     inline TaggedObject *AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size);
811 
812     inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass);
813 
814     inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
815 
816     inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, size_t size);
817 
818     inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass);
819 
820     inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
821 
822     inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, size_t size);
823 
824     inline TaggedObject *AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
825 
826     inline TaggedObject *AllocateHugeObject(JSThread *thread, size_t size);
827 
828     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass);
829 
830     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
831 
832     inline TaggedObject *AllocateSNonMovableTlab(JSThread *thread, size_t size);
833 
834     inline TaggedObject *AllocateSOldTlab(JSThread *thread, size_t size);
835 
836     size_t VerifyHeapObjects(VerifyKind verifyKind) const;
837 
838     inline void MergeToOldSpaceSync(SharedLocalSpace *localSpace);
839 
840     void DumpHeapSnapshotBeforeOOM(bool isFullGC, JSThread *thread);
841 
842     class SharedGCScope {
843     public:
844         SharedGCScope();
845         ~SharedGCScope();
846     };
847 
848 private:
849     void ProcessAllGCListeners();
850     inline void CollectGarbageFinish(bool inDaemon, TriggerGCType gcType);
851 
852     void MoveOldSpaceToAppspawn();
853 
854     void ReclaimRegions(TriggerGCType type);
855 
856     void ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread);
857     inline TaggedObject *AllocateInSOldSpace(JSThread *thread, size_t size);
858     struct SharedHeapSmartGCStats {
859         /**
860          * For SmartGC.
861          * The daemon thread checks these statuses before trying to collect garbage, and waits until they permit it.
862          * The check-and-wait sequence needs to be atomic, so a Mutex/CV is used.
863          */
864         Mutex sensitiveStatusMutex_;
865         ConditionVariable sensitiveStatusCV_;
866         AppSensitiveStatus sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
867         StartupStatus startupStatus_ {StartupStatus::BEFORE_STARTUP};
868         // If the SharedHeap is almost OOM and an allocation has failed, a GC with GCReason::ALLOCATION_FAILED
869         // must be done at once, even in sensitive status.
870         bool forceGC_ {false};
871     };
872 
873     SharedHeapSmartGCStats smartGCStats_;
874 
875     static SharedHeap *instance_;
876 
877     GCStats *sGCStats_ {nullptr};
878 
879     bool localFullMarkTriggered_ {false};
880 
881     bool optionalLogEnabled_ {false};
882 
883     bool parallelGC_ {true};
884 
885     // Only means the main body of SharedGC is finished, i.e. if parallel_gc is enabled, this flag may be set
886     // to true while sweep_task and clear_task are still running asynchronously.
887     bool gcFinished_ {true};
888     Mutex waitGCFinishedMutex_;
889     ConditionVariable waitGCFinishedCV_;
890 
891     DaemonThread *dThread_ {nullptr};
892     const GlobalEnvConstants *globalEnvConstants_ {nullptr};
893     SharedOldSpace *sOldSpace_ {nullptr};
894     SharedOldSpace *sCompressSpace_ {nullptr};
895     SharedNonMovableSpace *sNonMovableSpace_ {nullptr};
896     SharedReadOnlySpace *sReadOnlySpace_ {nullptr};
897     SharedHugeObjectSpace *sHugeObjectSpace_ {nullptr};
898     SharedAppSpawnSpace *sAppSpawnSpace_ {nullptr};
899     SharedGCWorkManager *sWorkManager_ {nullptr};
900     SharedConcurrentMarker *sConcurrentMarker_ {nullptr};
901     SharedConcurrentSweeper *sSweeper_ {nullptr};
902     SharedGC *sharedGC_ {nullptr};
903     SharedFullGC *sharedFullGC_ {nullptr};
904     SharedGCMarker *sharedGCMarker_ {nullptr};
905     SharedGCMovableMarker *sharedGCMovableMarker_ {nullptr};
906     size_t growingFactor_ {0};
907     size_t growingStep_ {0};
908     size_t incNativeSizeTriggerSharedCM_ {0};
909     size_t incNativeSizeTriggerSharedGC_ {0};
910     size_t fragmentationLimitForSharedFullGC_ {0};
911     std::atomic<size_t> nativeSizeAfterLastGC_ {0};
912 };
913 
914 class Heap : public BaseHeap {
915 public:
916     explicit Heap(EcmaVM *ecmaVm);
917     virtual ~Heap() = default;
918     NO_COPY_SEMANTIC(Heap);
919     NO_MOVE_SEMANTIC(Heap);
920     void Initialize();
921     void Destroy() override;
922     void Prepare();
923     void GetHeapPrepare();
924     void Resume(TriggerGCType gcType);
925     void ResumeForAppSpawn();
926     void CompactHeapBeforeFork();
927     void DisableParallelGC();
928     void EnableParallelGC();
929 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
930     void SetJsDumpThresholds(size_t thresholds) const;
931 #endif
932 
933     EdenSpace *GetEdenSpace() const
934     {
935         return edenSpace_;
936     }
937 
938     // fixme: Rename NewSpace to YoungSpace.
939     // This is the active young generation space that the new objects are allocated in
940     // or copied into (from the other semi space) during semi space GC.
941     SemiSpace *GetNewSpace() const
942     {
943         return activeSemiSpace_;
944     }
945 
946     /*
947      * Return the original active space where the objects are to be evacuated during semi space GC.
948      * This should be invoked only in the evacuation phase of semi space GC.
949      * fixme: Get rid of this interface or make it safe considering the above implicit limitation / requirement.
950      */
951     SemiSpace *GetFromSpaceDuringEvacuation() const
952     {
953         return inactiveSemiSpace_;
954     }
955 
956     OldSpace *GetOldSpace() const
957     {
958         return oldSpace_;
959     }
960 
961     OldSpace *GetCompressSpace() const
962     {
963         return compressSpace_;
964     }
965 
966     NonMovableSpace *GetNonMovableSpace() const
967     {
968         return nonMovableSpace_;
969     }
970 
971     HugeObjectSpace *GetHugeObjectSpace() const
972     {
973         return hugeObjectSpace_;
974     }
975 
976     MachineCodeSpace *GetMachineCodeSpace() const
977     {
978         return machineCodeSpace_;
979     }
980 
981     HugeMachineCodeSpace *GetHugeMachineCodeSpace() const
982     {
983         return hugeMachineCodeSpace_;
984     }
985 
986     SnapshotSpace *GetSnapshotSpace() const
987     {
988         return snapshotSpace_;
989     }
990 
991     ReadOnlySpace *GetReadOnlySpace() const
992     {
993         return readOnlySpace_;
994     }
995 
996     AppSpawnSpace *GetAppSpawnSpace() const
997     {
998         return appSpawnSpace_;
999     }
1000 
1001     SparseSpace *GetSpaceWithType(MemSpaceType type) const
1002     {
1003         switch (type) {
1004             case MemSpaceType::OLD_SPACE:
1005                 return oldSpace_;
1006             case MemSpaceType::NON_MOVABLE:
1007                 return nonMovableSpace_;
1008             case MemSpaceType::MACHINE_CODE_SPACE:
1009                 return machineCodeSpace_;
1010             default:
1011                 LOG_ECMA(FATAL) << "this branch is unreachable";
1012                 UNREACHABLE();
1013                 break;
1014         }
1015     }
1016 
1017     STWYoungGC *GetSTWYoungGC() const
1018     {
1019         return stwYoungGC_;
1020     }
1021 
1022     PartialGC *GetPartialGC() const
1023     {
1024         return partialGC_;
1025     }
1026 
1027     FullGC *GetFullGC() const
1028     {
1029         return fullGC_;
1030     }
1031 
1032     ConcurrentSweeper *GetSweeper() const
1033     {
1034         return sweeper_;
1035     }
1036 
1037     ParallelEvacuator *GetEvacuator() const
1038     {
1039         return evacuator_;
1040     }
1041 
1042     ConcurrentMarker *GetConcurrentMarker() const
1043     {
1044         return concurrentMarker_;
1045     }
1046 
1047     IncrementalMarker *GetIncrementalMarker() const
1048     {
1049         return incrementalMarker_;
1050     }
1051 
1052     Marker *GetNonMovableMarker() const
1053     {
1054         return nonMovableMarker_;
1055     }
1056 
1057     Marker *GetSemiGCMarker() const
1058     {
1059         return semiGCMarker_;
1060     }
1061 
1062     Marker *GetCompressGCMarker() const
1063     {
1064         return compressGCMarker_;
1065     }
1066 
1067     EcmaVM *GetEcmaVM() const
1068     {
1069         return ecmaVm_;
1070     }
1071 
1072     JSThread *GetJSThread() const
1073     {
1074         return thread_;
1075     }
1076 
1077     WorkManager *GetWorkManager() const
1078     {
1079         return workManager_;
1080     }
1081 
1082     WorkNode *&GetMarkingObjectLocalBuffer()
1083     {
1084         return sharedGCData_.sharedConcurrentMarkingLocalBuffer_;
1085     }
1086 
1087     IdleGCTrigger *GetIdleGCTrigger() const
1088     {
1089         return idleGCTrigger_;
1090     }
1091 
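    // The assert below enforces set/clear pairing: a handler may only be installed when none is present,
    // and may only be cleared (set to nullptr) when one is installed.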
1092     void SetRSetWorkListHandler(RSetWorkListHandler *handler)
1093     {
1094         ASSERT((sharedGCData_.rSetWorkListHandler_ == nullptr) != (handler == nullptr));
1095         sharedGCData_.rSetWorkListHandler_ = handler;
1096     }
1097 
1098     void ProcessSharedGCMarkingLocalBuffer();
1099 
1100     void ProcessSharedGCRSetWorkList();
1101 
1102     const GlobalEnvConstants *GetGlobalConst() const override;
1103 
1104     MemController *GetMemController() const
1105     {
1106         return memController_;
1107     }
1108 
1109     inline void RecordOrResetObjectSize(size_t objectSize)
1110     {
1111         recordObjectSize_ = objectSize;
1112     }
1113 
1114     inline size_t GetRecordObjectSize() const
1115     {
1116         return recordObjectSize_;
1117     }
1118 
1119     inline void RecordOrResetNativeSize(size_t nativeSize)
1120     {
1121         recordNativeSize_ = nativeSize;
1122     }
1123 
1124     inline size_t GetRecordNativeSize() const
1125     {
1126         return recordNativeSize_;
1127     }
1128 
1129     /*
1130      * For object allocations.
1131      */
1132 
1133     // Young
1134     inline TaggedObject *AllocateInGeneralNewSpace(size_t size);
1135     inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass);
1136     inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass, size_t size);
1137     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass);
1138     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size);
1139     inline TaggedObject *AllocateYoungOrHugeObject(size_t size);
1140     inline uintptr_t AllocateYoungSync(size_t size);
1141     inline TaggedObject *TryAllocateYoungGeneration(JSHClass *hclass, size_t size);
1142     // Old
1143     inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass);
1144     inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass, size_t size);
1145     // Non-movable
1146     inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass);
1147     inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size);
1148     inline TaggedObject *AllocateClassClass(JSHClass *hclass, size_t size);
1149     // Huge
1150     inline TaggedObject *AllocateHugeObject(JSHClass *hclass, size_t size);
1151     // Machine code
1152     inline TaggedObject *AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc = nullptr);
1153     inline TaggedObject *AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc = nullptr);
1154     // Snapshot
1155     inline uintptr_t AllocateSnapshotSpace(size_t size);
1156 
1157     // shared non movable space tlab
1158     inline TaggedObject *AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size);
1159     // shared old space tlab
1160     inline TaggedObject *AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size);
1161 
1162     void ResetTlab();
1163     void FillBumpPointerForTlab();
1164     /*
1165      * GC triggers.
1166      */
1167     void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER);
1168     bool CheckAndTriggerOldGC(size_t size = 0);
1169     bool CheckAndTriggerHintGC();
1170     TriggerGCType SelectGCType() const;
1171     /*
1172      * Parallel GC related configurations and utilities.
1173      */
1174 
1175     void PostParallelGCTask(ParallelGCTaskPhase taskPhase);
1176 
1177     bool IsParallelGCEnabled() const
1178     {
1179         return parallelGC_;
1180     }
1181     void ChangeGCParams(bool inBackground) override;
1182 
1183     GCStats *GetEcmaGCStats() override;
1184 
1185     GCKeyStats *GetEcmaGCKeyStats();
1186 
1187     JSObjectResizingStrategy *GetJSObjectResizingStrategy();
1188 
1189     void TriggerIdleCollection(int idleMicroSec);
1190     void NotifyMemoryPressure(bool inHighMemoryPressure);
1191 
1192     void TryTriggerConcurrentMarking();
1193     void AdjustBySurvivalRate(size_t originalNewSpaceSize);
1194     void TriggerConcurrentMarking();
1195     bool CheckCanTriggerConcurrentMarking();
1196 
1197     void TryTriggerIdleCollection() override;
1198     void TryTriggerIncrementalMarking() override;
1199     void CalculateIdleDuration();
1200     void UpdateWorkManager(WorkManager *workManager);
1201     bool CheckOngoingConcurrentMarking() override;
1202 
1203     inline void SwapNewSpace();
1204     inline void SwapOldSpace();
1205 
1206     inline bool MoveYoungRegionSync(Region *region);
1207     inline void MergeToOldSpaceSync(LocalSpace *localSpace);
1208 
1209     template<class Callback>
1210     void EnumerateOldSpaceRegions(const Callback &cb, Region *region = nullptr) const;
1211 
1212     template<class Callback>
1213     void EnumerateNonNewSpaceRegions(const Callback &cb) const;
1214 
1215     template<class Callback>
1216     void EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const;
1217 
1218     template<class Callback>
1219     void EnumerateEdenSpaceRegions(const Callback &cb) const;
1220 
1221     template<class Callback>
1222     void EnumerateNewSpaceRegions(const Callback &cb) const;
1223 
1224     template<class Callback>
1225     void EnumerateSnapshotSpaceRegions(const Callback &cb) const;
1226 
1227     template<class Callback>
1228     void EnumerateNonMovableRegions(const Callback &cb) const;
1229 
1230     template<class Callback>
1231     inline void EnumerateRegions(const Callback &cb) const;
1232 
1233     inline void ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd);
1234 
1235     void WaitAllTasksFinished();
1236     void WaitConcurrentMarkingFinished();
1237 
1238     MemGrowingType GetMemGrowingType() const
1239     {
1240         return memGrowingtype_;
1241     }
1242 
1243     void SetMemGrowingType(MemGrowingType memGrowingType)
1244     {
1245         memGrowingtype_ = memGrowingType;
1246     }
1247 
1248     size_t CalculateLinearSpaceOverShoot()
1249     {
1250         return oldSpace_->GetMaximumCapacity() - oldSpace_->GetInitialCapacity();
1251     }
1252 
1253     inline size_t GetCommittedSize() const override;
1254 
1255     inline size_t GetHeapObjectSize() const override;
1256 
1257     inline void NotifyRecordMemorySize();
1258 
1259     inline size_t GetRegionCount() const override;
1260 
1261     size_t GetRegionCachedSize() const
1262     {
1263         return activeSemiSpace_->GetInitialCapacity();
1264     }
1265 
1266     size_t GetLiveObjectSize() const;
1267 
1268     inline uint32_t GetHeapObjectCount() const;
1269 
1270     size_t GetPromotedSize() const
1271     {
1272         return promotedSize_;
1273     }
1274     size_t GetEdenToYoungSize() const
1275     {
1276         return edenToYoungSize_;
1277     }
1278 
1279     size_t GetArrayBufferSize() const;
1280 
1281     size_t GetHeapLimitSize() const;
1282 
1283     uint32_t GetMaxEvacuateTaskCount() const
1284     {
1285         return maxEvacuateTaskCount_;
1286     }
1287 
1288     /*
1289      * Receive callback function to control idletime.
1290      */
1291     inline void InitializeIdleStatusControl(std::function<void(bool)> callback);
1292 
1293     void DisableNotifyIdle()
1294     {
1295         if (notifyIdleStatusCallback != nullptr) {
1296             notifyIdleStatusCallback(true);
1297         }
1298     }
1299 
1300     void EnableNotifyIdle()
1301     {
1302         if (enableIdleGC_ && notifyIdleStatusCallback != nullptr) {
1303             notifyIdleStatusCallback(false);
1304         }
1305     }
1306 
1307     void SetIdleTask(IdleTaskType task)
1308     {
1309         idleTask_ = task;
1310     }
1311 
1312     void ClearIdleTask();
1313 
1314     bool IsEmptyIdleTask()
1315     {
1316         return idleTask_ == IdleTaskType::NO_TASK;
1317     }
1318 
1319     void SetOnSerializeEvent(bool isSerialize)
1320     {
1321         onSerializeEvent_ = isSerialize;
1322         if (!onSerializeEvent_ && !InSensitiveStatus()) {
1323             TryTriggerIncrementalMarking();
1324             TryTriggerIdleCollection();
1325             TryTriggerConcurrentMarking();
1326         }
1327     }
1328 
1329     bool GetOnSerializeEvent() const
1330     {
1331         return onSerializeEvent_;
1332     }
1333 
1334     void NotifyFinishColdStart(bool isMainThread = true);
1335 
1336     void NotifyFinishColdStartSoon();
1337 
1338     void NotifyHighSensitive(bool isStart);
1339 
1340     bool HandleExitHighSensitiveEvent();
1341 
1342     bool ObjectExceedMaxHeapSize() const override;
1343 
1344     bool ObjectExceedJustFinishStartupThresholdForGC() const;
1345 
1346     bool ObjectExceedJustFinishStartupThresholdForCM() const;
1347 
1348     void TryIncreaseNewSpaceOvershootByConfigSize();
1349 
1350     bool CheckIfNeedStopCollectionByStartup();
1351 
1352     bool NeedStopCollection() override;
1353 
1354     void SetSensitiveStatus(AppSensitiveStatus status) override
1355     {
1356         sHeap_->SetSensitiveStatus(status);
1357         smartGCStats_.sensitiveStatus_.store(status, std::memory_order_release);
1358     }
1359 
1360     AppSensitiveStatus GetSensitiveStatus() const override
1361     {
1362         return smartGCStats_.sensitiveStatus_.load(std::memory_order_acquire);
1363     }
1364 
1365     void SetRecordHeapObjectSizeBeforeSensitive(size_t objSize)
1366     {
1367         recordObjSizeBeforeSensitive_ = objSize;
1368     }
1369 
1370     size_t GetRecordHeapObjectSizeBeforeSensitive() const
1371     {
1372         return recordObjSizeBeforeSensitive_;
1373     }
1374 
1375     bool CASSensitiveStatus(AppSensitiveStatus expect, AppSensitiveStatus status)
1376     {
1377         return smartGCStats_.sensitiveStatus_.compare_exchange_strong(expect, status, std::memory_order_seq_cst);
1378     }
1379 
1380     StartupStatus GetStartupStatus() const
1381     {
1382         ASSERT(smartGCStats_.startupStatus_.load(std::memory_order_relaxed) == sHeap_->GetStartupStatus());
1383         return smartGCStats_.startupStatus_.load(std::memory_order_relaxed);
1384     }
1385 
1386     bool IsJustFinishStartup() const
1387     {
1388         return GetStartupStatus() == StartupStatus::JUST_FINISH_STARTUP;
1389     }
1390 
1391     bool CancelJustFinishStartupEvent()
1392     {
1393         if (!IsJustFinishStartup()) {
1394             return false;
1395         }
1396         TryIncreaseNewSpaceOvershootByConfigSize();
1397         smartGCStats_.startupStatus_.store(StartupStatus::FINISH_STARTUP, std::memory_order_release);
1398         sHeap_->CancelJustFinishStartupEvent();
1399         return true;
1400     }
1401 
1402     bool FinishStartupEvent() override
1403     {
1404         if (!OnStartupEvent()) {
1405             return false;
1406         }
1407         TryIncreaseNewSpaceOvershootByConfigSize();
1408         smartGCStats_.startupStatus_.store(StartupStatus::JUST_FINISH_STARTUP, std::memory_order_release);
1409         sHeap_->FinishStartupEvent();
1410         return true;
1411     }
1412 
1413     bool OnStartupEvent() const override
1414     {
1415         return GetStartupStatus() == StartupStatus::ON_STARTUP;
1416     }
1417 
1418     void NotifyPostFork() override
1419     {
1420         sHeap_->NotifyPostFork();
1421         smartGCStats_.startupStatus_.store(StartupStatus::ON_STARTUP, std::memory_order_relaxed);
1422         LOG_GC(INFO) << "SmartGC: enter app cold start";
1423         size_t localFirst = config_.GetMaxHeapSize();
1424         size_t localSecond = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO;
1425         auto sharedHeapConfig = sHeap_->GetEcmaParamConfiguration();
1426         size_t sharedFirst = sHeap_->GetOldSpace()->GetInitialCapacity();
1427         size_t sharedSecond = sharedHeapConfig.GetMaxHeapSize()
1428                             * JUST_FINISH_STARTUP_SHARED_THRESHOLD_RATIO
1429                             * JUST_FINISH_STARTUP_SHARED_CONCURRENT_MARK_RATIO;
1430         LOG_GC(INFO) << "SmartGC: startup GC restrain, "
1431             << "phase 1 threshold: local " << localFirst / 1_MB << "MB, shared " << sharedFirst / 1_MB << "MB; "
1432             << "phase 2 threshold: local " << localSecond / 1_MB << "MB, shared " << sharedSecond / 1_MB << "MB";
1433     }
1434 
1435 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1436     void StartHeapTracking()
1437     {
1438         WaitAllTasksFinished();
1439     }
1440 
1441     void StopHeapTracking()
1442     {
1443         WaitAllTasksFinished();
1444     }
1445 #endif
1446     inline bool InHeapProfiler();
1447 
1448     void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);
1449 
1450     // add allocationInspector to each space
1451     void AddAllocationInspectorToAllSpaces(AllocationInspector *inspector);
1452 
1453     // clear allocationInspector from each space
1454     void ClearAllocationInspectorFromAllSpaces();
1455 
1456     /*
1457      * Functions used by heap verification.
1458      */
1459 
1460     template<class Callback>
1461     void IterateOverObjects(const Callback &cb, bool isSimplify = false) const;
1462 
1463     size_t VerifyHeapObjects(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
1464     size_t VerifyOldToNewRSet(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
1465     void StatisticHeapObject(TriggerGCType gcType) const;
1466     void StatisticHeapDetail();
1467     void PrintHeapInfo(TriggerGCType gcType) const;
1468 
1469     bool OldSpaceExceedCapacity(size_t size) const override
1470     {
1471         size_t totalSize = oldSpace_->GetCommittedSize() + hugeObjectSpace_->GetCommittedSize() + size;
1472         return totalSize >= oldSpace_->GetMaximumCapacity() + oldSpace_->GetOvershootSize() +
1473                oldSpace_->GetOutOfMemoryOvershootSize();
1474     }
1475 
1476     bool OldSpaceExceedLimit() const override
1477     {
1478         size_t totalSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
1479         return totalSize >= oldSpace_->GetInitialCapacity() + oldSpace_->GetOvershootSize();
1480     }
1481 
1482     void AdjustSpaceSizeForAppSpawn();
1483 
1484     static bool ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object);
1485 
1486     bool IsFullMarkRequested() const
1487     {
1488         return fullMarkRequested_;
1489     }
1490 
1491     void SetFullMarkRequestedState(bool fullMarkRequested)
1492     {
1493         fullMarkRequested_ = fullMarkRequested;
1494     }
1495 
1496     void SetHeapMode(HeapMode mode)
1497     {
1498         mode_ = mode;
1499     }
1500 
1501     void IncreaseNativeBindingSize(size_t size);
1502     void IncreaseNativeBindingSize(JSNativePointer *object);
1503     void DecreaseNativeBindingSize(size_t size);
1504     void ResetNativeBindingSize()
1505     {
1506         nativeBindingSize_ = 0;
1507     }
1508 
1509     size_t GetNativeBindingSize() const
1510     {
1511         return nativeBindingSize_;
1512     }
1513 
1514     size_t GetGlobalNativeSize() const
1515     {
1516         return GetNativeBindingSize() + nativeAreaAllocator_->GetNativeMemoryUsage();
1517     }
1518 
1519     void ResetNativeSizeAfterLastGC()
1520     {
1521         nativeSizeAfterLastGC_ = 0;
1522         nativeBindingSizeAfterLastGC_= nativeBindingSize_;
1523     }
1524 
1525     void IncNativeSizeAfterLastGC(size_t size)
1526     {
1527         nativeSizeAfterLastGC_ += size;
1528     }
1529 
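    // True when the total native memory exceeds nativeSizeTriggerGCThreshold_ and the native growth since the
    // last GC (recorded allocations plus newly attached binding size) exceeds incNativeSizeTriggerGC_.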
1530     bool GlobalNativeSizeLargerToTriggerGC() const
1531     {
1532         auto incNativeBindingSizeAfterLastGC = nativeBindingSize_ > nativeBindingSizeAfterLastGC_ ?
1533             nativeBindingSize_ - nativeBindingSizeAfterLastGC_ : 0;
1534         return GetGlobalNativeSize() > nativeSizeTriggerGCThreshold_ &&
1535             nativeSizeAfterLastGC_ + incNativeBindingSizeAfterLastGC > incNativeSizeTriggerGC_;
1536     }
1537 
1538     bool GlobalNativeSizeLargerThanLimit() const
1539     {
1540         size_t overshoot = InSensitiveStatus() ? nativeSizeOvershoot_ : 0;
1541         return GetGlobalNativeSize() >= globalSpaceNativeLimit_ + overshoot;
1542     }
1543 
1544     bool GlobalNativeSizeLargerThanLimitForIdle() const
1545     {
1546         return GetGlobalNativeSize() >= static_cast<size_t>(globalSpaceNativeLimit_ *
1547             IDLE_SPACE_SIZE_LIMIT_RATE);
1548     }
1549 
1550     void TryTriggerFullMarkOrGCByNativeSize();
1551 
1552     void TryTriggerFullMarkBySharedSize(size_t size);
1553 
1554     bool TryTriggerFullMarkBySharedLimit();
1555 
1556     void CheckAndTriggerTaskFinishedGC();
1557 
1558     bool IsMarking() const override;
1559 
1560     bool IsReadyToConcurrentMark() const override;
1561 
1562     bool IsEdenGC() const
1563     {
1564         return gcType_ == TriggerGCType::EDEN_GC;
1565     }
1566 
1567     bool IsYoungGC() const
1568     {
1569         return gcType_ == TriggerGCType::YOUNG_GC;
1570     }
1571 
1572     bool IsGeneralYoungGC() const
1573     {
1574         return gcType_ == TriggerGCType::YOUNG_GC || gcType_ == TriggerGCType::EDEN_GC;
1575     }
1576 
1577     void EnableEdenGC();
1578 
1579     void TryEnableEdenGC();
1580 
1581     void CheckNonMovableSpaceOOM();
1582     void ReleaseEdenAllocator();
1583     void InstallEdenAllocator();
1584     void DumpHeapSnapshotBeforeOOM(bool isFullGC = true);
1585     std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> CalCallSiteInfo(uintptr_t retAddr) const;
1586     MachineCode *GetMachineCodeObject(uintptr_t pc) const;
1587 
1588     PUBLIC_API GCListenerId AddGCListener(FinishGCListener listener, void *data);
1589     PUBLIC_API void RemoveGCListener(GCListenerId listenerId);
1590     void ProcessGCListeners();
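    // Illustrative sketch (not part of this header) of the finish-GC listener API declared above:
    //     static void OnGCFinished(void *data) { /* e.g. sample heap statistics */ }
    //     GCListenerId id = heap->AddGCListener(OnGCFinished, nullptr);
    //     ...
    //     heap->RemoveGCListener(id);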
1591 
1592     inline void ProcessNativeDelete(const WeakRootVisitor& visitor);
1593     inline void ProcessSharedNativeDelete(const WeakRootVisitor& visitor);
1594     inline void ProcessReferences(const WeakRootVisitor& visitor);
1595     inline void PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent);
1596     inline void PushToSharedNativePointerList(JSNativePointer* pointer);
1597     inline void RemoveFromNativePointerList(const JSNativePointer* pointer);
1598     inline void ClearNativePointerList();
1599 
1600     size_t GetNativePointerListSize() const
1601     {
1602         return nativePointerList_.size();
1603     }
1604 
1605 private:
1606     inline TaggedObject *AllocateHugeObject(size_t size);
1607 
1608     static constexpr int MIN_JSDUMP_THRESHOLDS = 85;
1609     static constexpr int MAX_JSDUMP_THRESHOLDS = 95;
1610     static constexpr int IDLE_TIME_LIMIT = 10;  // if the idle time is over 10 ms, we can do something
1611     static constexpr int ALLOCATE_SIZE_LIMIT = 100_KB;
1612     static constexpr int IDLE_MAINTAIN_TIME = 500;
1613     static constexpr int BACKGROUND_GROW_LIMIT = 2_MB;
1614     // Threshold at which HintGC will actually trigger GC.
1615     static constexpr double SURVIVAL_RATE_THRESHOLD = 0.5;
1616     static constexpr double IDLE_SPACE_SIZE_LIMIT_RATE = 0.8;
1617     static constexpr double IDLE_FULLGC_SPACE_USAGE_LIMIT_RATE = 0.7;
1618     static constexpr size_t NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT = DEFAULT_SHARED_HEAP_SIZE / 10; // 10 : one tenth of the default shared heap size.
1619     static constexpr size_t INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT = 100_MB;
1620     void RecomputeLimits();
1621     void AdjustOldSpaceLimit();
1622     // record lastRegion for each space, which will be used in ReclaimRegions()
1623     void PrepareRecordRegionsForReclaim();
1624     inline void ReclaimRegions(TriggerGCType gcType);
1625     inline size_t CalculateCommittedCacheSize();
1626 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1627     uint64_t GetCurrentTickMillseconds();
1628     void ThresholdReachedDump();
1629 #endif
1630     void CleanCallBack();
1631     void IncreasePendingAsyncNativeCallbackSize(size_t bindingSize)
1632     {
1633         pendingAsyncNativeCallbackSize_ += bindingSize;
1634     }
1635     void DecreasePendingAsyncNativeCallbackSize(size_t bindingSize)
1636     {
1637         pendingAsyncNativeCallbackSize_ -= bindingSize;
1638     }
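    // Internal taskpool tasks posted by the heap. Each task's Run(threadIndex) executes on a taskpool
    // worker thread and performs one piece of GC-related work: parallel marking/evacuation phases,
    // asynchronous clearing, cold-start and GC-restrain follow-ups, and native callback deletion.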
1639     class ParallelGCTask : public Task {
1640     public:
1641         ParallelGCTask(int32_t id, Heap *heap, ParallelGCTaskPhase taskPhase)
1642             : Task(id), heap_(heap), taskPhase_(taskPhase) {};
1643         ~ParallelGCTask() override = default;
1644         bool Run(uint32_t threadIndex) override;
1645 
1646         NO_COPY_SEMANTIC(ParallelGCTask);
1647         NO_MOVE_SEMANTIC(ParallelGCTask);
1648 
1649     private:
1650         Heap *heap_ {nullptr};
1651         ParallelGCTaskPhase taskPhase_;
1652     };
1653 
1654     class AsyncClearTask : public Task {
1655     public:
1656         AsyncClearTask(int32_t id, Heap *heap, TriggerGCType type)
1657             : Task(id), heap_(heap), gcType_(type) {}
1658         ~AsyncClearTask() override = default;
1659         bool Run(uint32_t threadIndex) override;
1660 
1661         NO_COPY_SEMANTIC(AsyncClearTask);
1662         NO_MOVE_SEMANTIC(AsyncClearTask);
1663     private:
1664         Heap *heap_;
1665         TriggerGCType gcType_;
1666     };
1667 
1668     class FinishColdStartTask : public Task {
1669     public:
1670         FinishColdStartTask(int32_t id, Heap *heap)
1671             : Task(id), heap_(heap) {}
1672         ~FinishColdStartTask() override = default;
1673         bool Run(uint32_t threadIndex) override;
1674 
1675         NO_COPY_SEMANTIC(FinishColdStartTask);
1676         NO_MOVE_SEMANTIC(FinishColdStartTask);
1677     private:
1678         Heap *heap_;
1679     };
1680 
1681     class FinishGCRestrainTask : public Task {
1682     public:
1683         FinishGCRestrainTask(int32_t id, Heap *heap)
1684             : Task(id), heap_(heap) {}
1685         ~FinishGCRestrainTask() override = default;
1686         bool Run(uint32_t threadIndex) override;
1687 
1688         NO_COPY_SEMANTIC(FinishGCRestrainTask);
1689         NO_MOVE_SEMANTIC(FinishGCRestrainTask);
1690     private:
1691         Heap *heap_;
1692     };
1693 
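    // DeleteCallbackTask takes ownership of the pending native pointer callbacks by swapping the caller's
    // vector into nativePointerCallbacks_, so the callbacks can later be run off the js thread without
    // further synchronization on the original container.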
1694     class DeleteCallbackTask : public Task {
1695     public:
1696         DeleteCallbackTask(int32_t id, std::vector<NativePointerCallbackData> &callbacks) : Task(id)
1697         {
1698             std::swap(callbacks, nativePointerCallbacks_);
1699         }
1700         ~DeleteCallbackTask() override = default;
1701         bool Run(uint32_t threadIndex) override;
1702 
1703         NO_COPY_SEMANTIC(DeleteCallbackTask);
1704         NO_MOVE_SEMANTIC(DeleteCallbackTask);
1705 
1706     private:
1707         std::vector<NativePointerCallbackData> nativePointerCallbacks_ {};
1708     };
1709 
1710     struct MainLocalHeapSmartGCStats {
1711         /**
1712          * For SmartGC.
1713          * The main js thread checks these statuses every time it tries to collect
1714          * garbage (e.g. in JSThread::CheckSafePoint) and skips GC if needed, so std::atomic is sufficient here.
1715         */
1716         std::atomic<AppSensitiveStatus> sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
1717         std::atomic<StartupStatus> startupStatus_ {StartupStatus::BEFORE_STARTUP};
1718     };
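    // Sketch of the intended read pattern (assumed from the comment above): on each safepoint check the
    // main js thread does something like
    //   if (smartGCStats_.sensitiveStatus_.load() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE) { /* skip GC */ }
    // so plain atomic loads and stores cover the common path.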
1719 
1720     // Some data used in SharedGC also needs to be stored in the local heap, e.g. the temporary local mark stack.
1721     struct SharedGCLocalStoragePackedData {
1722         /**
1723          * During SharedGC concurrent marking, the barrier pushes shared objects to a mark stack for marking.
1724          * In LocalGC, non-shared objects can simply be pushed to the WorkNode for MAIN_THREAD_INDEX, but in
1725          * SharedGC the only options are either to take a global lock for DAEMON_THREAD_INDEX's WorkNode, or to
1726          * push to a local WorkNode and publish it to the global one during remark.
1727          * If the heap is destructed before this node is pushed to the global one, check it and push any remaining objects as well.
1728         */
1729         WorkNode *sharedConcurrentMarkingLocalBuffer_ {nullptr};
1730         /**
1731          * Records the local_to_share rset used in SharedGC concurrent marking,
1732          * whose lifecycle is limited to a single SharedGC.
1733          * Before mutating this local heap (e.g. in LocalGC::Evacuate), make sure the RSetWorkList has been fully
1734          * processed; otherwise the SharedGC concurrent marker will visit incorrect local_to_share bits.
1735          * Before destroying the local heap, the RSetWorkList must be finished as well.
1736         */
1737         RSetWorkListHandler *rSetWorkListHandler_ {nullptr};
1738     };
1739 
1740     EcmaVM *ecmaVm_ {nullptr};
1741     JSThread *thread_ {nullptr};
1742 
1743     SharedHeap *sHeap_ {nullptr};
1744     MainLocalHeapSmartGCStats smartGCStats_;
1745 
1746     /*
1747      * Heap spaces.
1748      */
1749 
1750     /*
1751      * Young generation spaces where most new objects are allocated.
1752      * (only one of the spaces is active at a time in semi space GC).
1753      */
1754     EdenSpace *edenSpace_ {nullptr};
1755     SemiSpace *activeSemiSpace_ {nullptr};
1756     SemiSpace *inactiveSemiSpace_ {nullptr};
1757 
1758     // Old generation spaces where some long living objects are allocated or promoted.
1759     OldSpace *oldSpace_ {nullptr};
1760     OldSpace *compressSpace_ {nullptr};
1761     ReadOnlySpace *readOnlySpace_ {nullptr};
1762     AppSpawnSpace *appSpawnSpace_ {nullptr};
1763     // Spaces used for special kinds of objects.
1764     NonMovableSpace *nonMovableSpace_ {nullptr};
1765     MachineCodeSpace *machineCodeSpace_ {nullptr};
1766     HugeMachineCodeSpace *hugeMachineCodeSpace_ {nullptr};
1767     HugeObjectSpace *hugeObjectSpace_ {nullptr};
1768     SnapshotSpace *snapshotSpace_ {nullptr};
1769     // tlab for shared non movable space
1770     ThreadLocalAllocationBuffer *sNonMovableTlab_ {nullptr};
1771     // tlab for shared old space
1772     ThreadLocalAllocationBuffer *sOldTlab_ {nullptr};
1773     /*
1774      * Garbage collectors collecting garbage in different scopes.
1775      */
1776 
1777     /*
1778      * Semi space GC which collects garbage only in young spaces.
1779      * This is, however, optional for now because the partial GC also covers its functionality.
1780      */
1781     STWYoungGC *stwYoungGC_ {nullptr};
1782 
1783     /*
1784      * The most frequently used partial GC, which collects garbage in young spaces
1785      * and, if GC heuristics determine it is needed, in part of the old spaces.
1786      */
1787     PartialGC *partialGC_ {nullptr};
1788 
1789     // Full collector which collects garbage in all valid heap spaces.
1790     FullGC *fullGC_ {nullptr};
1791 
1792     // Concurrent marker which coordinates actions of GC markers and mutators.
1793     ConcurrentMarker *concurrentMarker_ {nullptr};
1794 
1795     // Concurrent sweeper which coordinates actions of sweepers (in spaces excluding young semi spaces) and mutators.
1796     ConcurrentSweeper *sweeper_ {nullptr};
1797 
1798     // Parallel evacuator which evacuates objects from one space to another one.
1799     ParallelEvacuator *evacuator_ {nullptr};
1800 
1801     // Incremental marker which coordinates actions of GC markers in idle time.
1802     IncrementalMarker *incrementalMarker_ {nullptr};
1803 
1804     /*
1805      * Different kinds of markers used by different collectors.
1806      * Depending on the collector algorithm, some markers can do simple marking
1807      *  while some others need to handle object movement.
1808      */
1809     Marker *nonMovableMarker_ {nullptr};
1810     Marker *semiGCMarker_ {nullptr};
1811     Marker *compressGCMarker_ {nullptr};
1812 
1813     // Work manager managing the tasks mostly generated in the GC mark phase.
1814     WorkManager *workManager_ {nullptr};
1815 
1816     SharedGCLocalStoragePackedData sharedGCData_;
1817 
1818     bool onSerializeEvent_ {false};
1819     bool parallelGC_ {true};
1820     bool fullGCRequested_ {false};
1821     bool fullMarkRequested_ {false};
1822     bool oldSpaceLimitAdjusted_ {false};
1823     bool enableIdleGC_ {false};
1824     std::atomic_bool isCSetClearing_ {false};
1825     HeapMode mode_ { HeapMode::NORMAL };
1826 
1827     /*
1828      * The memory controller providing memory statistics (on allocations and collections),
1829      * which is used for GC heuristics.
1830      */
1831     MemController *memController_ {nullptr};
1832     size_t edenToYoungSize_ {0};
1833     size_t promotedSize_ {0};
1834     size_t semiSpaceCopiedSize_ {0};
1835     size_t nativeBindingSize_ {0};
1836     size_t globalSpaceNativeLimit_ {0};
1837     size_t nativeSizeTriggerGCThreshold_ {0};
1838     size_t incNativeSizeTriggerGC_ {0};
1839     size_t nativeSizeOvershoot_ {0};
1840     size_t asyncClearNativePointerThreshold_ {0};
1841     size_t nativeSizeAfterLastGC_ {0};
1842     size_t nativeBindingSizeAfterLastGC_ {0};
1843     size_t newAllocatedSharedObjectSize_ {0};
1844     // recordObjectSize_ & recordNativeSize_:
1845     // Record memory usage before a taskpool task starts; used to determine whether to trigger GC after the task finishes.
1846     size_t recordObjectSize_ {0};
1847     size_t recordNativeSize_ {0};
1848     // Record heap object size before entering sensitive status
1849     size_t recordObjSizeBeforeSensitive_ {0};
1850     size_t pendingAsyncNativeCallbackSize_ {0};
1851     MemGrowingType memGrowingtype_ {MemGrowingType::HIGH_THROUGHPUT};
1852 
1853     // Maximum number of parallel evacuator tasks.
1854     uint32_t maxEvacuateTaskCount_ {0};
1855 
1856     uint64_t startupDurationInMs_ {0};
1857 
1858     Mutex setNewSpaceOvershootSizeMutex_;
1859 
1860     // Application status
1861 
1862     IdleNotifyStatusCallback notifyIdleStatusCallback {nullptr};
1863 
1864     IdleTaskType idleTask_ {IdleTaskType::NO_TASK};
1865     float idlePredictDuration_ {0.0f};
1866     double idleTaskFinishTime_ {0.0};
1867 
1868     /*
1869      * The listeners which are called at the end of GC
1870      */
1871     std::vector<std::pair<FinishGCListener, void *>> gcListeners_;
1872 
1873     IdleGCTrigger *idleGCTrigger_ {nullptr};
1874 
1875     bool hasOOMDump_ {false};
1876     bool enableEdenGC_ {false};
1877 
1878     CVector<JSNativePointer *> nativePointerList_;
1879     CVector<JSNativePointer *> concurrentNativePointerList_;
1880     CVector<JSNativePointer *> sharedNativePointerList_;
1881 
1882     friend panda::test::HProfTestHelper;
1883     friend panda::test::GCTest_CallbackTask_Test;
1884 };
1885 }  // namespace panda::ecmascript
1886 
1887 #endif  // ECMASCRIPT_MEM_HEAP_H
1888