/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_HEAP_H
#define ECMASCRIPT_MEM_HEAP_H

#include "ecmascript/base/config.h"
#include "ecmascript/frames.h"
#include "ecmascript/js_object_resizing_strategy.h"
#include "ecmascript/mem/linear_space.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/shared_heap/shared_space.h"
#include "ecmascript/mem/sparse_space.h"
#include "ecmascript/mem/work_manager.h"
#include "ecmascript/taskpool/taskpool.h"
#include "ecmascript/mem/machine_code.h"
#include "ecmascript/mem/idle_gc_trigger.h"

namespace panda::test {
class GCTest_CallbackTask_Test;
class HProfTestHelper;
class HeapTestHelper;
}

namespace panda::ecmascript {
class ConcurrentMarker;
class ConcurrentSweeper;
class EcmaVM;
class FullGC;
class GCStats;
class GCKeyStats;
class HeapRegionAllocator;
class HeapTracker;
#if !WIN_OR_MAC_OR_IOS_PLATFORM
class HeapProfilerInterface;
class HeapProfiler;
#endif
class IncrementalMarker;
class JSNativePointer;
class Marker;
class MemController;
class IdleGCTrigger;
class NativeAreaAllocator;
class ParallelEvacuator;
class PartialGC;
class RSetWorkListHandler;
class SharedConcurrentMarker;
class SharedConcurrentSweeper;
class SharedGC;
class SharedGCEvacuator;
class SharedGCMarkerBase;
class SharedGCMarker;
class SharedFullGC;
class SharedGCMovableMarker;
class ThreadLocalAllocationBuffer;
class JSThread;
class DaemonThread;
class GlobalEnvConstants;
class SharedMemController;

using IdleNotifyStatusCallback = std::function<void(bool)>;
using FinishGCListener = void (*)(void *);
using GCListenerId = std::vector<std::pair<FinishGCListener, void *>>::const_iterator;
using Clock = std::chrono::high_resolution_clock;
using AppFreezeFilterCallback = std::function<bool(const int32_t pid)>;
using BytesAndDuration = std::pair<uint64_t, double>;
using MemoryReduceDegree = panda::JSNApi::MemoryReduceDegree;

enum class IdleTaskType : uint8_t {
    NO_TASK,
    YOUNG_GC,
    FINISH_MARKING,
    INCREMENTAL_MARK
};

enum class MemGrowingType : uint8_t {
    HIGH_THROUGHPUT,
    CONSERVATIVE,
    PRESSURE
};

enum class HeapMode {
    NORMAL,
    SPAWN,
    SHARE,
};

enum AppSensitiveStatus : uint8_t {
    NORMAL_SCENE,
    ENTER_HIGH_SENSITIVE,
    EXIT_HIGH_SENSITIVE,
};

enum class StartupStatus : uint8_t {
    BEFORE_STARTUP,
    ON_STARTUP,
    JUST_FINISH_STARTUP,
    FINISH_STARTUP
};

enum class VerifyKind {
    VERIFY_PRE_GC,
    VERIFY_POST_GC,
    VERIFY_MARK_YOUNG,
    VERIFY_EVACUATE_YOUNG,
    VERIFY_MARK_FULL,
    VERIFY_EVACUATE_OLD,
    VERIFY_EVACUATE_FULL,
    VERIFY_SHARED_RSET_POST_FULL_GC,
    VERIFY_PRE_SHARED_GC,
    VERIFY_POST_SHARED_GC,
    VERIFY_SHARED_GC_MARK,
    VERIFY_SHARED_GC_SWEEP,
    VERIFY_END,
};

enum class SharedHeapOOMSource {
    NORMAL_ALLOCATION,
    DESERIALIZE,
    SHARED_GC,
};

class BaseHeap {
public:
    BaseHeap(const EcmaParamConfiguration &config) : config_(config) {}
    virtual ~BaseHeap() = default;
    NO_COPY_SEMANTIC(BaseHeap);
    NO_MOVE_SEMANTIC(BaseHeap);

    virtual void Destroy() = 0;

    virtual bool IsMarking() const = 0;

    virtual bool IsReadyToConcurrentMark() const = 0;

    virtual bool NeedStopCollection() = 0;

    virtual void SetSensitiveStatus(AppSensitiveStatus status) = 0;

    virtual AppSensitiveStatus GetSensitiveStatus() const = 0;

    virtual bool FinishStartupEvent() = 0;

    virtual bool OnStartupEvent() const = 0;

    virtual void NotifyPostFork() = 0;

    virtual void TryTriggerIdleCollection() = 0;

    virtual void TryTriggerIncrementalMarking() = 0;

    /*
     * Wait for any existing concurrent marking tasks to finish.
     * Returns true if concurrent marking was ongoing.
     */
    virtual bool CheckOngoingConcurrentMarking() = 0;
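
    // Illustrative caller-side sketch (not part of the original header): a GC
    // entry point typically calls this before re-marking, so it never races with
    // an in-flight concurrent marker. `heap` stands for any BaseHeap subclass.
    //
    //     if (heap->CheckOngoingConcurrentMarking()) {
    //         // Concurrent marking was running and has now been waited on;
    //         // the collector can safely re-mark and finish the cycle.
    //     }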

    virtual bool OldSpaceExceedCapacity(size_t size) const = 0;

    virtual bool OldSpaceExceedLimit() const = 0;

    virtual inline size_t GetCommittedSize() const = 0;

    virtual inline size_t GetHeapObjectSize() const = 0;

    virtual inline size_t GetRegionCount() const = 0;

    virtual void ChangeGCParams(bool inBackground) = 0;

    virtual const GlobalEnvConstants *GetGlobalConst() const = 0;

    virtual GCStats *GetEcmaGCStats() = 0;

    virtual bool ObjectExceedMaxHeapSize() const = 0;

    virtual void UpdateHeapStatsAfterGC(TriggerGCType gcType) = 0;

    MarkType GetMarkType() const
    {
        return markType_;
    }

    void SetMarkType(MarkType markType)
    {
        markType_ = markType;
    }

    bool IsYoungMark() const
    {
        return markType_ == MarkType::MARK_YOUNG;
    }

    bool IsFullMark() const
    {
        return markType_ == MarkType::MARK_FULL;
    }

    bool IsConcurrentFullMark() const
    {
        return markType_ == MarkType::MARK_FULL;
    }

    TriggerGCType GetGCType() const
    {
        return gcType_;
    }

    bool PUBLIC_API IsAlive(TaggedObject *object) const;

    bool ContainObject(TaggedObject *object) const;

    bool GetOldGCRequested()
    {
        return oldGCRequested_;
    }

    EcmaParamConfiguration GetEcmaParamConfiguration() const
    {
        return config_;
    }

    NativeAreaAllocator *GetNativeAreaAllocator() const
    {
        return nativeAreaAllocator_;
    }

    HeapRegionAllocator *GetHeapRegionAllocator() const
    {
        return heapRegionAllocator_;
    }

    void ShouldThrowOOMError(bool shouldThrow)
    {
        shouldThrowOOMError_ = shouldThrow;
    }

    void ShouldForceThrowOOMError()
    {
        shouldForceThrowOOMError_ = true;
    }

    void SetCanThrowOOMError(bool canThrow)
    {
        canThrowOOMError_ = canThrow;
    }

    bool CanThrowOOMError()
    {
        return canThrowOOMError_;
    }

    bool IsInBackground() const
    {
        return inBackground_;
    }

    // ONLY used for heap verification.
    bool IsVerifying() const
    {
        return isVerifying_;
    }

    // ONLY used for heap verification.
    void SetVerifying(bool verifying)
    {
        isVerifying_ = verifying;
    }

    void SetGCState(bool inGC)
    {
        inGC_.store(inGC, std::memory_order_relaxed);
    }

    bool InGC() const
    {
        return inGC_.load(std::memory_order_relaxed);
    }

    void NotifyHeapAliveSizeAfterGC(size_t size)
    {
        heapAliveSizeAfterGC_ = size;
    }

    size_t GetHeapAliveSizeAfterGC() const
    {
        return heapAliveSizeAfterGC_;
    }

    size_t GetFragmentSizeAfterGC() const
    {
        return fragmentSizeAfterGC_;
    }

    size_t GetHeapBasicLoss() const
    {
        return heapBasicLoss_;
    }

    size_t GetGlobalSpaceAllocLimit() const
    {
        return globalSpaceAllocLimit_;
    }

    // Whether the heap should be verified during GC.
    bool ShouldVerifyHeap() const
    {
        return shouldVerifyHeap_;
    }

    bool EnablePageTagThreadId() const
    {
        return enablePageTagThreadId_;
    }

    void ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
        bool NonMovableObjNearOOM = false);

    uint32_t GetMaxMarkTaskCount() const
    {
        return maxMarkTaskCount_;
    }

    bool InSensitiveStatus() const
    {
        return GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE || OnStartupEvent();
    }

    void OnAllocateEvent(EcmaVM *ecmaVm, TaggedObject* address, size_t size);
    inline void SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
                                            [[maybe_unused]] size_t size);
    bool CheckCanDistributeTask();
    void IncreaseTaskCount();
    void ReduceTaskCount();
    void WaitRunningTaskFinished();
    void WaitClearTaskFinished();
    void ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
        bool NonMovableObjNearOOM = false);
    void SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName);

#ifndef NDEBUG
    bool TriggerCollectionOnNewObjectEnabled() const
    {
        return triggerCollectionOnNewObject_;
    }

    void EnableTriggerCollectionOnNewObject()
    {
        triggerCollectionOnNewObject_ = true;
    }

    void DisableTriggerCollectionOnNewObject()
    {
        triggerCollectionOnNewObject_ = false;
    }
#endif

protected:
    void FatalOutOfMemoryError(size_t size, std::string functionName);

    enum class HeapType {
        LOCAL_HEAP,
        SHARED_HEAP,
        INVALID,
    };

    class RecursionScope {
    public:
        explicit RecursionScope(BaseHeap* heap, HeapType heapType) : heap_(heap), heapType_(heapType)
        {
            if (heap_->recursionDepth_++ != 0) {
                LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
                              << ") Constructor, depth: " << heap_->recursionDepth_;
            }
            heap_->SetGCState(true);
        }
        ~RecursionScope()
        {
            if (--heap_->recursionDepth_ != 0) {
                LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
                              << ") Destructor, depth: " << heap_->recursionDepth_;
            }
            heap_->SetGCState(false);
        }
    private:
        BaseHeap *heap_ {nullptr};
        HeapType heapType_ {HeapType::INVALID};
    };
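
    // Illustrative use of RecursionScope (a sketch, not part of the original
    // header): a collection entry point opens the scope so that any re-entrant
    // collection fatals via the depth check, and InGC() is true for the call.
    //
    //     void SomeHeap::CollectGarbageEntry()   // hypothetical entry point
    //     {
    //         RecursionScope scope(this, HeapType::LOCAL_HEAP);
    //         // ... mark, sweep, evacuate; a nested collection here would
    //         // abort with LOG_GC(FATAL).
    //     }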

    static constexpr double TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE = 0.75;

    const EcmaParamConfiguration config_;
    MarkType markType_ {MarkType::MARK_YOUNG};
    TriggerGCType gcType_ {TriggerGCType::YOUNG_GC};
    Mutex gcCollectGarbageMutex_;
    // Region allocators.
    NativeAreaAllocator *nativeAreaAllocator_ {nullptr};
    HeapRegionAllocator *heapRegionAllocator_ {nullptr};

    size_t heapAliveSizeAfterGC_ {0};
    size_t globalSpaceAllocLimit_ {0};
    size_t globalSpaceConcurrentMarkLimit_ {0};
    size_t heapBasicLoss_ {1_MB};
    size_t fragmentSizeAfterGC_ {0};
    // Parallel marker task count.
    uint32_t runningTaskCount_ {0};
    uint32_t maxMarkTaskCount_ {0};
    Mutex waitTaskFinishedMutex_;
    ConditionVariable waitTaskFinishedCV_;
    Mutex waitClearTaskFinishedMutex_;
    ConditionVariable waitClearTaskFinishedCV_;
    bool clearTaskFinished_ {true};
    bool inBackground_ {false};
    bool shouldThrowOOMError_ {false};
    // Differs from `shouldThrowOOMError_`: this is set when allocating a region fails during GC, which makes
    // MemMapAllocator unlimited so the current GC can complete. After GC, if this flag is set, we MUST throw OOM.
    bool shouldForceThrowOOMError_ {false};
    bool canThrowOOMError_ {true};
    bool oldGCRequested_ {false};
    // ONLY used for heap verification.
    bool shouldVerifyHeap_ {false};
    bool isVerifying_ {false};
    bool enablePageTagThreadId_ {false};
    std::atomic_bool inGC_ {false};
    int32_t recursionDepth_ {0};
#ifndef NDEBUG
    bool triggerCollectionOnNewObject_ {true};
#endif
};

class SharedHeap : public BaseHeap {
public:
    SharedHeap(const EcmaParamConfiguration &config) : BaseHeap(config) {}
    virtual ~SharedHeap() = default;

    static void CreateNewInstance();
    static SharedHeap *GetInstance();
    static void DestroyInstance();

    void Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
        const JSRuntimeOptions &option, DaemonThread *dThread);

    void Destroy() override;

    void PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option);

    void EnableParallelGC(JSRuntimeOptions &option);

    void DisableParallelGC(JSThread *thread);

    void AdjustGlobalSpaceAllocLimit();

    inline void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);

    class ParallelMarkTask : public Task {
    public:
        ParallelMarkTask(int32_t id, SharedHeap *heap, SharedParallelMarkPhase taskPhase)
            : Task(id), sHeap_(heap), taskPhase_(taskPhase) {}
        ~ParallelMarkTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(ParallelMarkTask);
        NO_MOVE_SEMANTIC(ParallelMarkTask);

    private:
        SharedHeap *sHeap_ {nullptr};
        SharedParallelMarkPhase taskPhase_;
    };

    class AsyncClearTask : public Task {
    public:
        AsyncClearTask(int32_t id, SharedHeap *heap, TriggerGCType type)
            : Task(id), sHeap_(heap), gcType_(type) {}
        ~AsyncClearTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(AsyncClearTask);
        NO_MOVE_SEMANTIC(AsyncClearTask);
    private:
        SharedHeap *sHeap_;
        TriggerGCType gcType_;
    };

    bool IsMarking() const override
    {
        LOG_FULL(ERROR) << "SharedHeap IsMarking() not supported yet";
        return false;
    }

    bool IsReadyToConcurrentMark() const override;

    bool NeedStopCollection() override;

    void SetSensitiveStatus(AppSensitiveStatus status) override
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        smartGCStats_.sensitiveStatus_ = status;
        if (!InSensitiveStatus()) {
            smartGCStats_.sensitiveStatusCV_.Signal();
        }
    }

    // This should be called while holding sensitiveStatusMutex_.
    AppSensitiveStatus GetSensitiveStatus() const override
    {
        return smartGCStats_.sensitiveStatus_;
    }

    StartupStatus GetStartupStatus() const
    {
        return smartGCStats_.startupStatus_;
    }

    bool IsJustFinishStartup() const
    {
        return smartGCStats_.startupStatus_ == StartupStatus::JUST_FINISH_STARTUP;
    }

    bool CancelJustFinishStartupEvent()
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        if (!IsJustFinishStartup()) {
            return false;
        }
        smartGCStats_.startupStatus_ = StartupStatus::FINISH_STARTUP;
        return true;
    }

    bool FinishStartupEvent() override
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        if (!OnStartupEvent()) {
            return false;
        }
        smartGCStats_.startupStatus_ = StartupStatus::JUST_FINISH_STARTUP;
        if (!InSensitiveStatus()) {
            smartGCStats_.sensitiveStatusCV_.Signal();
        }
        return true;
    }

    // This should be called while holding sensitiveStatusMutex_.
    bool OnStartupEvent() const override
    {
        return smartGCStats_.startupStatus_ == StartupStatus::ON_STARTUP;
    }

    void NotifyPostFork() override
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        smartGCStats_.startupStatus_ = StartupStatus::ON_STARTUP;
    }

    void WaitSensitiveStatusFinished()
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        while (InSensitiveStatus() && !smartGCStats_.forceGC_) {
            smartGCStats_.sensitiveStatusCV_.Wait(&smartGCStats_.sensitiveStatusMutex_);
        }
    }
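
    // Sketch of the check-wait pattern this method supports (assumed daemon-side
    // flow, pieced together from the comments in this header):
    //
    //     // In the daemon GC loop:
    //     sHeap->WaitSensitiveStatusFinished();  // blocks while the app is in a
    //                                            // sensitive scene, unless forceGC_
    //     sHeap->DaemonCollectGarbage(gcType, reason);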

    bool ObjectExceedMaxHeapSize() const override;

    bool ObjectExceedJustFinishStartupThresholdForGC() const;

    bool ObjectExceedJustFinishStartupThresholdForCM() const;

    bool CheckIfNeedStopCollectionByStartup();

    void TryAdjustSpaceOvershootByConfigSize();

    bool CheckAndTriggerSharedGC(JSThread *thread);

    bool CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size);

    bool HasCSetRegions()
    {
        return sOldSpace_->GetCollectSetRegionCount() > 0;
    }

    void TryTriggerLocalConcurrentMarking();

    // Called when all VMs are destroyed; tries to destroy the daemon thread.
    void WaitAllTasksFinishedAfterAllJSThreadEliminated();

    void WaitAllTasksFinished(JSThread *thread);

    void StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason);         // In daemon thread

    // Use JSThread instead of DaemonThread to check IsReadyToSharedConcurrentMark, to avoid an atomic load.
    bool CheckCanTriggerConcurrentMarking(JSThread *thread);

    void TryTriggerIdleCollection() override
    {
        LOG_FULL(ERROR) << "SharedHeap TryTriggerIdleCollection() not supported yet";
    }

    void TryTriggerIncrementalMarking() override
    {
        LOG_FULL(ERROR) << "SharedHeap TryTriggerIncrementalMarking() not supported yet";
    }

    void UpdateWorkManager(SharedGCWorkManager *sWorkManager);

    bool CheckOngoingConcurrentMarking() override;

    bool OldSpaceExceedCapacity(size_t size) const override
    {
        size_t totalSize = sOldSpace_->GetCommittedSize() + size;
        return totalSize >= sOldSpace_->GetMaximumCapacity() + sOldSpace_->GetOutOfMemoryOvershootSize();
    }

    bool OldSpaceExceedLimit() const override
    {
        return sOldSpace_->GetHeapObjectSize() >= sOldSpace_->GetInitialCapacity();
    }

    SharedConcurrentMarker *GetConcurrentMarker() const
    {
        return sConcurrentMarker_;
    }

    SharedGCEvacuator *GetSharedGCEvacuator() const
    {
        return sEvacuator_;
    }

    SharedConcurrentSweeper *GetSweeper() const
    {
        return sSweeper_;
    }

    bool IsParallelGCEnabled() const
    {
        return parallelGC_;
    }

    SharedOldSpace *GetOldSpace() const
    {
        return sOldSpace_;
    }

    SharedOldSpace *GetCompressSpace() const
    {
        return sCompressSpace_;
    }

    SharedNonMovableSpace *GetNonMovableSpace() const
    {
        return sNonMovableSpace_;
    }

    SharedHugeObjectSpace *GetHugeObjectSpace() const
    {
        return sHugeObjectSpace_;
    }

    SharedReadOnlySpace *GetReadOnlySpace() const
    {
        return sReadOnlySpace_;
    }

    SharedAppSpawnSpace *GetAppSpawnSpace() const
    {
        return sAppSpawnSpace_;
    }

    void SetForceGC(bool forceGC)
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        smartGCStats_.forceGC_ = forceGC;
        if (smartGCStats_.forceGC_) {
            smartGCStats_.sensitiveStatusCV_.Signal();
        }
    }

    inline void TryTriggerConcurrentMarking(JSThread *thread);

    template<TriggerGCType gcType, GCReason gcReason>
    void TriggerConcurrentMarking(JSThread *thread);

    template<TriggerGCType gcType, GCReason gcReason>
    void CollectGarbage(JSThread *thread);

    template<GCReason gcReason>
    void CompressCollectGarbageNotWaiting(JSThread *thread);

    template<TriggerGCType gcType, GCReason gcReason>
    void PostGCTaskForTest(JSThread *thread);

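    // Hedged usage sketch for the templated entry points above; the exact
    // enumerators a caller picks depend on the collector (TriggerGCType::SHARED_GC
    // is assumed here; GCReason::ALLOCATION_FAILED appears later in this header):
    //
    //     SharedHeap::GetInstance()->CollectGarbage<TriggerGCType::SHARED_GC,
    //         GCReason::ALLOCATION_FAILED>(thread);
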
    void CollectGarbageNearOOM(JSThread *thread);
    // Only means the main body of SharedGC is finished; i.e., if parallel GC is enabled, this flag may already be
    // set to true while the sweep and clear tasks are still running asynchronously.
    void NotifyGCCompleted();            // In daemon thread

    // Called when all VMs are destroyed; tries to destroy the daemon thread.
    void WaitGCFinishedAfterAllJSThreadEliminated();

    void WaitGCFinished(JSThread *thread);

    void DaemonCollectGarbage(TriggerGCType gcType, GCReason reason);

    void SetMaxMarkTaskCount(uint32_t maxTaskCount)
    {
        maxMarkTaskCount_ = maxTaskCount;
    }

    inline size_t GetCommittedSize() const override
    {
        size_t result = sOldSpace_->GetCommittedSize() +
            sHugeObjectSpace_->GetCommittedSize() +
            sNonMovableSpace_->GetCommittedSize() +
            sReadOnlySpace_->GetCommittedSize();
        return result;
    }

    inline size_t GetHeapObjectSize() const override
    {
        size_t result = sOldSpace_->GetHeapObjectSize() +
            sHugeObjectSpace_->GetHeapObjectSize() +
            sNonMovableSpace_->GetHeapObjectSize() +
            sReadOnlySpace_->GetCommittedSize();
        return result;
    }

    inline size_t GetRegionCount() const override
    {
        size_t result = sOldSpace_->GetRegionCount() +
            sHugeObjectSpace_->GetRegionCount() +
            sNonMovableSpace_->GetRegionCount() +
            sReadOnlySpace_->GetRegionCount();
        return result;
    }

    void ResetNativeSizeAfterLastGC()
    {
        nativeSizeAfterLastGC_.store(0, std::memory_order_relaxed);
    }

    void IncNativeSizeAfterLastGC(size_t size)
    {
        nativeSizeAfterLastGC_.fetch_add(size, std::memory_order_relaxed);
    }

    size_t GetNativeSizeAfterLastGC() const
    {
        return nativeSizeAfterLastGC_.load(std::memory_order_relaxed);
    }

    size_t GetNativeSizeTriggerSharedGC() const
    {
        return incNativeSizeTriggerSharedGC_;
    }

    size_t GetNativeSizeTriggerSharedCM() const
    {
        return incNativeSizeTriggerSharedCM_;
    }

    void ChangeGCParams([[maybe_unused]] bool inBackground) override
    {
        LOG_FULL(ERROR) << "SharedHeap ChangeGCParams() not supported yet";
    }

    GCStats *GetEcmaGCStats() override
    {
        return sGCStats_;
    }

    inline void SetGlobalEnvConstants(const GlobalEnvConstants *globalEnvConstants)
    {
        globalEnvConstants_ = globalEnvConstants;
    }

    inline const GlobalEnvConstants *GetGlobalConst() const override
    {
        return globalEnvConstants_;
    }

    SharedSparseSpace *GetSpaceWithType(MemSpaceType type) const
    {
        switch (type) {
            case MemSpaceType::SHARED_OLD_SPACE:
                return sOldSpace_;
            case MemSpaceType::SHARED_NON_MOVABLE:
                return sNonMovableSpace_;
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }
    }

    void Prepare(bool inTriggerGCThread);
    void Reclaim(TriggerGCType gcType);
    void PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase);
    void CompactHeapBeforeFork(JSThread *thread);
    void ReclaimForAppSpawn();

    SharedGCWorkManager *GetWorkManager() const
    {
        return sWorkManager_;
    }

    SharedGCMarker *GetSharedGCMarker() const
    {
        return sharedGCMarker_;
    }

    SharedGCMovableMarker *GetSharedGCMovableMarker() const
    {
        return sharedGCMovableMarker_;
    }

    inline void SwapOldSpace();

    SharedMemController *GetSharedMemController() const
    {
        return sharedMemController_;
    }

    void PrepareRecordRegionsForReclaim();

    template<class Callback>
    void EnumerateOldSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const;

    template<class Callback>
    void IterateOverObjects(const Callback &cb) const;

    inline TaggedObject *AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass);

    inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, size_t size);

    inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass);

    inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, size_t size);

    inline TaggedObject *AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateHugeObject(JSThread *thread, size_t size);

    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass);

    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateSNonMovableTlab(JSThread *thread, size_t size);

    inline TaggedObject *AllocateSOldTlab(JSThread *thread, size_t size);

    size_t VerifyHeapObjects(VerifyKind verifyKind) const;

    inline void MergeToOldSpaceSync(SharedLocalSpace *localSpace);

    void DumpHeapSnapshotBeforeOOM(bool isFullGC, JSThread *thread, SharedHeapOOMSource source);

    inline void ProcessSharedNativeDelete(const WeakRootVisitor& visitor);
    inline void PushToSharedNativePointerList(JSNativePointer* pointer);

    void UpdateHeapStatsAfterGC(TriggerGCType gcType) override;

    class SharedGCScope {
    public:
        SharedGCScope();
        ~SharedGCScope();
    };

    bool InHeapProfiler() const
    {
        return inHeapProfiler_;
    }

    void CheckInHeapProfiler();

private:
    void ProcessAllGCListeners();
    void CollectGarbageFinish(bool inDaemon, TriggerGCType gcType);

    void MoveOldSpaceToAppspawn();

    void ReclaimRegions(TriggerGCType type);

    void ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread);
    inline TaggedObject *AllocateInSOldSpace(JSThread *thread, size_t size);
    inline void InvokeSharedNativePointerCallbacks();

    struct SharedHeapSmartGCStats {
        /**
         * For SmartGC.
         * The daemon thread checks these statuses before trying to collect garbage, and waits until they allow it.
         * The check-and-wait sequence must be atomic, so a Mutex/ConditionVariable is used.
         */
        Mutex sensitiveStatusMutex_;
        ConditionVariable sensitiveStatusCV_;
        AppSensitiveStatus sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
        StartupStatus startupStatus_ {StartupStatus::BEFORE_STARTUP};
        // If the SharedHeap is almost OOM and a collection has failed, a GC with GCReason::ALLOCATION_FAILED
        // must run at once, even in sensitive status.
        bool forceGC_ {false};
    };

    SharedHeapSmartGCStats smartGCStats_;

    static SharedHeap *instance_;

    GCStats *sGCStats_ {nullptr};

    bool localFullMarkTriggered_ {false};

    bool optionalLogEnabled_ {false};

    bool parallelGC_ {true};

    // Only means the main body of SharedGC is finished; i.e., if parallel GC is enabled, this flag may already be
    // set to true while the sweep and clear tasks are still running asynchronously.
    bool gcFinished_ {true};
    Mutex waitGCFinishedMutex_;
    ConditionVariable waitGCFinishedCV_;

    DaemonThread *dThread_ {nullptr};
    const GlobalEnvConstants *globalEnvConstants_ {nullptr};
    SharedOldSpace *sOldSpace_ {nullptr};
    SharedOldSpace *sCompressSpace_ {nullptr};
    SharedNonMovableSpace *sNonMovableSpace_ {nullptr};
    SharedReadOnlySpace *sReadOnlySpace_ {nullptr};
    SharedHugeObjectSpace *sHugeObjectSpace_ {nullptr};
    SharedAppSpawnSpace *sAppSpawnSpace_ {nullptr};
    SharedGCWorkManager *sWorkManager_ {nullptr};
    SharedConcurrentMarker *sConcurrentMarker_ {nullptr};
    SharedConcurrentSweeper *sSweeper_ {nullptr};
    SharedGC *sharedGC_ {nullptr};
    SharedFullGC *sharedFullGC_ {nullptr};
    SharedGCEvacuator *sEvacuator_ {nullptr};
    SharedGCMarker *sharedGCMarker_ {nullptr};
    SharedGCMovableMarker *sharedGCMovableMarker_ {nullptr};
    SharedMemController *sharedMemController_ {nullptr};
    size_t growingFactor_ {0};
    size_t growingStep_ {0};
    size_t incNativeSizeTriggerSharedCM_ {0};
    size_t incNativeSizeTriggerSharedGC_ {0};
    size_t fragmentationLimitForSharedFullGC_ {0};
    std::atomic<size_t> spaceOvershoot_ {0};
    std::atomic<size_t> nativeSizeAfterLastGC_ {0};
    bool inHeapProfiler_ {false};
    CVector<JSNativePointer *> sharedNativePointerList_;
    std::mutex sNativePointerListMutex_;
};

class Heap : public BaseHeap {
public:
    explicit Heap(EcmaVM *ecmaVm);
    virtual ~Heap() = default;
    NO_COPY_SEMANTIC(Heap);
    NO_MOVE_SEMANTIC(Heap);
    void Initialize();
    void Destroy() override;
    void Prepare();
    void GetHeapPrepare();
    void Resume(TriggerGCType gcType);
    void ResumeForAppSpawn();
    void CompactHeapBeforeFork();
    void DisableParallelGC();
    void EnableParallelGC();
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    void SetJsDumpThresholds(size_t thresholds) const;
#endif

    // fixme: Rename NewSpace to YoungSpace.
    // This is the active young generation space that the new objects are allocated in
    // or copied into (from the other semi space) during semi space GC.
    SemiSpace *GetNewSpace() const
    {
        return activeSemiSpace_;
    }

    /*
     * Return the original active space where the objects are to be evacuated during semi space GC.
     * This should be invoked only in the evacuation phase of semi space GC.
     * fixme: Get rid of this interface or make it safe considering the above implicit limitation / requirement.
     */
    SemiSpace *GetFromSpaceDuringEvacuation() const
    {
        return inactiveSemiSpace_;
    }
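
    // Illustrative note (not in the original source): during semi space GC the
    // two SemiSpaces trade roles, roughly as follows.
    //
    //     heap->SwapNewSpace();  // flip active/inactive semi spaces
    //     SemiSpace *to = heap->GetNewSpace();                    // copy target
    //     SemiSpace *from = heap->GetFromSpaceDuringEvacuation(); // old active space
    //     // Live objects are evacuated from `from` into `to` (or promoted to
    //     // old space), then `from` is reclaimed.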

    OldSpace *GetOldSpace() const
    {
        return oldSpace_;
    }

    OldSpace *GetCompressSpace() const
    {
        return compressSpace_;
    }

    NonMovableSpace *GetNonMovableSpace() const
    {
        return nonMovableSpace_;
    }

    HugeObjectSpace *GetHugeObjectSpace() const
    {
        return hugeObjectSpace_;
    }

    MachineCodeSpace *GetMachineCodeSpace() const
    {
        return machineCodeSpace_;
    }

    HugeMachineCodeSpace *GetHugeMachineCodeSpace() const
    {
        return hugeMachineCodeSpace_;
    }

    SnapshotSpace *GetSnapshotSpace() const
    {
        return snapshotSpace_;
    }

    ReadOnlySpace *GetReadOnlySpace() const
    {
        return readOnlySpace_;
    }

    AppSpawnSpace *GetAppSpawnSpace() const
    {
        return appSpawnSpace_;
    }

    SparseSpace *GetSpaceWithType(MemSpaceType type) const
    {
        switch (type) {
            case MemSpaceType::OLD_SPACE:
                return oldSpace_;
            case MemSpaceType::NON_MOVABLE:
                return nonMovableSpace_;
            case MemSpaceType::MACHINE_CODE_SPACE:
                return machineCodeSpace_;
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }
    }

    PartialGC *GetPartialGC() const
    {
        return partialGC_;
    }

    FullGC *GetFullGC() const
    {
        return fullGC_;
    }

    ConcurrentSweeper *GetSweeper() const
    {
        return sweeper_;
    }

    ParallelEvacuator *GetEvacuator() const
    {
        return evacuator_;
    }

    ConcurrentMarker *GetConcurrentMarker() const
    {
        return concurrentMarker_;
    }

    IncrementalMarker *GetIncrementalMarker() const
    {
        return incrementalMarker_;
    }

    Marker *GetNonMovableMarker() const
    {
        return nonMovableMarker_;
    }

    Marker *GetCompressGCMarker() const
    {
        return compressGCMarker_;
    }

    EcmaVM *GetEcmaVM() const
    {
        return ecmaVm_;
    }

    JSThread *GetJSThread() const
    {
        return thread_;
    }

    WorkManager *GetWorkManager() const
    {
        return workManager_;
    }

    WorkNode *&GetMarkingObjectLocalBuffer()
    {
        return sharedGCData_.sharedConcurrentMarkingLocalBuffer_;
    }

    IdleGCTrigger *GetIdleGCTrigger() const
    {
        return idleGCTrigger_;
    }

    void SetRSetWorkListHandler(RSetWorkListHandler *handler)
    {
        ASSERT((sharedGCData_.rSetWorkListHandler_ == nullptr) != (handler == nullptr));
        sharedGCData_.rSetWorkListHandler_ = handler;
    }

    void ProcessSharedGCMarkingLocalBuffer();

    void ProcessSharedGCRSetWorkList();

    const GlobalEnvConstants *GetGlobalConst() const override;

    MemController *GetMemController() const
    {
        return memController_;
    }

    inline void RecordOrResetObjectSize(size_t objectSize)
    {
        recordObjectSize_ = objectSize;
    }

    inline size_t GetRecordObjectSize() const
    {
        return recordObjectSize_;
    }

    inline void RecordOrResetNativeSize(size_t nativeSize)
    {
        recordNativeSize_ = nativeSize;
    }

    inline size_t GetRecordNativeSize() const
    {
        return recordNativeSize_;
    }

    /*
     * For object allocations.
     */

    // Young
    inline TaggedObject *AllocateInYoungSpace(size_t size);
    inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateYoungOrHugeObject(size_t size);
    inline uintptr_t AllocateYoungSync(size_t size);
    inline TaggedObject *TryAllocateYoungGeneration(JSHClass *hclass, size_t size);
    // Old
    inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateOldOrHugeObject(size_t size);
    // Non-movable
    inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateClassClass(JSHClass *hclass, size_t size);
    // Huge
    inline TaggedObject *AllocateHugeObject(size_t size);
    inline TaggedObject *AllocateHugeObject(JSHClass *hclass, size_t size);
    // Machine code
    inline TaggedObject *AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc = nullptr);
    inline TaggedObject *AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc = nullptr);
    // Snapshot
    inline uintptr_t AllocateSnapshotSpace(size_t size);

    // Shared non-movable space TLAB
    inline TaggedObject *AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size);
    // Shared old space TLAB
    inline TaggedObject *AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size);

    void ResetTlab();
    void FillBumpPointerForTlab();

    /*
     * GC triggers.
     */
    void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER);
    bool CheckAndTriggerOldGC(size_t size = 0);
    bool CheckAndTriggerHintGC(MemoryReduceDegree degree, GCReason reason = GCReason::OTHER);
    TriggerGCType SelectGCType() const;

    /*
     * Parallel GC related configurations and utilities.
     */

    void PostParallelGCTask(ParallelGCTaskPhase taskPhase);

    bool IsParallelGCEnabled() const
    {
        return parallelGC_;
    }

    void ChangeGCParams(bool inBackground) override;

    GCStats *GetEcmaGCStats() override;

    GCKeyStats *GetEcmaGCKeyStats();

    JSObjectResizingStrategy *GetJSObjectResizingStrategy();

    void TriggerIdleCollection(int idleMicroSec);
    void NotifyMemoryPressure(bool inHighMemoryPressure);

    void TryTriggerConcurrentMarking();
    void AdjustBySurvivalRate(size_t originalNewSpaceSize);
    void TriggerConcurrentMarking();
    bool CheckCanTriggerConcurrentMarking();

    void TryTriggerIdleCollection() override;
    void TryTriggerIncrementalMarking() override;
    void CalculateIdleDuration();
    void UpdateWorkManager(WorkManager *workManager);
    bool CheckOngoingConcurrentMarking() override;

    inline void SwapNewSpace();
    inline void SwapOldSpace();

    inline bool MoveYoungRegion(Region *region);
    inline bool MoveYoungRegionToOld(Region *region);
    inline void MergeToOldSpaceSync(LocalSpace *localSpace);

    template<class Callback>
    void EnumerateOldSpaceRegions(const Callback &cb, Region *region = nullptr) const;

    template<class Callback>
    void EnumerateNonNewSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const;

    template<class Callback>
    void EnumerateNewSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateSnapshotSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNonMovableRegions(const Callback &cb) const;

    template<class Callback>
    inline void EnumerateRegions(const Callback &cb) const;

    inline void ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd);

    void WaitAllTasksFinished();
    void WaitConcurrentMarkingFinished();

    MemGrowingType GetMemGrowingType() const
    {
        return memGrowingtype_;
    }

    void SetMemGrowingType(MemGrowingType memGrowingType)
    {
        memGrowingtype_ = memGrowingType;
    }

    size_t CalculateLinearSpaceOverShoot()
    {
        return oldSpace_->GetMaximumCapacity() - oldSpace_->GetInitialCapacity();
    }

    inline size_t GetCommittedSize() const override;

    inline size_t GetHeapObjectSize() const override;

    inline void NotifyRecordMemorySize();

    inline size_t GetRegionCount() const override;

    size_t GetRegionCachedSize() const
    {
        return activeSemiSpace_->GetInitialCapacity();
    }

    size_t GetLiveObjectSize() const;

    inline uint32_t GetHeapObjectCount() const;

    size_t GetPromotedSize() const
    {
        return promotedSize_;
    }

    size_t GetArrayBufferSize() const;

    size_t GetHeapLimitSize() const;

    uint32_t GetMaxEvacuateTaskCount() const
    {
        return maxEvacuateTaskCount_;
    }

    /*
     * Receives a callback function to control idle time.
     */
    inline void InitializeIdleStatusControl(std::function<void(bool)> callback);

    void DisableNotifyIdle()
    {
        if (notifyIdleStatusCallback != nullptr) {
            notifyIdleStatusCallback(true);
        }
    }

    void EnableNotifyIdle()
    {
        if (enableIdleGC_ && notifyIdleStatusCallback != nullptr) {
            notifyIdleStatusCallback(false);
        }
    }

    void SetIdleTask(IdleTaskType task)
    {
        idleTask_ = task;
    }

    void ClearIdleTask();

    bool IsEmptyIdleTask()
    {
        return idleTask_ == IdleTaskType::NO_TASK;
    }

    void SetOnSerializeEvent(bool isSerialize)
    {
        onSerializeEvent_ = isSerialize;
        if (!onSerializeEvent_ && !InSensitiveStatus()) {
            TryTriggerIncrementalMarking();
            TryTriggerIdleCollection();
            TryTriggerConcurrentMarking();
        }
    }

    bool GetOnSerializeEvent() const
    {
        return onSerializeEvent_;
    }

    void NotifyFinishColdStart(bool isMainThread = true);

    void NotifyFinishColdStartSoon();

    void NotifyHighSensitive(bool isStart);

    bool HandleExitHighSensitiveEvent();

    bool ObjectExceedMaxHeapSize() const override;

    bool ObjectExceedHighSensitiveThresholdForCM() const;

    bool ObjectExceedJustFinishStartupThresholdForGC() const;

    bool ObjectExceedJustFinishStartupThresholdForCM() const;

    void TryIncreaseNewSpaceOvershootByConfigSize();

    void TryIncreaseOvershootByConfigSize();

    bool CheckIfNeedStopCollectionByStartup();

    bool CheckIfNeedStopCollectionByHighSensitive();

    bool NeedStopCollection() override;

    void SetSensitiveStatus(AppSensitiveStatus status) override
    {
        sHeap_->SetSensitiveStatus(status);
        smartGCStats_.sensitiveStatus_.store(status, std::memory_order_release);
    }

    AppSensitiveStatus GetSensitiveStatus() const override
    {
        return smartGCStats_.sensitiveStatus_.load(std::memory_order_acquire);
    }

    void SetRecordHeapObjectSizeBeforeSensitive(size_t objSize)
    {
        recordObjSizeBeforeSensitive_ = objSize;
    }

    size_t GetRecordHeapObjectSizeBeforeSensitive() const
    {
        return recordObjSizeBeforeSensitive_;
    }

    void SetNearGCInSensitive(bool flag)
    {
        nearGCInSensitive_ = flag;
    }

    bool IsNearGCInSensitive()
    {
        return nearGCInSensitive_;
    }

    bool CASSensitiveStatus(AppSensitiveStatus expect, AppSensitiveStatus status)
    {
        return smartGCStats_.sensitiveStatus_.compare_exchange_strong(expect, status, std::memory_order_seq_cst);
    }

    StartupStatus GetStartupStatus() const
    {
        ASSERT(smartGCStats_.startupStatus_.load(std::memory_order_relaxed) == sHeap_->GetStartupStatus());
        return smartGCStats_.startupStatus_.load(std::memory_order_relaxed);
    }

    bool IsJustFinishStartup() const
    {
        return GetStartupStatus() == StartupStatus::JUST_FINISH_STARTUP;
    }

    bool CancelJustFinishStartupEvent()
    {
        if (!IsJustFinishStartup()) {
            return false;
        }
        TryIncreaseOvershootByConfigSize();
        smartGCStats_.startupStatus_.store(StartupStatus::FINISH_STARTUP, std::memory_order_release);
        sHeap_->CancelJustFinishStartupEvent();
        return true;
    }

    bool FinishStartupEvent() override
    {
        if (!OnStartupEvent()) {
            return false;
        }
        TryIncreaseOvershootByConfigSize();
        smartGCStats_.startupStatus_.store(StartupStatus::JUST_FINISH_STARTUP, std::memory_order_release);
        sHeap_->FinishStartupEvent();
        return true;
    }

    bool OnStartupEvent() const override
    {
        return GetStartupStatus() == StartupStatus::ON_STARTUP;
    }

    void NotifyPostFork() override
    {
        sHeap_->NotifyPostFork();
        smartGCStats_.startupStatus_.store(StartupStatus::ON_STARTUP, std::memory_order_relaxed);
        LOG_GC(INFO) << "SmartGC: enter app cold start";
        size_t localFirst = config_.GetMaxHeapSize();
        size_t localSecond = config_.GetMaxHeapSize() * JUST_FINISH_STARTUP_LOCAL_THRESHOLD_RATIO;
        auto sharedHeapConfig = sHeap_->GetEcmaParamConfiguration();
        size_t sharedFirst = sHeap_->GetOldSpace()->GetInitialCapacity();
        size_t sharedSecond = sharedHeapConfig.GetMaxHeapSize()
                            * JUST_FINISH_STARTUP_SHARED_THRESHOLD_RATIO
                            * JUST_FINISH_STARTUP_SHARED_CONCURRENT_MARK_RATIO;
        LOG_GC(INFO) << "SmartGC: startup GC restrain, "
            << "phase 1 threshold: local " << localFirst / 1_MB << "MB, shared " << sharedFirst / 1_MB << "MB; "
            << "phase 2 threshold: local " << localSecond / 1_MB << "MB, shared " << sharedSecond / 1_MB << "MB";
    }

#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    void StartHeapTracking()
    {
        WaitAllTasksFinished();
    }

    void StopHeapTracking()
    {
        WaitAllTasksFinished();
    }
#endif
    inline bool InHeapProfiler();

    inline void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);

    // Add an AllocationInspector to each space.
    void AddAllocationInspectorToAllSpaces(AllocationInspector *inspector);

    // Clear the AllocationInspector from each space.
    void ClearAllocationInspectorFromAllSpaces();

    /*
     * Functions used by heap verification.
     */

    template<class Callback>
    void IterateOverObjects(const Callback &cb, bool isSimplify = false) const;

    size_t VerifyHeapObjects(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
    size_t VerifyOldToNewRSet(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
    void StatisticHeapObject(TriggerGCType gcType) const;
    void StatisticHeapDetail();
    void PrintHeapInfo(TriggerGCType gcType) const;

    bool OldSpaceExceedCapacity(size_t size) const override
    {
        size_t totalSize = oldSpace_->GetCommittedSize() + hugeObjectSpace_->GetCommittedSize() + size;
        return totalSize >= oldSpace_->GetMaximumCapacity() + oldSpace_->GetOvershootSize() +
               oldSpace_->GetOutOfMemoryOvershootSize();
    }

    bool OldSpaceExceedLimit() const override
    {
        size_t totalSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
        return totalSize >= oldSpace_->GetInitialCapacity() + oldSpace_->GetOvershootSize();
    }

    void AdjustSpaceSizeForAppSpawn();

    static bool ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object);

    bool IsFullMarkRequested() const
    {
        return fullMarkRequested_;
    }

    void SetFullMarkRequestedState(bool fullMarkRequested)
    {
        fullMarkRequested_ = fullMarkRequested;
    }

    void SetHeapMode(HeapMode mode)
    {
        mode_ = mode;
    }

    void IncreaseNativeBindingSize(size_t size);
    void IncreaseNativeBindingSize(JSNativePointer *object);
    void DecreaseNativeBindingSize(size_t size);

    void ResetNativeBindingSize()
    {
        nativeBindingSize_ = 0;
    }

    size_t GetNativeBindingSize() const
    {
        return nativeBindingSize_;
    }

    size_t GetGlobalSpaceNativeLimit() const
    {
        return globalSpaceNativeLimit_;
    }

    size_t GetNativeBindingSizeAfterLastGC() const
    {
        return nativeBindingSizeAfterLastGC_;
    }

    size_t GetGlobalNativeSize() const
    {
        return GetNativeBindingSize() + nativeAreaAllocator_->GetNativeMemoryUsage();
    }

    void ResetNativeSizeAfterLastGC()
    {
        nativeSizeAfterLastGC_ = 0;
        nativeBindingSizeAfterLastGC_ = nativeBindingSize_;
    }

    void IncNativeSizeAfterLastGC(size_t size)
    {
        nativeSizeAfterLastGC_ += size;
    }

    bool GlobalNativeSizeLargerToTriggerGC() const
    {
        auto incNativeBindingSizeAfterLastGC = nativeBindingSize_ > nativeBindingSizeAfterLastGC_ ?
            nativeBindingSize_ - nativeBindingSizeAfterLastGC_ : 0;
        return GetGlobalNativeSize() > nativeSizeTriggerGCThreshold_ &&
            nativeSizeAfterLastGC_ + incNativeBindingSizeAfterLastGC > incNativeSizeTriggerGC_;
    }
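
    // Worked example with illustrative numbers: if nativeSizeTriggerGCThreshold_
    // were 300MB and incNativeSizeTriggerGC_ were 100MB, a heap holding 350MB of
    // global native memory that has accumulated 120MB of native size since the
    // last GC would satisfy both conditions above, so this returns true.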

    bool GlobalNativeSizeLargerThanLimit() const
    {
        size_t overshoot = InSensitiveStatus() ? nativeSizeOvershoot_ : 0;
        return GetGlobalNativeSize() >= globalSpaceNativeLimit_ + overshoot;
    }
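
    // Sensitive scenes get extra native headroom. With a hypothetical limit of 100_MB and a
    // nativeSizeOvershoot_ of 20_MB, a 110_MB native footprint is over the limit in a normal
    // scene but still within it while InSensitiveStatus() holds.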

    bool GlobalNativeSizeLargerThanLimitForIdle() const
    {
        return GetGlobalNativeSize() >= static_cast<size_t>(globalSpaceNativeLimit_ *
            IDLE_SPACE_SIZE_LIMIT_RATE);
    }

    void TryTriggerFullMarkOrGCByNativeSize();

    void TryTriggerFullMarkBySharedSize(size_t size);

    bool TryTriggerFullMarkBySharedLimit();

    void CheckAndTriggerTaskFinishedGC();

    bool IsMarking() const override;

    bool IsReadyToConcurrentMark() const override;

    bool IsYoungGC() const
    {
        return gcType_ == TriggerGCType::YOUNG_GC;
    }

    void CheckNonMovableSpaceOOM();
    void DumpHeapSnapshotBeforeOOM(bool isFullGC = true);
    std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> CalCallSiteInfo(uintptr_t retAddr) const;
    MachineCode *GetMachineCodeObject(uintptr_t pc) const;

    PUBLIC_API GCListenerId AddGCListener(FinishGCListener listener, void *data);
    PUBLIC_API void RemoveGCListener(GCListenerId listenerId);
    void ProcessGCListeners();
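
    // Minimal usage sketch for the listener API above; `OnGCFinished` and `stats` are
    // hypothetical caller-side names, not part of this header. A listener is a plain function
    // pointer paired with an opaque `void *` that is handed back when a GC finishes:
    //
    //   static void OnGCFinished(void *data)
    //   {
    //       static_cast<MyGCStats *>(data)->gcCount++;  // recover caller context
    //   }
    //   GCListenerId id = heap->AddGCListener(OnGCFinished, &stats);
    //   // ... later, before `stats` goes away:
    //   heap->RemoveGCListener(id);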

    inline void ProcessNativeDelete(const WeakRootVisitor& visitor);
    inline void ProcessReferences(const WeakRootVisitor& visitor);
    inline void PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent);
    inline void RemoveFromNativePointerList(const JSNativePointer* pointer);
    inline void ClearNativePointerList();

    size_t GetNativePointerListSize() const
    {
        return nativePointerList_.size();
    }

    size_t GetHeapAliveSizeExcludesYoungAfterGC() const
    {
        return heapAliveSizeExcludesYoungAfterGC_;
    }

    void UpdateHeapStatsAfterGC(TriggerGCType gcType) override;

private:
    void CollectGarbageImpl(TriggerGCType gcType, GCReason reason = GCReason::OTHER);

    static constexpr int MIN_JSDUMP_THRESHOLDS = 85;
    static constexpr int MAX_JSDUMP_THRESHOLDS = 95;
    static constexpr int IDLE_TIME_LIMIT = 10;  // if idle time exceeds 10 ms we can do something
    static constexpr int ALLOCATE_SIZE_LIMIT = 100_KB;
    static constexpr int IDLE_MAINTAIN_TIME = 500;
    static constexpr int BACKGROUND_GROW_LIMIT = 2_MB;
    // Threshold at which HintGC will actually trigger GC.
    static constexpr double SURVIVAL_RATE_THRESHOLD = 0.5;
    static constexpr size_t NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT = DEFAULT_SHARED_HEAP_SIZE / 10; // 10: one tenth.
    static constexpr size_t INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT = 100_MB;
    void RecomputeLimits();
    void AdjustOldSpaceLimit();
    // Record lastRegion for each space, which will be used in ReclaimRegions().
    void PrepareRecordRegionsForReclaim();
    inline void ReclaimRegions(TriggerGCType gcType);
    inline size_t CalculateCommittedCacheSize();
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    uint64_t GetCurrentTickMillseconds();
    void ThresholdReachedDump();
#endif
    void CleanCallback();
    void IncreasePendingAsyncNativeCallbackSize(size_t bindingSize)
    {
        pendingAsyncNativeCallbackSize_ += bindingSize;
    }
    void DecreasePendingAsyncNativeCallbackSize(size_t bindingSize)
    {
        pendingAsyncNativeCallbackSize_ -= bindingSize;
    }
    class ParallelGCTask : public Task {
    public:
        ParallelGCTask(int32_t id, Heap *heap, ParallelGCTaskPhase taskPhase)
            : Task(id), heap_(heap), taskPhase_(taskPhase) {}
        ~ParallelGCTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(ParallelGCTask);
        NO_MOVE_SEMANTIC(ParallelGCTask);

    private:
        Heap *heap_ {nullptr};
        ParallelGCTaskPhase taskPhase_;
    };

    class AsyncClearTask : public Task {
    public:
        AsyncClearTask(int32_t id, Heap *heap, TriggerGCType type)
            : Task(id), heap_(heap), gcType_(type) {}
        ~AsyncClearTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(AsyncClearTask);
        NO_MOVE_SEMANTIC(AsyncClearTask);
    private:
        Heap *heap_;
        TriggerGCType gcType_;
    };

    class FinishColdStartTask : public Task {
    public:
        FinishColdStartTask(int32_t id, Heap *heap)
            : Task(id), heap_(heap) {}
        ~FinishColdStartTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(FinishColdStartTask);
        NO_MOVE_SEMANTIC(FinishColdStartTask);
    private:
        Heap *heap_;
    };

    class FinishGCRestrainTask : public Task {
    public:
        FinishGCRestrainTask(int32_t id, Heap *heap)
            : Task(id), heap_(heap) {}
        ~FinishGCRestrainTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(FinishGCRestrainTask);
        NO_MOVE_SEMANTIC(FinishGCRestrainTask);
    private:
        Heap *heap_;
    };

    class DeleteCallbackTask : public Task {
    public:
        DeleteCallbackTask(int32_t id, std::vector<NativePointerCallbackData> &callbacks) : Task(id)
        {
            std::swap(callbacks, nativePointerCallbacks_);
        }
        ~DeleteCallbackTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(DeleteCallbackTask);
        NO_MOVE_SEMANTIC(DeleteCallbackTask);

    private:
        std::vector<NativePointerCallbackData> nativePointerCallbacks_ {};
    };
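
    // DeleteCallbackTask deliberately takes the callback vector by non-const reference and
    // std::swap()s it into the task: ownership of the pending native-pointer callbacks moves in
    // O(1) without copying, leaving the caller's vector empty, so the callbacks can run on a
    // taskpool thread. A hedged sketch of the hand-off, where the collection step and `thread`
    // (the owning JSThread) are assumptions for illustration:
    //
    //   std::vector<NativePointerCallbackData> pending = /* gathered dead native pointers */;
    //   Taskpool::GetCurrentTaskpool()->PostTask(
    //       std::make_unique<DeleteCallbackTask>(thread->GetThreadId(), pending));
    //   // `pending` is now empty; the task owns the callbacks until Run() invokes them.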

    struct MainLocalHeapSmartGCStats {
        /**
         * For SmartGC.
         * The main js thread checks these statuses every time it tries to collect garbage
         * (e.g. in JSThread::CheckSafePoint) and skips the collection if needed, so std::atomic
         * is almost enough.
         */
        std::atomic<AppSensitiveStatus> sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
        std::atomic<StartupStatus> startupStatus_ {StartupStatus::BEFORE_STARTUP};
    };
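
    // Illustrative reader-side pattern for the struct above (a sketch, not code from this file;
    // the helper name is hypothetical): because each field is an independent std::atomic, a GC
    // trigger can poll them lock-free and back off while the app is in a sensitive phase:
    //
    //   bool ShouldSkipGCForSmartGC(const MainLocalHeapSmartGCStats &s)
    //   {
    //       return s.sensitiveStatus_.load(std::memory_order_relaxed) ==
    //                  AppSensitiveStatus::ENTER_HIGH_SENSITIVE ||
    //              s.startupStatus_.load(std::memory_order_relaxed) == StartupStatus::ON_STARTUP;
    //   }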

    // Some data used in SharedGC also needs to be stored in the local heap, e.g. the temporary local mark stack.
    struct SharedGCLocalStoragePackedData {
        /**
         * During SharedGC concurrent marking, the barrier pushes shared objects onto a mark stack
         * for marking. In LocalGC, non-shared objects can simply be pushed to the WorkNode for
         * MAIN_THREAD_INDEX, but in SharedGC the only options are to either take a global lock for
         * DAEMON_THREAD_INDEX's WorkNode, or push to a local WorkNode that is pushed to the global
         * set in remark.
         * If the heap is destructed before this node is pushed to the global set, check it and try
         * to push the remaining objects as well.
         */
        WorkNode *sharedConcurrentMarkingLocalBuffer_ {nullptr};
        /**
         * Records the local_to_share rset used in SharedGC concurrent marking, whose lifecycle is
         * within one SharedGC.
         * Before mutating this local heap (e.g. LocalGC::Evacuate), make sure the RSetWorkList is
         * fully processed; otherwise the SharedGC concurrent mark will visit incorrect
         * local_to_share bits.
         * Before destroying the local heap, the RSetWorkList should be finished as well.
         */
        RSetWorkListHandler *rSetWorkListHandler_ {nullptr};
    };

    EcmaVM *ecmaVm_ {nullptr};
    JSThread *thread_ {nullptr};

    SharedHeap *sHeap_ {nullptr};
    MainLocalHeapSmartGCStats smartGCStats_;

    /*
     * Heap spaces.
     */

    /*
     * Young generation spaces where most new objects are allocated
     * (only one of the spaces is active at a time in semi space GC).
     */
    SemiSpace *activeSemiSpace_ {nullptr};
    SemiSpace *inactiveSemiSpace_ {nullptr};

    // Old generation spaces where some long living objects are allocated or promoted.
    OldSpace *oldSpace_ {nullptr};
    OldSpace *compressSpace_ {nullptr};
    ReadOnlySpace *readOnlySpace_ {nullptr};
    AppSpawnSpace *appSpawnSpace_ {nullptr};
    // Spaces used for special kinds of objects.
    NonMovableSpace *nonMovableSpace_ {nullptr};
    MachineCodeSpace *machineCodeSpace_ {nullptr};
    HugeMachineCodeSpace *hugeMachineCodeSpace_ {nullptr};
    HugeObjectSpace *hugeObjectSpace_ {nullptr};
    SnapshotSpace *snapshotSpace_ {nullptr};
    // tlab for the shared non movable space
    ThreadLocalAllocationBuffer *sNonMovableTlab_ {nullptr};
    // tlab for the shared old space
    ThreadLocalAllocationBuffer *sOldTlab_ {nullptr};

    /*
     * Garbage collectors collecting garbage in different scopes.
     */

    /*
     * The most frequently used partial GC, which collects garbage in young spaces
     * and, if the GC heuristics require it, part of the old spaces.
     */
    PartialGC *partialGC_ {nullptr};

    // Full collector which collects garbage in all valid heap spaces.
    FullGC *fullGC_ {nullptr};

    // Concurrent marker which coordinates actions of GC markers and mutators.
    ConcurrentMarker *concurrentMarker_ {nullptr};

    // Concurrent sweeper which coordinates actions of sweepers (in spaces excluding young semi spaces) and mutators.
    ConcurrentSweeper *sweeper_ {nullptr};

    // Parallel evacuator which evacuates objects from one space to another.
    ParallelEvacuator *evacuator_ {nullptr};

    // Incremental marker which coordinates actions of GC markers in idle time.
    IncrementalMarker *incrementalMarker_ {nullptr};

    /*
     * Different kinds of markers used by different collectors.
     * Depending on the collector algorithm, some markers can do simple marking
     * while others also need to handle object movement.
     */
    Marker *nonMovableMarker_ {nullptr};
    Marker *compressGCMarker_ {nullptr};

    // Work manager managing the tasks mostly generated in the GC mark phase.
    WorkManager *workManager_ {nullptr};

    SharedGCLocalStoragePackedData sharedGCData_;

    bool onSerializeEvent_ {false};
    bool parallelGC_ {true};
    bool fullGCRequested_ {false};
    bool fullMarkRequested_ {false};
    bool oldSpaceLimitAdjusted_ {false};
    bool enableIdleGC_ {false};
    std::atomic_bool isCSetClearing_ {false};
    HeapMode mode_ {HeapMode::NORMAL};

    /*
     * The memory controller providing memory statistics (by allocations and collections),
     * which is used for GC heuristics.
     */
    MemController *memController_ {nullptr};
    size_t promotedSize_ {0};
    size_t semiSpaceCopiedSize_ {0};
    size_t nativeBindingSize_ {0};
    size_t globalSpaceNativeLimit_ {0};
    size_t nativeSizeTriggerGCThreshold_ {0};
    size_t incNativeSizeTriggerGC_ {0};
    size_t nativeSizeOvershoot_ {0};
    size_t asyncClearNativePointerThreshold_ {0};
    size_t nativeSizeAfterLastGC_ {0};
    size_t heapAliveSizeExcludesYoungAfterGC_ {0};
    size_t nativeBindingSizeAfterLastGC_ {0};
    size_t newAllocatedSharedObjectSize_ {0};
    // recordObjectSize_ & recordNativeSize_:
    // Record memory usage before a taskpool task starts; used to decide whether to trigger GC after the task finishes.
    size_t recordObjectSize_ {0};
    size_t recordNativeSize_ {0};
    // Record the heap object size before entering sensitive status.
    size_t recordObjSizeBeforeSensitive_ {0};
    bool nearGCInSensitive_ {false};

    size_t pendingAsyncNativeCallbackSize_ {0};
    MemGrowingType memGrowingtype_ {MemGrowingType::HIGH_THROUGHPUT};

    // Parallel evacuator task number.
    uint32_t maxEvacuateTaskCount_ {0};

    uint64_t startupDurationInMs_ {0};

    Mutex setNewSpaceOvershootSizeMutex_;

    // Application status
    IdleNotifyStatusCallback notifyIdleStatusCallback {nullptr};

    IdleTaskType idleTask_ {IdleTaskType::NO_TASK};
    float idlePredictDuration_ {0.0f};
    double idleTaskFinishTime_ {0.0};

    /*
     * The listeners which are called at the end of GC.
     */
    std::vector<std::pair<FinishGCListener, void *>> gcListeners_;

    IdleGCTrigger *idleGCTrigger_ {nullptr};

    bool hasOOMDump_ {false};

    CVector<JSNativePointer *> nativePointerList_;
    CVector<JSNativePointer *> concurrentNativePointerList_;

    friend panda::test::HProfTestHelper;
    friend panda::test::GCTest_CallbackTask_Test;
    friend panda::test::HeapTestHelper;
};
}  // namespace panda::ecmascript

#endif  // ECMASCRIPT_MEM_HEAP_H