/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_HEAP_H
#define ECMASCRIPT_MEM_HEAP_H

#include "ecmascript/base/config.h"
#include "ecmascript/frames.h"
#include "ecmascript/js_object_resizing_strategy.h"
#include "ecmascript/mem/linear_space.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/shared_heap/shared_space.h"
#include "ecmascript/mem/sparse_space.h"
#include "ecmascript/mem/work_manager.h"
#include "ecmascript/taskpool/taskpool.h"
#include "ecmascript/mem/machine_code.h"
#include "ecmascript/mem/idle_gc_trigger.h"

namespace panda::test {
class GCTest_CallbackTask_Test;
class HProfTestHelper;
}

namespace panda::ecmascript {
class ConcurrentMarker;
class ConcurrentSweeper;
class EcmaVM;
class FullGC;
class GCStats;
class GCKeyStats;
class HeapRegionAllocator;
class HeapTracker;
#if !WIN_OR_MAC_OR_IOS_PLATFORM
class HeapProfilerInterface;
class HeapProfiler;
#endif
class IncrementalMarker;
class JSNativePointer;
class Marker;
class MemController;
class NativeAreaAllocator;
class ParallelEvacuator;
class PartialGC;
class RSetWorkListHandler;
class SharedConcurrentMarker;
class SharedConcurrentSweeper;
class SharedGC;
class SharedGCMarkerBase;
class SharedGCMarker;
class SharedFullGC;
class SharedGCMovableMarker;
class STWYoungGC;
class ThreadLocalAllocationBuffer;
class JSThread;
class DaemonThread;
class GlobalEnvConstants;
class IdleGCTrigger;

using IdleNotifyStatusCallback = std::function<void(bool)>;
using FinishGCListener = void (*)(void *);
using GCListenerId = std::vector<std::pair<FinishGCListener, void *>>::const_iterator;
using Clock = std::chrono::high_resolution_clock;
using AppFreezeFilterCallback = std::function<bool(const int32_t pid)>;

enum class IdleTaskType : uint8_t {
    NO_TASK,
    YOUNG_GC,
    FINISH_MARKING,
    INCREMENTAL_MARK
};

enum class MarkType : uint8_t {
    MARK_EDEN,
    MARK_YOUNG,
    MARK_FULL
};

enum class MemGrowingType : uint8_t {
    HIGH_THROUGHPUT,
    CONSERVATIVE,
    PRESSURE
};

enum class HeapMode {
    NORMAL,
    SPAWN,
    SHARE,
};

enum AppSensitiveStatus : uint8_t {
    NORMAL_SCENE,
    ENTER_HIGH_SENSITIVE,
    EXIT_HIGH_SENSITIVE,
};

enum class VerifyKind {
    VERIFY_PRE_GC,
    VERIFY_POST_GC,
    VERIFY_MARK_EDEN,
    VERIFY_EVACUATE_EDEN,
    VERIFY_MARK_YOUNG,
    VERIFY_EVACUATE_YOUNG,
    VERIFY_MARK_FULL,
    VERIFY_EVACUATE_OLD,
    VERIFY_EVACUATE_FULL,
    VERIFY_SHARED_RSET_POST_FULL_GC,
    VERIFY_PRE_SHARED_GC,
    VERIFY_POST_SHARED_GC,
    VERIFY_SHARED_GC_MARK,
    VERIFY_SHARED_GC_SWEEP,
    VERIFY_END,
};

class BaseHeap {
public:
    BaseHeap(const EcmaParamConfiguration &config) : config_(config) {}
    virtual ~BaseHeap() = default;
    NO_COPY_SEMANTIC(BaseHeap);
    NO_MOVE_SEMANTIC(BaseHeap);

    virtual void Destroy() = 0;

    virtual bool IsMarking() const = 0;

    virtual bool IsReadyToConcurrentMark() const = 0;

    virtual bool NeedStopCollection() = 0;

    virtual void SetSensitiveStatus(AppSensitiveStatus status) = 0;

    virtual AppSensitiveStatus GetSensitiveStatus() const = 0;

    virtual bool FinishStartupEvent() = 0;

    virtual bool OnStartupEvent() const = 0;

    virtual void NotifyPostFork() = 0;

    virtual void TryTriggerIdleCollection() = 0;

    virtual void TryTriggerIncrementalMarking() = 0;

    /*
     * Wait for existing concurrent marking tasks to be finished (if any).
     * Return true if there's ongoing concurrent marking.
     */
    virtual bool CheckOngoingConcurrentMarking() = 0;

    virtual bool OldSpaceExceedCapacity(size_t size) const = 0;

    virtual bool OldSpaceExceedLimit() const = 0;

    virtual inline size_t GetCommittedSize() const = 0;

    virtual inline size_t GetHeapObjectSize() const = 0;

    virtual inline size_t GetRegionCount() const = 0;

    virtual void ChangeGCParams(bool inBackground) = 0;

    virtual const GlobalEnvConstants *GetGlobalConst() const = 0;

    virtual GCStats *GetEcmaGCStats() = 0;

    virtual bool ObjectExceedMaxHeapSize() const = 0;

    MarkType GetMarkType() const
    {
        return markType_;
    }

    void SetMarkType(MarkType markType)
    {
        markType_ = markType;
    }

    bool IsEdenMark() const
    {
        return markType_ == MarkType::MARK_EDEN;
    }

    bool IsYoungMark() const
    {
        return markType_ == MarkType::MARK_YOUNG;
    }

    bool IsFullMark() const
    {
        return markType_ == MarkType::MARK_FULL;
    }

    bool IsConcurrentFullMark() const
    {
        return markType_ == MarkType::MARK_FULL;
    }

    TriggerGCType GetGCType() const
    {
        return gcType_;
    }

    bool PUBLIC_API IsAlive(TaggedObject *object) const;

    bool ContainObject(TaggedObject *object) const;

    bool GetOldGCRequested()
    {
        return oldGCRequested_;
    }

    EcmaParamConfiguration GetEcmaParamConfiguration() const
    {
        return config_;
    }

    NativeAreaAllocator *GetNativeAreaAllocator() const
    {
        return nativeAreaAllocator_;
    }

    HeapRegionAllocator *GetHeapRegionAllocator() const
    {
        return heapRegionAllocator_;
    }

    void ShouldThrowOOMError(bool shouldThrow)
    {
        shouldThrowOOMError_ = shouldThrow;
    }

    void SetCanThrowOOMError(bool canThrow)
    {
        canThrowOOMError_ = canThrow;
    }

    bool CanThrowOOMError()
    {
        return canThrowOOMError_;
    }

    bool IsInBackground() const
    {
        return inBackground_;
    }

    // ONLY used for heap verification.
    bool IsVerifying() const
    {
        return isVerifying_;
    }

    // ONLY used for heap verification.
    void SetVerifying(bool verifying)
    {
        isVerifying_ = verifying;
    }

    void SetGCState(bool inGC)
    {
        inGC_ = inGC;
    }

    bool InGC() const
    {
        return inGC_;
    }

    void NotifyHeapAliveSizeAfterGC(size_t size)
    {
        heapAliveSizeAfterGC_ = size;
    }

    size_t GetHeapAliveSizeAfterGC() const
    {
        return heapAliveSizeAfterGC_;
    }

    void UpdateHeapStatsAfterGC(TriggerGCType gcType)
    {
        if (gcType == TriggerGCType::EDEN_GC || gcType == TriggerGCType::YOUNG_GC) {
            return;
        }
        heapAliveSizeAfterGC_ = GetHeapObjectSize();
        fragmentSizeAfterGC_ = GetCommittedSize() - GetHeapObjectSize();
        if (gcType == TriggerGCType::FULL_GC || gcType == TriggerGCType::SHARED_FULL_GC) {
            heapBasicLoss_ = fragmentSizeAfterGC_;
        }
    }

    size_t GetFragmentSizeAfterGC() const
    {
        return fragmentSizeAfterGC_;
    }

    size_t GetHeapBasicLoss() const
    {
        return heapBasicLoss_;
    }

    size_t GetGlobalSpaceAllocLimit() const
    {
        return globalSpaceAllocLimit_;
    }

    // Whether the heap should be verified during GC.
    bool ShouldVerifyHeap() const
    {
        return shouldVerifyHeap_;
    }

    bool EnablePageTagThreadId() const
    {
        return enablePageTagThreadId_;
    }

    void ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
        bool NonMovableObjNearOOM = false);

    uint32_t GetMaxMarkTaskCount() const
    {
        return maxMarkTaskCount_;
    }

    bool InSensitiveStatus() const
    {
        return GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE || OnStartupEvent();
    }

    void OnAllocateEvent(EcmaVM *ecmaVm, TaggedObject* address, size_t size);
    inline void SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
                                            [[maybe_unused]] size_t size);
    bool CheckCanDistributeTask();
    void IncreaseTaskCount();
    void ReduceTaskCount();
    void WaitRunningTaskFinished();
    void WaitClearTaskFinished();
    void ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
        bool NonMovableObjNearOOM = false);
    void SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName);
    void SetAppFreezeFilterCallback(AppFreezeFilterCallback cb);
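
    // Illustrative sketch (an assumption about typical call sites, not verbatim from the
    // implementation): the task-count APIs above gate how many parallel GC workers may be
    // distributed, and a fan-out site would look roughly like:
    //
    //     if (heap->CheckCanDistributeTask()) {
    //         heap->IncreaseTaskCount();
    //         // post a Task whose Run() calls ReduceTaskCount() when it drains its work
    //     }
    //     heap->WaitRunningTaskFinished();  // block until all distributed tasks finish
    //
    // See ParallelMarkTask below for the concrete Task type used with this pattern.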

protected:
    void FatalOutOfMemoryError(size_t size, std::string functionName);

    enum class HeapType {
        LOCAL_HEAP,
        SHARED_HEAP,
        INVALID,
    };

    class RecursionScope {
    public:
        explicit RecursionScope(BaseHeap* heap, HeapType heapType) : heap_(heap), heapType_(heapType)
        {
            if (heap_->recursionDepth_++ != 0) {
                LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
                              << ") Constructor, depth: " << heap_->recursionDepth_;
            }
            heap_->SetGCState(true);
        }
        ~RecursionScope()
        {
            if (--heap_->recursionDepth_ != 0) {
                LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
                              << ") Destructor, depth: " << heap_->recursionDepth_;
            }
            heap_->SetGCState(false);
        }
    private:
        BaseHeap *heap_ {nullptr};
        HeapType heapType_ {HeapType::INVALID};
    };
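
    // Illustrative usage sketch (an assumption, not verbatim from the implementation): a
    // collector entry point guards itself against re-entrancy for one GC cycle like so:
    //
    //     void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
    //     {
    //         RecursionScope recurScope(this, HeapType::LOCAL_HEAP);  // FATALs on re-entry
    //         // ... do the actual collection; SetGCState(false) runs on scope exit
    //     }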

    static constexpr double TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE = 0.75;

    const EcmaParamConfiguration config_;
    MarkType markType_ {MarkType::MARK_YOUNG};
    TriggerGCType gcType_ {TriggerGCType::YOUNG_GC};
    Mutex gcCollectGarbageMutex_;
    // Region allocators.
    NativeAreaAllocator *nativeAreaAllocator_ {nullptr};
    HeapRegionAllocator *heapRegionAllocator_ {nullptr};

    size_t heapAliveSizeAfterGC_ {0};
    size_t globalSpaceAllocLimit_ {0};
    size_t globalSpaceConcurrentMarkLimit_ {0};
    size_t heapBasicLoss_ {1_MB};
    size_t fragmentSizeAfterGC_ {0};
    // Parallel marker task count.
    uint32_t runningTaskCount_ {0};
    uint32_t maxMarkTaskCount_ {0};
    Mutex waitTaskFinishedMutex_;
    ConditionVariable waitTaskFinishedCV_;
    Mutex waitClearTaskFinishedMutex_;
    ConditionVariable waitClearTaskFinishedCV_;
    AppFreezeFilterCallback appfreezeCallback_ {nullptr};
    bool clearTaskFinished_ {true};
    bool inBackground_ {false};
    bool shouldThrowOOMError_ {false};
    bool canThrowOOMError_ {true};
    bool oldGCRequested_ {false};
    // ONLY used for heap verification.
    bool shouldVerifyHeap_ {false};
    bool enablePageTagThreadId_ {false};
    bool inGC_ {false};
    bool isVerifying_ {false};
    int32_t recursionDepth_ {0};
};

class SharedHeap : public BaseHeap {
public:
    SharedHeap(const EcmaParamConfiguration &config) : BaseHeap(config) {}
    virtual ~SharedHeap() = default;

    static void CreateNewInstance();
    static SharedHeap *GetInstance();
    static void DestroyInstance();

    void Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
        const JSRuntimeOptions &option, DaemonThread *dThread);

    void Destroy() override;

    void PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option);

    void EnableParallelGC(JSRuntimeOptions &option);

    void DisableParallelGC(JSThread *thread);

    void AdjustGlobalSpaceAllocLimit();

    class ParallelMarkTask : public Task {
    public:
        ParallelMarkTask(int32_t id, SharedHeap *heap, SharedParallelMarkPhase taskPhase)
            : Task(id), sHeap_(heap), taskPhase_(taskPhase) {};
        ~ParallelMarkTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(ParallelMarkTask);
        NO_MOVE_SEMANTIC(ParallelMarkTask);

    private:
        SharedHeap *sHeap_ {nullptr};
        SharedParallelMarkPhase taskPhase_;
    };

    class AsyncClearTask : public Task {
    public:
        AsyncClearTask(int32_t id, SharedHeap *heap, TriggerGCType type)
            : Task(id), sHeap_(heap), gcType_(type) {}
        ~AsyncClearTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(AsyncClearTask);
        NO_MOVE_SEMANTIC(AsyncClearTask);
    private:
        SharedHeap *sHeap_;
        TriggerGCType gcType_;
    };
    bool IsMarking() const override
    {
        LOG_FULL(ERROR) << "SharedHeap IsMarking() not support yet";
        return false;
    }

    bool IsReadyToConcurrentMark() const override;

    bool NeedStopCollection() override;

    void SetSensitiveStatus(AppSensitiveStatus status) override
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        smartGCStats_.sensitiveStatus_ = status;
        if (!InSensitiveStatus()) {
            smartGCStats_.sensitiveStatusCV_.Signal();
        }
    }

    // This should be called while holding the lock of sensitiveStatusMutex_.
    AppSensitiveStatus GetSensitiveStatus() const override
    {
        return smartGCStats_.sensitiveStatus_;
    }

    bool FinishStartupEvent() override
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        if (!smartGCStats_.onStartupEvent_) {
            return false;
        }
        smartGCStats_.onStartupEvent_ = false;
        if (!InSensitiveStatus()) {
            smartGCStats_.sensitiveStatusCV_.Signal();
        }
        return true;
    }

    // This should be called while holding the lock of sensitiveStatusMutex_.
    bool OnStartupEvent() const override
    {
        return smartGCStats_.onStartupEvent_;
    }

    void NotifyPostFork() override
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        smartGCStats_.onStartupEvent_ = true;
    }

    void WaitSensitiveStatusFinished()
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        while (InSensitiveStatus() && !smartGCStats_.forceGC_) {
            smartGCStats_.sensitiveStatusCV_.Wait(&smartGCStats_.sensitiveStatusMutex_);
        }
    }
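
    // Illustrative sketch (an assumption about the daemon thread's flow, not verbatim): before
    // a shared collection starts, the daemon thread parks here until the app leaves the
    // sensitive window, unless a forced GC (see SetForceGC below) breaks the wait:
    //
    //     sHeap->WaitSensitiveStatusFinished();  // blocks while high-sensitive or in startup
    //     sHeap->DaemonCollectGarbage(gcType, reason);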

    bool ObjectExceedMaxHeapSize() const override;

    bool CheckAndTriggerSharedGC(JSThread *thread);

    bool CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size);

    void TryTriggerLocalConcurrentMarking();

    // Called when all VMs are destroyed; tries to destroy the daemon thread.
    void WaitAllTasksFinishedAfterAllJSThreadEliminated();

    void WaitAllTasksFinished(JSThread *thread);

    void StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason);         // In daemon thread

    // Use JSThread instead of DaemonThread to check IsReadyToSharedConcurrentMark, to avoid an atomic load.
    bool CheckCanTriggerConcurrentMarking(JSThread *thread);

    void TryTriggerIdleCollection() override
    {
        LOG_FULL(ERROR) << "SharedHeap TryTriggerIdleCollection() not support yet";
        return;
    }

    void TryTriggerIncrementalMarking() override
    {
        LOG_FULL(ERROR) << "SharedHeap TryTriggerIncrementalMarking() not support yet";
        return;
    }

    void UpdateWorkManager(SharedGCWorkManager *sWorkManager);

    bool CheckOngoingConcurrentMarking() override;

    bool OldSpaceExceedCapacity(size_t size) const override
    {
        size_t totalSize = sOldSpace_->GetCommittedSize() + size;
        return totalSize >= sOldSpace_->GetMaximumCapacity() + sOldSpace_->GetOutOfMemoryOvershootSize();
    }

    bool OldSpaceExceedLimit() const override
    {
        return sOldSpace_->GetHeapObjectSize() >= sOldSpace_->GetInitialCapacity();
    }

    SharedConcurrentMarker *GetConcurrentMarker() const
    {
        return sConcurrentMarker_;
    }

    SharedConcurrentSweeper *GetSweeper() const
    {
        return sSweeper_;
    }

    bool IsParallelGCEnabled() const
    {
        return parallelGC_;
    }

    SharedOldSpace *GetOldSpace() const
    {
        return sOldSpace_;
    }

    SharedOldSpace *GetCompressSpace() const
    {
        return sCompressSpace_;
    }

    SharedNonMovableSpace *GetNonMovableSpace() const
    {
        return sNonMovableSpace_;
    }

    SharedHugeObjectSpace *GetHugeObjectSpace() const
    {
        return sHugeObjectSpace_;
    }

    SharedReadOnlySpace *GetReadOnlySpace() const
    {
        return sReadOnlySpace_;
    }

    SharedAppSpawnSpace *GetAppSpawnSpace() const
    {
        return sAppSpawnSpace_;
    }

    void SetForceGC(bool forceGC)
    {
        LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
        smartGCStats_.forceGC_ = forceGC;
        if (smartGCStats_.forceGC_) {
            smartGCStats_.sensitiveStatusCV_.Signal();
        }
    }

    inline void TryTriggerConcurrentMarking(JSThread *thread);

    template<TriggerGCType gcType, GCReason gcReason>
    void TriggerConcurrentMarking(JSThread *thread);

    template<TriggerGCType gcType, GCReason gcReason>
    void CollectGarbage(JSThread *thread);

    template<TriggerGCType gcType, GCReason gcReason>
    void PostGCTaskForTest(JSThread *thread);

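    // Illustrative call (a sketch; the particular enum values are assumptions, though both
    // appear elsewhere in this file): the template form fixes type and reason at compile time,
    // so each trigger site instantiates a specialized path instead of branching at runtime:
    //
    //     sHeap->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::ALLOCATION_FAILED>(thread);
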
    void CollectGarbageNearOOM(JSThread *thread);
    // Only means the main body of SharedGC is finished, i.e. if parallel_gc is enabled, this flag will be set
    // to true even if sweep_task and clear_task are still running asynchronously.
    void NotifyGCCompleted();            // In daemon thread

    // Called when all VMs are destroyed; tries to destroy the daemon thread.
    void WaitGCFinishedAfterAllJSThreadEliminated();

    void WaitGCFinished(JSThread *thread);

    void DaemonCollectGarbage(TriggerGCType gcType, GCReason reason);

    void SetMaxMarkTaskCount(uint32_t maxTaskCount)
    {
        maxMarkTaskCount_ = maxTaskCount;
    }

    inline size_t GetCommittedSize() const override
    {
        size_t result = sOldSpace_->GetCommittedSize() +
            sHugeObjectSpace_->GetCommittedSize() +
            sNonMovableSpace_->GetCommittedSize() +
            sReadOnlySpace_->GetCommittedSize();
        return result;
    }

    inline size_t GetHeapObjectSize() const override
    {
        size_t result = sOldSpace_->GetHeapObjectSize() +
            sHugeObjectSpace_->GetHeapObjectSize() +
            sNonMovableSpace_->GetHeapObjectSize() +
            sReadOnlySpace_->GetCommittedSize();
        return result;
    }

    inline size_t GetRegionCount() const override
    {
        size_t result = sOldSpace_->GetRegionCount() +
            sHugeObjectSpace_->GetRegionCount() +
            sNonMovableSpace_->GetRegionCount() +
            sReadOnlySpace_->GetRegionCount();
        return result;
    }

    void ResetNativeSizeAfterLastGC()
    {
        nativeSizeAfterLastGC_.store(0, std::memory_order_relaxed);
    }

    void IncNativeSizeAfterLastGC(size_t size)
    {
        nativeSizeAfterLastGC_.fetch_add(size, std::memory_order_relaxed);
    }

    size_t GetNativeSizeAfterLastGC() const
    {
        return nativeSizeAfterLastGC_.load(std::memory_order_relaxed);
    }

    size_t GetNativeSizeTriggerSharedGC() const
    {
        return incNativeSizeTriggerSharedGC_;
    }

    size_t GetNativeSizeTriggerSharedCM() const
    {
        return incNativeSizeTriggerSharedCM_;
    }

    void ChangeGCParams([[maybe_unused]]bool inBackground) override
    {
        LOG_FULL(ERROR) << "SharedHeap ChangeGCParams() not support yet";
        return;
    }

    GCStats *GetEcmaGCStats() override
    {
        return sGCStats_;
    }

    inline void SetGlobalEnvConstants(const GlobalEnvConstants *globalEnvConstants)
    {
        globalEnvConstants_ = globalEnvConstants;
    }

    inline const GlobalEnvConstants *GetGlobalConst() const override
    {
        return globalEnvConstants_;
    }

    SharedSparseSpace *GetSpaceWithType(MemSpaceType type) const
    {
        switch (type) {
            case MemSpaceType::SHARED_OLD_SPACE:
                return sOldSpace_;
            case MemSpaceType::SHARED_NON_MOVABLE:
                return sNonMovableSpace_;
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }
    }

    void Prepare(bool inTriggerGCThread);
    void Reclaim(TriggerGCType gcType);
    void PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase);
    void CompactHeapBeforeFork(JSThread *thread);
    void ReclaimForAppSpawn();

    SharedGCWorkManager *GetWorkManager() const
    {
        return sWorkManager_;
    }

    SharedGCMarker *GetSharedGCMarker() const
    {
        return sharedGCMarker_;
    }

    SharedGCMovableMarker *GetSharedGCMovableMarker() const
    {
        return sharedGCMovableMarker_;
    }
    inline void SwapOldSpace();

    void PrepareRecordRegionsForReclaim();

    template<class Callback>
    void EnumerateOldSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const;

    template<class Callback>
    void IterateOverObjects(const Callback &cb) const;

    inline TaggedObject *AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass);

    inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, size_t size);

    inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass);

    inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, size_t size);

    inline TaggedObject *AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateHugeObject(JSThread *thread, size_t size);

    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass);

    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);

    inline TaggedObject *AllocateSNonMovableTlab(JSThread *thread, size_t size);

    inline TaggedObject *AllocateSOldTlab(JSThread *thread, size_t size);

    size_t VerifyHeapObjects(VerifyKind verifyKind) const;

    inline void MergeToOldSpaceSync(SharedLocalSpace *localSpace);

    void DumpHeapSnapshotBeforeOOM(bool isFullGC, JSThread *thread);

    class SharedGCScope {
    public:
        SharedGCScope();
        ~SharedGCScope();
    };

private:
    void ProcessAllGCListeners();
    inline void CollectGarbageFinish(bool inDaemon, TriggerGCType gcType);

    void MoveOldSpaceToAppspawn();

    void ReclaimRegions(TriggerGCType type);

    void ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread);
    inline TaggedObject *AllocateInSOldSpace(JSThread *thread, size_t size);
    struct SharedHeapSmartGCStats {
        /**
         * For SmartGC.
         * The daemon thread checks these statuses before trying to collect garbage, and waits
         * until the sensitive scene finishes. The check-and-wait sequence needs to be atomic,
         * so a Mutex/CV is used.
        */
        Mutex sensitiveStatusMutex_;
        ConditionVariable sensitiveStatusCV_;
        AppSensitiveStatus sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
        bool onStartupEvent_ {false};
        // If the SharedHeap is almost OOM and a collection has failed, a GC with
        // GCReason::ALLOCATION_FAILED must run at once, even in sensitive status.
        bool forceGC_ {false};
    };

    SharedHeapSmartGCStats smartGCStats_;

    static SharedHeap *instance_;

    GCStats *sGCStats_ {nullptr};

    bool localFullMarkTriggered_ {false};

    bool optionalLogEnabled_ {false};

    bool parallelGC_ {true};

    // Only means the main body of SharedGC is finished, i.e. if parallel_gc is enabled, this flag will be set
    // to true even if sweep_task and clear_task are still running asynchronously.
    bool gcFinished_ {true};
    Mutex waitGCFinishedMutex_;
    ConditionVariable waitGCFinishedCV_;

    DaemonThread *dThread_ {nullptr};
    const GlobalEnvConstants *globalEnvConstants_ {nullptr};
    SharedOldSpace *sOldSpace_ {nullptr};
    SharedOldSpace *sCompressSpace_ {nullptr};
    SharedNonMovableSpace *sNonMovableSpace_ {nullptr};
    SharedReadOnlySpace *sReadOnlySpace_ {nullptr};
    SharedHugeObjectSpace *sHugeObjectSpace_ {nullptr};
    SharedAppSpawnSpace *sAppSpawnSpace_ {nullptr};
    SharedGCWorkManager *sWorkManager_ {nullptr};
    SharedConcurrentMarker *sConcurrentMarker_ {nullptr};
    SharedConcurrentSweeper *sSweeper_ {nullptr};
    SharedGC *sharedGC_ {nullptr};
    SharedFullGC *sharedFullGC_ {nullptr};
    SharedGCMarker *sharedGCMarker_ {nullptr};
    SharedGCMovableMarker *sharedGCMovableMarker_ {nullptr};
    size_t growingFactor_ {0};
    size_t growingStep_ {0};
    size_t incNativeSizeTriggerSharedCM_ {0};
    size_t incNativeSizeTriggerSharedGC_ {0};
    size_t fragmentationLimitForSharedFullGC_ {0};
    std::atomic<size_t> nativeSizeAfterLastGC_ {0};
};

class Heap : public BaseHeap {
public:
    explicit Heap(EcmaVM *ecmaVm);
    virtual ~Heap() = default;
    NO_COPY_SEMANTIC(Heap);
    NO_MOVE_SEMANTIC(Heap);
    void Initialize();
    void Destroy() override;
    void Prepare();
    void GetHeapPrepare();
    void Resume(TriggerGCType gcType);
    void ResumeForAppSpawn();
    void CompactHeapBeforeFork();
    void DisableParallelGC();
    void EnableParallelGC();
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
    void SetJsDumpThresholds(size_t thresholds) const;
#endif

    EdenSpace *GetEdenSpace() const
    {
        return edenSpace_;
    }

    // fixme: Rename NewSpace to YoungSpace.
    // This is the active young generation space where new objects are allocated
    // or copied into (from the other semi space) during semi space GC.
    SemiSpace *GetNewSpace() const
    {
        return activeSemiSpace_;
    }

    /*
     * Return the original active space where the objects are to be evacuated during semi space GC.
     * This should be invoked only in the evacuation phase of semi space GC.
     * fixme: Get rid of this interface or make it safe considering the above implicit limitation / requirement.
     */
    SemiSpace *GetFromSpaceDuringEvacuation() const
    {
        return inactiveSemiSpace_;
    }

    OldSpace *GetOldSpace() const
    {
        return oldSpace_;
    }

    OldSpace *GetCompressSpace() const
    {
        return compressSpace_;
    }

    NonMovableSpace *GetNonMovableSpace() const
    {
        return nonMovableSpace_;
    }

    HugeObjectSpace *GetHugeObjectSpace() const
    {
        return hugeObjectSpace_;
    }

    MachineCodeSpace *GetMachineCodeSpace() const
    {
        return machineCodeSpace_;
    }

    HugeMachineCodeSpace *GetHugeMachineCodeSpace() const
    {
        return hugeMachineCodeSpace_;
    }

    SnapshotSpace *GetSnapshotSpace() const
    {
        return snapshotSpace_;
    }

    ReadOnlySpace *GetReadOnlySpace() const
    {
        return readOnlySpace_;
    }

    AppSpawnSpace *GetAppSpawnSpace() const
    {
        return appSpawnSpace_;
    }

    SparseSpace *GetSpaceWithType(MemSpaceType type) const
    {
        switch (type) {
            case MemSpaceType::OLD_SPACE:
                return oldSpace_;
            case MemSpaceType::NON_MOVABLE:
                return nonMovableSpace_;
            case MemSpaceType::MACHINE_CODE_SPACE:
                return machineCodeSpace_;
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }
    }

    STWYoungGC *GetSTWYoungGC() const
    {
        return stwYoungGC_;
    }

    PartialGC *GetPartialGC() const
    {
        return partialGC_;
    }

    FullGC *GetFullGC() const
    {
        return fullGC_;
    }

    ConcurrentSweeper *GetSweeper() const
    {
        return sweeper_;
    }

    ParallelEvacuator *GetEvacuator() const
    {
        return evacuator_;
    }

    ConcurrentMarker *GetConcurrentMarker() const
    {
        return concurrentMarker_;
    }

    IncrementalMarker *GetIncrementalMarker() const
    {
        return incrementalMarker_;
    }

    Marker *GetNonMovableMarker() const
    {
        return nonMovableMarker_;
    }

    Marker *GetSemiGCMarker() const
    {
        return semiGCMarker_;
    }

    Marker *GetCompressGCMarker() const
    {
        return compressGCMarker_;
    }

    EcmaVM *GetEcmaVM() const
    {
        return ecmaVm_;
    }

    JSThread *GetJSThread() const
    {
        return thread_;
    }

    WorkManager *GetWorkManager() const
    {
        return workManager_;
    }

    WorkNode *&GetMarkingObjectLocalBuffer()
    {
        return sharedGCData_.sharedConcurrentMarkingLocalBuffer_;
    }

    IdleGCTrigger *GetIdleGCTrigger() const
    {
        return idleGCTrigger_;
    }

    void SetRSetWorkListHandler(RSetWorkListHandler *handler)
    {
        ASSERT((sharedGCData_.rSetWorkListHandler_ == nullptr) != (handler == nullptr));
        sharedGCData_.rSetWorkListHandler_ = handler;
    }

    void ProcessSharedGCMarkingLocalBuffer();

    void ProcessSharedGCRSetWorkList();

    const GlobalEnvConstants *GetGlobalConst() const override;

    MemController *GetMemController() const
    {
        return memController_;
    }

    inline void RecordOrResetObjectSize(size_t objectSize)
    {
        recordObjectSize_ = objectSize;
    }

    inline size_t GetRecordObjectSize() const
    {
        return recordObjectSize_;
    }

    inline void RecordOrResetNativeSize(size_t nativeSize)
    {
        recordNativeSize_ = nativeSize;
    }

    inline size_t GetRecordNativeSize() const
    {
        return recordNativeSize_;
    }

    /*
     * For object allocations.
     */

    // Young
    inline TaggedObject *AllocateInGeneralNewSpace(size_t size);
    inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateYoungOrHugeObject(size_t size);
    inline uintptr_t AllocateYoungSync(size_t size);
    inline TaggedObject *TryAllocateYoungGeneration(JSHClass *hclass, size_t size);
    // Old
    inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass, size_t size);
    // Non-movable
    inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateClassClass(JSHClass *hclass, size_t size);
    // Huge
    inline TaggedObject *AllocateHugeObject(JSHClass *hclass, size_t size);
    // Machine code
    inline TaggedObject *AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc = nullptr);
    inline TaggedObject *AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc = nullptr);
    // Snapshot
    inline uintptr_t AllocateSnapshotSpace(size_t size);

    // Shared non-movable space TLAB
    inline TaggedObject *AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size);
    // Shared old space TLAB
    inline TaggedObject *AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size);

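    // Illustrative allocation sketch (hypothetical caller code, not part of this header):
    // call sites pick the route by object kind, and the heap falls back to huge-object
    // space for oversized requests, e.g.
    //
    //     TaggedObject *obj = heap->AllocateYoungOrHugeObject(hclass);        // regular new object
    //     TaggedObject *code = heap->AllocateMachineCodeObject(hclass, size); // JIT code object
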
    void ResetTlab();
    void FillBumpPointerForTlab();
    /*
     * GC triggers.
     */
    void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER);
    bool CheckAndTriggerOldGC(size_t size = 0);
    bool CheckAndTriggerHintGC();
    TriggerGCType SelectGCType() const;
    /*
     * Parallel GC related configurations and utilities.
     */

    void PostParallelGCTask(ParallelGCTaskPhase taskPhase);

    bool IsParallelGCEnabled() const
    {
        return parallelGC_;
    }
    void ChangeGCParams(bool inBackground) override;

    GCStats *GetEcmaGCStats() override;

    GCKeyStats *GetEcmaGCKeyStats();

    JSObjectResizingStrategy *GetJSObjectResizingStrategy();

    void TriggerIdleCollection(int idleMicroSec);
    void NotifyMemoryPressure(bool inHighMemoryPressure);

    void TryTriggerConcurrentMarking();
    void AdjustBySurvivalRate(size_t originalNewSpaceSize);
    void TriggerConcurrentMarking();
    bool CheckCanTriggerConcurrentMarking();

    void TryTriggerIdleCollection() override;
    void TryTriggerIncrementalMarking() override;
    void CalculateIdleDuration();
    void UpdateWorkManager(WorkManager *workManager);
    bool CheckOngoingConcurrentMarking() override;

    inline void SwapNewSpace();
    inline void SwapOldSpace();

    inline bool MoveYoungRegionSync(Region *region);
    inline void MergeToOldSpaceSync(LocalSpace *localSpace);

    template<class Callback>
    void EnumerateOldSpaceRegions(const Callback &cb, Region *region = nullptr) const;

    template<class Callback>
    void EnumerateNonNewSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const;

    template<class Callback>
    void EnumerateEdenSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNewSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateSnapshotSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNonMovableRegions(const Callback &cb) const;

    template<class Callback>
    inline void EnumerateRegions(const Callback &cb) const;

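    // Illustrative callback (an assumption, not from this header): each enumerator above
    // invokes the callback once per region in the corresponding space, so a caller can,
    // for example, apply a per-region operation across the whole heap:
    //
    //     heap->EnumerateRegions([](Region *region) {
    //         region->ClearMarkGCBitset();  // hypothetical per-region operation
    //     });
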
    inline void ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd);

    void WaitAllTasksFinished();
    void WaitConcurrentMarkingFinished();

    MemGrowingType GetMemGrowingType() const
    {
        return memGrowingtype_;
    }

    void SetMemGrowingType(MemGrowingType memGrowingType)
    {
        memGrowingtype_ = memGrowingType;
    }

    size_t CalculateLinearSpaceOverShoot()
    {
        return oldSpace_->GetMaximumCapacity() - oldSpace_->GetInitialCapacity();
    }

    inline size_t GetCommittedSize() const override;

    inline size_t GetHeapObjectSize() const override;

    inline void NotifyRecordMemorySize();

    inline size_t GetRegionCount() const override;

    size_t GetRegionCachedSize() const
    {
        return activeSemiSpace_->GetInitialCapacity();
    }

    size_t GetLiveObjectSize() const;

    inline uint32_t GetHeapObjectCount() const;

    size_t GetPromotedSize() const
    {
        return promotedSize_;
    }
    size_t GetEdenToYoungSize() const
    {
        return edenToYoungSize_;
    }

    size_t GetArrayBufferSize() const;

    size_t GetHeapLimitSize() const;

    uint32_t GetMaxEvacuateTaskCount() const
    {
        return maxEvacuateTaskCount_;
    }

    /*
     * Receive callback function to control idletime.
     */
    inline void InitializeIdleStatusControl(std::function<void(bool)> callback);

    void DisableNotifyIdle()
    {
        if (notifyIdleStatusCallback != nullptr) {
            notifyIdleStatusCallback(true);
        }
    }

    void EnableNotifyIdle()
    {
        if (enableIdleGC_ && notifyIdleStatusCallback != nullptr) {
            notifyIdleStatusCallback(false);
        }
    }

    void SetIdleTask(IdleTaskType task)
    {
        idleTask_ = task;
    }

    void ClearIdleTask();

    bool IsEmptyIdleTask()
    {
        return idleTask_ == IdleTaskType::NO_TASK;
    }

    void SetOnSerializeEvent(bool isSerialize)
    {
        onSerializeEvent_ = isSerialize;
        if (!onSerializeEvent_ && !InSensitiveStatus()) {
            TryTriggerIncrementalMarking();
            TryTriggerIdleCollection();
            TryTriggerConcurrentMarking();
        }
    }

    bool GetOnSerializeEvent() const
    {
        return onSerializeEvent_;
    }

    void NotifyFinishColdStart(bool isMainThread = true);

    void NotifyFinishColdStartSoon();

    void NotifyHighSensitive(bool isStart);

    bool HandleExitHighSensitiveEvent();

    bool ObjectExceedMaxHeapSize() const override;

    bool NeedStopCollection() override;

    void SetSensitiveStatus(AppSensitiveStatus status) override
    {
        sHeap_->SetSensitiveStatus(status);
        smartGCStats_.sensitiveStatus_.store(status, std::memory_order_release);
    }

    AppSensitiveStatus GetSensitiveStatus() const override
    {
        return smartGCStats_.sensitiveStatus_.load(std::memory_order_acquire);
    }

    void SetRecordHeapObjectSizeBeforeSensitive(size_t objSize)
    {
        recordObjSizeBeforeSensitive_ = objSize;
    }

    size_t GetRecordHeapObjectSizeBeforeSensitive() const
    {
        return recordObjSizeBeforeSensitive_;
    }

    bool CASSensitiveStatus(AppSensitiveStatus expect, AppSensitiveStatus status)
    {
        return smartGCStats_.sensitiveStatus_.compare_exchange_strong(expect, status, std::memory_order_seq_cst);
    }

    bool FinishStartupEvent() override
    {
        sHeap_->FinishStartupEvent();
        return smartGCStats_.onStartupEvent_.exchange(false, std::memory_order_relaxed) == true;
    }

    bool OnStartupEvent() const override
    {
        return smartGCStats_.onStartupEvent_.load(std::memory_order_relaxed);
    }

    void NotifyPostFork() override
    {
        sHeap_->NotifyPostFork();
        smartGCStats_.onStartupEvent_.store(true, std::memory_order_relaxed);
        LOG_GC(INFO) << "SmartGC: enter app cold start";
    }

#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    void StartHeapTracking()
    {
        WaitAllTasksFinished();
    }

    void StopHeapTracking()
    {
        WaitAllTasksFinished();
    }
#endif
    inline bool InHeapProfiler();

    void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);

    // add allocationInspector to each space
    void AddAllocationInspectorToAllSpaces(AllocationInspector *inspector);

    // clear allocationInspector from each space
    void ClearAllocationInspectorFromAllSpaces();

    /*
     * Functions used by heap verification.
     */

    template<class Callback>
    void IterateOverObjects(const Callback &cb, bool isSimplify = false) const;

    size_t VerifyHeapObjects(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
    size_t VerifyOldToNewRSet(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
    void StatisticHeapObject(TriggerGCType gcType) const;
    void StatisticHeapDetail();
    void PrintHeapInfo(TriggerGCType gcType) const;

    bool OldSpaceExceedCapacity(size_t size) const override
    {
        size_t totalSize = oldSpace_->GetCommittedSize() + hugeObjectSpace_->GetCommittedSize() + size;
        return totalSize >= oldSpace_->GetMaximumCapacity() + oldSpace_->GetOvershootSize() +
               oldSpace_->GetOutOfMemoryOvershootSize();
    }

    bool OldSpaceExceedLimit() const override
    {
        size_t totalSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
        return totalSize >= oldSpace_->GetInitialCapacity() + oldSpace_->GetOvershootSize();
    }

    void AdjustSpaceSizeForAppSpawn();

    static bool ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object);

    bool IsFullMarkRequested() const
    {
        return fullMarkRequested_;
    }

    void SetFullMarkRequestedState(bool fullMarkRequested)
    {
        fullMarkRequested_ = fullMarkRequested;
    }

    void SetHeapMode(HeapMode mode)
    {
        mode_ = mode;
    }

    void IncreaseNativeBindingSize(size_t size);
    void IncreaseNativeBindingSize(JSNativePointer *object);
    void DecreaseNativeBindingSize(size_t size);
    void ResetNativeBindingSize()
    {
        nativeBindingSize_ = 0;
    }

    size_t GetNativeBindingSize() const
    {
        return nativeBindingSize_;
    }

    size_t GetGlobalNativeSize() const
    {
        return GetNativeBindingSize() + nativeAreaAllocator_->GetNativeMemoryUsage();
    }

    void ResetNativeSizeAfterLastGC()
    {
        nativeSizeAfterLastGC_ = 0;
        nativeBindingSizeAfterLastGC_ = nativeBindingSize_;
    }

    void IncNativeSizeAfterLastGC(size_t size)
    {
        nativeSizeAfterLastGC_ += size;
    }

    bool GlobalNativeSizeLargerToTriggerGC() const
    {
        auto incNativeBindingSizeAfterLastGC = nativeBindingSize_ > nativeBindingSizeAfterLastGC_ ?
            nativeBindingSize_ - nativeBindingSizeAfterLastGC_ : 0;
        return GetGlobalNativeSize() > nativeSizeTriggerGCThreshold_ &&
            nativeSizeAfterLastGC_ + incNativeBindingSizeAfterLastGC > incNativeSizeTriggerGC_;
    }
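
    // Worked example (illustrative numbers, not from the source): suppose
    // nativeSizeTriggerGCThreshold_ = 300_MB and incNativeSizeTriggerGC_ = 64_MB. With a global
    // native size of 320 MB, a binding size that grew from 100 MB to 140 MB since the last GC
    // (increment = 40 MB), and nativeSizeAfterLastGC_ = 30 MB, the check becomes
    // 320 MB > 300 MB && (30 MB + 40 MB) > 64 MB, so this predicate returns true.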

    bool GlobalNativeSizeLargerThanLimit() const
    {
        size_t overshoot = InSensitiveStatus() ? nativeSizeOvershoot_ : 0;
        return GetGlobalNativeSize() >= globalSpaceNativeLimit_ + overshoot;
    }

    bool GlobalNativeSizeLargerThanLimitForIdle() const
    {
        return GetGlobalNativeSize() >= static_cast<size_t>(globalSpaceNativeLimit_ *
            IDLE_SPACE_SIZE_LIMIT_RATE);
    }

    void TryTriggerFullMarkOrGCByNativeSize();

    void TryTriggerFullMarkBySharedSize(size_t size);

    bool TryTriggerFullMarkBySharedLimit();

    void CheckAndTriggerTaskFinishedGC();

    bool IsMarking() const override;

    bool IsReadyToConcurrentMark() const override;

    bool IsEdenGC() const
    {
        return gcType_ == TriggerGCType::EDEN_GC;
    }

    bool IsYoungGC() const
    {
        return gcType_ == TriggerGCType::YOUNG_GC;
    }

    bool IsGeneralYoungGC() const
    {
        return gcType_ == TriggerGCType::YOUNG_GC || gcType_ == TriggerGCType::EDEN_GC;
    }

    void EnableEdenGC();

    void TryEnableEdenGC();

    void CheckNonMovableSpaceOOM();
    void ReleaseEdenAllocator();
    void InstallEdenAllocator();
    void DumpHeapSnapshotBeforeOOM(bool isFullGC = true);
    std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> CalCallSiteInfo(uintptr_t retAddr) const;
    MachineCode *GetMachineCodeObject(uintptr_t pc) const;

    PUBLIC_API GCListenerId AddGCListener(FinishGCListener listener, void *data);
    PUBLIC_API void RemoveGCListener(GCListenerId listenerId);
    void ProcessGCListeners();
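
    // Illustrative registration sketch (the callback name is hypothetical; the signatures
    // match FinishGCListener and AddGCListener above): a subsystem that wants to run after
    // every finished GC can hook in via the listener API:
    //
    //     static void OnGCFinished(void *data) { /* e.g. flush caches keyed on 'data' */ }
    //     GCListenerId id = heap->AddGCListener(OnGCFinished, /* data */ nullptr);
    //     ...
    //     heap->RemoveGCListener(id);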

    inline void ProcessNativeDelete(const WeakRootVisitor& visitor);
    inline void ProcessSharedNativeDelete(const WeakRootVisitor& visitor);
    inline void ProcessReferences(const WeakRootVisitor& visitor);
    inline void PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent);
    inline void PushToSharedNativePointerList(JSNativePointer* pointer);
    inline void RemoveFromNativePointerList(const JSNativePointer* pointer);
    inline void ClearNativePointerList();

    size_t GetNativePointerListSize() const
    {
        return nativePointerList_.size();
    }

private:
    inline TaggedObject *AllocateHugeObject(size_t size);

    static constexpr int MIN_JSDUMP_THRESHOLDS = 85;
    static constexpr int MAX_JSDUMP_THRESHOLDS = 95;
    static constexpr int IDLE_TIME_LIMIT = 10;  // if idle time over 10ms we can do something
    static constexpr int ALLOCATE_SIZE_LIMIT = 100_KB;
    static constexpr int IDLE_MAINTAIN_TIME = 500;
    static constexpr int BACKGROUND_GROW_LIMIT = 2_MB;
    // Threshold at which HintGC will actually trigger a GC.
1537     static constexpr double SURVIVAL_RATE_THRESHOLD = 0.5;
1538     static constexpr double IDLE_SPACE_SIZE_LIMIT_RATE = 0.8;
1539     static constexpr double IDLE_FULLGC_SPACE_USAGE_LIMIT_RATE = 0.7;
1540     static constexpr size_t NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT = DEFAULT_SHARED_HEAP_SIZE / 10; // 10 : ten times.
1541     static constexpr size_t INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT = 100_MB;
1542     void RecomputeLimits();
1543     void AdjustOldSpaceLimit();
1544     // record lastRegion for each space, which will be used in ReclaimRegions()
1545     void PrepareRecordRegionsForReclaim();
1546     inline void ReclaimRegions(TriggerGCType gcType);
1547     inline size_t CalculateCommittedCacheSize();
1548 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1549     uint64_t GetCurrentTickMillseconds();
1550     void ThresholdReachedDump();
1551 #endif
1552     void CleanCallBack();
IncreasePendingAsyncNativeCallbackSize(size_t bindingSize)1553     void IncreasePendingAsyncNativeCallbackSize(size_t bindingSize)
1554     {
1555         pendingAsyncNativeCallbackSize_ += bindingSize;
1556     }
DecreasePendingAsyncNativeCallbackSize(size_t bindingSize)1557     void DecreasePendingAsyncNativeCallbackSize(size_t bindingSize)
1558     {
1559         pendingAsyncNativeCallbackSize_ -= bindingSize;
1560     }
1561     class ParallelGCTask : public Task {
1562     public:
ParallelGCTask(int32_t id,Heap * heap,ParallelGCTaskPhase taskPhase)1563         ParallelGCTask(int32_t id, Heap *heap, ParallelGCTaskPhase taskPhase)
1564             : Task(id), heap_(heap), taskPhase_(taskPhase) {};
1565         ~ParallelGCTask() override = default;
1566         bool Run(uint32_t threadIndex) override;
1567 
1568         NO_COPY_SEMANTIC(ParallelGCTask);
1569         NO_MOVE_SEMANTIC(ParallelGCTask);
1570 
1571     private:
1572         Heap *heap_ {nullptr};
1573         ParallelGCTaskPhase taskPhase_;
1574     };
1575 
1576     class AsyncClearTask : public Task {
1577     public:
AsyncClearTask(int32_t id,Heap * heap,TriggerGCType type)1578         AsyncClearTask(int32_t id, Heap *heap, TriggerGCType type)
1579             : Task(id), heap_(heap), gcType_(type) {}
1580         ~AsyncClearTask() override = default;
1581         bool Run(uint32_t threadIndex) override;
1582 
1583         NO_COPY_SEMANTIC(AsyncClearTask);
1584         NO_MOVE_SEMANTIC(AsyncClearTask);
1585     private:
1586         Heap *heap_;
1587         TriggerGCType gcType_;
1588     };

    class FinishColdStartTask : public Task {
    public:
        FinishColdStartTask(int32_t id, Heap *heap)
            : Task(id), heap_(heap) {}
        ~FinishColdStartTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(FinishColdStartTask);
        NO_MOVE_SEMANTIC(FinishColdStartTask);
    private:
        Heap *heap_;
    };

    class DeleteCallbackTask : public Task {
    public:
        DeleteCallbackTask(int32_t id, std::vector<NativePointerCallbackData> &callbacks) : Task(id)
        {
            std::swap(callbacks, nativePointerCallbacks_);
        }
        ~DeleteCallbackTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(DeleteCallbackTask);
        NO_MOVE_SEMANTIC(DeleteCallbackTask);

    private:
        std::vector<NativePointerCallbackData> nativePointerCallbacks_ {};
    };
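    /*
     * Design note: the constructor swaps the caller's vector into the task, so the
     * pending native-pointer callbacks move to a worker thread without copying. A
     * minimal usage sketch (illustrative; the real call site is the GC's
     * callback-cleaning path):
     *
     *   std::vector<NativePointerCallbackData> callbacks;
     *   // ... gather the callbacks of dead JSNativePointer objects ...
     *   Taskpool::GetCurrentTaskpool()->PostTask(
     *       std::make_unique<DeleteCallbackTask>(GetJSThread()->GetThreadId(), callbacks));
     *   // `callbacks` is now empty; the task invokes them off the JS thread.
     */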

    struct MainLocalHeapSmartGCStats {
        /**
         * For SmartGC.
         * The main JS thread checks this status every time it tries to collect
         * garbage (e.g. in JSThread::CheckSafePoint) and skips the GC if needed,
         * so std::atomic is sufficient here.
         */
        std::atomic<AppSensitiveStatus> sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
        std::atomic<bool> onStartupEvent_ {false};
    };
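    /*
     * A minimal sketch (illustrative only; the helper name InSensitiveScene() is
     * hypothetical) of how these flags can gate a GC attempt at a safepoint:
     *
     *   bool InSensitiveScene() const
     *   {
     *       // Relaxed atomic loads suffice: a stale value merely delays or skips
     *       // one GC attempt, it never corrupts heap state.
     *       return smartGCStats_.sensitiveStatus_.load(std::memory_order_relaxed) ==
     *                  AppSensitiveStatus::ENTER_HIGH_SENSITIVE ||
     *              smartGCStats_.onStartupEvent_.load(std::memory_order_relaxed);
     *   }
     */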

    // Some data used by SharedGC also needs to be stored in the local heap, e.g. the temporary local mark stack.
    struct SharedGCLocalStoragePackedData {
        /**
         * During SharedGC concurrent marking, the barrier pushes shared objects onto a mark stack for marking.
         * In LocalGC a non-shared object can simply be pushed to the WorkNode for MAIN_THREAD_INDEX, but in
         * SharedGC the only options are to take a global lock around DAEMON_THREAD_INDEX's WorkNode, or to push
         * to a local WorkNode and publish it to the global pool during remark.
         * If the heap is destructed before this node is pushed to the global pool, check it and try to push the
         * remaining objects as well.
         */
        WorkNode *sharedConcurrentMarkingLocalBuffer_ {nullptr};
        /**
         * Records the local_to_share RSet used in SharedGC concurrent marking; its lifecycle is a single
         * SharedGC. Before mutating this local heap (e.g. LocalGC::Evacuate), make sure the RSetWorkList is
         * fully processed; otherwise the SharedGC concurrent marker will visit incorrect local_to_share bits.
         * The RSetWorkList must likewise be finished before the local heap is destroyed.
         */
        RSetWorkListHandler *rSetWorkListHandler_ {nullptr};
    };
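    /*
     * A minimal sketch (hypothetical helper names; the real publish path lives in the
     * shared-GC marker) of draining the local buffer when this heap is destructed
     * before remark has published it:
     *
     *   void FlushSharedConcurrentMarkingLocalBuffer()
     *   {
     *       WorkNode *&node = sharedGCData_.sharedConcurrentMarkingLocalBuffer_;
     *       if (node != nullptr) {
     *           // PublishToGlobalMarkStack() stands in for whatever global push the
     *           // shared WorkManager offers; it must be safe against the daemon thread.
     *           PublishToGlobalMarkStack(node);
     *           node = nullptr;
     *       }
     *   }
     */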

    EcmaVM *ecmaVm_ {nullptr};
    JSThread *thread_ {nullptr};

    SharedHeap *sHeap_ {nullptr};
    MainLocalHeapSmartGCStats smartGCStats_;

    /*
     * Heap spaces.
     */

    /*
     * Young-generation spaces where most new objects are allocated
     * (only one of the semi spaces is active at a time during semi-space GC).
     */
    EdenSpace *edenSpace_ {nullptr};
    SemiSpace *activeSemiSpace_ {nullptr};
    SemiSpace *inactiveSemiSpace_ {nullptr};
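    /*
     * A minimal sketch (illustrative only; the real flip happens inside the young-GC
     * evacuation code) of the semi-space invariant: new objects are allocated in the
     * active space, survivors are evacuated into the inactive one, then the roles swap.
     *
     *   void FlipSemiSpaces()
     *   {
     *       // After evacuation the former to-space becomes the allocation space.
     *       std::swap(activeSemiSpace_, inactiveSemiSpace_);
     *   }
     */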

    // Old-generation spaces where some long-lived objects are allocated or promoted.
    OldSpace *oldSpace_ {nullptr};
    OldSpace *compressSpace_ {nullptr};
    ReadOnlySpace *readOnlySpace_ {nullptr};
    AppSpawnSpace *appSpawnSpace_ {nullptr};
    // Spaces used for special kinds of objects.
    NonMovableSpace *nonMovableSpace_ {nullptr};
    MachineCodeSpace *machineCodeSpace_ {nullptr};
    HugeMachineCodeSpace *hugeMachineCodeSpace_ {nullptr};
    HugeObjectSpace *hugeObjectSpace_ {nullptr};
    SnapshotSpace *snapshotSpace_ {nullptr};
    // TLAB for the shared non-movable space.
    ThreadLocalAllocationBuffer *sNonMovableTlab_ {nullptr};
    // TLAB for the shared old space.
    ThreadLocalAllocationBuffer *sOldTlab_ {nullptr};
    /*
     * Garbage collectors collecting garbage in different scopes.
     */

    /*
     * Semi-space GC which collects garbage only in the young spaces.
     * It is currently optional because the partial GC also covers its functionality.
     */
    STWYoungGC *stwYoungGC_ {nullptr};

    /*
     * The most frequently used collector: the partial GC collects garbage in the young
     * spaces, plus parts of the old spaces when GC heuristics decide it is needed.
     */
    PartialGC *partialGC_ {nullptr};

    // Full collector which collects garbage in all valid heap spaces.
    FullGC *fullGC_ {nullptr};

    // Concurrent marker which coordinates the actions of GC markers and mutators.
    ConcurrentMarker *concurrentMarker_ {nullptr};

    // Concurrent sweeper which coordinates actions of sweepers (in spaces excluding the young semi spaces) and mutators.
    ConcurrentSweeper *sweeper_ {nullptr};

    // Parallel evacuator which evacuates objects from one space to another.
    ParallelEvacuator *evacuator_ {nullptr};

    // Incremental marker which coordinates the actions of GC markers during idle time.
    IncrementalMarker *incrementalMarker_ {nullptr};

    /*
     * Different kinds of markers used by different collectors.
     * Depending on the collector algorithm, some markers do simple marking,
     * while others also need to handle object movement.
     */
    Marker *nonMovableMarker_ {nullptr};
    Marker *semiGCMarker_ {nullptr};
    Marker *compressGCMarker_ {nullptr};

    // Work manager managing the tasks mostly generated in the GC mark phase.
    WorkManager *workManager_ {nullptr};

    SharedGCLocalStoragePackedData sharedGCData_;

    bool onSerializeEvent_ {false};
    bool parallelGC_ {true};
    bool fullGCRequested_ {false};
    bool fullMarkRequested_ {false};
    bool oldSpaceLimitAdjusted_ {false};
    bool enableIdleGC_ {false};
    std::atomic_bool isCSetClearing_ {false};
    HeapMode mode_ { HeapMode::NORMAL };

    /*
     * The memory controller providing memory statistics (on allocations and collections),
     * which are used for GC heuristics.
     */
    MemController *memController_ {nullptr};
    size_t edenToYoungSize_ {0};
    size_t promotedSize_ {0};
    size_t semiSpaceCopiedSize_ {0};
    size_t nativeBindingSize_ {0};
    size_t globalSpaceNativeLimit_ {0};
    size_t nativeSizeTriggerGCThreshold_ {0};
    size_t incNativeSizeTriggerGC_ {0};
    size_t nativeSizeOvershoot_ {0};
    size_t asyncClearNativePointerThreshold_ {0};
    size_t nativeSizeAfterLastGC_ {0};
    size_t nativeBindingSizeAfterLastGC_ {0};
    size_t newAllocatedSharedObjectSize_ {0};
    // recordObjectSize_ & recordNativeSize_:
    // Record memory usage before a taskpool task starts; used to decide whether to trigger GC after the task finishes.
    size_t recordObjectSize_ {0};
    size_t recordNativeSize_ {0};
    // Record the heap object size before entering sensitive status.
    size_t recordObjSizeBeforeSensitive_ {0};
    size_t pendingAsyncNativeCallbackSize_ {0};
    MemGrowingType memGrowingtype_ {MemGrowingType::HIGH_THROUGHPUT};

    // Parallel evacuator task count.
    uint32_t maxEvacuateTaskCount_ {0};

    // Application status.

    IdleNotifyStatusCallback notifyIdleStatusCallback {nullptr};

    IdleTaskType idleTask_ {IdleTaskType::NO_TASK};
    float idlePredictDuration_ {0.0f};
    double idleTaskFinishTime_ {0.0};

    /*
     * The listeners which are called at the end of GC.
     */
    std::vector<std::pair<FinishGCListener, void *>> gcListeners_;

    IdleGCTrigger *idleGCTrigger_ {nullptr};

    bool hasOOMDump_ {false};
    bool enableEdenGC_ {false};

    CVector<JSNativePointer *> nativePointerList_;
    CVector<JSNativePointer *> concurrentNativePointerList_;
    CVector<JSNativePointer *> sharedNativePointerList_;

    friend panda::test::HProfTestHelper;
    friend panda::test::GCTest_CallbackTask_Test;
};
}  // namespace panda::ecmascript

#endif  // ECMASCRIPT_MEM_HEAP_H