1 /*
2  * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef ECMASCRIPT_MEM_HEAP_H
17 #define ECMASCRIPT_MEM_HEAP_H
18 
19 #include "ecmascript/base/config.h"
20 #include "ecmascript/frames.h"
21 #include "ecmascript/js_object_resizing_strategy.h"
22 #include "ecmascript/mem/linear_space.h"
23 #include "ecmascript/mem/mark_stack.h"
24 #include "ecmascript/mem/shared_heap/shared_space.h"
25 #include "ecmascript/mem/sparse_space.h"
26 #include "ecmascript/mem/work_manager.h"
27 #include "ecmascript/taskpool/taskpool.h"
28 #include "ecmascript/mem/machine_code.h"
29 #include "ecmascript/mem/idle_gc_trigger.h"
30 
31 namespace panda::test {
32 class GCTest_CallbackTask_Test;
33 class HProfTestHelper;
34 }
35 
36 namespace panda::ecmascript {
37 class ConcurrentMarker;
38 class ConcurrentSweeper;
39 class EcmaVM;
40 class FullGC;
41 class GCStats;
42 class GCKeyStats;
43 class HeapRegionAllocator;
44 class HeapTracker;
45 #if !WIN_OR_MAC_OR_IOS_PLATFORM
46 class HeapProfilerInterface;
47 class HeapProfiler;
48 #endif
49 class IncrementalMarker;
50 class JSNativePointer;
51 class Marker;
52 class MemController;
53 class NativeAreaAllocator;
54 class ParallelEvacuator;
55 class PartialGC;
56 class RSetWorkListHandler;
57 class SharedConcurrentMarker;
58 class SharedConcurrentSweeper;
59 class SharedGC;
60 class SharedGCMarkerBase;
61 class SharedGCMarker;
62 class SharedFullGC;
63 class SharedGCMovableMarker;
64 class STWYoungGC;
65 class ThreadLocalAllocationBuffer;
66 class JSThread;
67 class DaemonThread;
68 class GlobalEnvConstants;
69 class IdleGCTrigger;
70 
71 using IdleNotifyStatusCallback = std::function<void(bool)>;
72 using FinishGCListener = void (*)(void *);
73 using GCListenerId = std::vector<std::pair<FinishGCListener, void *>>::const_iterator;
74 using Clock = std::chrono::high_resolution_clock;
75 using AppFreezeFilterCallback = std::function<bool(const int32_t pid)>;
76 
77 enum class IdleTaskType : uint8_t {
78     NO_TASK,
79     YOUNG_GC,
80     FINISH_MARKING,
81     INCREMENTAL_MARK
82 };
83 
84 enum class MarkType : uint8_t {
85     MARK_EDEN,
86     MARK_YOUNG,
87     MARK_FULL
88 };
89 
90 enum class MemGrowingType : uint8_t {
91     HIGH_THROUGHPUT,
92     CONSERVATIVE,
93     PRESSURE
94 };
95 
96 enum class HeapMode {
97     NORMAL,
98     SPAWN,
99     SHARE,
100 };
101 
102 enum AppSensitiveStatus : uint8_t {
103     NORMAL_SCENE,
104     ENTER_HIGH_SENSITIVE,
105     EXIT_HIGH_SENSITIVE,
106 };
107 
108 enum class VerifyKind {
109     VERIFY_PRE_GC,
110     VERIFY_POST_GC,
111     VERIFY_MARK_EDEN,
112     VERIFY_EVACUATE_EDEN,
113     VERIFY_MARK_YOUNG,
114     VERIFY_EVACUATE_YOUNG,
115     VERIFY_MARK_FULL,
116     VERIFY_EVACUATE_OLD,
117     VERIFY_EVACUATE_FULL,
118     VERIFY_SHARED_RSET_POST_FULL_GC,
119     VERIFY_PRE_SHARED_GC,
120     VERIFY_POST_SHARED_GC,
121     VERIFY_SHARED_GC_MARK,
122     VERIFY_SHARED_GC_SWEEP,
123     VERIFY_END,
124 };
125 
126 class BaseHeap {
127 public:
128     BaseHeap(const EcmaParamConfiguration &config) : config_(config) {}
129     virtual ~BaseHeap() = default;
130     NO_COPY_SEMANTIC(BaseHeap);
131     NO_MOVE_SEMANTIC(BaseHeap);
132 
133     virtual void Destroy() = 0;
134 
135     virtual bool IsMarking() const = 0;
136 
137     virtual bool IsReadyToConcurrentMark() const = 0;
138 
139     virtual bool NeedStopCollection() = 0;
140 
141     virtual void SetSensitiveStatus(AppSensitiveStatus status) = 0;
142 
143     virtual AppSensitiveStatus GetSensitiveStatus() const = 0;
144 
145     virtual bool FinishStartupEvent() = 0;
146 
147     virtual bool OnStartupEvent() const = 0;
148 
149     virtual void NotifyPostFork() = 0;
150 
151     virtual void TryTriggerIdleCollection() = 0;
152 
153     virtual void TryTriggerIncrementalMarking() = 0;
154 
155     /*
156      * Wait for existing concurrent marking tasks to be finished (if any).
157      * Return true if there's ongoing concurrent marking.
158      */
159     virtual bool CheckOngoingConcurrentMarking() = 0;
160 
161     virtual bool OldSpaceExceedCapacity(size_t size) const = 0;
162 
163     virtual bool OldSpaceExceedLimit() const = 0;
164 
165     virtual inline size_t GetCommittedSize() const = 0;
166 
167     virtual inline size_t GetHeapObjectSize() const = 0;
168 
169     virtual inline size_t GetRegionCount() const = 0;
170 
171     virtual void ChangeGCParams(bool inBackground) = 0;
172 
173     virtual const GlobalEnvConstants *GetGlobalConst() const = 0;
174 
175     virtual GCStats *GetEcmaGCStats() = 0;
176 
177     virtual bool ObjectExceedMaxHeapSize() const = 0;
178 
179     MarkType GetMarkType() const
180     {
181         return markType_;
182     }
183 
184     void SetMarkType(MarkType markType)
185     {
186         markType_ = markType;
187     }
188 
189     bool IsEdenMark() const
190     {
191         return markType_ == MarkType::MARK_EDEN;
192     }
193 
194     bool IsYoungMark() const
195     {
196         return markType_ == MarkType::MARK_YOUNG;
197     }
198 
199     bool IsFullMark() const
200     {
201         return markType_ == MarkType::MARK_FULL;
202     }
203 
204     bool IsConcurrentFullMark() const
205     {
206         return markType_ == MarkType::MARK_FULL;
207     }
208 
209     TriggerGCType GetGCType() const
210     {
211         return gcType_;
212     }
213 
214     bool PUBLIC_API IsAlive(TaggedObject *object) const;
215 
216     bool ContainObject(TaggedObject *object) const;
217 
218     bool GetOldGCRequested()
219     {
220         return oldGCRequested_;
221     }
222 
223     EcmaParamConfiguration GetEcmaParamConfiguration() const
224     {
225         return config_;
226     }
227 
228     NativeAreaAllocator *GetNativeAreaAllocator() const
229     {
230         return nativeAreaAllocator_;
231     }
232 
233     HeapRegionAllocator *GetHeapRegionAllocator() const
234     {
235         return heapRegionAllocator_;
236     }
237 
238     void ShouldThrowOOMError(bool shouldThrow)
239     {
240         shouldThrowOOMError_ = shouldThrow;
241     }
242 
243     void SetCanThrowOOMError(bool canThrow)
244     {
245         canThrowOOMError_ = canThrow;
246     }
247 
248     bool CanThrowOOMError()
249     {
250         return canThrowOOMError_;
251     }
252 
253     bool IsInBackground() const
254     {
255         return inBackground_;
256     }
257 
258     // ONLY used for heap verification.
259     bool IsVerifying() const
260     {
261         return isVerifying_;
262     }
263 
264     // ONLY used for heap verification.
265     void SetVerifying(bool verifying)
266     {
267         isVerifying_ = verifying;
268     }
269 
270     void SetGCState(bool inGC)
271     {
272         inGC_ = inGC;
273     }
274 
275     bool InGC() const
276     {
277         return inGC_;
278     }
279 
280     void NotifyHeapAliveSizeAfterGC(size_t size)
281     {
282         heapAliveSizeAfterGC_ = size;
283     }
284 
285     size_t GetHeapAliveSizeAfterGC() const
286     {
287         return heapAliveSizeAfterGC_;
288     }
289 
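    // For example (illustrative arithmetic, derived from the body below): after a FULL_GC with 8 MB committed
    // and 6 MB of live objects, fragmentSizeAfterGC_ and heapBasicLoss_ both become 2 MB; EDEN_GC and YOUNG_GC
    // leave these stats untouched.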
290     void UpdateHeapStatsAfterGC(TriggerGCType gcType)
291     {
292         if (gcType == TriggerGCType::EDEN_GC || gcType == TriggerGCType::YOUNG_GC) {
293             return;
294         }
295         heapAliveSizeAfterGC_ = GetHeapObjectSize();
296         fragmentSizeAfterGC_ = GetCommittedSize() - GetHeapObjectSize();
297         if (gcType == TriggerGCType::FULL_GC || gcType == TriggerGCType::SHARED_FULL_GC) {
298             heapBasicLoss_ = fragmentSizeAfterGC_;
299         }
300     }
301 
302     size_t GetFragmentSizeAfterGC() const
303     {
304         return fragmentSizeAfterGC_;
305     }
306 
307     size_t GetHeapBasicLoss() const
308     {
309         return heapBasicLoss_;
310     }
311 
312     size_t GetGlobalSpaceAllocLimit() const
313     {
314         return globalSpaceAllocLimit_;
315     }
316 
317     // Whether the heap should be verified during GC.
318     bool ShouldVerifyHeap() const
319     {
320         return shouldVerifyHeap_;
321     }
322 
323     void ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
324         bool NonMovableObjNearOOM = false);
325 
326     uint32_t GetMaxMarkTaskCount() const
327     {
328         return maxMarkTaskCount_;
329     }
330 
331     bool InSensitiveStatus() const
332     {
333         return GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE || OnStartupEvent();
334     }
335 
336     void OnAllocateEvent(EcmaVM *ecmaVm, TaggedObject* address, size_t size);
337     inline void SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
338                                             [[maybe_unused]] size_t size);
339     bool CheckCanDistributeTask();
340     void IncreaseTaskCount();
341     void ReduceTaskCount();
342     void WaitRunningTaskFinished();
343     void WaitClearTaskFinished();
344     void ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
345         bool NonMovableObjNearOOM = false);
346     void SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName);
347     void SetAppFreezeFilterCallback(AppFreezeFilterCallback cb);
348 
349 protected:
350     void FatalOutOfMemoryError(size_t size, std::string functionName);
351 
352     enum class HeapType {
353         LOCAL_HEAP,
354         SHARED_HEAP,
355         INVALID,
356     };
357 
358     class RecursionScope {
359     public:
360         explicit RecursionScope(BaseHeap* heap, HeapType heapType) : heap_(heap), heapType_(heapType)
361         {
362             if (heap_->recursionDepth_++ != 0) {
363                 LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
364                               << ") Constructor, depth: " << heap_->recursionDepth_;
365             }
366             heap_->SetGCState(true);
367         }
368         ~RecursionScope()
369         {
370             if (--heap_->recursionDepth_ != 0) {
371                 LOG_GC(FATAL) << "Recursion in HeapCollectGarbage(isShared=" << static_cast<int>(heapType_)
372                               << ") Destructor, depth: " << heap_->recursionDepth_;
373             }
374             heap_->SetGCState(false);
375         }
376     private:
377         BaseHeap *heap_ {nullptr};
378         HeapType heapType_ {HeapType::INVALID};
379     };
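    // Usage sketch (illustrative): a RecursionScope is intended to live on the stack at the entry of a
    // collect-garbage path so that re-entrant GC is caught immediately:
    //
    //     void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
    //     {
    //         RecursionScope recurScope(this, HeapType::LOCAL_HEAP); // FATALs if a GC is already running
    //         ...
    //     }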
380 
381     static constexpr double TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE = 0.75;
382 
383     const EcmaParamConfiguration config_;
384     MarkType markType_ {MarkType::MARK_YOUNG};
385     TriggerGCType gcType_ {TriggerGCType::YOUNG_GC};
386     Mutex gcCollectGarbageMutex_;
387     // Region allocators.
388     NativeAreaAllocator *nativeAreaAllocator_ {nullptr};
389     HeapRegionAllocator *heapRegionAllocator_ {nullptr};
390 
391     size_t heapAliveSizeAfterGC_ {0};
392     size_t globalSpaceAllocLimit_ {0};
393     size_t globalSpaceConcurrentMarkLimit_ {0};
394     size_t heapBasicLoss_ {1_MB};
395     size_t fragmentSizeAfterGC_ {0};
396     // parallel marker task count.
397     uint32_t runningTaskCount_ {0};
398     uint32_t maxMarkTaskCount_ {0};
399     Mutex waitTaskFinishedMutex_;
400     ConditionVariable waitTaskFinishedCV_;
401     Mutex waitClearTaskFinishedMutex_;
402     ConditionVariable waitClearTaskFinishedCV_;
403     AppFreezeFilterCallback appfreezeCallback_ {nullptr};
404     bool clearTaskFinished_ {true};
405     bool inBackground_ {false};
406     bool shouldThrowOOMError_ {false};
407     bool canThrowOOMError_ {true};
408     bool oldGCRequested_ {false};
409     // ONLY used for heap verification.
410     bool shouldVerifyHeap_ {false};
411     bool inGC_ {false};
412     bool isVerifying_ {false};
413     int32_t recursionDepth_ {0};
414 };
415 
416 class SharedHeap : public BaseHeap {
417 public:
418     SharedHeap(const EcmaParamConfiguration &config) : BaseHeap(config) {}
419     virtual ~SharedHeap() = default;
420 
421     static void CreateNewInstance();
422     static SharedHeap *GetInstance();
423     static void DestroyInstance();
424 
425     void Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
426         const JSRuntimeOptions &option, DaemonThread *dThread);
427 
428     void Destroy() override;
429 
430     void PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option);
431 
432     void EnableParallelGC(JSRuntimeOptions &option);
433 
434     void DisableParallelGC(JSThread *thread);
435 
436     void AdjustGlobalSpaceAllocLimit();
437 
438     class ParallelMarkTask : public Task {
439     public:
440         ParallelMarkTask(int32_t id, SharedHeap *heap, SharedParallelMarkPhase taskPhase)
441             : Task(id), sHeap_(heap), taskPhase_(taskPhase) {};
442         ~ParallelMarkTask() override = default;
443         bool Run(uint32_t threadIndex) override;
444 
445         NO_COPY_SEMANTIC(ParallelMarkTask);
446         NO_MOVE_SEMANTIC(ParallelMarkTask);
447 
448     private:
449         SharedHeap *sHeap_ {nullptr};
450         SharedParallelMarkPhase taskPhase_;
451     };
452 
453     class AsyncClearTask : public Task {
454     public:
455         AsyncClearTask(int32_t id, SharedHeap *heap, TriggerGCType type)
456             : Task(id), sHeap_(heap), gcType_(type) {}
457         ~AsyncClearTask() override = default;
458         bool Run(uint32_t threadIndex) override;
459 
460         NO_COPY_SEMANTIC(AsyncClearTask);
461         NO_MOVE_SEMANTIC(AsyncClearTask);
462     private:
463         SharedHeap *sHeap_;
464         TriggerGCType gcType_;
465     };
466     bool IsMarking() const override
467     {
468         LOG_FULL(ERROR) << "SharedHeap IsMarking() not support yet";
469         return false;
470     }
471 
472     bool IsReadyToConcurrentMark() const override;
473 
474     bool NeedStopCollection() override;
475 
476     void SetSensitiveStatus(AppSensitiveStatus status) override
477     {
478         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
479         smartGCStats_.sensitiveStatus_ = status;
480         if (!InSensitiveStatus()) {
481             smartGCStats_.sensitiveStatusCV_.Signal();
482         }
483     }
484 
485     // This should be called while holding sensitiveStatusMutex_.
486     AppSensitiveStatus GetSensitiveStatus() const override
487     {
488         return smartGCStats_.sensitiveStatus_;
489     }
490 
491     bool FinishStartupEvent() override
492     {
493         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
494         if (!smartGCStats_.onStartupEvent_) {
495             return false;
496         }
497         smartGCStats_.onStartupEvent_ = false;
498         if (!InSensitiveStatus()) {
499             smartGCStats_.sensitiveStatusCV_.Signal();
500         }
501         return true;
502     }
503 
504     // This should be called while holding sensitiveStatusMutex_.
505     bool OnStartupEvent() const override
506     {
507         return smartGCStats_.onStartupEvent_;
508     }
509 
510     void NotifyPostFork() override
511     {
512         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
513         smartGCStats_.onStartupEvent_ = true;
514     }
515 
516     void WaitSensitiveStatusFinished()
517     {
518         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
519         while (InSensitiveStatus() && !smartGCStats_.forceGC_) {
520             smartGCStats_.sensitiveStatusCV_.Wait(&smartGCStats_.sensitiveStatusMutex_);
521         }
522     }
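    // Illustrative flow (inferred from the declarations above): the daemon thread calls
    // WaitSensitiveStatusFinished() before collecting; when the app leaves the sensitive scene,
    // SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE) signals the condition variable
    // (unless a startup event is still active) and the pending shared GC proceeds. SetForceGC(true)
    // bypasses the wait, e.g. for GCReason::ALLOCATION_FAILED collections.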
523 
524     bool ObjectExceedMaxHeapSize() const override;
525 
526     bool CheckAndTriggerSharedGC(JSThread *thread);
527 
528     bool CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size);
529 
530     void TryTriggerLocalConcurrentMarking();
531 
532     // Called when all VMs are destroyed; tries to destroy the daemon thread.
533     void WaitAllTasksFinishedAfterAllJSThreadEliminated();
534 
535     void WaitAllTasksFinished(JSThread *thread);
536 
537     void StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason);         // In daemon thread
538 
539     // Use JSThread instead of DaemonThread to check if IsReadyToSharedConcurrentMark, to avoid an atomic load.
540     bool CheckCanTriggerConcurrentMarking(JSThread *thread);
541 
542     void TryTriggerIdleCollection() override
543     {
544         LOG_FULL(ERROR) << "SharedHeap TryTriggerIdleCollection() not support yet";
545         return;
546     }
547 
548     void TryTriggerIncrementalMarking() override
549     {
550         LOG_FULL(ERROR) << "SharedHeap TryTriggerIncrementalMarking() not support yet";
551         return;
552     }
553 
554     void UpdateWorkManager(SharedGCWorkManager *sWorkManager);
555 
556     bool CheckOngoingConcurrentMarking() override;
557 
558     bool OldSpaceExceedCapacity(size_t size) const override
559     {
560         size_t totalSize = sOldSpace_->GetCommittedSize() + size;
561         return totalSize >= sOldSpace_->GetMaximumCapacity() + sOldSpace_->GetOutOfMemoryOvershootSize();
562     }
563 
564     bool OldSpaceExceedLimit() const override
565     {
566         return sOldSpace_->GetHeapObjectSize() >= sOldSpace_->GetInitialCapacity();
567     }
568 
569     SharedConcurrentMarker *GetConcurrentMarker() const
570     {
571         return sConcurrentMarker_;
572     }
573 
574     SharedConcurrentSweeper *GetSweeper() const
575     {
576         return sSweeper_;
577     }
578 
579     bool IsParallelGCEnabled() const
580     {
581         return parallelGC_;
582     }
583 
584     SharedOldSpace *GetOldSpace() const
585     {
586         return sOldSpace_;
587     }
588 
589     SharedOldSpace *GetCompressSpace() const
590     {
591         return sCompressSpace_;
592     }
593 
594     SharedNonMovableSpace *GetNonMovableSpace() const
595     {
596         return sNonMovableSpace_;
597     }
598 
599     SharedHugeObjectSpace *GetHugeObjectSpace() const
600     {
601         return sHugeObjectSpace_;
602     }
603 
604     SharedReadOnlySpace *GetReadOnlySpace() const
605     {
606         return sReadOnlySpace_;
607     }
608 
609     SharedAppSpawnSpace *GetAppSpawnSpace() const
610     {
611         return sAppSpawnSpace_;
612     }
613 
614     void SetForceGC(bool forceGC)
615     {
616         LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
617         smartGCStats_.forceGC_ = forceGC;
618         if (smartGCStats_.forceGC_) {
619             smartGCStats_.sensitiveStatusCV_.Signal();
620         }
621     }
622 
623     inline void TryTriggerConcurrentMarking(JSThread *thread);
624 
625     template<TriggerGCType gcType, GCReason gcReason>
626     void TriggerConcurrentMarking(JSThread *thread);
627 
628     template<TriggerGCType gcType, GCReason gcReason>
629     void CollectGarbage(JSThread *thread);
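    // Usage sketch (illustrative): the template parameters select the shared GC flavour and reason at
    // compile time, e.g.
    //     SharedHeap::GetInstance()->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);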
630 
631     void CollectGarbageNearOOM(JSThread *thread);
632     // Only means the main body of SharedGC is finished, i.e. if parallel_gc is enabled, this flag will be set
633     // to true even while sweep_task and clear_task are still running asynchronously.
634     void NotifyGCCompleted();            // In daemon thread
635 
636     // Called when all VMs are destroyed; tries to destroy the daemon thread.
637     void WaitGCFinishedAfterAllJSThreadEliminated();
638 
639     void WaitGCFinished(JSThread *thread);
640 
641     void DaemonCollectGarbage(TriggerGCType gcType, GCReason reason);
642 
643     void SetMaxMarkTaskCount(uint32_t maxTaskCount)
644     {
645         maxMarkTaskCount_ = maxTaskCount;
646     }
647 
648     inline size_t GetCommittedSize() const override
649     {
650         size_t result = sOldSpace_->GetCommittedSize() +
651             sHugeObjectSpace_->GetCommittedSize() +
652             sNonMovableSpace_->GetCommittedSize() +
653             sReadOnlySpace_->GetCommittedSize();
654         return result;
655     }
656 
657     inline size_t GetHeapObjectSize() const override
658     {
659         size_t result = sOldSpace_->GetHeapObjectSize() +
660             sHugeObjectSpace_->GetHeapObjectSize() +
661             sNonMovableSpace_->GetHeapObjectSize() +
662             sReadOnlySpace_->GetCommittedSize();
663         return result;
664     }
665 
666     inline size_t GetRegionCount() const override
667     {
668         size_t result = sOldSpace_->GetRegionCount() +
669             sHugeObjectSpace_->GetRegionCount() +
670             sNonMovableSpace_->GetRegionCount() +
671             sReadOnlySpace_->GetRegionCount();
672         return result;
673     }
674 
675     void ResetNativeSizeAfterLastGC()
676     {
677         nativeSizeAfterLastGC_.store(0, std::memory_order_relaxed);
678     }
679 
680     void IncNativeSizeAfterLastGC(size_t size)
681     {
682         nativeSizeAfterLastGC_.fetch_add(size, std::memory_order_relaxed);
683     }
684 
685     size_t GetNativeSizeAfterLastGC() const
686     {
687         return nativeSizeAfterLastGC_.load(std::memory_order_relaxed);
688     }
689 
690     size_t GetNativeSizeTriggerSharedGC() const
691     {
692         return incNativeSizeTriggerSharedGC_;
693     }
694 
695     size_t GetNativeSizeTriggerSharedCM() const
696     {
697         return incNativeSizeTriggerSharedCM_;
698     }
699 
700     void ChangeGCParams([[maybe_unused]]bool inBackground) override
701     {
702         LOG_FULL(ERROR) << "SharedHeap ChangeGCParams() not support yet";
703         return;
704     }
705 
706     GCStats *GetEcmaGCStats() override
707     {
708         return sGCStats_;
709     }
710 
711     inline void SetGlobalEnvConstants(const GlobalEnvConstants *globalEnvConstants)
712     {
713         globalEnvConstants_ = globalEnvConstants;
714     }
715 
716     inline const GlobalEnvConstants *GetGlobalConst() const override
717     {
718         return globalEnvConstants_;
719     }
720 
721     SharedSparseSpace *GetSpaceWithType(MemSpaceType type) const
722     {
723         switch (type) {
724             case MemSpaceType::SHARED_OLD_SPACE:
725                 return sOldSpace_;
726             case MemSpaceType::SHARED_NON_MOVABLE:
727                 return sNonMovableSpace_;
728             default:
729                 LOG_ECMA(FATAL) << "this branch is unreachable";
730                 UNREACHABLE();
731                 break;
732         }
733     }
734 
735     void Prepare(bool inTriggerGCThread);
736     void Reclaim(TriggerGCType gcType);
737     void PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase);
738     void CompactHeapBeforeFork(JSThread *thread);
739     void ReclaimForAppSpawn();
740 
741     SharedGCWorkManager *GetWorkManager() const
742     {
743         return sWorkManager_;
744     }
745 
746     SharedGCMarker *GetSharedGCMarker() const
747     {
748         return sharedGCMarker_;
749     }
750 
751     SharedGCMovableMarker *GetSharedGCMovableMarker() const
752     {
753         return sharedGCMovableMarker_;
754     }
755     inline void SwapOldSpace();
756 
757     void PrepareRecordRegionsForReclaim();
758 
759     template<class Callback>
760     void EnumerateOldSpaceRegions(const Callback &cb) const;
761 
762     template<class Callback>
763     void EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const;
764 
765     template<class Callback>
766     void IterateOverObjects(const Callback &cb) const;
767 
768     inline TaggedObject *AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size);
769 
770     inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass);
771 
772     inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
773 
774     inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, size_t size);
775 
776     inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass);
777 
778     inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
779 
780     inline TaggedObject *AllocateOldOrHugeObject(JSThread *thread, size_t size);
781 
782     inline TaggedObject *AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
783 
784     inline TaggedObject *AllocateHugeObject(JSThread *thread, size_t size);
785 
786     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass);
787 
788     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
789 
790     inline TaggedObject *AllocateSNonMovableTlab(JSThread *thread, size_t size);
791 
792     inline TaggedObject *AllocateSOldTlab(JSThread *thread, size_t size);
793 
794     size_t VerifyHeapObjects(VerifyKind verifyKind) const;
795 
796     inline void MergeToOldSpaceSync(SharedLocalSpace *localSpace);
797 
798     void DumpHeapSnapshotBeforeOOM(bool isFullGC, JSThread *thread);
799 
800     class SharedGCScope {
801     public:
802         SharedGCScope();
803         ~SharedGCScope();
804     };
805 
806 private:
807     void ProcessAllGCListeners();
808     inline void CollectGarbageFinish(bool inDaemon, TriggerGCType gcType);
809 
810     void MoveOldSpaceToAppspawn();
811 
812     void ReclaimRegions(TriggerGCType type);
813 
814     void ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread);
815     inline TaggedObject *AllocateInSOldSpace(JSThread *thread, size_t size);
816     struct SharedHeapSmartGCStats {
817         /**
818          * For SmartGC.
819          * For daemon thread, it check these status before trying to collect garbage, and wait until finish.
820          * It need that check-wait events is atomic, so use a Mutex/CV.
821         */
822         Mutex sensitiveStatusMutex_;
823         ConditionVariable sensitiveStatusCV_;
824         AppSensitiveStatus sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
825         bool onStartupEvent_ {false};
826         // If the SharedHeap is almost OOM and an allocation has failed, a GC with GCReason::ALLOCATION_FAILED
827         // is requested and must run at once, even in sensitive status.
828         bool forceGC_ {false};
829     };
830 
831     SharedHeapSmartGCStats smartGCStats_;
832 
833     static SharedHeap *instance_;
834 
835     GCStats *sGCStats_ {nullptr};
836 
837     bool localFullMarkTriggered_ {false};
838 
839     bool optionalLogEnabled_ {false};
840 
841     bool parallelGC_ {true};
842 
843     // Only means the main body of SharedGC is finished, i.e. if parallel_gc is enabled, this flag will be set
844     // to true even while sweep_task and clear_task are still running asynchronously.
845     bool gcFinished_ {true};
846     Mutex waitGCFinishedMutex_;
847     ConditionVariable waitGCFinishedCV_;
848 
849     DaemonThread *dThread_ {nullptr};
850     const GlobalEnvConstants *globalEnvConstants_ {nullptr};
851     SharedOldSpace *sOldSpace_ {nullptr};
852     SharedOldSpace *sCompressSpace_ {nullptr};
853     SharedNonMovableSpace *sNonMovableSpace_ {nullptr};
854     SharedReadOnlySpace *sReadOnlySpace_ {nullptr};
855     SharedHugeObjectSpace *sHugeObjectSpace_ {nullptr};
856     SharedAppSpawnSpace *sAppSpawnSpace_ {nullptr};
857     SharedGCWorkManager *sWorkManager_ {nullptr};
858     SharedConcurrentMarker *sConcurrentMarker_ {nullptr};
859     SharedConcurrentSweeper *sSweeper_ {nullptr};
860     SharedGC *sharedGC_ {nullptr};
861     SharedFullGC *sharedFullGC_ {nullptr};
862     SharedGCMarker *sharedGCMarker_ {nullptr};
863     SharedGCMovableMarker *sharedGCMovableMarker_ {nullptr};
864     size_t growingFactor_ {0};
865     size_t growingStep_ {0};
866     size_t incNativeSizeTriggerSharedCM_ {0};
867     size_t incNativeSizeTriggerSharedGC_ {0};
868     size_t fragmentationLimitForSharedFullGC_ {0};
869     std::atomic<size_t> nativeSizeAfterLastGC_ {0};
870 };
871 
872 class Heap : public BaseHeap {
873 public:
874     explicit Heap(EcmaVM *ecmaVm);
875     virtual ~Heap() = default;
876     NO_COPY_SEMANTIC(Heap);
877     NO_MOVE_SEMANTIC(Heap);
878     void Initialize();
879     void Destroy() override;
880     void Prepare();
881     void GetHeapPrepare();
882     void Resume(TriggerGCType gcType);
883     void ResumeForAppSpawn();
884     void CompactHeapBeforeFork();
885     void DisableParallelGC();
886     void EnableParallelGC();
887 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
888     void SetJsDumpThresholds(size_t thresholds) const;
889 #endif
890 
891     EdenSpace *GetEdenSpace() const
892     {
893         return edenSpace_;
894     }
895 
896     // fixme: Rename NewSpace to YoungSpace.
897     // This is the active young generation space where new objects are allocated
898     // or copied into (from the other semi-space) during semi-space GC.
899     SemiSpace *GetNewSpace() const
900     {
901         return activeSemiSpace_;
902     }
903 
904     /*
905      * Return the previously active semi-space from which objects are evacuated during semi-space GC.
906      * This should be invoked only in the evacuation phase of semi space GC.
907      * fixme: Get rid of this interface or make it safe considering the above implicit limitation / requirement.
908      */
909     SemiSpace *GetFromSpaceDuringEvacuation() const
910     {
911         return inactiveSemiSpace_;
912     }
913 
914     OldSpace *GetOldSpace() const
915     {
916         return oldSpace_;
917     }
918 
919     OldSpace *GetCompressSpace() const
920     {
921         return compressSpace_;
922     }
923 
924     NonMovableSpace *GetNonMovableSpace() const
925     {
926         return nonMovableSpace_;
927     }
928 
929     HugeObjectSpace *GetHugeObjectSpace() const
930     {
931         return hugeObjectSpace_;
932     }
933 
934     MachineCodeSpace *GetMachineCodeSpace() const
935     {
936         return machineCodeSpace_;
937     }
938 
939     HugeMachineCodeSpace *GetHugeMachineCodeSpace() const
940     {
941         return hugeMachineCodeSpace_;
942     }
943 
944     SnapshotSpace *GetSnapshotSpace() const
945     {
946         return snapshotSpace_;
947     }
948 
949     ReadOnlySpace *GetReadOnlySpace() const
950     {
951         return readOnlySpace_;
952     }
953 
954     AppSpawnSpace *GetAppSpawnSpace() const
955     {
956         return appSpawnSpace_;
957     }
958 
959     SparseSpace *GetSpaceWithType(MemSpaceType type) const
960     {
961         switch (type) {
962             case MemSpaceType::OLD_SPACE:
963                 return oldSpace_;
964             case MemSpaceType::NON_MOVABLE:
965                 return nonMovableSpace_;
966             case MemSpaceType::MACHINE_CODE_SPACE:
967                 return machineCodeSpace_;
968             default:
969                 LOG_ECMA(FATAL) << "this branch is unreachable";
970                 UNREACHABLE();
971                 break;
972         }
973     }
974 
975     STWYoungGC *GetSTWYoungGC() const
976     {
977         return stwYoungGC_;
978     }
979 
980     PartialGC *GetPartialGC() const
981     {
982         return partialGC_;
983     }
984 
985     FullGC *GetFullGC() const
986     {
987         return fullGC_;
988     }
989 
990     ConcurrentSweeper *GetSweeper() const
991     {
992         return sweeper_;
993     }
994 
995     ParallelEvacuator *GetEvacuator() const
996     {
997         return evacuator_;
998     }
999 
1000     ConcurrentMarker *GetConcurrentMarker() const
1001     {
1002         return concurrentMarker_;
1003     }
1004 
1005     IncrementalMarker *GetIncrementalMarker() const
1006     {
1007         return incrementalMarker_;
1008     }
1009 
1010     Marker *GetNonMovableMarker() const
1011     {
1012         return nonMovableMarker_;
1013     }
1014 
1015     Marker *GetSemiGCMarker() const
1016     {
1017         return semiGCMarker_;
1018     }
1019 
1020     Marker *GetCompressGCMarker() const
1021     {
1022         return compressGCMarker_;
1023     }
1024 
1025     EcmaVM *GetEcmaVM() const
1026     {
1027         return ecmaVm_;
1028     }
1029 
1030     JSThread *GetJSThread() const
1031     {
1032         return thread_;
1033     }
1034 
1035     WorkManager *GetWorkManager() const
1036     {
1037         return workManager_;
1038     }
1039 
1040     WorkNode *&GetMarkingObjectLocalBuffer()
1041     {
1042         return sharedGCData_.sharedConcurrentMarkingLocalBuffer_;
1043     }
1044 
1045     IdleGCTrigger *GetIdleGCTrigger() const
1046     {
1047         return idleGCTrigger_;
1048     }
1049 
1050     void SetRSetWorkListHandler(RSetWorkListHandler *handler)
1051     {
1052         ASSERT((sharedGCData_.rSetWorkListHandler_ == nullptr) != (handler == nullptr));
1053         sharedGCData_.rSetWorkListHandler_ = handler;
1054     }
1055 
1056     void ProcessSharedGCMarkingLocalBuffer();
1057 
1058     void ProcessSharedGCRSetWorkList();
1059 
1060     const GlobalEnvConstants *GetGlobalConst() const override;
1061 
1062     MemController *GetMemController() const
1063     {
1064         return memController_;
1065     }
1066 
1067     inline void RecordOrResetObjectSize(size_t objectSize)
1068     {
1069         recordObjectSize_ = objectSize;
1070     }
1071 
1072     inline size_t GetRecordObjectSize() const
1073     {
1074         return recordObjectSize_;
1075     }
1076 
1077     inline void RecordOrResetNativeSize(size_t nativeSize)
1078     {
1079         recordNativeSize_ = nativeSize;
1080     }
1081 
1082     inline size_t GetRecordNativeSize() const
1083     {
1084         return recordNativeSize_;
1085     }
1086 
1087     /*
1088      * For object allocations.
1089      */
1090 
1091     // Young
1092     inline TaggedObject *AllocateInGeneralNewSpace(size_t size);
1093     inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass);
1094     inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass, size_t size);
1095     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass);
1096     inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size);
1097     inline TaggedObject *AllocateYoungOrHugeObject(size_t size);
1098     inline uintptr_t AllocateYoungSync(size_t size);
1099     inline TaggedObject *TryAllocateYoungGeneration(JSHClass *hclass, size_t size);
1100     // Old
1101     inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass);
1102     inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass, size_t size);
1103     // Non-movable
1104     inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass);
1105     inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size);
1106     inline TaggedObject *AllocateClassClass(JSHClass *hclass, size_t size);
1107     // Huge
1108     inline TaggedObject *AllocateHugeObject(JSHClass *hclass, size_t size);
1109     // Machine code
1110     inline TaggedObject *AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc = nullptr);
1111     inline TaggedObject *AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc = nullptr);
1112     // Snapshot
1113     inline uintptr_t AllocateSnapshotSpace(size_t size);
1114 
1115     // shared non movable space tlab
1116     inline TaggedObject *AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size);
1117     // shared old space tlab
1118     inline TaggedObject *AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size);
1119 
1120     void ResetTlab();
1121     void FillBumpPointerForTlab();
1122     /*
1123      * GC triggers.
1124      */
1125     void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER);
1126     bool CheckAndTriggerOldGC(size_t size = 0);
1127     bool CheckAndTriggerHintGC();
1128     TriggerGCType SelectGCType() const;
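    // Usage sketch (illustrative): an explicit full collection requested from the JS thread:
    //     CollectGarbage(TriggerGCType::FULL_GC, GCReason::OTHER);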
1129     /*
1130      * Parallel GC related configurations and utilities.
1131      */
1132 
1133     void PostParallelGCTask(ParallelGCTaskPhase taskPhase);
1134 
1135     bool IsParallelGCEnabled() const
1136     {
1137         return parallelGC_;
1138     }
1139     void ChangeGCParams(bool inBackground) override;
1140 
1141     GCStats *GetEcmaGCStats() override;
1142 
1143     GCKeyStats *GetEcmaGCKeyStats();
1144 
1145     JSObjectResizingStrategy *GetJSObjectResizingStrategy();
1146 
1147     void TriggerIdleCollection(int idleMicroSec);
1148     void NotifyMemoryPressure(bool inHighMemoryPressure);
1149 
1150     void TryTriggerConcurrentMarking();
1151     void AdjustBySurvivalRate(size_t originalNewSpaceSize);
1152     void TriggerConcurrentMarking();
1153     bool CheckCanTriggerConcurrentMarking();
1154 
1155     void TryTriggerIdleCollection() override;
1156     void TryTriggerIncrementalMarking() override;
1157     void CalculateIdleDuration();
1158     void UpdateWorkManager(WorkManager *workManager);
1159     bool CheckOngoingConcurrentMarking() override;
1160 
1161     inline void SwapNewSpace();
1162     inline void SwapOldSpace();
1163 
1164     inline bool MoveYoungRegionSync(Region *region);
1165     inline void MergeToOldSpaceSync(LocalSpace *localSpace);
1166 
1167     template<class Callback>
1168     void EnumerateOldSpaceRegions(const Callback &cb, Region *region = nullptr) const;
1169 
1170     template<class Callback>
1171     void EnumerateNonNewSpaceRegions(const Callback &cb) const;
1172 
1173     template<class Callback>
1174     void EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const;
1175 
1176     template<class Callback>
1177     void EnumerateEdenSpaceRegions(const Callback &cb) const;
1178 
1179     template<class Callback>
1180     void EnumerateNewSpaceRegions(const Callback &cb) const;
1181 
1182     template<class Callback>
1183     void EnumerateSnapshotSpaceRegions(const Callback &cb) const;
1184 
1185     template<class Callback>
1186     void EnumerateNonMovableRegions(const Callback &cb) const;
1187 
1188     template<class Callback>
1189     inline void EnumerateRegions(const Callback &cb) const;
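    // Usage sketch (illustrative): each Enumerate* helper invokes the callback once per Region in the
    // corresponding space(s), e.g. EnumerateRegions([](Region *region) { /* visit region */ });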
1190 
1191     inline void ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd);
1192 
1193     void WaitAllTasksFinished();
1194     void WaitConcurrentMarkingFinished();
1195 
1196     MemGrowingType GetMemGrowingType() const
1197     {
1198         return memGrowingtype_;
1199     }
1200 
1201     void SetMemGrowingType(MemGrowingType memGrowingType)
1202     {
1203         memGrowingtype_ = memGrowingType;
1204     }
1205 
1206     size_t CalculateLinearSpaceOverShoot()
1207     {
1208         return oldSpace_->GetMaximumCapacity() - oldSpace_->GetInitialCapacity();
1209     }
1210 
1211     inline size_t GetCommittedSize() const override;
1212 
1213     inline size_t GetHeapObjectSize() const override;
1214 
1215     inline void NotifyRecordMemorySize();
1216 
1217     inline size_t GetRegionCount() const override;
1218 
1219     size_t GetRegionCachedSize() const
1220     {
1221         return activeSemiSpace_->GetInitialCapacity();
1222     }
1223 
1224     size_t GetLiveObjectSize() const;
1225 
1226     inline uint32_t GetHeapObjectCount() const;
1227 
1228     size_t GetPromotedSize() const
1229     {
1230         return promotedSize_;
1231     }
1232     size_t GetEdenToYoungSize() const
1233     {
1234         return edenToYoungSize_;
1235     }
1236 
1237     size_t GetArrayBufferSize() const;
1238 
1239     size_t GetHeapLimitSize() const;
1240 
1241     uint32_t GetMaxEvacuateTaskCount() const
1242     {
1243         return maxEvacuateTaskCount_;
1244     }
1245 
1246     /*
1247      * Receive callback function to control idletime.
1248      */
1249     inline void InitializeIdleStatusControl(std::function<void(bool)> callback);
1250 
1251     void DisableNotifyIdle()
1252     {
1253         if (notifyIdleStatusCallback != nullptr) {
1254             notifyIdleStatusCallback(true);
1255         }
1256     }
1257 
1258     void EnableNotifyIdle()
1259     {
1260         if (enableIdleGC_ && notifyIdleStatusCallback != nullptr) {
1261             notifyIdleStatusCallback(false);
1262         }
1263     }
1264 
1265     void SetIdleTask(IdleTaskType task)
1266     {
1267         idleTask_ = task;
1268     }
1269 
1270     void ClearIdleTask();
1271 
1272     bool IsEmptyIdleTask()
1273     {
1274         return idleTask_ == IdleTaskType::NO_TASK;
1275     }
1276 
1277     void SetOnSerializeEvent(bool isSerialize)
1278     {
1279         onSerializeEvent_ = isSerialize;
1280         if (!onSerializeEvent_ && !InSensitiveStatus()) {
1281             TryTriggerIncrementalMarking();
1282             TryTriggerIdleCollection();
1283             TryTriggerConcurrentMarking();
1284         }
1285     }
1286 
1287     bool GetOnSerializeEvent() const
1288     {
1289         return onSerializeEvent_;
1290     }
1291 
1292     void NotifyFinishColdStart(bool isMainThread = true);
1293 
1294     void NotifyFinishColdStartSoon();
1295 
1296     void NotifyHighSensitive(bool isStart);
1297 
1298     bool HandleExitHighSensitiveEvent();
1299 
1300     bool ObjectExceedMaxHeapSize() const override;
1301 
1302     bool NeedStopCollection() override;
1303 
1304     void SetSensitiveStatus(AppSensitiveStatus status) override
1305     {
1306         sHeap_->SetSensitiveStatus(status);
1307         smartGCStats_.sensitiveStatus_.store(status, std::memory_order_release);
1308     }
1309 
1310     AppSensitiveStatus GetSensitiveStatus() const override
1311     {
1312         return smartGCStats_.sensitiveStatus_.load(std::memory_order_acquire);
1313     }
1314 
1315     void SetRecordHeapObjectSizeBeforeSensitive(size_t objSize)
1316     {
1317         recordObjSizeBeforeSensitive_ = objSize;
1318     }
1319 
1320     size_t GetRecordHeapObjectSizeBeforeSensitive() const
1321     {
1322         return recordObjSizeBeforeSensitive_;
1323     }
1324 
1325     bool CASSensitiveStatus(AppSensitiveStatus expect, AppSensitiveStatus status)
1326     {
1327         return smartGCStats_.sensitiveStatus_.compare_exchange_strong(expect, status, std::memory_order_seq_cst);
1328     }
1329 
1330     bool FinishStartupEvent() override
1331     {
1332         sHeap_->FinishStartupEvent();
1333         return smartGCStats_.onStartupEvent_.exchange(false, std::memory_order_relaxed) == true;
1334     }
1335 
1336     bool OnStartupEvent() const override
1337     {
1338         return smartGCStats_.onStartupEvent_.load(std::memory_order_relaxed);
1339     }
1340 
1341     void NotifyPostFork() override
1342     {
1343         sHeap_->NotifyPostFork();
1344         smartGCStats_.onStartupEvent_.store(true, std::memory_order_relaxed);
1345         LOG_GC(INFO) << "SmartGC: enter app cold start";
1346     }
1347 
1348 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1349     void StartHeapTracking()
1350     {
1351         WaitAllTasksFinished();
1352     }
1353 
1354     void StopHeapTracking()
1355     {
1356         WaitAllTasksFinished();
1357     }
1358 #endif
1359     inline bool InHeapProfiler();
1360 
1361     void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);
1362 
1363     // add allocationInspector to each space
1364     void AddAllocationInspectorToAllSpaces(AllocationInspector *inspector);
1365 
1366     // clear allocationInspector from each space
1367     void ClearAllocationInspectorFromAllSpaces();
1368 
1369     /*
1370      * Functions used by heap verification.
1371      */
1372 
1373     template<class Callback>
1374     void IterateOverObjects(const Callback &cb, bool isSimplify = false) const;
1375 
1376     size_t VerifyHeapObjects(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
1377     size_t VerifyOldToNewRSet(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
1378     void StatisticHeapObject(TriggerGCType gcType) const;
1379     void StatisticHeapDetail();
1380     void PrintHeapInfo(TriggerGCType gcType) const;
1381 
1382     bool OldSpaceExceedCapacity(size_t size) const override
1383     {
1384         size_t totalSize = oldSpace_->GetCommittedSize() + hugeObjectSpace_->GetCommittedSize() + size;
1385         return totalSize >= oldSpace_->GetMaximumCapacity() + oldSpace_->GetOvershootSize() +
1386                oldSpace_->GetOutOfMemoryOvershootSize();
1387     }
1388 
1389     bool OldSpaceExceedLimit() const override
1390     {
1391         size_t totalSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
1392         return totalSize >= oldSpace_->GetInitialCapacity() + oldSpace_->GetOvershootSize();
1393     }
1394 
1395     void AdjustSpaceSizeForAppSpawn();
1396 
1397     static bool ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object);
1398 
1399     bool IsFullMarkRequested() const
1400     {
1401         return fullMarkRequested_;
1402     }
1403 
1404     void SetFullMarkRequestedState(bool fullMarkRequested)
1405     {
1406         fullMarkRequested_ = fullMarkRequested;
1407     }
1408 
1409     void SetHeapMode(HeapMode mode)
1410     {
1411         mode_ = mode;
1412     }
1413 
1414     void IncreaseNativeBindingSize(size_t size);
1415     void IncreaseNativeBindingSize(JSNativePointer *object);
1416     void DecreaseNativeBindingSize(size_t size);
1417     void ResetNativeBindingSize()
1418     {
1419         nativeBindingSize_ = 0;
1420     }
1421 
1422     size_t GetNativeBindingSize() const
1423     {
1424         return nativeBindingSize_;
1425     }
1426 
1427     size_t GetGlobalNativeSize() const
1428     {
1429         return GetNativeBindingSize() + nativeAreaAllocator_->GetNativeMemoryUsage();
1430     }
1431 
1432     void ResetNativeSizeAfterLastGC()
1433     {
1434         nativeSizeAfterLastGC_ = 0;
1435         nativeBindingSizeAfterLastGC_= nativeBindingSize_;
1436     }
1437 
1438     void IncNativeSizeAfterLastGC(size_t size)
1439     {
1440         nativeSizeAfterLastGC_ += size;
1441     }
1442 
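    // The native-memory GC trigger below fires only when both conditions hold: the total native size exceeds
    // nativeSizeTriggerGCThreshold_, and the native growth since the last GC exceeds incNativeSizeTriggerGC_.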
1443     bool GlobalNativeSizeLargerToTriggerGC() const
1444     {
1445         auto incNativeBindingSizeAfterLastGC = nativeBindingSize_ > nativeBindingSizeAfterLastGC_ ?
1446             nativeBindingSize_ - nativeBindingSizeAfterLastGC_ : 0;
1447         return GetGlobalNativeSize() > nativeSizeTriggerGCThreshold_ &&
1448             nativeSizeAfterLastGC_ + incNativeBindingSizeAfterLastGC > incNativeSizeTriggerGC_;
1449     }
1450 
1451     bool GlobalNativeSizeLargerThanLimit() const
1452     {
1453         size_t overshoot = InSensitiveStatus() ? nativeSizeOvershoot_ : 0;
1454         return GetGlobalNativeSize() >= globalSpaceNativeLimit_ + overshoot;
1455     }
1456 
1457     bool GlobalNativeSizeLargerThanLimitForIdle() const
1458     {
1459         return GetGlobalNativeSize() >= static_cast<size_t>(globalSpaceNativeLimit_ *
1460             IDLE_SPACE_SIZE_LIMIT_RATE);
1461     }
1462 
1463     void TryTriggerFullMarkOrGCByNativeSize();
1464 
1465     void TryTriggerFullMarkBySharedSize(size_t size);
1466 
1467     bool TryTriggerFullMarkBySharedLimit();
1468 
1469     void CheckAndTriggerTaskFinishedGC();
1470 
1471     bool IsMarking() const override;
1472 
1473     bool IsReadyToConcurrentMark() const override;
1474 
1475     bool IsEdenGC() const
1476     {
1477         return gcType_ == TriggerGCType::EDEN_GC;
1478     }
1479 
1480     bool IsYoungGC() const
1481     {
1482         return gcType_ == TriggerGCType::YOUNG_GC;
1483     }
1484 
1485     bool IsGeneralYoungGC() const
1486     {
1487         return gcType_ == TriggerGCType::YOUNG_GC || gcType_ == TriggerGCType::EDEN_GC;
1488     }
1489 
1490     void EnableEdenGC();
1491 
1492     void TryEnableEdenGC();
1493 
1494     void CheckNonMovableSpaceOOM();
1495     void ReleaseEdenAllocator();
1496     void InstallEdenAllocator();
1497     void DumpHeapSnapshotBeforeOOM(bool isFullGC = true);
1498     std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> CalCallSiteInfo(uintptr_t retAddr) const;
1499     MachineCode *GetMachineCodeObject(uintptr_t pc) const;
1500 
1501     PUBLIC_API GCListenerId AddGCListener(FinishGCListener listener, void *data);
1502     PUBLIC_API void RemoveGCListener(GCListenerId listenerId);
1503     void ProcessGCListeners();
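    // Usage sketch (illustrative, names such as heap and userData are placeholders): register a post-GC
    // callback and remove it later with the returned id:
    //     GCListenerId id = heap->AddGCListener([](void *data) { /* post-GC work */ }, userData);
    //     heap->RemoveGCListener(id);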
1504 
1505     inline void ProcessNativeDelete(const WeakRootVisitor& visitor);
1506     inline void ProcessSharedNativeDelete(const WeakRootVisitor& visitor);
1507     inline void ProcessReferences(const WeakRootVisitor& visitor);
1508     inline void PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent);
1509     inline void PushToSharedNativePointerList(JSNativePointer* pointer);
1510     inline void RemoveFromNativePointerList(const JSNativePointer* pointer);
1511     inline void ClearNativePointerList();
1512 
1513     size_t GetNativePointerListSize() const
1514     {
1515         return nativePointerList_.size();
1516     }
1517 
1518 private:
1519     inline TaggedObject *AllocateHugeObject(size_t size);
1520 
1521     static constexpr int MIN_JSDUMP_THRESHOLDS = 85;
1522     static constexpr int MAX_JSDUMP_THRESHOLDS = 95;
1523     static constexpr int IDLE_TIME_LIMIT = 10;  // if idle time exceeds 10 ms we can do something
1524     static constexpr int ALLOCATE_SIZE_LIMIT = 100_KB;
1525     static constexpr int IDLE_MAINTAIN_TIME = 500;
1526     static constexpr int BACKGROUND_GROW_LIMIT = 2_MB;
1527     // Threshold at which HintGC will actually trigger a GC.
1528     static constexpr double SURVIVAL_RATE_THRESHOLD = 0.5;
1529     static constexpr double IDLE_SPACE_SIZE_LIMIT_RATE = 0.8;
1530     static constexpr double IDLE_FULLGC_SPACE_USAGE_LIMIT_RATE = 0.7;
1531     static constexpr size_t NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT = DEFAULT_SHARED_HEAP_SIZE / 10; // 10 : ten times.
1532     static constexpr size_t INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT = 100_MB;
1533     void RecomputeLimits();
1534     void AdjustOldSpaceLimit();
1535     // record lastRegion for each space, which will be used in ReclaimRegions()
1536     void PrepareRecordRegionsForReclaim();
1537     inline void ReclaimRegions(TriggerGCType gcType);
1538     inline size_t CalculateCommittedCacheSize();
1539 #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1540     uint64_t GetCurrentTickMillseconds();
1541     void ThresholdReachedDump();
1542 #endif
1543     void CleanCallBack();
1544     void IncreasePendingAsyncNativeCallbackSize(size_t bindingSize)
1545     {
1546         pendingAsyncNativeCallbackSize_ += bindingSize;
1547     }
1548     void DecreasePendingAsyncNativeCallbackSize(size_t bindingSize)
1549     {
1550         pendingAsyncNativeCallbackSize_ -= bindingSize;
1551     }
1552     class ParallelGCTask : public Task {
1553     public:
1554         ParallelGCTask(int32_t id, Heap *heap, ParallelGCTaskPhase taskPhase)
1555             : Task(id), heap_(heap), taskPhase_(taskPhase) {};
1556         ~ParallelGCTask() override = default;
1557         bool Run(uint32_t threadIndex) override;
1558 
1559         NO_COPY_SEMANTIC(ParallelGCTask);
1560         NO_MOVE_SEMANTIC(ParallelGCTask);
1561 
1562     private:
1563         Heap *heap_ {nullptr};
1564         ParallelGCTaskPhase taskPhase_;
1565     };
1566 
1567     class AsyncClearTask : public Task {
1568     public:
1569         AsyncClearTask(int32_t id, Heap *heap, TriggerGCType type)
1570             : Task(id), heap_(heap), gcType_(type) {}
1571         ~AsyncClearTask() override = default;
1572         bool Run(uint32_t threadIndex) override;
1573 
1574         NO_COPY_SEMANTIC(AsyncClearTask);
1575         NO_MOVE_SEMANTIC(AsyncClearTask);
1576     private:
1577         Heap *heap_;
1578         TriggerGCType gcType_;
1579     };
1580 
1581     class FinishColdStartTask : public Task {
1582     public:
1583         FinishColdStartTask(int32_t id, Heap *heap)
1584             : Task(id), heap_(heap) {}
1585         ~FinishColdStartTask() override = default;
1586         bool Run(uint32_t threadIndex) override;
1587 
1588         NO_COPY_SEMANTIC(FinishColdStartTask);
1589         NO_MOVE_SEMANTIC(FinishColdStartTask);
1590     private:
1591         Heap *heap_;
1592     };
1593 
1594     class DeleteCallbackTask : public Task {
1595     public:
1596         DeleteCallbackTask(int32_t id, std::vector<NativePointerCallbackData> &callbacks) : Task(id)
1597         {
1598             std::swap(callbacks, nativePointerCallbacks_);
1599         }
1600         ~DeleteCallbackTask() override = default;
1601         bool Run(uint32_t threadIndex) override;
1602 
1603         NO_COPY_SEMANTIC(DeleteCallbackTask);
1604         NO_MOVE_SEMANTIC(DeleteCallbackTask);
1605 
1606     private:
1607         std::vector<NativePointerCallbackData> nativePointerCallbacks_ {};
1608     };
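    /*
     * Illustrative note (assumed usage, not a declaration in this header): the task classes
     * above are meant to run on the runtime task pool. A posting call could look roughly like
     *
     *     Taskpool::GetCurrentTaskpool()->PostTask(
     *         std::make_unique<DeleteCallbackTask>(thread_->GetThreadId(), callbacks));
     *
     * DeleteCallbackTask swaps the callback vector into itself on construction, so the pending
     * native-pointer callbacks are owned by the task and released off the js thread.
     */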
1609 
1610     struct MainLocalHeapSmartGCStats {
1611         /**
1612          * For SmartGC.
1613          * The main js thread checks these statuses every time it tries to collect
1614          * garbage (e.g. in JSThread::CheckSafePoint) and skips the GC if needed, so std::atomic is enough in practice.
1615         */
1616         std::atomic<AppSensitiveStatus> sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
1617         std::atomic<bool> onStartupEvent_ {false};
1618     };
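    /*
     * Illustrative sketch (hypothetical check; the real logic lives in places such as
     * JSThread::CheckSafePoint): before starting a collection, the main thread can read
     * these atomics and skip the GC while the app is in a sensitive phase, e.g.
     *
     *     if (smartGCStats_.sensitiveStatus_.load(std::memory_order_relaxed) ==
     *             AppSensitiveStatus::ENTER_HIGH_SENSITIVE ||
     *         smartGCStats_.onStartupEvent_.load(std::memory_order_relaxed)) {
     *         return;  // skip this GC attempt
     *     }
     */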
1619 
1620     // Some data used in SharedGC also needs to be stored in the local heap, e.g. the temporary local mark stack.
1621     struct SharedGCLocalStoragePackedData {
1622         /**
1623          * During SharedGC concurrent marking, the barrier pushes shared objects onto a mark stack for marking.
1624          * In LocalGC we can simply push non-shared objects to the WorkNode for MAIN_THREAD_INDEX, but in SharedGC we
1625          * can only either take a global lock for DAEMON_THREAD_INDEX's WorkNode, or push to a local WorkNode and push
1626          * it to the global one in remark.
1627          * If the heap is destructed before this node is pushed to the global one, check and try to push the remaining objects as well.
1628         */
1629         WorkNode *sharedConcurrentMarkingLocalBuffer_ {nullptr};
1630         /**
1631          * Records the local_to_share rset used in SharedGC concurrentMark,
1632          * whose lifecycle is within one SharedGC.
1633          * Before mutating this local heap (e.g. LocalGC::Evacuate), make sure the RSetWorkList is fully processed,
1634          * otherwise the SharedGC concurrentMark will visit incorrect local_to_share bits.
1635          * Before destroying the local heap, the RSetWorkList should be finished as well.
1636         */
1637         RSetWorkListHandler *rSetWorkListHandler_ {nullptr};
1638     };
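    /*
     * Illustrative sketch (hypothetical helper, not declared here): the comments above describe
     * a "buffer locally, publish during remark" pattern. Under that assumption it could look like
     *
     *     void PushToLocalSharedMarkBuffer(TaggedObject *sharedObject)
     *     {
     *         WorkNode *&node = sharedGCData_.sharedConcurrentMarkingLocalBuffer_;
     *         if (node == nullptr) {
     *             node = AllocateLocalWorkNode();  // hypothetical allocation helper
     *         }
     *         node->PushObject(ToUintPtr(sharedObject));  // assumed WorkNode push interface
     *     }
     *
     * with the buffered node handed to the global work list in remark, or when this heap is
     * destroyed, as required by the lifecycle constraints described above.
     */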
1639 
1640     EcmaVM *ecmaVm_ {nullptr};
1641     JSThread *thread_ {nullptr};
1642 
1643     SharedHeap *sHeap_ {nullptr};
1644     MainLocalHeapSmartGCStats smartGCStats_;
1645 
1646     /*
1647      * Heap spaces.
1648      */
1649 
1650     /*
1651      * Young generation spaces where most new objects are allocated
1652      * (only one of the semi spaces is active at a time in semi space GC).
1653      */
1654     EdenSpace *edenSpace_ {nullptr};
1655     SemiSpace *activeSemiSpace_ {nullptr};
1656     SemiSpace *inactiveSemiSpace_ {nullptr};
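    /*
     * Illustrative note (simplified, not the actual collector code): in a semi space collection,
     * survivors are evacuated from the active space into the inactive one, after which the two
     * spaces swap roles, conceptually something like
     *
     *     std::swap(activeSemiSpace_, inactiveSemiSpace_);  // flip after evacuation
     */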
1657 
1658     // Old generation spaces where some long-lived objects are allocated or promoted.
1659     OldSpace *oldSpace_ {nullptr};
1660     OldSpace *compressSpace_ {nullptr};
1661     ReadOnlySpace *readOnlySpace_ {nullptr};
1662     AppSpawnSpace *appSpawnSpace_ {nullptr};
1663     // Spaces used for special kinds of objects.
1664     NonMovableSpace *nonMovableSpace_ {nullptr};
1665     MachineCodeSpace *machineCodeSpace_ {nullptr};
1666     HugeMachineCodeSpace *hugeMachineCodeSpace_ {nullptr};
1667     HugeObjectSpace *hugeObjectSpace_ {nullptr};
1668     SnapshotSpace *snapshotSpace_ {nullptr};
1669     // tlab for shared non movable space
1670     ThreadLocalAllocationBuffer *sNonMovableTlab_ {nullptr};
1671     // tlab for shared old space
1672     ThreadLocalAllocationBuffer *sOldTlab_ {nullptr};
1673     /*
1674      * Garbage collectors collecting garbage in different scopes.
1675      */
1676 
1677     /*
1678      * Semi space GC which collects garbage only in the young spaces.
1679      * It is optional for now because the partial GC also covers its functionality.
1680      */
1681     STWYoungGC *stwYoungGC_ {nullptr};
1682 
1683     /*
1684      * The most frequently used partial GC, which collects garbage in the young spaces
1685      * and, when GC heuristics decide it is needed, in part of the old spaces.
1686      */
1687     PartialGC *partialGC_ {nullptr};
1688 
1689     // Full collector which collects garbage in all valid heap spaces.
1690     FullGC *fullGC_ {nullptr};
1691 
1692     // Concurrent marker which coordinates actions of GC markers and mutators.
1693     ConcurrentMarker *concurrentMarker_ {nullptr};
1694 
1695     // Concurrent sweeper which coordinates actions of sweepers (in spaces excluding young semi spaces) and mutators.
1696     ConcurrentSweeper *sweeper_ {nullptr};
1697 
1698     // Parallel evacuator which evacuates objects from one space to another one.
1699     ParallelEvacuator *evacuator_ {nullptr};
1700 
1701     // Incremental marker which coordinates actions of GC markers in idle time.
1702     IncrementalMarker *incrementalMarker_ {nullptr};
1703 
1704     /*
1705      * Different kinds of markers used by different collectors.
1706      * Depending on the collector algorithm, some markers can do simple marking
1707      * while others also need to handle object movement.
1708      */
1709     Marker *nonMovableMarker_ {nullptr};
1710     Marker *semiGCMarker_ {nullptr};
1711     Marker *compressGCMarker_ {nullptr};
1712 
1713     // Work manager managing the tasks mostly generated in the GC mark phase.
1714     WorkManager *workManager_ {nullptr};
1715 
1716     SharedGCLocalStoragePackedData sharedGCData_;
1717 
1718     bool onSerializeEvent_ {false};
1719     bool parallelGC_ {true};
1720     bool fullGCRequested_ {false};
1721     bool fullMarkRequested_ {false};
1722     bool oldSpaceLimitAdjusted_ {false};
1723     bool enableIdleGC_ {false};
1724     std::atomic_bool isCSetClearing_ {false};
1725     HeapMode mode_ { HeapMode::NORMAL };
1726 
1727     /*
1728      * The memory controller providing memory statistics (on allocations and collections),
1729      * which is used for GC heuristics.
1730      */
1731     MemController *memController_ {nullptr};
1732     size_t edenToYoungSize_ {0};
1733     size_t promotedSize_ {0};
1734     size_t semiSpaceCopiedSize_ {0};
1735     size_t nativeBindingSize_ {0};
1736     size_t globalSpaceNativeLimit_ {0};
1737     size_t nativeSizeTriggerGCThreshold_ {0};
1738     size_t incNativeSizeTriggerGC_ {0};
1739     size_t nativeSizeOvershoot_ {0};
1740     size_t asyncClearNativePointerThreshold_ {0};
1741     size_t nativeSizeAfterLastGC_ {0};
1742     size_t nativeBindingSizeAfterLastGC_ {0};
1743     size_t newAllocatedSharedObjectSize_ {0};
1744     // recordObjectSize_ & recordNativeSize_:
1745     // Record memory usage before a taskpool task starts; used to decide whether to trigger GC after the task finishes.
1746     size_t recordObjectSize_ {0};
1747     size_t recordNativeSize_ {0};
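    /*
     * Illustrative sketch (hypothetical comparison; the real policy lives in the heap
     * implementation): after a taskpool task finishes, growth since the recorded snapshot
     * can be compared against some limit to decide whether a GC should be requested, e.g.
     *
     *     bool grownTooMuch = GetHeapObjectSize() - recordObjectSize_ > objectDelta ||
     *                         GetNativeBindingSize() - recordNativeSize_ > nativeDelta;
     *
     * where objectDelta and nativeDelta stand in for whatever limits the policy uses.
     */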
1748     // Record the heap object size before entering sensitive status
1749     size_t recordObjSizeBeforeSensitive_ {0};
1750     size_t pendingAsyncNativeCallbackSize_ {0};
1751     MemGrowingType memGrowingtype_ {MemGrowingType::HIGH_THROUGHPUT};
1752 
1753     // Number of parallel evacuator tasks.
1754     uint32_t maxEvacuateTaskCount_ {0};
1755 
1756     // Application status
1757 
1758     IdleNotifyStatusCallback notifyIdleStatusCallback {nullptr};
1759 
1760     IdleTaskType idleTask_ {IdleTaskType::NO_TASK};
1761     float idlePredictDuration_ {0.0f};
1762     double idleTaskFinishTime_ {0.0};
1763 
1764     /*
1765      * The listeners which are called at the end of GC
1766      */
1767     std::vector<std::pair<FinishGCListener, void *>> gcListeners_;
1768 
1769     IdleGCTrigger *idleGCTrigger_ {nullptr};
1770 
1771     bool hasOOMDump_ {false};
1772     bool enableEdenGC_ {false};
1773 
1774     CVector<JSNativePointer *> nativePointerList_;
1775     CVector<JSNativePointer *> concurrentNativePointerList_;
1776     CVector<JSNativePointer *> sharedNativePointerList_;
1777 
1778     friend panda::test::HProfTestHelper;
1779     friend panda::test::GCTest_CallbackTask_Test;
1780 };
1781 }  // namespace panda::ecmascript
1782 
1783 #endif  // ECMASCRIPT_MEM_HEAP_H
1784