/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_HEAP_H
#define ECMASCRIPT_MEM_HEAP_H

#include "ecmascript/base/config.h"
#include "ecmascript/frames.h"
#include "ecmascript/js_thread.h"
#include "ecmascript/mem/linear_space.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/sparse_space.h"
#include "ecmascript/mem/work_manager.h"
#include "ecmascript/taskpool/taskpool.h"

namespace panda::ecmascript {
class ConcurrentMarker;
class ConcurrentSweeper;
class EcmaVM;
class FullGC;
class HeapRegionAllocator;
class HeapTracker;
#if !WIN_OR_MAC_OR_IOS_PLATFORM
class HeapProfilerInterface;
class HeapProfiler;
#endif
class IncrementalMarker;
class JSNativePointer;
class Marker;
class MemController;
class NativeAreaAllocator;
class ParallelEvacuator;
class PartialGC;
class STWYoungGC;

using IdleNotifyStatusCallback = std::function<void(bool)>;

enum class IdleTaskType : uint8_t {
    NO_TASK,
    YOUNG_GC,
    FINISH_MARKING,
    INCREMENTAL_MARK
};

enum class MarkType : uint8_t {
    MARK_YOUNG,
    MARK_FULL
};

enum class MemGrowingType : uint8_t {
    HIGH_THROUGHPUT,
    CONSERVATIVE,
    PRESSURE
};

enum class HeapMode {
    NORMAL,
    SPAWN,
    SHARE,
};

enum AppSensitiveStatus : uint8_t {
    NORMAL_SCENE,
    ENTER_HIGH_SENSITIVE,
    EXIT_HIGH_SENSITIVE,
};

enum class VerifyKind {
    VERIFY_PRE_GC,
    VERIFY_POST_GC,
    VERIFY_CONCURRENT_MARK_YOUNG,
    VERIFY_EVACUATE_YOUNG,
    VERIFY_CONCURRENT_MARK_FULL,
    VERIFY_EVACUATE_OLD,
    VERIFY_EVACUATE_FULL
};

class Heap {
public:
    explicit Heap(EcmaVM *ecmaVm);
    ~Heap() = default;
    NO_COPY_SEMANTIC(Heap);
    NO_MOVE_SEMANTIC(Heap);
    void Initialize();
    void Destroy();
    void Prepare();
    void Resume(TriggerGCType gcType);
    void ResumeForAppSpawn();
    void CompactHeapBeforeFork();
    void DisableParallelGC();
    void EnableParallelGC();
    // fixme: Rename NewSpace to YoungSpace.
    // This is the active young generation space that the new objects are allocated in
    // or copied into (from the other semi space) during semi space GC.
    SemiSpace *GetNewSpace() const
    {
        return activeSemiSpace_;
    }

    /*
     * Return the original active space where the objects are to be evacuated during semi space GC.
     * This should be invoked only in the evacuation phase of semi space GC.
     * fixme: Get rid of this interface or make it safe considering the above implicit limitation / requirement.
     */
    SemiSpace *GetFromSpaceDuringEvacuation() const
    {
        return inactiveSemiSpace_;
    }
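    /*
     * Usage sketch (hypothetical caller, not part of this header): during the evacuation phase
     * of a semi space GC the two getters above name the two ends of the copy, i.e. live objects
     * move out of the "from" space and into the active space:
     *
     *   SemiSpace *to = heap->GetNewSpace();                    // copy destination
     *   SemiSpace *from = heap->GetFromSpaceDuringEvacuation(); // copy source, evacuation only
     */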
    OldSpace *GetOldSpace() const
    {
        return oldSpace_;
    }

    NonMovableSpace *GetNonMovableSpace() const
    {
        return nonMovableSpace_;
    }

    HugeObjectSpace *GetHugeObjectSpace() const
    {
        return hugeObjectSpace_;
    }

    MachineCodeSpace *GetMachineCodeSpace() const
    {
        return machineCodeSpace_;
    }

    HugeMachineCodeSpace *GetHugeMachineCodeSpace() const
    {
        return hugeMachineCodeSpace_;
    }

    SnapshotSpace *GetSnapshotSpace() const
    {
        return snapshotSpace_;
    }

    ReadOnlySpace *GetReadOnlySpace() const
    {
        return readOnlySpace_;
    }

    AppSpawnSpace *GetAppSpawnSpace() const
    {
        return appSpawnSpace_;
    }

    SparseSpace *GetSpaceWithType(MemSpaceType type) const
    {
        switch (type) {
            case MemSpaceType::OLD_SPACE:
                return oldSpace_;
            case MemSpaceType::NON_MOVABLE:
                return nonMovableSpace_;
            case MemSpaceType::MACHINE_CODE_SPACE:
                return machineCodeSpace_;
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }
    }

    STWYoungGC *GetSTWYoungGC() const
    {
        return stwYoungGC_;
    }

    PartialGC *GetPartialGC() const
    {
        return partialGC_;
    }

    FullGC *GetFullGC() const
    {
        return fullGC_;
    }

    ConcurrentSweeper *GetSweeper() const
    {
        return sweeper_;
    }

    ParallelEvacuator *GetEvacuator() const
    {
        return evacuator_;
    }

    ConcurrentMarker *GetConcurrentMarker() const
    {
        return concurrentMarker_;
    }

    IncrementalMarker *GetIncrementalMarker() const
    {
        return incrementalMarker_;
    }

    Marker *GetNonMovableMarker() const
    {
        return nonMovableMarker_;
    }

    Marker *GetSemiGCMarker() const
    {
        return semiGCMarker_;
    }

    Marker *GetCompressGCMarker() const
    {
        return compressGCMarker_;
    }

    EcmaVM *GetEcmaVM() const
    {
        return ecmaVm_;
    }

    JSThread *GetJSThread() const
    {
        return thread_;
    }

    WorkManager *GetWorkManager() const
    {
        return workManager_;
    }

    MemController *GetMemController() const
    {
        return memController_;
    }

    bool InSensitiveStatus() const
    {
        return sensitiveStatus_.load(std::memory_order_relaxed) == AppSensitiveStatus::ENTER_HIGH_SENSITIVE
            || onStartupEvent_;
    }

    AppSensitiveStatus GetSensitiveStatus() const
    {
        return sensitiveStatus_.load(std::memory_order_relaxed);
    }

    bool onStartUpEvent() const
    {
        return onStartupEvent_;
    }

    void SetSensitiveStatus(AppSensitiveStatus status)
    {
        sensitiveStatus_.store(status, std::memory_order_release);
    }

    bool CASSensitiveStatus(AppSensitiveStatus expect, AppSensitiveStatus status)
    {
        return sensitiveStatus_.compare_exchange_strong(expect, status, std::memory_order_seq_cst);
    }
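    /*
     * Usage sketch (hypothetical caller, not part of this header): the CAS form lets exactly one
     * thread win a status transition when several may race on it:
     *
     *   if (heap->CASSensitiveStatus(AppSensitiveStatus::ENTER_HIGH_SENSITIVE,
     *                                AppSensitiveStatus::EXIT_HIGH_SENSITIVE)) {
     *       // this thread performed the transition; run any deferred work here
     *   }
     */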
    void NotifyPostFork()
    {
        LockHolder holder(finishColdStartMutex_);
        onStartupEvent_ = true;
        LOG_GC(INFO) << "SmartGC: enter app cold start";
    }

    void SetOnSerializeEvent(bool isSerialize)
    {
        onSerializeEvent_ = isSerialize;
        if (!onSerializeEvent_ && !InSensitiveStatus()) {
            TryTriggerIncrementalMarking();
            TryTriggerIdleCollection();
            TryTriggerConcurrentMarking();
        }
    }

    bool GetOnSerializeEvent() const
    {
        return onSerializeEvent_;
    }

    // Whether the heap should be verified during GC.
    bool ShouldVerifyHeap() const
    {
        return shouldVerifyHeap_;
    }

    /*
     * For object allocations.
     */

    // Young
    inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateYoungOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateYoungOrHugeObject(size_t size);
    inline uintptr_t AllocateYoungSync(size_t size);
    inline TaggedObject *TryAllocateYoungGeneration(JSHClass *hclass, size_t size);
    // Old
    inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateOldOrHugeObject(JSHClass *hclass, size_t size);
    // Non-movable
    inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass);
    inline TaggedObject *AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateClassClass(JSHClass *hclass, size_t size);
    // Huge
    inline TaggedObject *AllocateHugeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateHugeObject(size_t size);
    // Machine code
    inline TaggedObject *AllocateMachineCodeObject(JSHClass *hclass, size_t size);
    inline TaggedObject *AllocateHugeMachineCodeObject(size_t size);
    // Snapshot
    inline uintptr_t AllocateSnapshotSpace(size_t size);

    NativeAreaAllocator *GetNativeAreaAllocator() const
    {
        return nativeAreaAllocator_;
    }

    HeapRegionAllocator *GetHeapRegionAllocator() const
    {
        return heapRegionAllocator_;
    }

    /*
     * GC triggers.
     */

    void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER);

    bool CheckAndTriggerOldGC(size_t size = 0);
    bool CheckAndTriggerHintGC();
    TriggerGCType SelectGCType() const;
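    /*
     * Usage sketch (hypothetical caller, not part of this header): as the names suggest, the
     * "...OrHugeObject" allocators are assumed to fall back to the huge object space for
     * oversized requests, and a caller can pair allocation with an explicit old-GC check:
     *
     *   TaggedObject *obj = heap->AllocateYoungOrHugeObject(hclass, size);
     *   if (heap->CheckAndTriggerOldGC()) {
     *       // an old-space GC was triggered by current memory pressure
     *   }
     */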
    /*
     * Parallel GC related configurations and utilities.
     */

    void PostParallelGCTask(ParallelGCTaskPhase taskPhase);

    bool IsParallelGCEnabled() const
    {
        return parallelGC_;
    }
    void ChangeGCParams(bool inBackground);
    void TriggerIdleCollection(int idleMicroSec);
    void NotifyMemoryPressure(bool inHighMemoryPressure);
    bool CheckCanDistributeTask();

    void WaitRunningTaskFinished();

    void TryTriggerConcurrentMarking();
    void AdjustBySurvivalRate(size_t originalNewSpaceSize);
    void TriggerConcurrentMarking();
    bool CheckCanTriggerConcurrentMarking();

    void TryTriggerIdleCollection();
    void TryTriggerIncrementalMarking();
    void CalculateIdleDuration();
    void UpdateWorkManager(WorkManager *workManager);
    /*
     * Wait for existing concurrent marking tasks to be finished (if any).
     * Return true if there's ongoing concurrent marking.
     */
    bool CheckOngoingConcurrentMarking();

    /*
     * Functions invoked during GC.
     */

    void SetMarkType(MarkType markType)
    {
        markType_ = markType;
    }

    bool IsConcurrentFullMark() const
    {
        return markType_ == MarkType::MARK_FULL;
    }

    inline void SwapNewSpace();
    inline void SwapOldSpace();

    inline bool MoveYoungRegionSync(Region *region);
    inline void MergeToOldSpaceSync(LocalSpace *localSpace);

    template<class Callback>
    void EnumerateOldSpaceRegions(const Callback &cb, Region *region = nullptr) const;

    template<class Callback>
    void EnumerateNonNewSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const;

    template<class Callback>
    void EnumerateNewSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateSnapshotSpaceRegions(const Callback &cb) const;

    template<class Callback>
    void EnumerateNonMovableRegions(const Callback &cb) const;

    template<class Callback>
    inline void EnumerateRegions(const Callback &cb) const;

    inline void ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd);

    void WaitAllTasksFinished();
    void WaitConcurrentMarkingFinished();

    MemGrowingType GetMemGrowingType() const
    {
        return memGrowingtype_;
    }

    void SetMemGrowingType(MemGrowingType memGrowingType)
    {
        memGrowingtype_ = memGrowingType;
    }

    inline size_t GetCommittedSize() const;

    inline size_t GetHeapObjectSize() const;
    size_t GetLiveObjectSize() const;

    inline uint32_t GetHeapObjectCount() const;

    size_t GetPromotedSize() const
    {
        return promotedSize_;
    }

    size_t GetArrayBufferSize() const;

    size_t GetHeapLimitSize() const;

    uint32_t GetMaxMarkTaskCount() const
    {
        return maxMarkTaskCount_;
    }

    uint32_t GetMaxEvacuateTaskCount() const
    {
        return maxEvacuateTaskCount_;
    }
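    /*
     * Usage sketch (hypothetical caller, not part of this header): the Enumerate*Regions
     * templates accept any callable taking a Region*, e.g. counting all regions in the heap:
     *
     *   size_t regionCount = 0;
     *   heap->EnumerateRegions([&regionCount]([[maybe_unused]] Region *region) {
     *       regionCount++;
     *   });
     */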
    /*
     * Receive a callback function to control idle time.
     */
    inline void InitializeIdleStatusControl(std::function<void(bool)> callback);

    void DisableNotifyIdle()
    {
        if (notifyIdleStatusCallback != nullptr) {
            notifyIdleStatusCallback(true);
        }
    }

    void EnableNotifyIdle()
    {
        if (enableIdleGC_ && notifyIdleStatusCallback != nullptr) {
            notifyIdleStatusCallback(false);
        }
    }

    void SetIdleTask(IdleTaskType task)
    {
        idleTask_ = task;
    }

    void ClearIdleTask();

    bool IsEmptyIdleTask() const
    {
        return idleTask_ == IdleTaskType::NO_TASK;
    }

    void NotifyFinishColdStart(bool isMainThread = true);

    void NotifyFinishColdStartSoon();

    void NotifyHighSensitive(bool isStart);

    void HandleExitHighSensitiveEvent();

    bool ObjectExceedMaxHeapSize() const;

    bool NeedStopCollection();

#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    void StartHeapTracking()
    {
        WaitAllTasksFinished();
    }

    void StopHeapTracking()
    {
        WaitAllTasksFinished();
    }
#endif
    void OnAllocateEvent(TaggedObject* address, size_t size);
    void OnMoveEvent(uintptr_t address, TaggedObject* forwardAddress, size_t size);
    void AddToKeptObjects(JSHandle<JSTaggedValue> value) const;
    void ClearKeptObjects() const;

    // add allocationInspector to each space
    void AddAllocationInspectorToAllSpaces(AllocationInspector *inspector);

    // clear allocationInspector from each space
    void ClearAllocationInspectorFromAllSpaces();

    /*
     * Functions used by heap verification.
     */

    template<class Callback>
    void IterateOverObjects(const Callback &cb) const;

    bool IsAlive(TaggedObject *object) const;
    bool ContainObject(TaggedObject *object) const;

    size_t VerifyHeapObjects(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
    size_t VerifyOldToNewRSet(VerifyKind verifyKind = VerifyKind::VERIFY_PRE_GC) const;
    void StatisticHeapObject(TriggerGCType gcType) const;
    void StatisticHeapDetail() const;
    void PrintHeapInfo(TriggerGCType gcType) const;

    bool OldSpaceExceedCapacity(size_t size) const
    {
        size_t totalSize = oldSpace_->GetCommittedSize() + hugeObjectSpace_->GetCommittedSize() + size;
        return totalSize >= oldSpace_->GetMaximumCapacity() + oldSpace_->GetOvershootSize() +
               oldSpace_->GetOutOfMemoryOvershootSize();
    }

    bool OldSpaceExceedLimit() const
    {
        size_t totalSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
        return totalSize >= oldSpace_->GetInitialCapacity() + oldSpace_->GetOvershootSize();
    }
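    /*
     * Worked example (illustrative numbers only): with GetMaximumCapacity() == 256 MB,
     * GetOvershootSize() == 8 MB and GetOutOfMemoryOvershootSize() == 0, a request of `size`
     * bytes exceeds capacity once committed(old) + committed(huge) + size >= 264 MB. Note that
     * the two predicates measure different things: the capacity check uses committed memory,
     * while the limit check compares live object sizes against the initial capacity.
     */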
    void AdjustSpaceSizeForAppSpawn();

    // ONLY used for heap verification.
    bool IsVerifying() const
    {
        return isVerifying_;
    }

    // ONLY used for heap verification.
    void SetVerifying(bool verifying)
    {
        isVerifying_ = verifying;
    }

    static bool ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object)
    {
        return hclass->IsString() && !Region::ObjectAddressToRange(object)->InHugeObjectSpace();
    }

    bool IsFullMarkRequested() const
    {
        return fullMarkRequested_;
    }

    void SetFullMarkRequestedState(bool fullMarkRequested)
    {
        fullMarkRequested_ = fullMarkRequested;
    }

    void ShouldThrowOOMError(bool shouldThrow)
    {
        shouldThrowOOMError_ = shouldThrow;
    }

    void SetHeapMode(HeapMode mode)
    {
        mode_ = mode;
    }

    void ThrowOutOfMemoryError(size_t size, std::string functionName, bool NonMovableObjNearOOM = false);
    void ThrowOutOfMemoryErrorForDefault(size_t size, std::string functionName, bool NonMovableObjNearOOM = false);

    void IncreaseNativeBindingSize(size_t size);
    void IncreaseNativeBindingSize(JSNativePointer *object);
    void ResetNativeBindingSize()
    {
        nativeBindingSize_ = 0;
    }

    size_t GetNativeBindingSize() const
    {
        return nativeBindingSize_;
    }

    size_t GetGlobalNativeSize() const
    {
        return GetNativeBindingSize() + nativeAreaAllocator_->GetNativeMemoryUsage();
    }

    bool GlobalNativeSizeLargerThanLimit() const
    {
        return GetGlobalNativeSize() >= globalSpaceNativeLimit_;
    }

    void TryTriggerFullMarkByNativeSize();

    void NotifyHeapAliveSizeAfterGC(size_t size)
    {
        heapAliveSizeAfterGC_ = size;
    }

    size_t GetHeapAliveSizeAfterGC() const
    {
        return heapAliveSizeAfterGC_;
    }

    bool IsInBackground() const
    {
        return inBackground_;
    }

    bool IsYoungGC() const
    {
        return gcType_ == TriggerGCType::YOUNG_GC;
    }

    bool GetOldGCRequested()
    {
        return oldGCRequested_;
    }

    TriggerGCType GetGCType() const
    {
        return gcType_;
    }

    void CheckNonMovableSpaceOOM();
    std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> CalCallSiteInfo(uintptr_t retAddr) const;
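    /*
     * Usage sketch (hypothetical caller, not part of this header): native memory kept alive by
     * JS objects (e.g. ArrayBuffer backing stores) is accounted separately from the managed
     * heap; a binding site would report it and let the heap decide whether a full mark is
     * warranted, presumably via GlobalNativeSizeLargerThanLimit():
     *
     *   heap->IncreaseNativeBindingSize(nativePointer); // or the size_t overload
     *   heap->TryTriggerFullMarkByNativeSize();
     */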
private:
    static constexpr int IDLE_TIME_LIMIT = 10;  // if idle time over 10ms we can do something
    static constexpr int ALLOCATE_SIZE_LIMIT = 100_KB;
    static constexpr int IDLE_MAINTAIN_TIME = 500;
    static constexpr int BACKGROUND_GROW_LIMIT = 2_MB;
    // Threshold at which HintGC will actually trigger a GC.
    static constexpr double SURVIVAL_RATE_THRESHOLD = 0.5;
    void FatalOutOfMemoryError(size_t size, std::string functionName);
    void RecomputeLimits();
    void AdjustOldSpaceLimit();
    // record lastRegion for each space, which will be used in ReclaimRegions()
    void PrepareRecordRegionsForReclaim();
    void IncreaseTaskCount();
    void ReduceTaskCount();
    void WaitClearTaskFinished();
    void InvokeWeakNodeNativeFinalizeCallback();
    void DumpHeapSnapshotBeforeOOM(bool isFullGC = true);
    inline void ReclaimRegions(TriggerGCType gcType);
    inline size_t CalculateCommittedCacheSize();

    class ParallelGCTask : public Task {
    public:
        ParallelGCTask(int32_t id, Heap *heap, ParallelGCTaskPhase taskPhase)
            : Task(id), heap_(heap), taskPhase_(taskPhase) {}
        ~ParallelGCTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(ParallelGCTask);
        NO_MOVE_SEMANTIC(ParallelGCTask);

    private:
        Heap *heap_ {nullptr};
        ParallelGCTaskPhase taskPhase_;
    };

    class AsyncClearTask : public Task {
    public:
        AsyncClearTask(int32_t id, Heap *heap, TriggerGCType type)
            : Task(id), heap_(heap), gcType_(type) {}
        ~AsyncClearTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(AsyncClearTask);
        NO_MOVE_SEMANTIC(AsyncClearTask);
    private:
        Heap *heap_;
        TriggerGCType gcType_;
    };

    class FinishColdStartTask : public Task {
    public:
        FinishColdStartTask(int32_t id, Heap *heap)
            : Task(id), heap_(heap) {}
        ~FinishColdStartTask() override = default;
        bool Run(uint32_t threadIndex) override;

        NO_COPY_SEMANTIC(FinishColdStartTask);
        NO_MOVE_SEMANTIC(FinishColdStartTask);
    private:
        Heap *heap_;
    };

    class RecursionScope {
    public:
        explicit RecursionScope(Heap* heap) : heap_(heap)
        {
            if (heap_->recursionDepth_++ != 0) {
                LOG_GC(FATAL) << "Recursion in HeapCollectGarbage Constructor, depth: " << heap_->recursionDepth_;
            }
        }
        ~RecursionScope()
        {
            if (--heap_->recursionDepth_ != 0) {
                LOG_GC(FATAL) << "Recursion in HeapCollectGarbage Destructor, depth: " << heap_->recursionDepth_;
            }
        }
    private:
        Heap* heap_ {nullptr};
    };
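    /*
     * A minimal sketch of how RecursionScope is intended to be used (the call site lives in the
     * implementation file, not in this header): an RAII guard that turns re-entrant garbage
     * collection into a fatal error instead of silent corruption.
     *
     *   void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
     *   {
     *       RecursionScope recurScope(this); // FATALs if CollectGarbage re-enters itself
     *       ...
     *   }
     */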
    EcmaVM *ecmaVm_ {nullptr};
    JSThread *thread_ {nullptr};

    /*
     * Heap spaces.
     */

    /*
     * Young generation spaces where most new objects are allocated.
     * (only one of the spaces is active at a time in semi space GC).
     */
    SemiSpace *activeSemiSpace_ {nullptr};
    SemiSpace *inactiveSemiSpace_ {nullptr};

    // Old generation spaces where some long living objects are allocated or promoted.
    OldSpace *oldSpace_ {nullptr};
    OldSpace *compressSpace_ {nullptr};
    ReadOnlySpace *readOnlySpace_ {nullptr};
    AppSpawnSpace *appSpawnSpace_ {nullptr};
    // Spaces used for special kinds of objects.
    NonMovableSpace *nonMovableSpace_ {nullptr};
    MachineCodeSpace *machineCodeSpace_ {nullptr};
    HugeMachineCodeSpace *hugeMachineCodeSpace_ {nullptr};
    HugeObjectSpace *hugeObjectSpace_ {nullptr};
    SnapshotSpace *snapshotSpace_ {nullptr};

    /*
     * Garbage collectors collecting garbage in different scopes.
     */

    /*
     * Semi space GC which collects garbage only in young spaces.
     * This is however optional for now because the partial GC also covers its functionality.
     */
    STWYoungGC *stwYoungGC_ {nullptr};

    /*
     * The mostly used partial GC which collects garbage in young spaces,
     * and part of old spaces if needed determined by GC heuristics.
     */
    PartialGC *partialGC_ {nullptr};

    // Full collector which collects garbage in all valid heap spaces.
    FullGC *fullGC_ {nullptr};

    // Concurrent marker which coordinates actions of GC markers and mutators.
    ConcurrentMarker *concurrentMarker_ {nullptr};

    // Concurrent sweeper which coordinates actions of sweepers (in spaces excluding young semi spaces) and mutators.
    ConcurrentSweeper *sweeper_ {nullptr};

    // Parallel evacuator which evacuates objects from one space to another one.
    ParallelEvacuator *evacuator_ {nullptr};

    // Incremental marker which coordinates actions of GC markers in idle time.
    IncrementalMarker *incrementalMarker_ {nullptr};

    /*
     * Different kinds of markers used by different collectors.
     * Depending on the collector algorithm, some markers can do simple marking
     * while some others need to handle object movement.
     */
    Marker *nonMovableMarker_ {nullptr};
    Marker *semiGCMarker_ {nullptr};
    Marker *compressGCMarker_ {nullptr};

    // Work manager managing the tasks mostly generated in the GC mark phase.
    WorkManager *workManager_ {nullptr};

    MarkType markType_ {MarkType::MARK_YOUNG};

    bool parallelGC_ {true};
    bool fullGCRequested_ {false};
    bool oldGCRequested_ {false};
    bool fullMarkRequested_ {false};
    bool oldSpaceLimitAdjusted_ {false};
    bool shouldThrowOOMError_ {false};
    bool runningNativeFinalizeCallbacks_ {false};
    bool enableIdleGC_ {false};
    HeapMode mode_ { HeapMode::NORMAL };

    size_t globalSpaceAllocLimit_ {0};
    size_t promotedSize_ {0};
    size_t semiSpaceCopiedSize_ {0};
    size_t nativeBindingSize_ {0};
    size_t globalSpaceNativeLimit_ {0};
    MemGrowingType memGrowingtype_ {MemGrowingType::HIGH_THROUGHPUT};
    TriggerGCType gcType_ {TriggerGCType::YOUNG_GC};

    bool clearTaskFinished_ {true};
    Mutex waitClearTaskFinishedMutex_;
    ConditionVariable waitClearTaskFinishedCV_;
    uint32_t runningTaskCount_ {0};
    // parallel marker task number.
    uint32_t maxMarkTaskCount_ {0};
    // parallel evacuator task number.
    uint32_t maxEvacuateTaskCount_ {0};
    Mutex finishColdStartMutex_;
    Mutex waitTaskFinishedMutex_;
    ConditionVariable waitTaskFinishedCV_;

    /*
     * The memory controller providing memory statistics (by allocations and collections),
     * which is used for GC heuristics.
     */
    MemController *memController_ {nullptr};

    // Region allocators.
    NativeAreaAllocator *nativeAreaAllocator_ {nullptr};
    HeapRegionAllocator *heapRegionAllocator_ {nullptr};

    // Application status
    bool inBackground_ {false};

    IdleNotifyStatusCallback notifyIdleStatusCallback {nullptr};
    std::atomic<AppSensitiveStatus> sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE};
    bool onStartupEvent_ {false};
    bool onSerializeEvent_ {false};

    IdleTaskType idleTask_ {IdleTaskType::NO_TASK};
    float idlePredictDuration_ {0.0f};
    size_t heapAliveSizeAfterGC_ {0};
    double idleTaskFinishTime_ {0.0};
    int32_t recursionDepth_ {0};
    // ONLY used for heap verification.
    bool shouldVerifyHeap_ {false};
    bool isVerifying_ {false};
};
} // namespace panda::ecmascript

#endif // ECMASCRIPT_MEM_HEAP_H