• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "ecmascript/mem/heap-inl.h"
17 
18 #include "ecmascript/ecma_vm.h"
19 #include "ecmascript/free_object.h"
20 #include "ecmascript/js_finalization_registry.h"
21 #include "ecmascript/js_native_pointer.h"
22 #include "ecmascript/linked_hash_table.h"
23 #include "ecmascript/mem/assert_scope.h"
24 #include "ecmascript/mem/concurrent_marker.h"
25 #include "ecmascript/mem/concurrent_sweeper.h"
26 #include "ecmascript/mem/full_gc.h"
27 #include "ecmascript/mem/incremental_marker.h"
28 #include "ecmascript/mem/mark_stack.h"
29 #include "ecmascript/mem/mem_controller.h"
30 #include "ecmascript/mem/partial_gc.h"
31 #include "ecmascript/mem/native_area_allocator.h"
32 #include "ecmascript/mem/parallel_evacuator.h"
33 #include "ecmascript/mem/parallel_marker-inl.h"
34 #include "ecmascript/mem/stw_young_gc.h"
35 #include "ecmascript/mem/verification.h"
36 #include "ecmascript/mem/work_manager.h"
37 #include "ecmascript/mem/gc_stats.h"
38 #include "ecmascript/ecma_string_table.h"
39 #include "ecmascript/runtime_call_id.h"
40 #if !WIN_OR_MAC_OR_IOS_PLATFORM
41 #include "ecmascript/dfx/hprof/heap_profiler_interface.h"
42 #include "ecmascript/dfx/hprof/heap_profiler.h"
43 #endif
44 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
45 #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
46 #endif
47 
48 namespace panda::ecmascript {
// Heap is created per VM. The constructor only caches the owning VM, its JS
// thread and the two allocators; all spaces and collectors are created later
// in Initialize().
Heap::Heap(EcmaVM *ecmaVm) : ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()),
                             nativeAreaAllocator_(ecmaVm->GetNativeAreaAllocator()),
                             heapRegionAllocator_(ecmaVm->GetHeapRegionAllocator()) {}
52 
// Create and wire up every heap space, GC collector and marker.
// Must run once after construction and before any allocation; aborts
// (FATAL log) if the configured heap budget cannot fit all fixed spaces.
void Heap::Initialize()
{
    memController_ = new MemController(this);
    auto &config = ecmaVm_->GetEcmaParamConfiguration();
    size_t maxHeapSize = config.GetMaxHeapSize();
    size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
    size_t maxSemiSpaceCapacity = config.GetMaxSemiSpaceSize();
    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    activeSemiSpace_->Restart();
    activeSemiSpace_->SetWaterLine();
    // Hand the active semi-space's allocation top/end addresses to the JS
    // thread so it can bump-allocate without calling back into the heap.
    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    // not set up from space

    size_t readOnlySpaceCapacity = config.GetDefaultReadOnlySpaceSize();
    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
    size_t nonmovableSpaceCapacity = config.GetDefaultNonMovableSpaceSize();
    // A command-line option may override the configured non-movable capacity.
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
    nonMovableSpace_->Initialize();
    size_t snapshotSpaceCapacity = config.GetDefaultSnapshotSpaceSize();
    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
    size_t machineCodeSpaceCapacity = config.GetDefaultMachineCodeSpaceSize();
    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);

    // Old space receives whatever remains of the heap budget after the
    // fixed-size spaces (two semi-spaces, non-movable, snapshot, machine
    // code, read-only) are accounted for.
    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) {
        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
    }
    size_t oldSpaceCapacity = maxHeapSize - capacities;
    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    // compressSpace_ is the evacuation target for compress GC; same budget.
    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    oldSpace_->Initialize();

    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    // Mark tasks are capped one below the taskpool thread count.
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);

    LOG_GC(DEBUG) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
                 << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
                 << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
                 << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
                 << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
                 << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
                 << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
                 << ", gcThreadNum = " << maxMarkTaskCount_;
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
    markType_ = MarkType::MARK_YOUNG;
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    // One work-manager slot per taskpool thread plus one for the main thread.
    workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
    stwYoungGC_ = new STWYoungGC(this, parallelGC_);
    fullGC_ = new FullGC(this);

    partialGC_ = new PartialGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
        EnableConcurrentMarkType::CONFIG_DISABLE);
    nonMovableMarker_ = new NonMovableMarker(this);
    semiGCMarker_ = new SemiGCMarker(this);
    compressGCMarker_ = new CompressGCMarker(this);
    evacuator_ = new ParallelEvacuator(this);
    incrementalMarker_ = new IncrementalMarker(this);
}
129 
// Release every space and GC component owned by the heap.
// Each member is null-checked and reset to nullptr, so Destroy() is safe on
// a partially initialized heap and idempotent on repeat calls.
// Note: some spaces call Reset() before delete (old, non-movable, machine
// code, app-spawn) while others call Destroy() (semi, compress, snapshot,
// huge); the read-only space additionally clears its read-only protection
// first and is skipped entirely in SHARE mode.
void Heap::Destroy()
{
    if (workManager_ != nullptr) {
        delete workManager_;
        workManager_ = nullptr;
    }
    if (activeSemiSpace_ != nullptr) {
        activeSemiSpace_->Destroy();
        delete activeSemiSpace_;
        activeSemiSpace_ = nullptr;
    }
    if (inactiveSemiSpace_ != nullptr) {
        inactiveSemiSpace_->Destroy();
        delete inactiveSemiSpace_;
        inactiveSemiSpace_ = nullptr;
    }
    if (oldSpace_ != nullptr) {
        oldSpace_->Reset();
        delete oldSpace_;
        oldSpace_ = nullptr;
    }
    if (compressSpace_ != nullptr) {
        compressSpace_->Destroy();
        delete compressSpace_;
        compressSpace_ = nullptr;
    }
    if (nonMovableSpace_ != nullptr) {
        nonMovableSpace_->Reset();
        delete nonMovableSpace_;
        nonMovableSpace_ = nullptr;
    }
    if (snapshotSpace_ != nullptr) {
        snapshotSpace_->Destroy();
        delete snapshotSpace_;
        snapshotSpace_ = nullptr;
    }
    if (machineCodeSpace_ != nullptr) {
        machineCodeSpace_->Reset();
        delete machineCodeSpace_;
        machineCodeSpace_ = nullptr;
    }
    if (hugeObjectSpace_ != nullptr) {
        hugeObjectSpace_->Destroy();
        delete hugeObjectSpace_;
        hugeObjectSpace_ = nullptr;
    }
    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
        readOnlySpace_->ClearReadOnly();
        readOnlySpace_->Destroy();
        delete readOnlySpace_;
        readOnlySpace_ = nullptr;
    }
    if (appSpawnSpace_ != nullptr) {
        appSpawnSpace_->Reset();
        delete appSpawnSpace_;
        appSpawnSpace_ = nullptr;
    }
    if (stwYoungGC_ != nullptr) {
        delete stwYoungGC_;
        stwYoungGC_ = nullptr;
    }
    if (partialGC_ != nullptr) {
        delete partialGC_;
        partialGC_ = nullptr;
    }
    if (fullGC_ != nullptr) {
        delete fullGC_;
        fullGC_ = nullptr;
    }

    // The allocators are owned by the VM, not the heap; just drop the refs.
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (memController_ != nullptr) {
        delete memController_;
        memController_ = nullptr;
    }
    if (sweeper_ != nullptr) {
        delete sweeper_;
        sweeper_ = nullptr;
    }
    if (concurrentMarker_ != nullptr) {
        delete concurrentMarker_;
        concurrentMarker_ = nullptr;
    }
    if (incrementalMarker_ != nullptr) {
        delete incrementalMarker_;
        incrementalMarker_ = nullptr;
    }
    if (nonMovableMarker_ != nullptr) {
        delete nonMovableMarker_;
        nonMovableMarker_ = nullptr;
    }
    if (semiGCMarker_ != nullptr) {
        delete semiGCMarker_;
        semiGCMarker_ = nullptr;
    }
    if (compressGCMarker_ != nullptr) {
        delete compressGCMarker_;
        compressGCMarker_ = nullptr;
    }
    if (evacuator_ != nullptr) {
        delete evacuator_;
        evacuator_ = nullptr;
    }
}
236 
// Quiesce the heap before a GC cycle: wait for running GC tasks, make sure
// concurrent sweeping has completed, and wait for the async region-clear
// task posted by the previous Resume().
void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}
244 
Resume(TriggerGCType gcType)245 void Heap::Resume(TriggerGCType gcType)
246 {
247     if (mode_ != HeapMode::SPAWN &&
248         activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC())) {
249         // if activeSpace capacity changes, oldSpace maximumCapacity should change, too.
250         size_t multiple = 2;
251         size_t oldSpaceMaxLimit = 0;
252         if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
253             size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
254             oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
255         } else {
256             size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
257             oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
258         }
259         inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
260     }
261 
262     activeSemiSpace_->SetWaterLine();
263     PrepareRecordRegionsForReclaim();
264     hugeObjectSpace_->ReclaimHugeRegion();
265     if (parallelGC_) {
266         clearTaskFinished_ = false;
267         Taskpool::GetCurrentTaskpool()->PostTask(
268             std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
269     } else {
270         ReclaimRegions(gcType);
271     }
272 }
273 
ResumeForAppSpawn()274 void Heap::ResumeForAppSpawn()
275 {
276     sweeper_->WaitAllTaskFinished();
277     hugeObjectSpace_->ReclaimHugeRegion();
278     inactiveSemiSpace_->ReclaimRegions();
279     oldSpace_->Reset();
280     auto cb = [] (Region *region) {
281         region->ClearMarkGCBitset();
282     };
283     nonMovableSpace_->EnumerateRegions(cb);
284     machineCodeSpace_->EnumerateRegions(cb);
285     hugeObjectSpace_->EnumerateRegions(cb);
286 }
287 
// Compact the whole heap by running an app-spawn full GC; called before the
// process forks app instances.
void Heap::CompactHeapBeforeFork()
{
    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
}
292 
// Switch the heap to single-threaded GC and tear down the taskpool.
// All outstanding tasks must finish first so no worker thread touches the
// heap after the pool is destroyed.
void Heap::DisableParallelGC()
{
    WaitAllTasksFinished();
    parallelGC_ = false;
    maxEvacuateTaskCount_ = 0;
    maxMarkTaskCount_ = 0;
    stwYoungGC_->ConfigParallelGC(false);
    sweeper_->ConfigConcurrentSweep(false);
    concurrentMarker_->ConfigConcurrentMark(false);
    Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
}
304 
// Re-enable parallel/concurrent GC from the current JS options (the mirror
// of DisableParallelGC). Re-initializes the taskpool and rebuilds
// workManager_ if its slot count no longer matches the pool's thread count.
void Heap::EnableParallelGC()
{
    Taskpool::GetCurrentTaskpool()->Initialize();
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    // workManager_ was sized for the old taskpool (+1 for the main thread);
    // recreate it if the pool size changed.
    if (auto totalThreadNum = workManager_->GetTotalThreadNum();
        totalThreadNum != maxEvacuateTaskCount_ + 1) {
        LOG_ECMA_MEM(WARN) << "TheadNum mismatch, totalThreadNum(workerManager): " << totalThreadNum << ", "
                           << "totalThreadNum(taskpool): " << (maxEvacuateTaskCount_ + 1);
        delete workManager_;
        workManager_ = new WorkManager(this, maxEvacuateTaskCount_ + 1);
    }
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
                                         maxEvacuateTaskCount_ - 1);
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    stwYoungGC_->ConfigParallelGC(parallelGC_);
    sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
    concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
}
327 
SelectGCType() const328 TriggerGCType Heap::SelectGCType() const
329 {
330     // If concurrent mark is enabled, the TryTriggerConcurrentMarking decide which GC to choose.
331     if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark()) {
332         return YOUNG_GC;
333     }
334     if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
335         GetHeapObjectSize() <= globalSpaceAllocLimit_ && !GlobalNativeSizeLargerThanLimit()) {
336         return YOUNG_GC;
337     }
338     return OLD_GC;
339 }
340 
// Run one garbage collection of the requested type (possibly upgraded to
// FULL_GC by a pending request or a build flag), then adjust heap limits,
// record statistics and invoke weak-reference / finalization callbacks.
// gcType: collection to run; reason: recorded in GC statistics.
void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
{
    // Cross-thread execution cannot safely stop the world; skip this GC.
    if (thread_->IsCrossThreadExecutionEnable()) {
        return;
    }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    [[maybe_unused]] GcStateScope scope(thread_);
#endif
    CHECK_NO_GC

#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    LOG_ECMA(DEBUG) << "Enable heap verify";
    isVerifying_ = true;
    // pre gc heap verify
    sweeper_->EnsureAllTaskFinished();
    auto failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG_GC(FATAL) << "Before gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif

#if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
    gcType = TriggerGCType::FULL_GC;
#endif
    // A previously requested full GC upgrades this collection, but only when
    // the mutator is ready to mark.
    if (fullGCRequested_ && thread_->IsReadyToMark() && gcType != TriggerGCType::FULL_GC) {
        gcType = TriggerGCType::FULL_GC;
    }
    // Snapshot pre-GC new-space sizes for the survival-rate bookkeeping below.
    size_t originalNewSpaceSize = activeSemiSpace_->GetHeapObjectSize();
    size_t originalNewSpaceNativeSize = activeSemiSpace_->GetNativeBindingSize();
    memController_->StartCalculationBeforeGC();
    StatisticHeapObject(gcType);
    if (!GetJSThread()->IsReadyToMark() && markType_ == MarkType::MARK_FULL) {
        ecmaVm_->GetEcmaGCStats()->SetGCReason(reason);
    } else {
        ecmaVm_->GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, reason);
    }
    switch (gcType) {
        case TriggerGCType::YOUNG_GC:
            // Use partial GC for young generation.
            if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
                SetMarkType(MarkType::MARK_YOUNG);
            }
            partialGC_->RunPhases();
            break;
        case TriggerGCType::OLD_GC:
            if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
                // Wait for existing concurrent marking tasks to be finished (if any),
                // and reset concurrent marker's status for full mark.
                bool concurrentMark = CheckOngoingConcurrentMarking();
                if (concurrentMark) {
                    concurrentMarker_->Reset();
                }
            }
            SetMarkType(MarkType::MARK_FULL);
            partialGC_->RunPhases();
            break;
        case TriggerGCType::FULL_GC:
            fullGC_->SetForAppSpawn(false);
            fullGC_->RunPhases();
            if (fullGCRequested_) {
                fullGCRequested_ = false;
            }
            break;
        case TriggerGCType::APPSPAWN_FULL_GC:
            fullGC_->SetForAppSpawn(true);
            fullGC_->RunPhasesForAppSpawn();
            break;
        default:
            LOG_ECMA(FATAL) << "this branch is unreachable";
            UNREACHABLE();
            break;
    }

    // OOMError object is not allowed to be allocated during gc process, so throw OOMError after gc
    if (shouldThrowOOMError_) {
        ThrowOutOfMemoryError(oldSpace_->GetMergeSize(), " OldSpace::Merge");
        oldSpace_->ResetMergeSize();
        shouldThrowOOMError_ = false;
    }

    ClearIdleTask();
    // Adjust the old space capacity and global limit for the first partial GC with full mark.
    // Trigger the full mark next time if the current survival rate is much less than half the average survival rates.
    AdjustBySurvivalRate(originalNewSpaceSize);
    activeSemiSpace_->AdjustNativeLimit(originalNewSpaceNativeSize);
    memController_->StopCalculationAfterGC(gcType);
    if (gcType == TriggerGCType::FULL_GC || IsFullMark()) {
        // Only when the gc type is not semiGC and after the old space sweeping has been finished,
        // the limits of old space and global space can be recomputed.
        RecomputeLimits();
        OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark" << IsFullMark()
                                     << " global object size " << GetHeapObjectSize()
                                     << " global committed size " << GetCommittedSize()
                                     << " global limit " << globalSpaceAllocLimit_;
        markType_ = MarkType::MARK_YOUNG;
    }
    if (concurrentMarker_->IsRequestDisabled()) {
        concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
    }
    // GC log
    ecmaVm_->GetEcmaGCStats()->RecordStatisticAfterGC();
    ecmaVm_->GetEcmaGCStats()->PrintGCStatistic();
    // weak node nativeFinalizeCallback may execute JS and change the weakNodeList status,
    // even lead to another GC, so this have to invoke after this GC process.
    InvokeWeakNodeNativeFinalizeCallback();

#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    // post gc heap verify
    isVerifying_ = true;
    sweeper_->EnsureAllTaskFinished();
    failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG_GC(FATAL) << "After gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif
    JSFinalizationRegistry::CheckAndCall(thread_);
}
460 
ThrowOutOfMemoryError(size_t size,std::string functionName)461 void Heap::ThrowOutOfMemoryError(size_t size, std::string functionName)
462 {
463     GetEcmaVM()->GetEcmaGCStats()->PrintGCMemoryStatistic();
464     std::ostringstream oss;
465     oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
466     LOG_ECMA_MEM(ERROR) << oss.str().c_str();
467     THROW_OOM_ERROR(thread_, oss.str().c_str());
468 }
469 
FatalOutOfMemoryError(size_t size,std::string functionName)470 void Heap::FatalOutOfMemoryError(size_t size, std::string functionName)
471 {
472     GetEcmaVM()->GetEcmaGCStats()->PrintGCMemoryStatistic();
473     LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
474                         << " function name: " << functionName.c_str();
475 }
476 
AdjustBySurvivalRate(size_t originalNewSpaceSize)477 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
478 {
479     if (originalNewSpaceSize <= 0) {
480         return;
481     }
482     semiSpaceCopiedSize_ = activeSemiSpace_->GetHeapObjectSize();
483     double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
484     promotedSize_ = GetEvacuator()->GetPromotedSize();
485     double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
486     double survivalRate = std::min(copiedRate + promotedRate, 1.0);
487     OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
488                                 << " survivalRate: " << survivalRate;
489     if (!oldSpaceLimitAdjusted_) {
490         memController_->AddSurvivalRate(survivalRate);
491         AdjustOldSpaceLimit();
492     } else {
493         double averageSurvivalRate = memController_->GetAverageSurvivalRate();
494         // 2 means half
495         if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
496             SetFullMarkRequestedState(true);
497             OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
498                 << " is less than half the average survival rates: " << averageSurvivalRate
499                 << ". Trigger full mark next time.";
500             // Survival rate of full mark is precise. Reset recorded survival rates.
501             memController_->ResetRecordedSurvivalRates();
502         }
503         memController_->AddSurvivalRate(survivalRate);
504     }
505 }
506 
VerifyHeapObjects() const507 size_t Heap::VerifyHeapObjects() const
508 {
509     size_t failCount = 0;
510     {
511         VerifyObjectVisitor verifier(this, &failCount);
512         activeSemiSpace_->IterateOverObjects(verifier);
513     }
514 
515     {
516         VerifyObjectVisitor verifier(this, &failCount);
517         oldSpace_->IterateOverObjects(verifier);
518     }
519 
520     {
521         VerifyObjectVisitor verifier(this, &failCount);
522         appSpawnSpace_->IterateOverMarkedObjects(verifier);
523     }
524 
525     {
526         VerifyObjectVisitor verifier(this, &failCount);
527         nonMovableSpace_->IterateOverObjects(verifier);
528     }
529 
530     {
531         VerifyObjectVisitor verifier(this, &failCount);
532         hugeObjectSpace_->IterateOverObjects(verifier);
533     }
534     {
535         VerifyObjectVisitor verifier(this, &failCount);
536         machineCodeSpace_->IterateOverObjects(verifier);
537     }
538     {
539         VerifyObjectVisitor verifier(this, &failCount);
540         snapshotSpace_->IterateOverObjects(verifier);
541     }
542     return failCount;
543 }
544 
// Verify the old-to-new remembered sets of every old-generation space and
// return the number of corrupted references found.
size_t Heap::VerifyOldToNewRSet() const
{
    size_t failCount = 0;
    VerifyObjectVisitor verifier(this, &failCount);
    oldSpace_->IterateOldToNewOverObjects(verifier);
    appSpawnSpace_->IterateOldToNewOverObjects(verifier);
    nonMovableSpace_->IterateOldToNewOverObjects(verifier);
    machineCodeSpace_->IterateOldToNewOverObjects(verifier);
    return failCount;
}
555 
// Gradually grow the old-space and global allocation limits during the
// startup phase, scaled by the average survival rate. Once the computed
// old-space limit stops growing, the limit is considered tuned and
// oldSpaceLimitAdjusted_ latches to true (RecomputeLimits takes over then).
void Heap::AdjustOldSpaceLimit()
{
    if (oldSpaceLimitAdjusted_) {
        return;
    }
    size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
    size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
    size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
    if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
        GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
    } else {
        oldSpaceLimitAdjusted_ = true;
    }

    // The global limit only ever shrinks here; growth happens in
    // RecomputeLimits after full-mark GCs.
    size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
    if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
        globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
    }
    // temporarily regard the heap limit is the same as the native limit.
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
        << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
}
581 
// Forward an object allocation to the heap profiler, if one is attached.
// Compiles to a no-op when heap-profiler support is disabled.
void Heap::OnAllocateEvent([[maybe_unused]] TaggedObject* address, [[maybe_unused]] size_t size)
{
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
    if (profiler != nullptr) {
        // Suppress native allocation hooks while the profiler records.
        BlockHookScope blockScope;
        profiler->AllocationEvent(address, size);
    }
#endif
}
592 
// Notify the heap profiler that an object moved from `address` to
// `forwardAddress` during evacuation/compaction. No-op when heap-profiler
// support is disabled.
void Heap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
                       [[maybe_unused]] size_t size)
{
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
    if (profiler != nullptr) {
        // Suppress native allocation hooks while the profiler records.
        BlockHookScope blockScope;
        profiler->MoveEvent(address, forwardAddress, size);
    }
#endif
}
604 
// Add `value` to the global WeakRefKeepObjects set so it stays strongly
// reachable until ClearKeptObjects() runs. The backing LinkedHashSet is
// created lazily on first use.
void Heap::AddToKeptObjects(JSHandle<JSTaggedValue> value) const
{
    JSHandle<GlobalEnv> env = ecmaVm_->GetGlobalEnv();
    JSHandle<LinkedHashSet> linkedSet;
    if (env->GetWeakRefKeepObjects()->IsUndefined()) {
        linkedSet = LinkedHashSet::Create(thread_);
    } else {
        linkedSet =
            JSHandle<LinkedHashSet>(thread_, LinkedHashSet::Cast(env->GetWeakRefKeepObjects()->GetTaggedObject()));
    }
    // Add may reallocate the set, so store the returned handle back.
    linkedSet = LinkedHashSet::Add(thread_, linkedSet, value);
    env->SetWeakRefKeepObjects(thread_, linkedSet);
}
618 
// Switch the heap into SPAWN mode: shrink the semi-space to its configured
// minimum, freeze the app-spawn space at its committed size, and deduct that
// committed size from the old-space budget.
void Heap::AdjustSpaceSizeForAppSpawn()
{
    SetHeapMode(HeapMode::SPAWN);
    auto &config = ecmaVm_->GetEcmaParamConfiguration();
    size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
    activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
    auto committedSize = appSpawnSpace_->GetCommittedSize();
    appSpawnSpace_->SetInitialCapacity(committedSize);
    appSpawnSpace_->SetMaximumCapacity(committedSize);
    oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
    oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
}
631 
// Register an allocation inspector with every space that supports one.
// The inspector pointer is borrowed; the caller keeps ownership.
void Heap::AddAllocationInspectorToAllSpaces(AllocationInspector *inspector)
{
    ASSERT(inspector != nullptr);
    // activeSemiSpace_/inactiveSemiSpace_:
    // only add an inspector to activeSemiSpace_, and while sweeping for gc, inspector need be swept.
    activeSemiSpace_->AddAllocationInspector(inspector);
    // oldSpace_/compressSpace_:
    // only add an inspector to oldSpace_, and while sweeping for gc, inspector need be swept.
    oldSpace_->AddAllocationInspector(inspector);
    // readOnlySpace_ need not allocationInspector.
    // appSpawnSpace_ need not allocationInspector.
    nonMovableSpace_->AddAllocationInspector(inspector);
    machineCodeSpace_->AddAllocationInspector(inspector);
    hugeObjectSpace_->AddAllocationInspector(inspector);
}
647 
// Remove the allocation inspector from every space that one was added to
// (the inverse of AddAllocationInspectorToAllSpaces).
void Heap::ClearAllocationInspectorFromAllSpaces()
{
    activeSemiSpace_->ClearAllocationInspector();
    oldSpace_->ClearAllocationInspector();
    nonMovableSpace_->ClearAllocationInspector();
    machineCodeSpace_->ClearAllocationInspector();
    hugeObjectSpace_->ClearAllocationInspector();
}
656 
// Drop the WeakRefKeepObjects set, releasing every value retained via
// AddToKeptObjects().
void Heap::ClearKeptObjects() const
{
    ecmaVm_->GetGlobalEnv()->SetWeakRefKeepObjects(thread_, JSTaggedValue::Undefined());
}
661 
// Recompute old-space, global and native allocation limits from the measured
// mark-compact speed and mutator allocation throughput. Called from
// CollectGarbage after a full GC or a full-mark partial GC.
void Heap::RecomputeLimits()
{
    double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
    double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
    size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
    size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();

    double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
    size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
    size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
        maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
    size_t maxGlobalSize = ecmaVm_->GetEcmaParamConfiguration().GetMaxHeapSize() - newSpaceCapacity;
    size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
                                                                     maxGlobalSize, newSpaceCapacity, growingFactor);
    globalSpaceAllocLimit_ = newGlobalSpaceLimit;
    oldSpace_->SetInitialCapacity(newOldSpaceLimit);
    globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(GetGlobalNativeSize(), MIN_HEAP_SIZE,
                                                                  MAX_GLOBAL_NATIVE_LIMIT, newSpaceCapacity,
                                                                  growingFactor);
    OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
        << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
        << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
    // If live objects occupy far less than the committed old space, request a
    // full mark next time so the committed memory can be reclaimed.
    if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize() &&
        (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) { // 2: means half
        OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
                                    << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
                                    << " Committed Size: " << oldSpace_->GetCommittedSize();
        SetFullMarkRequestedState(true);
    }
}
692 
CheckAndTriggerOldGC(size_t size)693 void Heap::CheckAndTriggerOldGC(size_t size)
694 {
695     if (OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) || GetHeapObjectSize() > globalSpaceAllocLimit_ ||
696         GlobalNativeSizeLargerThanLimit()) {
697         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
698     }
699 }
700 
// Synchronize with an in-flight concurrent mark before the caller proceeds.
// Returns true when a triggered concurrent mark had to be waited on,
// false when there was nothing to wait for.
bool Heap::CheckOngoingConcurrentMarking()
{
    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark() &&
        concurrentMarker_->IsTriggeredConcurrentMark()) {
        TRACE_GC(GCStats::Scope::ScopeId::WaitConcurrentMarkFinished, GetEcmaVM()->GetEcmaGCStats());
        if (thread_->IsMarking()) {
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
            MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), WaitConcurrentMarkingFinished);
            // Help drain the mark stack on the main thread before blocking on the marker.
            GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
            WaitConcurrentMarkingFinished();
        } else {
            // Marking itself has ended; only outstanding parallel tasks remain.
            WaitRunningTaskFinished();
        }
        // Record mark statistics so the next limit recomputation can use them.
        memController_->RecordAfterConcurrentMark(IsFullMark(), concurrentMarker_);
        return true;
    }
    return false;
}
719 
// Drop the pending idle task and remember when idle work last finished;
// TriggerIdleCollection uses that timestamp to stop idle notifications
// after IDLE_MAINTAIN_TIME of inactivity.
void Heap::ClearIdleTask()
{
    SetIdleTask(IdleTaskType::NO_TASK);
    idleTaskFinishTime_ = incrementalMarker_->GetCurrentTimeInMs();
}
725 
TryTriggerIdleCollection()726 void Heap::TryTriggerIdleCollection()
727 {
728     if (idleTask_ != IdleTaskType::NO_TASK || !GetJSThread()->IsReadyToMark() || !enableIdleGC_) {
729         return;
730     }
731     if (thread_->IsMarkFinished() && concurrentMarker_->IsTriggeredConcurrentMark()) {
732         SetIdleTask(IdleTaskType::FINISH_MARKING);
733         EnableNotifyIdle();
734         CalculateIdleDuration();
735         return;
736     }
737 
738     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
739     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
740     double newSpaceAllocToLimitDuration =
741         (activeSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetCommittedSize()) / newSpaceAllocSpeed;
742     double newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
743     double newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
744     // 2 means double
745     if (newSpaceRemainSize < 2 * DEFAULT_REGION_SIZE || activeSemiSpace_->NativeBindingSizeLargerThanLimit()) {
746         SetIdleTask(IdleTaskType::YOUNG_GC);
747         SetMarkType(MarkType::MARK_YOUNG);
748         EnableNotifyIdle();
749         CalculateIdleDuration();
750         return;
751     }
752 }
753 
// Estimate the pause of the pending idle GC task from the per-phase speeds
// recorded during previous collections, accumulating into idlePredictDuration_
// (logged in ms). A speed of 0 means "no data yet"; that phase contributes nothing.
void Heap::CalculateIdleDuration()
{
    // update reference duration
    idlePredictDuration_ = 0.0f;
    size_t updateReferenceSpeed = markType_ == MarkType::MARK_YOUNG ?
        ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_UPDATE_REFERENCE_SPEED) :
        ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::UPDATE_REFERENCE_SPEED);
    if (updateReferenceSpeed != 0) {
        idlePredictDuration_ += (float)GetHeapObjectSize() / updateReferenceSpeed;
    }

    // clear native object duration
    size_t clearNativeObjSpeed = 0;
    if (markType_ == MarkType::MARK_YOUNG) {
        clearNativeObjSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_CLEAR_NATIVE_OBJ_SPEED);
    } else if (markType_ == MarkType::MARK_FULL) {
        clearNativeObjSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_CLEAR_NATIVE_OBJ_SPEED);
    }

    if (clearNativeObjSpeed != 0) {
        idlePredictDuration_ += (float)GetEcmaVM()->GetNativePointerListSize() / clearNativeObjSpeed;
    }

    // sweep and evacuate duration
    size_t youngEvacuateSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_EVACUATE_SPACE_SPEED);
    size_t sweepSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::SWEEP_SPEED);
    size_t oldEvacuateSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_EVACUATE_SPACE_SPEED);
    // Only the surviving fraction of the semi space needs to be evacuated.
    double survivalRate = ecmaVm_->GetEcmaGCStats()->GetAvgSurvivalRate();
    if (markType_ == MarkType::MARK_YOUNG && youngEvacuateSpeed != 0) {
        idlePredictDuration_ += survivalRate * activeSemiSpace_->GetHeapObjectSize() / youngEvacuateSpeed;
    } else if (markType_ == MarkType::MARK_FULL) {
        if (sweepSpeed != 0) {
            idlePredictDuration_ += (float)GetHeapObjectSize() / sweepSpeed;
        }
        if (oldEvacuateSpeed != 0) {
            // Full GC also evacuates the collect-region set gathered during marking.
            size_t collectRegionSetSize = GetEcmaVM()->GetEcmaGCStats()->GetRecordData(
                RecordData::COLLECT_REGION_SET_SIZE);
            idlePredictDuration_ += (survivalRate * activeSemiSpace_->GetHeapObjectSize() + collectRegionSetSize) /
                                    oldEvacuateSpeed;
        }
    }

    // Idle YoungGC mark duration
    size_t markSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::MARK_SPEED);
    if (idleTask_ == IdleTaskType::YOUNG_GC && markSpeed != 0) {
        idlePredictDuration_ += (float)activeSemiSpace_->GetHeapObjectSize() / markSpeed;
    }
    OPTIONAL_LOG(ecmaVm_, INFO) << "Predict idle gc pause: " << idlePredictDuration_ << "ms";
}
803 
TryTriggerIncrementalMarking()804 void Heap::TryTriggerIncrementalMarking()
805 {
806     if (!GetJSThread()->IsReadyToMark() || idleTask_ != IdleTaskType::NO_TASK || !enableIdleGC_) {
807         return;
808     }
809     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
810     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
811     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
812     double oldSpaceIncrementalMarkSpeed = incrementalMarker_->GetAverageIncrementalMarkingSpeed();
813     double oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
814     double oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceIncrementalMarkSpeed;
815 
816     double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
817     // mark finished before allocate limit
818     if ((oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) ||
819         GetHeapObjectSize() >= globalSpaceAllocLimit_) {
820         // The object allocated in incremental marking should lower than limit,
821         // otherwise select trigger concurrent mark.
822         size_t allocateSize = oldSpaceAllocSpeed * oldSpaceMarkDuration;
823         if (allocateSize < ALLOCATE_SIZE_LIMIT) {
824             EnableNotifyIdle();
825             SetIdleTask(IdleTaskType::INCREMENTAL_MARK);
826         }
827     }
828 }
829 
void Heap::TryTriggerConcurrentMarking()
{
    // When concurrent marking is enabled, concurrent marking will be attempted to trigger.
    // When the size of old space or global space reaches the limit, isFullMarkNeeded will be set to true.
    // If the predicted duration of current full mark may not result in the new and old spaces reaching their limit,
    // full mark will be triggered.
    // In the same way, if the size of the new space reaches the capacity, and the predicted duration of current
    // young mark may not result in the new space reaching its limit, young mark can be triggered.
    // If it spends much time in full mark, the compress full GC will be requested when the spaces reach the limit.
    // If the global space is larger than half max heap size, we will turn to use full mark and trigger partial GC.
    if (!concurrentMarker_->IsEnabled() || !thread_->IsReadyToMark() ||
        incrementalMarker_->IsTriggeredIncrementalMark() ||
        !(idleTask_ == IdleTaskType::NO_TASK || idleTask_ == IdleTaskType::YOUNG_GC)) {
        return;
    }
    // An explicit full-mark request (e.g. from native size pressure) takes priority.
    if (fullMarkRequested_) {
        markType_ = MarkType::MARK_FULL;
        OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
        TriggerConcurrentMarking();
        return;
    }
    bool isFullMarkNeeded = false;
    double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
           oldSpaceAllocToLimitDuration = 0;
    double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
    double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
    size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
    size_t globalHeapObjectSize = GetHeapObjectSize();
    size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
    if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
        // No throughput data yet: fall back to the hard limits for the first full mark.
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
            GlobalNativeSizeLargerThanLimit()) {
            markType_ = MarkType::MARK_FULL;
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
            TriggerConcurrentMarking();
            return;
        }
    } else {
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
            GlobalNativeSizeLargerThanLimit()) {
            isFullMarkNeeded = true;
        }
        oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
        oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
        // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
        double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
        if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
            isFullMarkNeeded = true;
        }
    }

    double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
    double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
    if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
        // No young-generation throughput data: trigger the first semi-space mark
        // purely on committed size against the configured threshold.
        auto &config = ecmaVm_->GetEcmaParamConfiguration();
        if (activeSemiSpace_->GetCommittedSize() >= config.GetSemiSpaceTriggerConcurrentMark()) {
            markType_ = MarkType::MARK_YOUNG;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark" << fullGCRequested_;
        }
        return;
    }
    newSpaceAllocToLimitDuration = (activeSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetCommittedSize()) /
        newSpaceAllocSpeed;
    newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
    // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
    newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;

    if (isFullMarkNeeded) {
        // Prefer a speed-justified full mark; otherwise only mark when a hard limit is hit.
        if (oldSpaceMarkDuration < newSpaceAllocToLimitDuration &&
            oldSpaceMarkDuration < oldSpaceAllocToLimitDuration) {
            markType_ = MarkType::MARK_FULL;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark by speed";
        } else {
            if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_  ||
                GlobalNativeSizeLargerThanLimit()) {
                markType_ = MarkType::MARK_FULL;
                TriggerConcurrentMarking();
                OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark by limit";
            }
        }
    } else if (newSpaceRemainSize < DEFAULT_REGION_SIZE || activeSemiSpace_->NativeBindingSizeLargerThanLimit()) {
        markType_ = MarkType::MARK_YOUNG;
        TriggerConcurrentMarking();
        OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
    }
}
918 
TryTriggerFullMarkByNativeSize()919 void Heap::TryTriggerFullMarkByNativeSize()
920 {
921     if (GlobalNativeSizeLargerThanLimit()) {
922         if (concurrentMarker_->IsEnabled()) {
923             SetFullMarkRequestedState(true);
924             TryTriggerConcurrentMarking();
925         } else {
926             CheckAndTriggerOldGC();
927         }
928     }
929 }
930 
IncreaseNativeBindingSize(JSNativePointer * object)931 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
932 {
933     size_t size = object->GetBindingSize();
934     if (size == 0) {
935         return;
936     }
937     Region *region = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(object));
938     if (region->InYoungSpace()) {
939         activeSemiSpace_->IncreaseNativeBindingSize(size);
940     } else {
941         nonNewSpaceNativeBindingSize_ += size;
942     }
943 }
944 
IncreaseNativeBindingSize(bool nonMovable,size_t size)945 void Heap::IncreaseNativeBindingSize(bool nonMovable, size_t size)
946 {
947     if (size == 0) {
948         return;
949     }
950     if (!nonMovable) {
951         activeSemiSpace_->IncreaseNativeBindingSize(size);
952     } else {
953         nonNewSpaceNativeBindingSize_ += size;
954     }
955 }
956 
PrepareRecordRegionsForReclaim()957 void Heap::PrepareRecordRegionsForReclaim()
958 {
959     activeSemiSpace_->SetRecordRegion();
960     oldSpace_->SetRecordRegion();
961     snapshotSpace_->SetRecordRegion();
962     nonMovableSpace_->SetRecordRegion();
963     hugeObjectSpace_->SetRecordRegion();
964     machineCodeSpace_->SetRecordRegion();
965 }
966 
// Start the concurrent marker if allowed. A pending idle young GC is cancelled
// first when a full mark is about to run, since the full mark supersedes it.
void Heap::TriggerConcurrentMarking()
{
    if (idleTask_ == IdleTaskType::YOUNG_GC && IsFullMark()) {
        ClearIdleTask();
        DisableNotifyIdle();
    }
    // Skip when a full (blocking) GC is already requested or the marker's
    // task budget is exhausted.
    if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
        concurrentMarker_->Mark();
    }
}
977 
// Block until every posted parallel GC task has completed, i.e. until
// runningTaskCount_ drops to zero (decremented in ReduceTaskCount, which
// signals this condition variable).
void Heap::WaitRunningTaskFinished()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    while (runningTaskCount_ > 0) {
        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
    }
}
985 
// Block until the asynchronous region-clearing task (AsyncClearTask) has
// finished; clearTaskFinished_ is flipped by the task's completion path.
void Heap::WaitClearTaskFinished()
{
    os::memory::LockHolder holder(waitClearTaskFinishedMutex_);
    while (!clearTaskFinished_) {
        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
    }
}
993 
// Quiesce all background GC activity: parallel tasks, concurrent sweeping,
// async region clearing, and (if still running) concurrent marking.
void Heap::WaitAllTasksFinished()
{
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
    // Only wait on the marker when a concurrent mark was actually triggered
    // and the mutator is still in the marking phase.
    if (concurrentMarker_->IsEnabled() && thread_->IsMarking() && concurrentMarker_->IsTriggeredConcurrentMark()) {
        concurrentMarker_->WaitMarkingFinished();
    }
}
1003 
// Block until the concurrent marker reports that marking is complete.
void Heap::WaitConcurrentMarkingFinished()
{
    concurrentMarker_->WaitMarkingFinished();
}
1008 
// Post one parallel GC work unit to the shared task pool.
// The running-task counter is incremented BEFORE posting so that a waiter in
// WaitRunningTaskFinished() cannot observe zero while the task is in flight.
void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
{
    IncreaseTaskCount();
    Taskpool::GetCurrentTaskpool()->PostTask(
        std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
}
1015 
// Atomically (under the wait mutex) record one more in-flight parallel GC task.
void Heap::IncreaseTaskCount()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_++;
}
1021 
ChangeGCParams(bool inBackground)1022 void Heap::ChangeGCParams(bool inBackground)
1023 {
1024     inBackground_ = inBackground;
1025     if (inBackground) {
1026         LOG_GC(INFO) << "app is inBackground";
1027         if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT) {
1028             CollectGarbage(TriggerGCType::FULL_GC, GCReason::SWITCH_BACKGROUND);
1029         }
1030         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
1031             SetMemGrowingType(MemGrowingType::CONSERVATIVE);
1032             LOG_GC(INFO) << "Heap Growing Type CONSERVATIVE";
1033         }
1034         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
1035         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::DISABLE);
1036         maxMarkTaskCount_ = 1;
1037         maxEvacuateTaskCount_ = 1;
1038     } else {
1039         LOG_GC(INFO) << "app is not inBackground";
1040         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
1041             SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
1042             LOG_GC(INFO) << "Heap Growing Type HIGH_THROUGHPUT";
1043         }
1044         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
1045         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
1046         maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
1047             Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
1048         maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
1049     }
1050 }
1051 
// Run the pending idle GC task if the host-reported idle slice is long enough.
// NOTE(review): idleMicroSec is compared directly against idlePredictDuration_,
// which CalculateIdleDuration logs in ms — confirm the caller's unit; the
// parameter name suggests microseconds.
void Heap::TriggerIdleCollection(int idleMicroSec)
{
    if (idleTask_ == IdleTaskType::NO_TASK) {
        // Nothing pending; stop idle notifications after a quiet period.
        if (incrementalMarker_->GetCurrentTimeInMs() - idleTaskFinishTime_ > IDLE_MAINTAIN_TIME) {
            DisableNotifyIdle();
        }
        return;
    }

    // Incremental mark initialize and process
    if (idleTask_ == IdleTaskType::INCREMENTAL_MARK &&
        incrementalMarker_->GetIncrementalGCStates() != IncrementalGCStates::REMARK) {
        incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
        // Reaching REMARK means only the final pause remains; re-estimate it.
        if (incrementalMarker_->GetIncrementalGCStates() == IncrementalGCStates::REMARK) {
            CalculateIdleDuration();
        }
        return;
    }

    // Idle slice shorter than the predicted pause (and below the hard cap): wait.
    if (idleMicroSec < idlePredictDuration_ && idleMicroSec < IDLE_TIME_LIMIT) {
        return;
    }

    switch (idleTask_) {
        case IdleTaskType::FINISH_MARKING: {
            // Collect with the GC type matching the mark that just finished.
            if (markType_ == MarkType::MARK_FULL) {
                CollectGarbage(TriggerGCType::OLD_GC, GCReason::IDLE);
            } else {
                CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
            }
            break;
        }
        case IdleTaskType::YOUNG_GC:
            CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
            break;
        case IdleTaskType::INCREMENTAL_MARK:
            incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
            break;
        default:
            break;
    }
    ClearIdleTask();
}
1095 
NotifyMemoryPressure(bool inHighMemoryPressure)1096 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
1097 {
1098     if (inHighMemoryPressure) {
1099         LOG_GC(INFO) << "app is inHighMemoryPressure";
1100         SetMemGrowingType(MemGrowingType::PRESSURE);
1101     } else {
1102         LOG_GC(INFO) << "app is not inHighMemoryPressure";
1103         SetMemGrowingType(MemGrowingType::CONSERVATIVE);
1104     }
1105 }
1106 
// Whether another parallel mark task may be posted without exceeding the
// configured per-mode limit (see ChangeGCParams for how the limit is set).
bool Heap::CheckCanDistributeTask()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    return runningTaskCount_ < maxMarkTaskCount_;
}
1112 
// Record completion of one parallel GC task; wake all waiters when the last
// in-flight task finishes.
void Heap::ReduceTaskCount()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_--;
    if (runningTaskCount_ == 0) {
        waitTaskFinishedCV_.SignalAll();
    }
}
1121 
Run(uint32_t threadIndex)1122 bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
1123 {
1124     switch (taskPhase_) {
1125         case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
1126             heap_->GetSemiGCMarker()->MarkRoots(threadIndex);
1127             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
1128             break;
1129         case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
1130             heap_->GetSemiGCMarker()->ProcessSnapshotRSet(threadIndex);
1131             break;
1132         case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
1133             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
1134             break;
1135         case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
1136             heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
1137             break;
1138         case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
1139             heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
1140             break;
1141         case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
1142             heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
1143             break;
1144         case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
1145             heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
1146             break;
1147         default:
1148             break;
1149     }
1150     heap_->ReduceTaskCount();
1151     return true;
1152 }
1153 
// Task-pool entry point that reclaims the regions recorded for the given GC
// type off the main thread; WaitClearTaskFinished() synchronizes with it.
bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    heap_->ReclaimRegions(gcType_);
    return true;
}
1159 
GetArrayBufferSize() const1160 size_t Heap::GetArrayBufferSize() const
1161 {
1162     size_t result = 0;
1163     sweeper_->EnsureAllTaskFinished();
1164     this->IterateOverObjects([&result](TaggedObject *obj) {
1165         JSHClass* jsClass = obj->GetClass();
1166         result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
1167     });
1168     return result;
1169 }
1170 
IsAlive(TaggedObject * object) const1171 bool Heap::IsAlive(TaggedObject *object) const
1172 {
1173     if (!ContainObject(object)) {
1174         LOG_GC(ERROR) << "The region is already free";
1175         return false;
1176     }
1177 
1178     bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
1179     if (isFree) {
1180         Region *region = Region::ObjectAddressToRange(object);
1181         LOG_GC(ERROR) << "The object " << object << " in "
1182                             << region->GetSpaceTypeName()
1183                             << " already free";
1184     }
1185     return !isFree;
1186 }
1187 
// Best-effort check that `object` lies inside some heap space, by probing the
// region derived from its address. See the caveat below — the result can be
// inaccurate and the probe itself may touch freed memory.
bool Heap::ContainObject(TaggedObject *object) const
{
    /*
     * fixme: There's no absolutely safe appraoch to doing this, given that the region object is currently
     * allocated and maintained in the JS object heap. We cannot safely tell whether a region object
     * calculated from an object address is still valid or alive in a cheap way.
     * This will introduce inaccurate result to verify if an object is contained in the heap, and it may
     * introduce additional incorrect memory access issues.
     * Unless we can tolerate the performance impact of iterating the region list of each space and change
     * the implementation to that approach, don't rely on current implementation to get accurate result.
     */
    Region *region = Region::ObjectAddressToRange(object);
    return region->InHeapSpace();
}
1202 
// Run the queued native finalize callbacks of dead weak nodes (LIFO order).
// Guarded by runningNativeFinalizeCallbacks_ because a callback may itself
// trigger a GC that re-enters this function.
void Heap::InvokeWeakNodeNativeFinalizeCallback()
{
    // the second callback may lead to another GC, if this, return directly;
    if (runningNativeFinalizeCallbacks_) {
        return;
    }
    runningNativeFinalizeCallbacks_ = true;
    auto weakNodeNativeFinalizeCallBacks = thread_->GetWeakNodeNativeFinalizeCallbacks();
    while (!weakNodeNativeFinalizeCallBacks->empty()) {
        auto callbackPair = weakNodeNativeFinalizeCallBacks->back();
        weakNodeNativeFinalizeCallBacks->pop_back();
        ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
        auto callback = callbackPair.first;
        // callbackPair.second is the opaque native data passed to the callback.
        (*callback)(callbackPair.second);
    }
    runningNativeFinalizeCallbacks_ = false;
}
1220 
// Log a one-shot summary of the heap before a collection: the GC type, the
// marking mode, and per-space sizes (object size / committed / initial capacity,
// where a space tracks all three).
void Heap::PrintHeapInfo(TriggerGCType gcType) const
{
    OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
    OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
                                << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsFullMark() << ")";
    OPTIONAL_LOG(ecmaVm_, INFO) << "ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize()
                   << "/" << activeSemiSpace_->GetInitialCapacity() << "), NonMovable("
                   << nonMovableSpace_->GetHeapObjectSize() << "/" << nonMovableSpace_->GetCommittedSize()
                   << "/" << nonMovableSpace_->GetInitialCapacity() << "), Old("
                   << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize()
                   << "/" << oldSpace_->GetInitialCapacity() << "), HugeObject("
                   << hugeObjectSpace_->GetHeapObjectSize() << "/" << hugeObjectSpace_->GetCommittedSize()
                   << "/" << hugeObjectSpace_->GetInitialCapacity() << "), ReadOnlySpace("
                   << readOnlySpace_->GetCommittedSize() << "/" << readOnlySpace_->GetInitialCapacity()
                   << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize() << "/"
                   << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
                   << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
}
1239 
StatisticHeapObject(TriggerGCType gcType) const1240 void Heap::StatisticHeapObject(TriggerGCType gcType) const
1241 {
1242     PrintHeapInfo(gcType);
1243 #if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
1244     static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
1245     int typeCount[JS_TYPE_LAST] = { 0 };
1246     static const int MIN_COUNT_THRESHOLD = 1000;
1247 
1248     nonMovableSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1249         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1250     });
1251     for (int i = 0; i < JS_TYPE_LAST; i++) {
1252         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1253             LOG_ECMA(INFO) << "NonMovable space type " << JSHClass::DumpJSType(JSType(i))
1254                            << " count:" << typeCount[i];
1255         }
1256         typeCount[i] = 0;
1257     }
1258 
1259     oldSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1260         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1261     });
1262     for (int i = 0; i < JS_TYPE_LAST; i++) {
1263         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1264             LOG_ECMA(INFO) << "Old space type " << JSHClass::DumpJSType(JSType(i))
1265                            << " count:" << typeCount[i];
1266         }
1267         typeCount[i] = 0;
1268     }
1269 
1270     activeSemiSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1271         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1272     });
1273     for (int i = 0; i < JS_TYPE_LAST; i++) {
1274         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1275             LOG_ECMA(INFO) << "Active semi space type " << JSHClass::DumpJSType(JSType(i))
1276                            << " count:" << typeCount[i];
1277         }
1278         typeCount[i] = 0;
1279     }
1280 #endif
1281 }
1282 }  // namespace panda::ecmascript
1283