• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "ecmascript/mem/heap-inl.h"
17 
18 #include "ecmascript/ecma_vm.h"
19 #include "ecmascript/free_object.h"
20 #include "ecmascript/js_finalization_registry.h"
21 #include "ecmascript/js_native_pointer.h"
22 #include "ecmascript/linked_hash_table.h"
23 #include "ecmascript/mem/assert_scope.h"
24 #include "ecmascript/mem/concurrent_marker.h"
25 #include "ecmascript/mem/concurrent_sweeper.h"
26 #include "ecmascript/mem/full_gc.h"
27 #include "ecmascript/mem/mark_stack.h"
28 #include "ecmascript/mem/mem_controller.h"
29 #include "ecmascript/mem/partial_gc.h"
30 #include "ecmascript/mem/native_area_allocator.h"
31 #include "ecmascript/mem/parallel_evacuator.h"
32 #include "ecmascript/mem/parallel_marker-inl.h"
33 #include "ecmascript/mem/stw_young_gc.h"
34 #include "ecmascript/mem/verification.h"
35 #include "ecmascript/mem/work_manager.h"
36 #include "ecmascript/mem/gc_stats.h"
37 #include "ecmascript/ecma_string_table.h"
38 #include "ecmascript/runtime_call_id.h"
39 
40 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
41 #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
42 #endif
43 
44 namespace panda::ecmascript {
// Binds this heap to its owning VM and caches the VM's JS thread and the
// two allocators. Spaces and GC components are created later in Initialize().
Heap::Heap(EcmaVM *ecmaVm) : ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()),
                             nativeAreaAllocator_(ecmaVm->GetNativeAreaAllocator()),
                             heapRegionAllocator_(ecmaVm->GetHeapRegionAllocator()) {}
48 
// Creates every heap space and GC component from the VM's parameter
// configuration. Order matters: the active semispace's water line is set
// before its top/end addresses are published to the JS thread, and the old
// space receives whatever heap budget remains after all fixed-size spaces.
// Logs FATAL (aborts) if the configured max heap cannot fit a minimal old space.
void Heap::Initialize()
{
    memController_ = new MemController(this);
    auto &config = ecmaVm_->GetEcmaParamConfiguration();
    size_t maxHeapSize = config.GetMaxHeapSize();
    size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
    size_t maxSemiSpaceCapacity = config.GetMaxSemiSpaceSize();
    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    activeSemiSpace_->Restart();
    activeSemiSpace_->SetWaterLine();
    // Publish the semispace bump-pointer (top/end) addresses to the JS
    // thread so it can allocate in new space directly.
    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    // not set up from space

    size_t readOnlySpaceCapacity = config.GetDefaultReadOnlySpaceSize();
    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
    size_t nonmovableSpaceCapacity = config.GetDefaultNonMovableSpaceSize();
    // A JS option may override the default non-movable space capacity.
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
    nonMovableSpace_->Initialize();
    size_t snapshotSpaceCapacity = config.GetDefaultSnapshotSpaceSize();
    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
    size_t machineCodeSpaceCapacity = config.GetDefaultMachineCodeSpaceSize();
    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);

    // The two semispaces are counted twice (active + inactive); old space
    // gets the remainder of the heap budget.
    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) {
        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
    }
    size_t oldSpaceCapacity = maxHeapSize - capacities;
    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    // compressSpace_ is a second OldSpace used as the compaction target.
    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    oldSpace_->Initialize();

    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    // Mark tasks are capped at one below the total taskpool thread count.
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);

    LOG_GC(INFO) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
                 << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
                 << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
                 << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
                 << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
                 << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
                 << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
                 << ", gcThreadNum = " << maxMarkTaskCount_;
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
    markType_ = MarkType::MARK_YOUNG;
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    // One work-node slot per taskpool thread plus one for the main thread.
    workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
    stwYoungGC_ = new STWYoungGC(this, parallelGC_);
    fullGC_ = new FullGC(this);

    partialGC_ = new PartialGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
        EnableConcurrentMarkType::CONFIG_DISABLE);
    nonMovableMarker_ = new NonMovableMarker(this);
    semiGCMarker_ = new SemiGCMarker(this);
    compressGCMarker_ = new CompressGCMarker(this);
    evacuator_ = new ParallelEvacuator(this);
    idleData_ = new IdleData();
    enableIdleGC_ = ecmaVm_->GetJSOptions().EnableIdleGC();
}
126 
// Tears down everything created in Initialize(). Each member is
// null-checked and reset to nullptr after deletion, so Destroy() tolerates
// a partially-initialized heap and repeated calls. Spaces that own regions
// release them via Destroy()/Reset() before the object itself is deleted.
void Heap::Destroy()
{
    if (workManager_ != nullptr) {
        delete workManager_;
        workManager_ = nullptr;
    }
    if (activeSemiSpace_ != nullptr) {
        activeSemiSpace_->Destroy();
        delete activeSemiSpace_;
        activeSemiSpace_ = nullptr;
    }
    if (inactiveSemiSpace_ != nullptr) {
        inactiveSemiSpace_->Destroy();
        delete inactiveSemiSpace_;
        inactiveSemiSpace_ = nullptr;
    }
    if (oldSpace_ != nullptr) {
        oldSpace_->Reset();
        delete oldSpace_;
        oldSpace_ = nullptr;
    }
    if (compressSpace_ != nullptr) {
        compressSpace_->Destroy();
        delete compressSpace_;
        compressSpace_ = nullptr;
    }
    if (nonMovableSpace_ != nullptr) {
        nonMovableSpace_->Reset();
        delete nonMovableSpace_;
        nonMovableSpace_ = nullptr;
    }
    if (snapshotSpace_ != nullptr) {
        snapshotSpace_->Destroy();
        delete snapshotSpace_;
        snapshotSpace_ = nullptr;
    }
    if (machineCodeSpace_ != nullptr) {
        machineCodeSpace_->Reset();
        delete machineCodeSpace_;
        machineCodeSpace_ = nullptr;
    }
    if (hugeObjectSpace_ != nullptr) {
        hugeObjectSpace_->Destroy();
        delete hugeObjectSpace_;
        hugeObjectSpace_ = nullptr;
    }
    // NOTE(review): in SHARE mode the read-only space is intentionally not
    // freed here — presumably it is shared with / owned by another heap;
    // confirm against the HeapMode::SHARE setup code.
    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
        readOnlySpace_->ClearReadOnly();
        readOnlySpace_->Destroy();
        delete readOnlySpace_;
        readOnlySpace_ = nullptr;
    }
    if (appSpawnSpace_ != nullptr) {
        appSpawnSpace_->Reset();
        delete appSpawnSpace_;
        appSpawnSpace_ = nullptr;
    }
    if (stwYoungGC_ != nullptr) {
        delete stwYoungGC_;
        stwYoungGC_ = nullptr;
    }
    if (partialGC_ != nullptr) {
        delete partialGC_;
        partialGC_ = nullptr;
    }
    if (fullGC_ != nullptr) {
        delete fullGC_;
        fullGC_ = nullptr;
    }

    // Not owned by the heap: both were obtained from the VM in the
    // constructor, so only the references are dropped.
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (memController_ != nullptr) {
        delete memController_;
        memController_ = nullptr;
    }
    if (sweeper_ != nullptr) {
        delete sweeper_;
        sweeper_ = nullptr;
    }
    if (concurrentMarker_ != nullptr) {
        delete concurrentMarker_;
        concurrentMarker_ = nullptr;
    }
    if (nonMovableMarker_ != nullptr) {
        delete nonMovableMarker_;
        nonMovableMarker_ = nullptr;
    }
    if (semiGCMarker_ != nullptr) {
        delete semiGCMarker_;
        semiGCMarker_ = nullptr;
    }
    if (compressGCMarker_ != nullptr) {
        delete compressGCMarker_;
        compressGCMarker_ = nullptr;
    }
    if (evacuator_ != nullptr) {
        delete evacuator_;
        evacuator_ = nullptr;
    }
    if (idleData_ != nullptr) {
        delete idleData_;
        idleData_ = nullptr;
    }
}
233 
// Quiesces background GC work before a collection: waits for any running
// mark/evacuate tasks, for all concurrent sweeping, and for the async
// region-clear task posted by the previous Resume().
void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}
241 
// Post-GC bookkeeping: rebalances old-space capacity when the semispace was
// resized, resets the survivor water line, and reclaims evacuated regions
// (asynchronously when parallel GC is enabled).
void Heap::Resume(TriggerGCType gcType)
{
    if (mode_ != HeapMode::SPAWN &&
        activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC())) {
        // if activeSpace capacity changes, oldSpace maximumCapacity should change, too.
        size_t multiple = 2;  // both semispaces change, so scale the delta by 2
        size_t oldSpaceMaxLimit = 0;
        if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
            // Semispace grew: shrink the old-space budget by twice the delta.
            size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
        } else {
            // Semispace shrank: give the freed budget back to old space.
            size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
        }
        oldSpace_->SetMaximumCapacity(oldSpaceMaxLimit);
        inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
    }

    activeSemiSpace_->SetWaterLine();
    PrepareRecordRegionsForReclaim();
    hugeObjectSpace_->ReclaimHugeRegion();
    if (parallelGC_) {
        // Reclaim regions off-thread; Prepare()/WaitClearTaskFinished()
        // synchronizes with this flag before the next GC.
        clearTaskFinished_ = false;
        Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}
271 
// Post-GC cleanup for the app-spawn full GC: waits for sweeping, reclaims
// huge/inactive regions, resets old space, and clears the mark bitsets of
// all non-moving regions so stale mark bits don't leak into the next GC.
void Heap::ResumeForAppSpawn()
{
    sweeper_->WaitAllTaskFinished();
    hugeObjectSpace_->ReclaimHugeRegion();
    inactiveSemiSpace_->ReclaimRegions();
    oldSpace_->Reset();
    auto cb = [] (Region *region) {
        region->ClearMarkGCBitset();
    };
    nonMovableSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
}
285 
// Runs the app-spawn variant of the full GC; intended to be called right
// before the process forks (see also AdjustSpaceSizeForAppSpawn()).
void Heap::CompactHeapBeforeFork()
{
    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
}
290 
// Switches GC to fully single-threaded operation: drains all outstanding
// tasks first, zeroes the task budgets, disables parallel/concurrent modes
// on every executor, and finally destroys the shared taskpool.
void Heap::DisableParallelGC()
{
    WaitAllTasksFinished();
    parallelGC_ = false;
    maxEvacuateTaskCount_ = 0;
    maxMarkTaskCount_ = 0;
    stwYoungGC_->ConfigParallelGC(false);
    sweeper_->ConfigConcurrentSweep(false);
    concurrentMarker_->ConfigConcurrentMark(false);
    Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
}
302 
// Re-enables parallel/concurrent GC according to the current JS options:
// (re)initializes the taskpool, recomputes the task budgets (mirroring
// Initialize()), and pushes the settings into the executors.
void Heap::EnableParallelGC()
{
    Taskpool::GetCurrentTaskpool()->Initialize();
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
                                         maxEvacuateTaskCount_ - 1);
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    stwYoungGC_->ConfigParallelGC(parallelGC_);
    sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
    concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
}
318 
SelectGCType() const319 TriggerGCType Heap::SelectGCType() const
320 {
321     // If concurrent mark is enabled, the TryTriggerConcurrentMarking decide which GC to choose.
322     if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark()) {
323         return YOUNG_GC;
324     }
325     if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
326         GetHeapObjectSize() <= globalSpaceAllocLimit_) {
327         return YOUNG_GC;
328     }
329     return OLD_GC;
330 }
331 
// Runs one garbage collection of the requested type. Responsibilities:
// optional pre/post heap verification, upgrading the GC type when a full
// GC was requested, dispatching to the partial/full GC executors, post-GC
// limit adjustment, deferred OOM throwing, weak-node second-pass callbacks
// and FinalizationRegistry processing.
void Heap::CollectGarbage(TriggerGCType gcType)
{
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    [[maybe_unused]] GcStateScope scope(thread_);
#endif
    CHECK_NO_GC
#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    isVerifying_ = true;
    // pre gc heap verify
    sweeper_->EnsureAllTaskFinished();
    auto failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG_GC(FATAL) << "Before gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif

#if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
    gcType = TriggerGCType::FULL_GC;
#endif
    // A pending full-GC request upgrades any other GC type (only when the
    // mutator is ready to mark).
    if (fullGCRequested_ && thread_->IsReadyToMark() && gcType != TriggerGCType::FULL_GC) {
        gcType = TriggerGCType::FULL_GC;
    }
    // Snapshot pre-GC new-space sizes for the survival-rate computation below.
    size_t originalNewSpaceSize = activeSemiSpace_->GetHeapObjectSize();
    size_t originalNewSpaceNativeSize = activeSemiSpace_->GetNativeBindingSize();
    memController_->StartCalculationBeforeGC();
    StatisticHeapObject(gcType);
    switch (gcType) {
        case TriggerGCType::YOUNG_GC:
            // Use partial GC for young generation.
            if (!concurrentMarker_->IsEnabled()) {
                SetMarkType(MarkType::MARK_YOUNG);
            }
            partialGC_->RunPhases();
            break;
        case TriggerGCType::OLD_GC:
            if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
                // Wait for existing concurrent marking tasks to be finished (if any),
                // and reset concurrent marker's status for full mark.
                bool concurrentMark = CheckOngoingConcurrentMarking();
                if (concurrentMark) {
                    concurrentMarker_->Reset();
                }
            }
            SetMarkType(MarkType::MARK_FULL);
            partialGC_->RunPhases();
            break;
        case TriggerGCType::FULL_GC:
            fullGC_->SetForAppSpawn(false);
            fullGC_->RunPhases();
            if (fullGCRequested_) {
                fullGCRequested_ = false;
            }
            break;
        case TriggerGCType::APPSPAWN_FULL_GC:
            fullGC_->SetForAppSpawn(true);
            fullGC_->RunPhasesForAppSpawn();
            break;
        default:
            UNREACHABLE();
            break;
    }

    // OOMError object is not allowed to be allocated during gc process, so throw OOMError after gc
    if (shouldThrowOOMError_) {
        ThrowOutOfMemoryError(oldSpace_->GetMergeSize(), " OldSpace::Merge");
        oldSpace_->ResetMergeSize();
        shouldThrowOOMError_ = false;
    }

    // Adjust the old space capacity and global limit for the first partial GC with full mark.
    // Trigger the full mark next time if the current survival rate is much less than half the average survival rates.
    AdjustBySurvivalRate(originalNewSpaceSize);
    activeSemiSpace_->AdjustNativeLimit(originalNewSpaceNativeSize);
    memController_->StopCalculationAfterGC(gcType);
    if (gcType == TriggerGCType::FULL_GC || IsFullMark()) {
        // Only when the gc type is not semiGC and after the old space sweeping has been finished,
        // the limits of old space and global space can be recomputed.
        RecomputeLimits();
        OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark" << IsFullMark()
                                     << " global object size " << GetHeapObjectSize()
                                     << " global committed size " << GetCommittedSize()
                                     << " global limit " << globalSpaceAllocLimit_;
        markType_ = MarkType::MARK_YOUNG;
    }
    if (concurrentMarker_->IsRequestDisabled()) {
        concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
    }
    ecmaVm_->GetEcmaGCStats()->CheckIfLongTimePause();
# if ECMASCRIPT_ENABLE_GC_LOG
    ecmaVm_->GetEcmaGCStats()->PrintStatisticResult();
#endif
    // weak node secondPassCallback may execute JS and change the weakNodeList status,
    // even lead to another GC, so this have to invoke after this GC process.
    InvokeWeakNodeSecondPassCallback();

#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    // post gc heap verify
    isVerifying_ = true;
    sweeper_->EnsureAllTaskFinished();
    failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG_GC(FATAL) << "After gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif
    JSFinalizationRegistry::CheckAndCall(thread_);
}
440 
// Dumps heap statistics, logs the failed allocation (size in bytes plus the
// caller-supplied |functionName|), and throws a JS OOMError on thread_.
void Heap::ThrowOutOfMemoryError(size_t size, std::string functionName)
{
    GetEcmaVM()->GetEcmaGCStats()->PrintHeapStatisticResult(true);
    std::ostringstream oss;
    oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
    LOG_ECMA_MEM(ERROR) << oss.str().c_str();
    THROW_OOM_ERROR(thread_, oss.str().c_str());
}
449 
// Unrecoverable OOM path: dumps heap statistics and logs at FATAL severity
// (which presumably terminates the process — confirm logger semantics),
// unlike ThrowOutOfMemoryError which raises a catchable JS error.
void Heap::FatalOutOfMemoryError(size_t size, std::string functionName)
{
    GetEcmaVM()->GetEcmaGCStats()->PrintHeapStatisticResult(true);
    LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
                        << " function name: " << functionName.c_str();
}
456 
AdjustBySurvivalRate(size_t originalNewSpaceSize)457 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
458 {
459     if (originalNewSpaceSize <= 0) {
460         return;
461     }
462     semiSpaceCopiedSize_ = activeSemiSpace_->GetHeapObjectSize();
463     double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
464     promotedSize_ = GetEvacuator()->GetPromotedSize();
465     double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
466     double survivalRate = std::min(copiedRate + promotedRate, 1.0);
467     OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
468                                 << " survivalRate: " << survivalRate;
469     if (!oldSpaceLimitAdjusted_) {
470         memController_->AddSurvivalRate(survivalRate);
471         AdjustOldSpaceLimit();
472     } else {
473         double averageSurvivalRate = memController_->GetAverageSurvivalRate();
474         if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
475             fullMarkRequested_ = true;
476             OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
477                 << " is less than half the average survival rates: " << averageSurvivalRate
478                 << ". Trigger full mark next time.";
479             // Survival rate of full mark is precise. Reset recorded survival rates.
480             memController_->ResetRecordedSurvivalRates();
481         }
482         memController_->AddSurvivalRate(survivalRate);
483     }
484 }
485 
VerifyHeapObjects() const486 size_t Heap::VerifyHeapObjects() const
487 {
488     size_t failCount = 0;
489     {
490         VerifyObjectVisitor verifier(this, &failCount);
491         activeSemiSpace_->IterateOverObjects(verifier);
492     }
493 
494     {
495         VerifyObjectVisitor verifier(this, &failCount);
496         oldSpace_->IterateOverObjects(verifier);
497     }
498 
499     {
500         VerifyObjectVisitor verifier(this, &failCount);
501         appSpawnSpace_->IterateOverMarkedObjects(verifier);
502     }
503 
504     {
505         VerifyObjectVisitor verifier(this, &failCount);
506         nonMovableSpace_->IterateOverObjects(verifier);
507     }
508 
509     {
510         VerifyObjectVisitor verifier(this, &failCount);
511         hugeObjectSpace_->IterateOverObjects(verifier);
512     }
513     {
514         VerifyObjectVisitor verifier(this, &failCount);
515         machineCodeSpace_->IterateOverObjects(verifier);
516     }
517     {
518         VerifyObjectVisitor verifier(this, &failCount);
519         snapshotSpace_->IterateOverObjects(verifier);
520     }
521     return failCount;
522 }
523 
// Verifies the old-to-new remembered sets of every space that can hold
// references into the young generation; returns the accumulated failure
// count collected by the shared VerifyObjectVisitor.
size_t Heap::VerifyOldToNewRSet() const
{
    size_t failCount = 0;
    VerifyObjectVisitor verifier(this, &failCount);
    oldSpace_->IterateOldToNewOverObjects(verifier);
    appSpawnSpace_->IterateOldToNewOverObjects(verifier);
    nonMovableSpace_->IterateOldToNewOverObjects(verifier);
    machineCodeSpace_->IterateOldToNewOverObjects(verifier);
    return failCount;
}
534 
// Shrinks the old-space and global allocation limits toward actual usage
// during the heap's warm-up phase. Once a computed old-space limit would
// exceed the current one, the limit is considered settled and
// oldSpaceLimitAdjusted_ latches so later GCs use RecomputeLimits() instead.
void Heap::AdjustOldSpaceLimit()
{
    if (oldSpaceLimitAdjusted_) {
        return;
    }
    size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
    size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
    // New limit: live size plus a minimum growth step, but no lower than the
    // survival-rate-scaled previous limit.
    size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
    if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
        GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
    } else {
        oldSpaceLimitAdjusted_ = true;
    }

    size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
    // NOTE(review): the global limit only ever shrinks here (strict '<',
    // versus '<=' for old space above) — confirm the asymmetry is intended.
    if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
        globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
    }
    // temporarily regard the heap limit is the same as the native limit.
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
        << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
}
560 
AddToKeptObjects(JSHandle<JSTaggedValue> value) const561 void Heap::AddToKeptObjects(JSHandle<JSTaggedValue> value) const
562 {
563     JSHandle<GlobalEnv> env = ecmaVm_->GetGlobalEnv();
564     JSHandle<LinkedHashSet> linkedSet;
565     if (env->GetWeakRefKeepObjects()->IsUndefined()) {
566         linkedSet = LinkedHashSet::Create(thread_);
567     } else {
568         linkedSet =
569             JSHandle<LinkedHashSet>(thread_, LinkedHashSet::Cast(env->GetWeakRefKeepObjects()->GetTaggedObject()));
570     }
571     linkedSet = LinkedHashSet::Add(thread_, linkedSet, value);
572     env->SetWeakRefKeepObjects(thread_, linkedSet);
573 }
574 
// Called for the app-spawn scenario: switches the heap into SPAWN mode,
// shrinks the new space to its configured minimum, freezes the app-spawn
// space at its current committed size, and deducts that size from the
// old-space capacity budget.
void Heap::AdjustSpaceSizeForAppSpawn()
{
    SetHeapMode(HeapMode::SPAWN);
    auto &config = ecmaVm_->GetEcmaParamConfiguration();
    size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
    activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
    auto committedSize = appSpawnSpace_->GetCommittedSize();
    appSpawnSpace_->SetInitialCapacity(committedSize);
    appSpawnSpace_->SetMaximumCapacity(committedSize);
    oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
    oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
}
587 
ClearKeptObjects() const588 void Heap::ClearKeptObjects() const
589 {
590     ecmaVm_->GetGlobalEnv()->SetWeakRefKeepObjects(thread_, JSTaggedValue::Undefined());
591 }
592 
// Recomputes the old-space, global and native allocation limits after a
// full-mark GC, based on measured GC and mutator throughput (growing
// factor). Also requests a full mark when old space's committed size is far
// above its live size, suggesting compaction would pay off.
void Heap::RecomputeLimits()
{
    double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
    double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
    size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
    size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();

    double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
    size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
    size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
        maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
    size_t maxGlobalSize = ecmaVm_->GetEcmaParamConfiguration().GetMaxHeapSize() - newSpaceCapacity;
    size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
                                                                   maxGlobalSize, newSpaceCapacity, growingFactor);
    globalSpaceAllocLimit_ = newGlobalSpaceLimit;
    oldSpace_->SetInitialCapacity(newOldSpaceLimit);
    // Native limit tracks JS-object-bound native memory plus direct native
    // allocations, sized with the same policy as the object limits.
    size_t globalSpaceNativeSize = activeSemiSpace_->GetNativeBindingSize() + nonNewSpaceNativeBindingSize_
                                   + nativeAreaAllocator_->GetNativeMemoryUsage();
    globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(globalSpaceNativeSize, MIN_HEAP_SIZE,
                                                                  maxGlobalSize, newSpaceCapacity, growingFactor);
    OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
        << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
        << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
    // Live size far below committed size (by SHRINK_OBJECT_SURVIVAL_RATE)
    // and committed size well above the new limit: schedule a full mark.
    if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize()
         && (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) {
        OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
                                    << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
                                    << " Committed Size: " << oldSpace_->GetCommittedSize();
        SetFullMarkRequestedState(true);
    }
}
624 
CheckAndTriggerOldGC(size_t size)625 void Heap::CheckAndTriggerOldGC(size_t size)
626 {
627     if (OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) || GetHeapObjectSize() > globalSpaceAllocLimit_) {
628         CollectGarbage(TriggerGCType::OLD_GC);
629     }
630 }
631 
// Returns true if a concurrent marking cycle was in progress and has now
// been waited for. When the mutator is mid-mark, the main thread helps by
// draining its mark stack before blocking on the remaining mark tasks;
// otherwise it just waits for running tasks to finish.
bool Heap::CheckOngoingConcurrentMarking()
{
    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark()) {
        if (thread_->IsMarking()) {
            [[maybe_unused]] ClockScope clockScope;
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
            MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), WaitConcurrentMarkingFinished);
            // Main thread contributes to marking instead of idling.
            GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
            WaitConcurrentMarkingFinished();
            ecmaVm_->GetEcmaGCStats()->StatisticConcurrentMarkWait(clockScope.GetPauseTime());
            LOG_GC(DEBUG) << "wait concurrent marking finish pause time " << clockScope.TotalSpentTime();
        } else {
            WaitRunningTaskFinished();
        }
        memController_->RecordAfterConcurrentMark(IsFullMark(), concurrentMarker_);
        return true;
    }
    return false;
}
651 
TryTriggerConcurrentMarking()652 void Heap::TryTriggerConcurrentMarking()
653 {
654     // When concurrent marking is enabled, concurrent marking will be attempted to trigger.
655     // When the size of old space or global space reaches the limit, isFullMarkNeeded will be set to true.
656     // If the predicted duration of current full mark may not result in the new and old spaces reaching their limit,
657     // full mark will be triggered.
658     // In the same way, if the size of the new space reaches the capacity, and the predicted duration of current
659     // young mark may not result in the new space reaching its limit, young mark can be triggered.
660     // If it spends much time in full mark, the compress full GC will be requested when the spaces reach the limit.
661     // If the global space is larger than half max heap size, we will turn to use full mark and trigger partial GC.
662     if (!concurrentMarker_->IsEnabled() || !thread_->IsReadyToMark()) {
663         return;
664     }
665     if (fullMarkRequested_) {
666         markType_ = MarkType::MARK_FULL;
667         OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
668         TriggerConcurrentMarking();
669         return;
670     }
671     bool isFullMarkNeeded = false;
672     double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
673            oldSpaceAllocToLimitDuration = 0;
674     double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
675     double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
676     size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
677     size_t globalHeapObjectSize = GetHeapObjectSize();
678     size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
679     size_t globalSpaceNativeSize = activeSemiSpace_->GetNativeBindingSize() + nonNewSpaceNativeBindingSize_
680                                    + nativeAreaAllocator_->GetNativeMemoryUsage();
681     if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
682         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit ||  globalHeapObjectSize >= globalSpaceAllocLimit_
683             || globalSpaceNativeSize >= globalSpaceNativeLimit_) {
684             markType_ = MarkType::MARK_FULL;
685             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
686             TriggerConcurrentMarking();
687             return;
688         }
689     } else {
690         if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_
691             || globalSpaceNativeSize >= globalSpaceNativeLimit_) {
692             isFullMarkNeeded = true;
693         }
694         oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
695         oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
696         // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
697         double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
698         if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
699             isFullMarkNeeded = true;
700         }
701     }
702 
703     double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
704     double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
705 
706     if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
707         auto &config = ecmaVm_->GetEcmaParamConfiguration();
708         if (activeSemiSpace_->GetCommittedSize() >= config.GetSemiSpaceTriggerConcurrentMark()) {
709             markType_ = MarkType::MARK_YOUNG;
710             TriggerConcurrentMarking();
711             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark" << fullGCRequested_;
712         }
713         return;
714     }
715     newSpaceAllocToLimitDuration = (activeSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetCommittedSize())
716         / newSpaceAllocSpeed;
717     newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
718     // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
719     newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
720 
721     if (isFullMarkNeeded) {
722         if (oldSpaceMarkDuration < newSpaceAllocToLimitDuration
723             && oldSpaceMarkDuration < oldSpaceAllocToLimitDuration) {
724             markType_ = MarkType::MARK_FULL;
725             TriggerConcurrentMarking();
726             OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark by speed";
727         } else {
728             if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_
729                 || globalSpaceNativeSize >= globalSpaceNativeLimit_) {
730                 markType_ = MarkType::MARK_FULL;
731                 TriggerConcurrentMarking();
732                 OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark by limit";
733             }
734         }
735     } else if (newSpaceRemainSize < DEFAULT_REGION_SIZE || activeSemiSpace_->NativeBindingSizeLargerThanLimit()) {
736         markType_ = MarkType::MARK_YOUNG;
737         TriggerConcurrentMarking();
738         OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
739     }
740 }
741 
IncreaseNativeBindingSize(JSNativePointer * object)742 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
743 {
744     size_t size = object->GetBindingSize();
745     if (size == 0) {
746         return;
747     }
748     Region *region = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(object));
749     if (region->InYoungSpace()) {
750         activeSemiSpace_->IncreaseNativeBindingSize(size);
751     } else {
752         nonNewSpaceNativeBindingSize_ += size;
753     }
754 }
755 
IncreaseNativeBindingSize(bool nonMovable,size_t size)756 void Heap::IncreaseNativeBindingSize(bool nonMovable, size_t size)
757 {
758     if (size == 0) {
759         return;
760     }
761     if (!nonMovable) {
762         activeSemiSpace_->IncreaseNativeBindingSize(size);
763     } else {
764         nonNewSpaceNativeBindingSize_ += size;
765     }
766 }
767 
// Calls SetRecordRegion() on every space that participates in region
// reclamation (semi, old, snapshot, non-movable, huge-object, machine-code).
// NOTE(review): the exact SetRecordRegion semantics live in the Space
// implementations — presumably it snapshots the current region so later
// reclamation can distinguish it; confirm there before relying on details.
void Heap::PrepareRecordRegionsForReclaim()
{
    activeSemiSpace_->SetRecordRegion();
    oldSpace_->SetRecordRegion();
    snapshotSpace_->SetRecordRegion();
    nonMovableSpace_->SetRecordRegion();
    hugeObjectSpace_->SetRecordRegion();
    machineCodeSpace_->SetRecordRegion();
}
777 
TriggerConcurrentMarking()778 void Heap::TriggerConcurrentMarking()
779 {
780     if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
781         concurrentMarker_->Mark();
782     }
783 }
784 
// Blocks the caller until every posted parallel GC task has completed, i.e.
// until runningTaskCount_ (incremented by IncreaseTaskCount, decremented by
// ReduceTaskCount) drops to zero. The loop guards against spurious wakeups.
void Heap::WaitRunningTaskFinished()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    while (runningTaskCount_ > 0) {
        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
    }
}
792 
// Blocks the caller until the asynchronous region-clearing task has signalled
// completion via clearTaskFinished_. The loop guards against spurious wakeups.
void Heap::WaitClearTaskFinished()
{
    os::memory::LockHolder holder(waitClearTaskFinishedMutex_);
    while (!clearTaskFinished_) {
        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
    }
}
800 
// Drains every kind of background GC work in sequence: parallel GC tasks,
// concurrent sweeping, the async clear task, and finally — only when the
// mutator thread is still marking — the concurrent marker itself.
void Heap::WaitAllTasksFinished()
{
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
    if (concurrentMarker_->IsEnabled() && thread_->IsMarking()) {
        concurrentMarker_->WaitMarkingFinished();
    }
}
810 
// Blocks until the concurrent marker reports that marking has finished.
void Heap::WaitConcurrentMarkingFinished()
{
    concurrentMarker_->WaitMarkingFinished();
}
815 
PostParallelGCTask(ParallelGCTaskPhase gcTask)816 void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
817 {
818     IncreaseTaskCount();
819     Taskpool::GetCurrentTaskpool()->PostTask(
820         std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
821 }
822 
// Atomically (under waitTaskFinishedMutex_) records one more in-flight
// parallel GC task; paired with ReduceTaskCount.
void Heap::IncreaseTaskCount()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_++;
}
828 
ChangeGCParams(bool inBackground)829 void Heap::ChangeGCParams(bool inBackground)
830 {
831     if (inBackground) {
832         LOG_GC(INFO) << "app is inBackground";
833         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
834             SetMemGrowingType(MemGrowingType::CONSERVATIVE);
835             LOG_GC(INFO) << "Heap Growing Type CONSERVATIVE";
836         }
837         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
838         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::DISABLE);
839         maxMarkTaskCount_ = 1;
840         maxEvacuateTaskCount_ = 1;
841     } else {
842         LOG_GC(INFO) << "app is not inBackground";
843         if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
844             SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
845             LOG_GC(INFO) << "Heap Growing Type HIGH_THROUGHPUT";
846         }
847         concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
848         sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
849         maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
850             Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
851         maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
852     }
853 }
854 
// Opportunistically performs GC work while the host reports the VM as idle.
// |idleMicroSec| is the length of the current idle slot as estimated by the
// caller (units per the constant names — presumably microseconds; confirm at
// the call site).
void Heap::TriggerIdleCollection([[maybe_unused]] int idleMicroSec)
{
    if (!enableIdleGC_) {
        return;
    }
    int64_t curTime = 0;
    if (waitForStartUp_) {
        // During app start-up no idle GC runs; only track elapsed time and
        // leave start-up mode once WAIT_FOR_APP_START_UP has passed since the
        // first idle notification.
        curTime = static_cast<int64_t>(JSDate::Now().GetDouble());
        if (idleTime_ == 0) {
            idleTime_ = curTime;
        }
        if (curTime - idleTime_ > WAIT_FOR_APP_START_UP) {
            waitForStartUp_ = false;
        }
        return;
    }

    // Cheapest useful work first: if a concurrent mark already finished,
    // complete it (remark) within even a short idle slot.
    if (idleMicroSec >= IDLE_TIME_REMARK && thread_->IsMarkFinished()) {
        concurrentMarker_->HandleMarkingFinished();
        return;
    }

    if (idleMicroSec >= IDLE_TIME_LIMIT) {
        curTime = static_cast<int64_t>(JSDate::Now().GetDouble());
        size_t oldCommitSize = oldSpace_->GetCommittedSize();
        // "Rest" detection: sampled at most every MIN_OLD_GC_LIMIT. If the
        // recent heap-size samples look quiescent (CheckIsRest) and the heap
        // has grown beyond triggerRestIdleSize_, do a compressing FULL_GC and
        // raise the threshold by REST_HEAP_GROWTH_LIMIT; otherwise arm
        // couldIdleGC_ for the old-space check below.
        if (curTime - idleTime_ > MIN_OLD_GC_LIMIT) {
            size_t heapObjectSize = GetHeapObjectSize();
            idleData_->SetNextValue(heapObjectSize);
            idleTime_ = curTime;
            if (idleData_->CheckIsRest() && heapObjectSize > triggerRestIdleSize_) {
                CollectGarbage(TriggerGCType::FULL_GC);
                couldIdleGC_ = false;
                triggerRestIdleSize_ = GetHeapObjectSize() + REST_HEAP_GROWTH_LIMIT;
                return;
            }
            couldIdleGC_ = true;
            idleHeapObjectSize_ = GetHeapObjectSize();
        }

        // Sparse spaces (old + non-movable) exceed the idle budget: run an
        // OLD_GC and reset the budget to the old space's current capacity.
        if (couldIdleGC_ && oldCommitSize + nonMovableSpace_->GetCommittedSize() > idleOldSpace_) {
            CollectGarbage(TriggerGCType::OLD_GC);
            idleTime_ = curTime;
            couldIdleGC_ = false;
            idleOldSpace_ = oldSpace_->GetInitialCapacity();
            return;
        }

        // Finally, shrink a well-filled young generation with a YOUNG_GC.
        if (activeSemiSpace_->GetHeapObjectSize() > IDLE_GC_YOUNG_SPACE) {
            CollectGarbage(TriggerGCType::YOUNG_GC);
            return;
        }
    }
}
910 
NotifyMemoryPressure(bool inHighMemoryPressure)911 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
912 {
913     if (inHighMemoryPressure) {
914         LOG_GC(INFO) << "app is inHighMemoryPressure";
915         SetMemGrowingType(MemGrowingType::PRESSURE);
916     } else {
917         LOG_GC(INFO) << "app is not inHighMemoryPressure";
918         SetMemGrowingType(MemGrowingType::CONSERVATIVE);
919     }
920 }
921 
// Returns true while another parallel mark task may be posted, i.e. the
// in-flight count is still below maxMarkTaskCount_ (which ChangeGCParams
// lowers to 1 in background mode).
bool Heap::CheckCanDistributeTask()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    return runningTaskCount_ < maxMarkTaskCount_;
}
927 
// Records completion of one parallel GC task; when the last task finishes,
// wakes every thread blocked in WaitRunningTaskFinished.
void Heap::ReduceTaskCount()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_--;
    if (runningTaskCount_ == 0) {
        waitTaskFinishedCV_.SignalAll();
    }
}
936 
Run(uint32_t threadIndex)937 bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
938 {
939     switch (taskPhase_) {
940         case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
941             heap_->GetSemiGCMarker()->MarkRoots(threadIndex);
942             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
943             break;
944         case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
945             heap_->GetSemiGCMarker()->ProcessSnapshotRSet(threadIndex);
946             break;
947         case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
948             heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
949             break;
950         case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
951             heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
952             break;
953         case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
954             heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
955             break;
956         case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
957             heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
958             break;
959         case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
960             heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
961             break;
962         default:
963             break;
964     }
965     heap_->ReduceTaskCount();
966     return true;
967 }
968 
// Task-pool entry point for the asynchronous post-GC cleanup: reclaims the
// regions recorded for |gcType_| off the mutator thread.
bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    heap_->ReclaimRegions(gcType_);
    return true;
}
974 
GetArrayBufferSize() const975 size_t Heap::GetArrayBufferSize() const
976 {
977     size_t result = 0;
978     sweeper_->EnsureAllTaskFinished();
979     this->IterateOverObjects([&result](TaggedObject *obj) {
980         JSHClass* jsClass = obj->GetClass();
981         result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
982     });
983     return result;
984 }
985 
IsAlive(TaggedObject * object) const986 bool Heap::IsAlive(TaggedObject *object) const
987 {
988     if (!ContainObject(object)) {
989         LOG_GC(ERROR) << "The region is already free";
990         return false;
991     }
992 
993     bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
994     if (isFree) {
995         Region *region = Region::ObjectAddressToRange(object);
996         LOG_GC(ERROR) << "The object " << object << " in "
997                             << region->GetSpaceTypeName()
998                             << " already free";
999     }
1000     return !isFree;
1001 }
1002 
ContainObject(TaggedObject * object) const1003 bool Heap::ContainObject(TaggedObject *object) const
1004 {
1005     /*
1006      * fixme: There's no absolutely safe appraoch to doing this, given that the region object is currently
1007      * allocated and maintained in the JS object heap. We cannot safely tell whether a region object
1008      * calculated from an object address is still valid or alive in a cheap way.
1009      * This will introduce inaccurate result to verify if an object is contained in the heap, and it may
1010      * introduce additional incorrect memory access issues.
1011      * Unless we can tolerate the performance impact of iterating the region list of each space and change
1012      * the implementation to that approach, don't rely on current implementation to get accurate result.
1013      */
1014     Region *region = Region::ObjectAddressToRange(object);
1015     return region->InHeapSpace();
1016 }
1017 
InvokeWeakNodeSecondPassCallback()1018 void Heap::InvokeWeakNodeSecondPassCallback()
1019 {
1020     // the second callback may lead to another GC, if this, return directly;
1021     if (runningSecondPassCallbacks_) {
1022         return;
1023     }
1024     runningSecondPassCallbacks_ = true;
1025     auto weakNodesSecondCallbacks = thread_->GetWeakNodeSecondPassCallbacks();
1026     while (!weakNodesSecondCallbacks->empty()) {
1027         auto callbackPair = weakNodesSecondCallbacks->back();
1028         weakNodesSecondCallbacks->pop_back();
1029         ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
1030         auto callback = callbackPair.first;
1031         (*callback)(callbackPair.second);
1032     }
1033     runningSecondPassCallbacks_ = false;
1034 }
1035 
// Logs a one-shot summary of every heap space before a GC: the pending
// |gcType|, whether concurrent/full mark is active, and per-space figures in
// the form objectSize/committedSize/initialCapacity (fields vary per space).
void Heap::PrintHeapInfo(TriggerGCType gcType) const
{
    OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
    OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
                                << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsFullMark() << ")";
    OPTIONAL_LOG(ecmaVm_, INFO) << "ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize()
                   << "/" << activeSemiSpace_->GetInitialCapacity() << "), NonMovable("
                   << nonMovableSpace_->GetHeapObjectSize() << "/" << nonMovableSpace_->GetCommittedSize()
                   << "/" << nonMovableSpace_->GetInitialCapacity() << "), Old("
                   << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize()
                   << "/" << oldSpace_->GetInitialCapacity() << "), HugeObject("
                   << hugeObjectSpace_->GetHeapObjectSize() << "/" << hugeObjectSpace_->GetCommittedSize()
                   << "/" << hugeObjectSpace_->GetInitialCapacity() << "), ReadOnlySpace("
                   << readOnlySpace_->GetCommittedSize() << "/" << readOnlySpace_->GetInitialCapacity()
                   << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize() << "/"
                   << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
                   << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
}
1054 
StatisticHeapObject(TriggerGCType gcType) const1055 void Heap::StatisticHeapObject(TriggerGCType gcType) const
1056 {
1057     PrintHeapInfo(gcType);
1058 #if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
1059     static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
1060     int typeCount[JS_TYPE_LAST] = { 0 };
1061     static const int MIN_COUNT_THRESHOLD = 1000;
1062 
1063     nonMovableSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1064         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1065     });
1066     for (int i = 0; i < JS_TYPE_LAST; i++) {
1067         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1068             LOG_ECMA(INFO) << "NonMovable space type " << JSHClass::DumpJSType(JSType(i))
1069                            << " count:" << typeCount[i];
1070         }
1071         typeCount[i] = 0;
1072     }
1073 
1074     oldSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1075         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1076     });
1077     for (int i = 0; i < JS_TYPE_LAST; i++) {
1078         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1079             LOG_ECMA(INFO) << "Old space type " << JSHClass::DumpJSType(JSType(i))
1080                            << " count:" << typeCount[i];
1081         }
1082         typeCount[i] = 0;
1083     }
1084 
1085     activeSemiSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1086         typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1087     });
1088     for (int i = 0; i < JS_TYPE_LAST; i++) {
1089         if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1090             LOG_ECMA(INFO) << "Active semi space type " << JSHClass::DumpJSType(JSType(i))
1091                            << " count:" << typeCount[i];
1092         }
1093         typeCount[i] = 0;
1094     }
1095 #endif
1096 }
1097 }  // namespace panda::ecmascript
1098