• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2021 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "ecmascript/free_object.h"
17 #include "ecmascript/mem/heap-inl.h"
18 
19 #include <sys/sysinfo.h>
20 
21 #include "ecmascript/cpu_profiler/cpu_profiler.h"
22 #include "ecmascript/ecma_vm.h"
23 #include "ecmascript/mem/assert_scope-inl.h"
24 #include "ecmascript/mem/concurrent_marker.h"
25 #include "ecmascript/mem/concurrent_sweeper.h"
26 #include "ecmascript/mem/full_gc.h"
27 #include "ecmascript/mem/mark_stack.h"
28 #include "ecmascript/mem/mem_controller.h"
29 #include "ecmascript/mem/mix_gc.h"
30 #include "ecmascript/mem/native_area_allocator.h"
31 #include "ecmascript/mem/parallel_evacuation.h"
32 #include "ecmascript/mem/parallel_marker-inl.h"
33 #include "ecmascript/mem/parallel_work_helper.h"
34 #include "ecmascript/mem/stw_young_gc_for_testing.h"
35 #include "ecmascript/mem/verification.h"
36 #include "ecmascript/runtime_call_id.h"
37 
38 namespace panda::ecmascript {
// Caches the owning VM, its JS thread and its allocators. All spaces and GC
// executors are created later in Initialize(), not here.
Heap::Heap(EcmaVM *ecmaVm) : ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()),
                             nativeAreaAllocator_(ecmaVm->GetNativeAreaAllocator()),
                             heapRegionAllocator_(ecmaVm->GetHeapRegionAllocator()) {}
42 
// Creates every heap space, the GC executors, markers and helpers, and reads the
// GC tuning flags from the VM options. Must run before any heap allocation.
void Heap::Initialize()
{
    memController_ = new MemController(this);

    // Young generation: a pair of semispaces. Allocation starts in toSpace_;
    // fromSpace_ stays unused until the first semispace GC flips them.
    size_t defaultSemiSpaceCapacity = ecmaVm_->GetJSOptions().DefaultSemiSpaceCapacity();
    toSpace_ = new SemiSpace(this, defaultSemiSpaceCapacity, defaultSemiSpaceCapacity);
    toSpace_->Restart();
    toSpace_->SetWaterLine();
    fromSpace_ = new SemiSpace(this, defaultSemiSpaceCapacity, defaultSemiSpaceCapacity);

    // not set up from space
    // Old generation: oldSpace_ is active; compressSpace_ is the spare that full
    // (compress) GC evacuates into (the two swap roles in Resume()).
    size_t maxOldSpaceCapacity = ecmaVm_->GetJSOptions().MaxOldSpaceCapacity();
    oldSpace_ = new OldSpace(this, OLD_SPACE_LIMIT_BEGIN, maxOldSpaceCapacity);
    compressSpace_ = new OldSpace(this, OLD_SPACE_LIMIT_BEGIN, maxOldSpaceCapacity);
    oldSpace_->Initialize();
    size_t maxNonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    nonMovableSpace_ = new NonMovableSpace(this, maxNonmovableSpaceCapacity, maxNonmovableSpaceCapacity);
    nonMovableSpace_->Initialize();
    size_t defaultSnapshotSpaceCapacity = ecmaVm_->GetJSOptions().DefaultSnapshotSpaceCapacity();
    size_t maxSnapshotSpaceCapacity = ecmaVm_->GetJSOptions().MaxSnapshotSpaceCapacity();
    snapshotSpace_ = new SnapShotSpace(this, defaultSnapshotSpaceCapacity, maxSnapshotSpaceCapacity);
    size_t maxMachineCodeSpaceCapacity = ecmaVm_->GetJSOptions().MaxMachineCodeSpaceCapacity();
    machineCodeSpace_ = new MachineCodeSpace(this, maxMachineCodeSpaceCapacity, maxMachineCodeSpaceCapacity);
    machineCodeSpace_->Initialize();
    hugeObjectSpace_ = new HugeObjectSpace(this);
    // GC mode flags from options; compile-time switches below may override them.
    paralledGc_ = ecmaVm_->GetJSOptions().IsEnableParallelGC();
    concurrentMarkingEnabled_ = ecmaVm_->GetJSOptions().IsEnableConcurrentMark();
    markType_ = MarkType::SEMI_MARK;
#if ECMASCRIPT_DISABLE_PARALLEL_GC
    paralledGc_ = false;
#endif
#if defined(IS_STANDARD_SYSTEM)
    concurrentMarkingEnabled_ = false;
#endif
    // One work-list slot per platform worker thread plus one for the main thread.
    workList_ = new WorkerHelper(this, Platform::GetCurrentPlatform()->GetTotalThreadNum() + 1);
    stwYoungGC_ = new STWYoungGC(this, paralledGc_);
    fullGC_ = new FullGC(this);

    derivedPointers_ = new ChunkMap<DerivedDataKey, uintptr_t>(ecmaVm_->GetChunk());
    mixGC_ = new MixGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().IsEnableConcurrentSweep());
    concurrentMarker_ = new ConcurrentMarker(this);
    nonMovableMarker_ = new NonMovableMarker(this);
    semiGcMarker_ = new SemiGcMarker(this);
    compressGcMarker_ = new CompressGcMarker(this);
    evacuation_ = new ParallelEvacuation(this);
}
90 
Destroy()91 void Heap::Destroy()
92 {
93     Prepare();
94     if (toSpace_ != nullptr) {
95         toSpace_->Destroy();
96         delete toSpace_;
97         toSpace_ = nullptr;
98     }
99     if (fromSpace_ != nullptr) {
100         fromSpace_->Destroy();
101         delete fromSpace_;
102         fromSpace_ = nullptr;
103     }
104     if (oldSpace_ != nullptr) {
105         oldSpace_->Destroy();
106         delete oldSpace_;
107         oldSpace_ = nullptr;
108     }
109     if (compressSpace_ != nullptr) {
110         compressSpace_->Destroy();
111         delete compressSpace_;
112         compressSpace_ = nullptr;
113     }
114     if (nonMovableSpace_ != nullptr) {
115         nonMovableSpace_->Destroy();
116         delete nonMovableSpace_;
117         nonMovableSpace_ = nullptr;
118     }
119     if (snapshotSpace_ != nullptr) {
120         snapshotSpace_->Destroy();
121         delete snapshotSpace_;
122         snapshotSpace_ = nullptr;
123     }
124     if (machineCodeSpace_ != nullptr) {
125         machineCodeSpace_->Destroy();
126         delete machineCodeSpace_;
127         machineCodeSpace_ = nullptr;
128     }
129     if (hugeObjectSpace_ != nullptr) {
130         hugeObjectSpace_->Destroy();
131         delete hugeObjectSpace_;
132         hugeObjectSpace_ = nullptr;
133     }
134     if (workList_ != nullptr) {
135         delete workList_;
136         workList_ = nullptr;
137     }
138     if (stwYoungGC_ != nullptr) {
139         delete stwYoungGC_;
140         stwYoungGC_ = nullptr;
141     }
142     if (mixGC_ != nullptr) {
143         delete mixGC_;
144         mixGC_ = nullptr;
145     }
146     if (fullGC_ != nullptr) {
147         delete fullGC_;
148         fullGC_ = nullptr;
149     }
150 
151     nativeAreaAllocator_ = nullptr;
152     heapRegionAllocator_ = nullptr;
153 
154     if (memController_ != nullptr) {
155         delete memController_;
156         memController_ = nullptr;
157     }
158     if (sweeper_ != nullptr) {
159         delete sweeper_;
160         sweeper_ = nullptr;
161     }
162     if (derivedPointers_ != nullptr) {
163         delete derivedPointers_;
164         derivedPointers_ = nullptr;
165     }
166     if (concurrentMarker_ != nullptr) {
167         delete concurrentMarker_;
168         concurrentMarker_ = nullptr;
169     }
170     if (nonMovableMarker_ != nullptr) {
171         delete nonMovableMarker_;
172         nonMovableMarker_ = nullptr;
173     }
174     if (semiGcMarker_ != nullptr) {
175         delete semiGcMarker_;
176         semiGcMarker_ = nullptr;
177     }
178     if (compressGcMarker_ != nullptr) {
179         delete compressGcMarker_;
180         compressGcMarker_ = nullptr;
181     }
182 }
183 
// Blocks until all in-flight GC work has drained: parallel GC tasks posted via
// PostParallelGCTask, the concurrent sweeper, and the async region-clearing task.
void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}
191 
Resume(TriggerGCType gcType)192 void Heap::Resume(TriggerGCType gcType)
193 {
194     if (gcType == TriggerGCType::FULL_GC) {
195         compressSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity());
196         auto *oldSpace = compressSpace_;
197         compressSpace_ = oldSpace_;
198         oldSpace_ = oldSpace;
199     }
200     if (toSpace_->AdjustCapacity(fromSpace_->GetAllocatedSizeSinceGC())) {
201         fromSpace_->SetMaximumCapacity(toSpace_->GetMaximumCapacity());
202     }
203 
204     toSpace_->SetWaterLine();
205     if (paralledGc_) {
206         isClearTaskFinished_ = false;
207         Platform::GetCurrentPlatform()->PostTask(std::make_unique<AsyncClearTask>(this, gcType));
208     } else {
209         ReclaimRegions(gcType);
210     }
211 }
212 
SelectGCType() const213 TriggerGCType Heap::SelectGCType() const
214 {
215     // If concurrent mark is enable, The TryTriggerConcurrentMarking decide which GC to choose.
216     if (concurrentMarkingEnabled_) {
217         return SEMI_GC;
218     }
219     if (oldSpace_->CanExpand(toSpace_->GetSurvivalObjectSize()) && GetHeapObjectSize() <= globalSpaceAllocLimit_) {
220         return SEMI_GC;
221     } else {
222         return OLD_GC;
223     }
224 }
225 
// Runs one garbage collection of the requested type: optional pre-GC heap
// verification, type upgrade for pending full-GC requests, the collection itself,
// survival-rate bookkeeping, limit recomputation after full marks, and optional
// post-GC verification.
void Heap::CollectGarbage(TriggerGCType gcType)
{
    [[maybe_unused]] GcStateScope scope(thread_);
    CHECK_NO_GC
#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    isVerifying_ = true;
    // pre gc heap verify
    sweeper_->EnsureAllTaskFinished();
    auto failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG(FATAL, GC) << "Before gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif

#if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
    gcType = TriggerGCType::FULL_GC;
#endif
    // An externally requested full GC upgrades the collection type, but only while
    // the thread is not in the middle of a concurrent mark.
    if (isFullGCRequested_ && thread_->IsReadyToMark() && gcType != TriggerGCType::FULL_GC) {
        gcType = TriggerGCType::FULL_GC;
    }
    startNewSpaceSize_ = toSpace_->GetHeapObjectSize();
    memController_->StartCalculationBeforeGC();
    OPTIONAL_LOG(ecmaVm_, ERROR, ECMASCRIPT) << "Heap::CollectGarbage, gcType = " << gcType
                                             << " global CommittedSize" << GetCommittedSize()
                                             << " global limit" << globalSpaceAllocLimit_;
    switch (gcType) {
        case TriggerGCType::SEMI_GC:
            if (!concurrentMarkingEnabled_) {
                SetMarkType(MarkType::SEMI_MARK);
            }
            mixGC_->RunPhases();
            break;
        case TriggerGCType::OLD_GC:
            // A semi concurrent mark in flight cannot serve an old GC: finish it
            // and reset the marker before switching to a full mark.
            if (concurrentMarkingEnabled_ && markType_ == MarkType::SEMI_MARK) {
                bool concurrentMark = CheckConcurrentMark();
                if (concurrentMark) {
                    GetConcurrentMarker()->Reset();
                }
            }
            SetMarkType(MarkType::FULL_MARK);
            mixGC_->RunPhases();
            break;
        case TriggerGCType::FULL_GC:
            fullGC_->RunPhases();
            if (isFullGCRequested_) {
                isFullGCRequested_ = false;
            }
            break;
        default:
            UNREACHABLE();
            break;
    }

    // While the old-space limit is still warming up (see AdjustOldSpaceLimit),
    // feed the young-generation survival rate (copied + promoted) to the controller.
    if (!oldSpaceLimitAdjusted_ && startNewSpaceSize_ > 0) {
        semiSpaceCopiedSize_ = toSpace_->GetHeapObjectSize();
        double copiedRate = semiSpaceCopiedSize_ * 1.0 / startNewSpaceSize_;
        promotedSize_ = GetEvacuation()->GetPromotedSize();
        double promotedRate = promotedSize_ * 1.0 / startNewSpaceSize_;
        memController_->AddSurvivalRate(std::min(copiedRate + promotedRate, 1.0));
        AdjustOldSpaceLimit();
    }

    memController_->StopCalculationAfterGC(gcType);

    if (gcType == TriggerGCType::FULL_GC || IsFullMark()) {
        // Only when the gc type is not semiGC and after the old space sweeping has been finished,
        // the limits of old space and global space can be recomputed.
        RecomputeLimits();
        OPTIONAL_LOG(ecmaVm_, ERROR, ECMASCRIPT) << " GC after: is full mark" << IsFullMark()
                                                 << " global CommittedSize" << GetCommittedSize()
                                                 << " global limit" << globalSpaceAllocLimit_;
        markType_ = MarkType::SEMI_MARK;
    }

# if ECMASCRIPT_ENABLE_GC_LOG
    ecmaVm_->GetEcmaGCStats()->PrintStatisticResult();
#endif

#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    // post gc heap verify
    isVerifying_ = true;
    sweeper_->EnsureAllTaskFinished();
    failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG(FATAL, GC) << "After gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif
}
316 
// Reports an allocation failure: dumps heap statistics, then emits a FATAL log
// (which does not return). functionName names the allocation site for the message.
void Heap::ThrowOutOfMemoryError(size_t size, std::string functionName)
{
    GetEcmaVM()->GetEcmaGCStats()->PrintHeapStatisticResult(true);
    LOG_ECMA_MEM(FATAL) << "OOM when trying to allocate " << size << " bytes"
        << " function name: " << functionName.c_str();
}
323 
VerifyHeapObjects() const324 size_t Heap::VerifyHeapObjects() const
325 {
326     size_t failCount = 0;
327     {
328         VerifyObjectVisitor verifier(this, &failCount);
329         toSpace_->IterateOverObjects(verifier);
330     }
331 
332     {
333         VerifyObjectVisitor verifier(this, &failCount);
334         oldSpace_->IterateOverObjects(verifier);
335     }
336 
337     {
338         VerifyObjectVisitor verifier(this, &failCount);
339         nonMovableSpace_->IterateOverObjects(verifier);
340     }
341 
342     {
343         VerifyObjectVisitor verifier(this, &failCount);
344         hugeObjectSpace_->IterateOverObjects(verifier);
345     }
346     return failCount;
347 }
348 
// Warm-up tuning of the old-space and global allocation limits after young GCs.
// The limits may only tighten here; once the computed old-space limit would have
// to grow past the current capacity, oldSpaceLimitAdjusted_ latches true and
// RecomputeLimits() (after full marks) takes over.
void Heap::AdjustOldSpaceLimit()
{
    if (oldSpaceLimitAdjusted_) {
        return;
    }
    size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
    // Candidate: live old-space size plus a minimum growing step, damped by the
    // average survival rate tracked by the memory controller.
    size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + MIN_GROWING_STEP,
        static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
    if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
        GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
    } else {
        // Limit would need to grow: stop warm-up adjustments from now on.
        oldSpaceLimitAdjusted_ = true;
    }

    // The global limit is likewise only allowed to shrink in this phase.
    size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + MIN_GROWING_STEP,
        static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
    if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
        globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
    }
    OPTIONAL_LOG(ecmaVm_, ERROR, ECMASCRIPT) << "AdjustOldSpaceLimit oldSpaceAllocLimit_" << oldSpaceAllocLimit
        << " globalSpaceAllocLimit_" << globalSpaceAllocLimit_;
}
371 
RecomputeLimits()372 void Heap::RecomputeLimits()
373 {
374     double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
375     double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughtputPerMS();
376     size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
377     size_t newSpaceCapacity = toSpace_->GetCommittedSize();
378 
379     double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
380     size_t maxOldSpaceCapacity = GetEcmaVM()->GetJSOptions().MaxOldSpaceCapacity();
381     auto newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT, maxOldSpaceCapacity,
382                                                                 newSpaceCapacity, growingFactor);
383     auto newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), DEFAULT_HEAP_SIZE,
384                                                                    MAX_HEAP_SIZE, newSpaceCapacity, growingFactor);
385     globalSpaceAllocLimit_ = newGlobalSpaceLimit;
386     oldSpace_->SetInitialCapacity(newOldSpaceLimit);
387     OPTIONAL_LOG(ecmaVm_, ERROR, ECMASCRIPT) << "RecomputeLimits oldSpaceAllocLimit_" << newOldSpaceLimit
388         << " globalSpaceAllocLimit_" << globalSpaceAllocLimit_;
389 }
390 
CheckAndTriggerOldGC()391 void Heap::CheckAndTriggerOldGC()
392 {
393     if (GetHeapObjectSize() > globalSpaceAllocLimit_) {
394         CollectGarbage(TriggerGCType::OLD_GC);
395     }
396 }
397 
// If a concurrent mark is in flight (the thread is not "ready to mark"), helps
// finish it on this thread and waits for the workers, then records the result in
// the memory controller. Returns true when a concurrent mark was completed here.
bool Heap::CheckConcurrentMark()
{
    if (concurrentMarkingEnabled_ && !thread_->IsReadyToMark()) {
        if (thread_->IsMarking()) {
            [[maybe_unused]] ClockScope clockScope;
            ECMA_BYTRACE_NAME(BYTRACE_TAG_ARK, "Heap::CheckConcurrentMark");
            MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), WaitConcurrentMarkingFinished);
            // Help drain the mark stack before blocking on the concurrent workers.
            GetNonMovableMarker()->ProcessMarkStack(0);
            WaitConcurrentMarkingFinished();
            ecmaVm_->GetEcmaGCStats()->StatisticConcurrentMarkWait(clockScope.GetPauseTime());
            ECMA_GC_LOG() << "wait concurrent marking finish pause time " << clockScope.TotalSpentTime();
        }
        memController_->RecordAfterConcurrentMark(IsFullMark(), concurrentMarker_);
        return true;
    }
    return false;
}
415 
// Heuristic trigger for concurrent marking.
// When concurrent mark is enabled, a concurrent mark may be triggered here. When the size of old space or
// global space reaches the limits, isFullMarkNeeded becomes true. If the predicted duration of the current full
// mark can allow the new space and old space to allocate to their limits, a full mark is triggered. In the same
// way, if the size of the new space reaches its capacity, and the predicted duration of the current semi mark
// can exactly allow the new space to allocate to the capacity, a semi mark can be triggered. But when it would
// spend a lot of time in full mark, the compress full GC will be requested after the spaces reach their limits.
// And if the global space is larger than half the max heap size, we will turn to use full mark and trigger mix GC.
void Heap::TryTriggerConcurrentMarking()
{
    if (!concurrentMarkingEnabled_ || !thread_->IsReadyToMark()) {
        return;
    }
    bool isFullMarkNeeded = false;
    double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
           oldSpaceAllocToLimitDuration = 0;
    double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughtPerMS();
    double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
    size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
    size_t globalHeapObjectSize = GetHeapObjectSize();
    size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
    // No speed data yet (first GC cycle): fall back to plain limit checks.
    if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit ||  globalHeapObjectSize >= globalSpaceAllocLimit_) {
            markType_ = MarkType::FULL_MARK;
            OPTIONAL_LOG(ecmaVm_, ERROR, ECMASCRIPT) << "Trigger the first full mark";
            TriggerConcurrentMarking();
            return;
        }
    } else {
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_) {
            isFullMarkNeeded = true;
        }
        oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
        oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
        // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
        double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
        if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
            isFullMarkNeeded = true;
        }
    }

    double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughtPerMS();
    double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();

    // No young-generation speed data yet: trigger on committed size alone.
    if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
        if (toSpace_->GetCommittedSize() >= SEMI_SPACE_TRIGGER_CONCURRENT_MARK) {
            markType_ = MarkType::SEMI_MARK;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, ERROR, ECMASCRIPT) << "Trigger the first semi mark" << isFullGCRequested_;
        }
        return;
    }
    newSpaceAllocToLimitDuration = (toSpace_->GetMaximumCapacity() - toSpace_->GetCommittedSize())
                                    / newSpaceAllocSpeed;
    newSpaceMarkDuration = toSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
    // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
    newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;

    if (isFullMarkNeeded) {
        // Full mark only pays off if it can finish before either space hits its limit;
        // otherwise wait until a limit is actually reached.
        if (oldSpaceMarkDuration < newSpaceAllocToLimitDuration
            && oldSpaceMarkDuration < oldSpaceAllocToLimitDuration) {
            markType_ = MarkType::FULL_MARK;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, ERROR, ECMASCRIPT) << "Trigger full mark by speed";
        } else {
            if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_) {
                markType_ = MarkType::FULL_MARK;
                TriggerConcurrentMarking();
                OPTIONAL_LOG(ecmaVm_, ERROR, ECMASCRIPT) << "Trigger full mark by limit";
            }
        }
    } else if (newSpaceRemainSize < DEFAULT_REGION_SIZE) {
        markType_ = MarkType::SEMI_MARK;
        TriggerConcurrentMarking();
        OPTIONAL_LOG(ecmaVm_, ERROR, ECMASCRIPT) << "Trigger semi mark";
    }
}
492 
TriggerConcurrentMarking()493 void Heap::TriggerConcurrentMarking()
494 {
495     if (concurrentMarkingEnabled_ && !isFullGCRequested_) {
496         concurrentMarker_->ConcurrentMarking();
497     }
498 }
499 
// After objects have moved, fixes up recorded interior ("derived") pointers on the
// stack. Each entry maps (base slot address, derived slot address) -> the base
// object's pre-GC address; the derived slot is rewritten so it keeps the same
// offset from the (possibly relocated) base object. The map is consumed.
void Heap::UpdateDerivedObjectInStack()
{
    if (derivedPointers_->empty()) {
        return;
    }
    for (auto derived : *derivedPointers_) {
        auto baseAddr = reinterpret_cast<JSTaggedValue *>(derived.first.first);
        JSTaggedValue base = *baseAddr;
        if (base.IsHeapObject()) {
            // derived.second is the base object's address recorded before the move.
            uintptr_t baseOldObject = derived.second;
            uintptr_t *derivedAddr = reinterpret_cast<uintptr_t *>(derived.first.second);
#ifndef NDEBUG
            LOG_ECMA(DEBUG) << std::hex << "fix base before:" << baseAddr << " base old Value: " << baseOldObject <<
                " derived:" << derivedAddr << " old Value: " << *derivedAddr << std::endl;
#endif
            // derived is always bigger than base
            *derivedAddr = reinterpret_cast<uintptr_t>(base.GetHeapObject()) + (*derivedAddr - baseOldObject);
#ifndef NDEBUG
            LOG_ECMA(DEBUG) << std::hex << "fix base after:" << baseAddr <<
                " base New Value: " << base.GetHeapObject() <<
                " derived:" << derivedAddr << " New Value: " << *derivedAddr << std::endl;
#endif
        }
    }
    derivedPointers_->clear();
}
526 
// Blocks until every posted parallel GC task has completed (each task calls
// ReduceTaskCount(), which signals this condition variable at zero).
void Heap::WaitRunningTaskFinished()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    while (runningTastCount_ > 0) {
        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
    }
}
534 
// Blocks until the asynchronous region-clearing task (posted in Resume()) has
// finished; isClearTaskFinished_ is cleared before posting and set by the task.
void Heap::WaitClearTaskFinished()
{
    os::memory::LockHolder holder(waitClearTaskFinishedMutex_);
    while (!isClearTaskFinished_) {
        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
    }
}
542 
// Delegates to the concurrent marker; returns once background marking is done.
void Heap::WaitConcurrentMarkingFinished()
{
    concurrentMarker_->WaitConcurrentMarkingFinished();
}
547 
// Accounts for one more running parallel GC task, then hands the task for the
// given phase to the platform thread pool.
void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
{
    IncreaseTaskCount();
    Platform::GetCurrentPlatform()->PostTask(std::make_unique<ParallelGCTask>(this, gcTask));
}
553 
// Increments the running parallel-GC-task counter under the task-wait mutex.
void Heap::IncreaseTaskCount()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    runningTastCount_++;
}
559 
// True while another parallel GC task can be posted: the running-task count is
// kept below the worker total minus one (presumably reserving one slot — confirm
// against the platform's thread-pool semantics).
bool Heap::CheckCanDistributeTask()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    return (runningTastCount_ < Platform::GetCurrentPlatform()->GetTotalThreadNum() - 1);
}
565 
ReduceTaskCount()566 void Heap::ReduceTaskCount()
567 {
568     os::memory::LockHolder holder(waitTaskFinishedMutex_);
569     runningTastCount_--;
570     if (runningTastCount_ == 0) {
571         waitTaskFinishedCV_.SignalAll();
572     }
573 }
574 
// Executes one parallel GC work item on a worker thread, dispatching to the
// marker that owns the given phase. Always decrements the running-task counter
// before returning so Prepare()/WaitRunningTaskFinished() can make progress.
bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
{
    switch (taskPhase_) {
        case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
            heap_->GetSemiGcMarker()->MarkRoots(threadIndex);
            heap_->GetSemiGcMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
            heap_->GetSemiGcMarker()->ProcessSnapshotRSet(threadIndex);
            break;
        case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetSemiGcMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetCompressGcMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
            heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
            break;
        default:
            // Unknown phases are ignored (the task still counts as completed).
            break;
    }
    heap_->ReduceTaskCount();
    return true;
}
606 
Run(uint32_t threadIndex)607 bool Heap::AsyncClearTask::Run(uint32_t threadIndex)
608 {
609     heap_->ReclaimRegions(gcType_);
610     return true;
611 }
612 
GetArrayBufferSize() const613 size_t Heap::GetArrayBufferSize() const
614 {
615     size_t result = 0;
616     this->IteratorOverObjects([&result](TaggedObject *obj) {
617         JSHClass* jsClass = obj->GetClass();
618         result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
619     });
620     return result;
621 }
622 
// Returns true when the object lies within one of the heap's spaces and is not a
// free-list cell. Huge-object regions are treated as live on containment alone.
// Logs (without aborting) when the object is freed or out of range.
bool Heap::IsLive(TaggedObject *object) const
{
    if (!ContainObject(object)) {
        LOG(ERROR, RUNTIME) << "The region is already free";
        return false;
    }

    Region *region = Region::ObjectAddressToRange(object);
    if (region->InHugeObjectGeneration()) {
        return true;
    }
    // In regular spaces a dead slot is reused as a FreeObject; check for that.
    bool isFree = FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
    if (isFree) {
        LOG(ERROR, RUNTIME) << "The object " << object << " in "
                            << ToSpaceTypeName(region->GetSpace()->GetSpaceType())
                            << " already free";
    }
    return !isFree;
}
642 
ContainObject(TaggedObject * object) const643 bool Heap::ContainObject(TaggedObject *object) const
644 {
645     // semi space
646     if (toSpace_->ContainObject(object)) {
647         return true;
648     }
649     // old space
650     if (oldSpace_->ContainObject(object)) {
651         return true;
652     }
653     // non movable space
654     if (nonMovableSpace_->ContainObject(object)) {
655         return true;
656     }
657     // huge object space
658     if (hugeObjectSpace_->ContainObject(object)) {
659         return true;
660     }
661     return false;
662 }
663 }  // namespace panda::ecmascript
664