/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16 #include "ecmascript/mem/heap-inl.h"
17
18 #include "ecmascript/ecma_vm.h"
19 #include "ecmascript/free_object.h"
20 #include "ecmascript/js_finalization_registry.h"
21 #include "ecmascript/js_native_pointer.h"
22 #include "ecmascript/linked_hash_table.h"
23 #include "ecmascript/mem/assert_scope.h"
24 #include "ecmascript/mem/concurrent_marker.h"
25 #include "ecmascript/mem/concurrent_sweeper.h"
26 #include "ecmascript/mem/full_gc.h"
27 #include "ecmascript/mem/incremental_marker.h"
28 #include "ecmascript/mem/mark_stack.h"
29 #include "ecmascript/mem/mem_controller.h"
30 #include "ecmascript/mem/partial_gc.h"
31 #include "ecmascript/mem/native_area_allocator.h"
32 #include "ecmascript/mem/parallel_evacuator.h"
33 #include "ecmascript/mem/parallel_marker-inl.h"
34 #include "ecmascript/mem/stw_young_gc.h"
35 #include "ecmascript/mem/verification.h"
36 #include "ecmascript/mem/work_manager.h"
37 #include "ecmascript/mem/gc_stats.h"
38 #include "ecmascript/ecma_string_table.h"
39 #include "ecmascript/runtime_call_id.h"
40 #if !WIN_OR_MAC_OR_IOS_PLATFORM
41 #include "ecmascript/dfx/hprof/heap_profiler_interface.h"
42 #include "ecmascript/dfx/hprof/heap_profiler.h"
43 #endif
44 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
45 #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
46 #endif
47
48 namespace panda::ecmascript {
// One Heap per EcmaVM. Caches the VM's JS thread and the VM-owned allocators
// used for region/native-area management; the spaces and GC components are
// created later in Initialize().
Heap::Heap(EcmaVM *ecmaVm) : ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()),
                             nativeAreaAllocator_(ecmaVm->GetNativeAreaAllocator()),
                             heapRegionAllocator_(ecmaVm->GetHeapRegionAllocator()) {}
52
// Create and size every heap space from the VM's parameter configuration, wire
// the new-space bump-pointer addresses into the JSThread for inline allocation,
// and construct all GC executors (STW young GC, partial GC, full GC, sweeper,
// markers, evacuator). Must run once before the heap is used; aborts (FATAL)
// if the configured max heap cannot fit the fixed-size spaces.
void Heap::Initialize()
{
    memController_ = new MemController(this);
    auto &config = ecmaVm_->GetEcmaParamConfiguration();
    size_t maxHeapSize = config.GetMaxHeapSize();
    size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
    size_t maxSemiSpaceCapacity = config.GetMaxSemiSpaceSize();
    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    activeSemiSpace_->Restart();
    activeSemiSpace_->SetWaterLine();
    // Expose the active semispace's allocation top/end to the JSThread so the
    // interpreter/JIT can bump-allocate without calling into the heap.
    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    // not set up from space

    size_t readOnlySpaceCapacity = config.GetDefaultReadOnlySpaceSize();
    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
    size_t nonmovableSpaceCapacity = config.GetDefaultNonMovableSpaceSize();
    // Command-line option overrides the configured default, if it was set.
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
    nonMovableSpace_->Initialize();
    size_t snapshotSpaceCapacity = config.GetDefaultSnapshotSpaceSize();
    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
    size_t machineCodeSpaceCapacity = config.GetDefaultMachineCodeSpaceSize();
    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);

    // Old space gets whatever remains of maxHeapSize after the fixed-size
    // spaces are carved out (both semispaces count, hence * 2).
    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
                        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) {
        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
    }
    size_t oldSpaceCapacity = maxHeapSize - capacities;
    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    // compressSpace_ is the to-space twin of oldSpace_ used during compaction.
    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    oldSpace_->Initialize();

    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    // Evacuation may use every taskpool thread; marking keeps one thread free.
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);

    LOG_GC(DEBUG) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
                  << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
                  << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
                  << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
                  << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
                  << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
                  << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
                  << ", gcThreadNum = " << maxMarkTaskCount_;
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
    markType_ = MarkType::MARK_YOUNG;
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    // +1: one slot per taskpool worker plus the main (JS) thread.
    workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
    stwYoungGC_ = new STWYoungGC(this, parallelGC_);
    fullGC_ = new FullGC(this);

    partialGC_ = new PartialGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
        EnableConcurrentMarkType::CONFIG_DISABLE);
    nonMovableMarker_ = new NonMovableMarker(this);
    semiGCMarker_ = new SemiGCMarker(this);
    compressGCMarker_ = new CompressGCMarker(this);
    evacuator_ = new ParallelEvacuator(this);
    incrementalMarker_ = new IncrementalMarker(this);
}
129
// Tear down every space and GC component. Each member is null-checked before
// use and reset to nullptr afterwards, so Destroy() tolerates a partially
// initialized heap and repeated calls.
void Heap::Destroy()
{
    if (workManager_ != nullptr) {
        delete workManager_;
        workManager_ = nullptr;
    }
    if (activeSemiSpace_ != nullptr) {
        activeSemiSpace_->Destroy();
        delete activeSemiSpace_;
        activeSemiSpace_ = nullptr;
    }
    if (inactiveSemiSpace_ != nullptr) {
        inactiveSemiSpace_->Destroy();
        delete inactiveSemiSpace_;
        inactiveSemiSpace_ = nullptr;
    }
    // NOTE(review): oldSpace_/nonMovableSpace_/machineCodeSpace_/appSpawnSpace_
    // are Reset() before deletion while the other spaces are Destroy()ed —
    // confirm the asymmetry is intentional (semantics live in the space classes).
    if (oldSpace_ != nullptr) {
        oldSpace_->Reset();
        delete oldSpace_;
        oldSpace_ = nullptr;
    }
    if (compressSpace_ != nullptr) {
        compressSpace_->Destroy();
        delete compressSpace_;
        compressSpace_ = nullptr;
    }
    if (nonMovableSpace_ != nullptr) {
        nonMovableSpace_->Reset();
        delete nonMovableSpace_;
        nonMovableSpace_ = nullptr;
    }
    if (snapshotSpace_ != nullptr) {
        snapshotSpace_->Destroy();
        delete snapshotSpace_;
        snapshotSpace_ = nullptr;
    }
    if (machineCodeSpace_ != nullptr) {
        machineCodeSpace_->Reset();
        delete machineCodeSpace_;
        machineCodeSpace_ = nullptr;
    }
    if (hugeObjectSpace_ != nullptr) {
        hugeObjectSpace_->Destroy();
        delete hugeObjectSpace_;
        hugeObjectSpace_ = nullptr;
    }
    // In SHARE mode the read-only space is skipped here — presumably it is
    // owned/shared elsewhere; confirm against HeapMode users.
    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
        readOnlySpace_->ClearReadOnly();
        readOnlySpace_->Destroy();
        delete readOnlySpace_;
        readOnlySpace_ = nullptr;
    }
    if (appSpawnSpace_ != nullptr) {
        appSpawnSpace_->Reset();
        delete appSpawnSpace_;
        appSpawnSpace_ = nullptr;
    }
    if (stwYoungGC_ != nullptr) {
        delete stwYoungGC_;
        stwYoungGC_ = nullptr;
    }
    if (partialGC_ != nullptr) {
        delete partialGC_;
        partialGC_ = nullptr;
    }
    if (fullGC_ != nullptr) {
        delete fullGC_;
        fullGC_ = nullptr;
    }

    // These allocators are owned by the EcmaVM (handed in via the constructor);
    // only the references are dropped here, nothing is deleted.
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (memController_ != nullptr) {
        delete memController_;
        memController_ = nullptr;
    }
    if (sweeper_ != nullptr) {
        delete sweeper_;
        sweeper_ = nullptr;
    }
    if (concurrentMarker_ != nullptr) {
        delete concurrentMarker_;
        concurrentMarker_ = nullptr;
    }
    if (incrementalMarker_ != nullptr) {
        delete incrementalMarker_;
        incrementalMarker_ = nullptr;
    }
    if (nonMovableMarker_ != nullptr) {
        delete nonMovableMarker_;
        nonMovableMarker_ = nullptr;
    }
    if (semiGCMarker_ != nullptr) {
        delete semiGCMarker_;
        semiGCMarker_ = nullptr;
    }
    if (compressGCMarker_ != nullptr) {
        delete compressGCMarker_;
        compressGCMarker_ = nullptr;
    }
    if (evacuator_ != nullptr) {
        delete evacuator_;
        evacuator_ = nullptr;
    }
}
236
// Quiesce all background GC work before a collection begins: outstanding
// parallel mark/evacuate tasks, concurrent sweeping, and the async
// region-clear task posted by Resume(). Order matters — do not reorder.
void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}
244
// Resume mutator execution after a collection: re-balance the semispace
// capacities, reset the allocation water line, and reclaim evacuated regions
// (asynchronously on a taskpool thread when parallel GC is enabled).
void Heap::Resume(TriggerGCType gcType)
{
    if (mode_ != HeapMode::SPAWN &&
        activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC())) {
        // if activeSpace capacity changes, oldSpace maximumCapacity should change, too.
        size_t multiple = 2;
        size_t oldSpaceMaxLimit = 0;
        if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
            size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
        } else {
            size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
        }
        // NOTE(review): oldSpaceMaxLimit is computed but never applied — there
        // is no oldSpace_->SetMaximumCapacity(oldSpaceMaxLimit) call, so the
        // adjustment announced by the comment above never takes effect.
        // Confirm whether the call is missing or intentionally removed.
        inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
    }

    activeSemiSpace_->SetWaterLine();
    PrepareRecordRegionsForReclaim();
    hugeObjectSpace_->ReclaimHugeRegion();
    if (parallelGC_) {
        // Region reclamation runs asynchronously; WaitClearTaskFinished()
        // (called from Prepare()) synchronizes with it before the next GC.
        clearTaskFinished_ = false;
        Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}
273
ResumeForAppSpawn()274 void Heap::ResumeForAppSpawn()
275 {
276 sweeper_->WaitAllTaskFinished();
277 hugeObjectSpace_->ReclaimHugeRegion();
278 inactiveSemiSpace_->ReclaimRegions();
279 oldSpace_->Reset();
280 auto cb = [] (Region *region) {
281 region->ClearMarkGCBitset();
282 };
283 nonMovableSpace_->EnumerateRegions(cb);
284 machineCodeSpace_->EnumerateRegions(cb);
285 hugeObjectSpace_->EnumerateRegions(cb);
286 }
287
// Run the app-spawn variant of the full GC before the process forks; the
// compaction itself is implemented in FullGC::RunPhasesForAppSpawn (dispatched
// via CollectGarbage with APPSPAWN_FULL_GC).
void Heap::CompactHeapBeforeFork()
{
    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
}
292
// Turn off all parallel/concurrent GC machinery and shut down the shared task
// pool. Waits for in-flight tasks first so no worker touches the heap after
// the pool is destroyed. EnableParallelGC() is the inverse operation.
void Heap::DisableParallelGC()
{
    WaitAllTasksFinished();
    parallelGC_ = false;
    maxEvacuateTaskCount_ = 0;
    maxMarkTaskCount_ = 0;
    stwYoungGC_->ConfigParallelGC(false);
    sweeper_->ConfigConcurrentSweep(false);
    concurrentMarker_->ConfigConcurrentMark(false);
    Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
}
304
EnableParallelGC()305 void Heap::EnableParallelGC()
306 {
307 Taskpool::GetCurrentTaskpool()->Initialize();
308 parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
309 maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
310 if (auto totalThreadNum = workManager_->GetTotalThreadNum();
311 totalThreadNum != maxEvacuateTaskCount_ + 1) {
312 LOG_ECMA_MEM(WARN) << "TheadNum mismatch, totalThreadNum(workerManager): " << totalThreadNum << ", "
313 << "totalThreadNum(taskpool): " << (maxEvacuateTaskCount_ + 1);
314 delete workManager_;
315 workManager_ = new WorkManager(this, maxEvacuateTaskCount_ + 1);
316 }
317 maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
318 maxEvacuateTaskCount_ - 1);
319 bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
320 #if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
321 concurrentMarkerEnabled = false;
322 #endif
323 stwYoungGC_->ConfigParallelGC(parallelGC_);
324 sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
325 concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
326 }
327
SelectGCType() const328 TriggerGCType Heap::SelectGCType() const
329 {
330 // If concurrent mark is enabled, the TryTriggerConcurrentMarking decide which GC to choose.
331 if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark()) {
332 return YOUNG_GC;
333 }
334 if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
335 GetHeapObjectSize() <= globalSpaceAllocLimit_ && !GlobalNativeSizeLargerThanLimit()) {
336 return YOUNG_GC;
337 }
338 return OLD_GC;
339 }
340
// Run one garbage collection.
// @param gcType requested collection type; may be upgraded to FULL_GC when a
//               full GC was requested earlier (fullGCRequested_) or by the
//               ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC build switch.
// @param reason trigger reason, recorded in the GC statistics.
void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
{
    // GC is suppressed entirely while cross-thread execution is enabled.
    if (thread_->IsCrossThreadExecutionEnable()) {
        return;
    }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    [[maybe_unused]] GcStateScope scope(thread_);
#endif
    CHECK_NO_GC

#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    // Pre-GC verification (debug builds): any corruption reported here
    // predates this collection.
    LOG_ECMA(DEBUG) << "Enable heap verify";
    isVerifying_ = true;
    // pre gc heap verify
    sweeper_->EnsureAllTaskFinished();
    auto failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG_GC(FATAL) << "Before gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif

#if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
    gcType = TriggerGCType::FULL_GC;
#endif
    if (fullGCRequested_ && thread_->IsReadyToMark() && gcType != TriggerGCType::FULL_GC) {
        gcType = TriggerGCType::FULL_GC;
    }
    // Snapshot new-space sizes before the GC for survival-rate / native-limit
    // accounting afterwards.
    size_t originalNewSpaceSize = activeSemiSpace_->GetHeapObjectSize();
    size_t originalNewSpaceNativeSize = activeSemiSpace_->GetNativeBindingSize();
    memController_->StartCalculationBeforeGC();
    StatisticHeapObject(gcType);
    if (!GetJSThread()->IsReadyToMark() && markType_ == MarkType::MARK_FULL) {
        ecmaVm_->GetEcmaGCStats()->SetGCReason(reason);
    } else {
        ecmaVm_->GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, reason);
    }
    switch (gcType) {
        case TriggerGCType::YOUNG_GC:
            // Use partial GC for young generation.
            if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
                SetMarkType(MarkType::MARK_YOUNG);
            }
            partialGC_->RunPhases();
            break;
        case TriggerGCType::OLD_GC:
            if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
                // Wait for existing concurrent marking tasks to be finished (if any),
                // and reset concurrent marker's status for full mark.
                bool concurrentMark = CheckOngoingConcurrentMarking();
                if (concurrentMark) {
                    concurrentMarker_->Reset();
                }
            }
            SetMarkType(MarkType::MARK_FULL);
            partialGC_->RunPhases();
            break;
        case TriggerGCType::FULL_GC:
            fullGC_->SetForAppSpawn(false);
            fullGC_->RunPhases();
            if (fullGCRequested_) {
                fullGCRequested_ = false;
            }
            break;
        case TriggerGCType::APPSPAWN_FULL_GC:
            fullGC_->SetForAppSpawn(true);
            fullGC_->RunPhasesForAppSpawn();
            break;
        default:
            LOG_ECMA(FATAL) << "this branch is unreachable";
            UNREACHABLE();
            break;
    }

    // OOMError object is not allowed to be allocated during gc process, so throw OOMError after gc
    if (shouldThrowOOMError_) {
        ThrowOutOfMemoryError(oldSpace_->GetMergeSize(), " OldSpace::Merge");
        oldSpace_->ResetMergeSize();
        shouldThrowOOMError_ = false;
    }

    ClearIdleTask();
    // Adjust the old space capacity and global limit for the first partial GC with full mark.
    // Trigger the full mark next time if the current survival rate is much less than half the average survival rates.
    AdjustBySurvivalRate(originalNewSpaceSize);
    activeSemiSpace_->AdjustNativeLimit(originalNewSpaceNativeSize);
    memController_->StopCalculationAfterGC(gcType);
    if (gcType == TriggerGCType::FULL_GC || IsFullMark()) {
        // Only when the gc type is not semiGC and after the old space sweeping has been finished,
        // the limits of old space and global space can be recomputed.
        RecomputeLimits();
        OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark" << IsFullMark()
                                    << " global object size " << GetHeapObjectSize()
                                    << " global committed size " << GetCommittedSize()
                                    << " global limit " << globalSpaceAllocLimit_;
        markType_ = MarkType::MARK_YOUNG;
    }
    if (concurrentMarker_->IsRequestDisabled()) {
        concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
    }
    // GC log
    ecmaVm_->GetEcmaGCStats()->RecordStatisticAfterGC();
    ecmaVm_->GetEcmaGCStats()->PrintGCStatistic();
    // weak node nativeFinalizeCallback may execute JS and change the weakNodeList status,
    // even lead to another GC, so this have to invoke after this GC process.
    InvokeWeakNodeNativeFinalizeCallback();

#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    // post gc heap verify
    isVerifying_ = true;
    sweeper_->EnsureAllTaskFinished();
    failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG_GC(FATAL) << "After gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif
    JSFinalizationRegistry::CheckAndCall(thread_);
}
460
ThrowOutOfMemoryError(size_t size,std::string functionName)461 void Heap::ThrowOutOfMemoryError(size_t size, std::string functionName)
462 {
463 GetEcmaVM()->GetEcmaGCStats()->PrintGCMemoryStatistic();
464 std::ostringstream oss;
465 oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
466 LOG_ECMA_MEM(ERROR) << oss.str().c_str();
467 THROW_OOM_ERROR(thread_, oss.str().c_str());
468 }
469
// Log heap statistics and abort the process (FATAL log) on an unrecoverable
// allocation failure — unlike ThrowOutOfMemoryError, no JS error is thrown.
// @param size         number of bytes whose allocation failed.
// @param functionName name of the allocation site, embedded in the message.
void Heap::FatalOutOfMemoryError(size_t size, std::string functionName)
{
    GetEcmaVM()->GetEcmaGCStats()->PrintGCMemoryStatistic();
    LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
                        << " function name: " << functionName.c_str();
}
476
AdjustBySurvivalRate(size_t originalNewSpaceSize)477 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
478 {
479 if (originalNewSpaceSize <= 0) {
480 return;
481 }
482 semiSpaceCopiedSize_ = activeSemiSpace_->GetHeapObjectSize();
483 double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
484 promotedSize_ = GetEvacuator()->GetPromotedSize();
485 double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
486 double survivalRate = std::min(copiedRate + promotedRate, 1.0);
487 OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
488 << " survivalRate: " << survivalRate;
489 if (!oldSpaceLimitAdjusted_) {
490 memController_->AddSurvivalRate(survivalRate);
491 AdjustOldSpaceLimit();
492 } else {
493 double averageSurvivalRate = memController_->GetAverageSurvivalRate();
494 // 2 means half
495 if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
496 fullMarkRequested_ = true;
497 OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
498 << " is less than half the average survival rates: " << averageSurvivalRate
499 << ". Trigger full mark next time.";
500 // Survival rate of full mark is precise. Reset recorded survival rates.
501 memController_->ResetRecordedSurvivalRates();
502 }
503 memController_->AddSurvivalRate(survivalRate);
504 }
505 }
506
// Walk every object-bearing space with a VerifyObjectVisitor, accumulating
// the number of corrupted objects into a shared counter. A fresh visitor is
// constructed per space (each scope below), all writing to the same failCount.
// @return total number of verification failures found across all spaces.
size_t Heap::VerifyHeapObjects() const
{
    size_t failCount = 0;
    {
        VerifyObjectVisitor verifier(this, &failCount);
        activeSemiSpace_->IterateOverObjects(verifier);
    }

    {
        VerifyObjectVisitor verifier(this, &failCount);
        oldSpace_->IterateOverObjects(verifier);
    }

    {
        // App-spawn space only iterates objects that are marked.
        VerifyObjectVisitor verifier(this, &failCount);
        appSpawnSpace_->IterateOverMarkedObjects(verifier);
    }

    {
        VerifyObjectVisitor verifier(this, &failCount);
        nonMovableSpace_->IterateOverObjects(verifier);
    }

    {
        VerifyObjectVisitor verifier(this, &failCount);
        hugeObjectSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount);
        machineCodeSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount);
        snapshotSpace_->IterateOverObjects(verifier);
    }
    return failCount;
}
544
// Verify the old-to-new remembered sets of every old-generation space using a
// single shared VerifyObjectVisitor.
// @return number of remembered-set verification failures found.
size_t Heap::VerifyOldToNewRSet() const
{
    size_t failCount = 0;
    VerifyObjectVisitor verifier(this, &failCount);
    oldSpace_->IterateOldToNewOverObjects(verifier);
    appSpawnSpace_->IterateOldToNewOverObjects(verifier);
    nonMovableSpace_->IterateOldToNewOverObjects(verifier);
    machineCodeSpace_->IterateOldToNewOverObjects(verifier);
    return failCount;
}
555
// Early-phase limit adjustment, used only until oldSpaceLimitAdjusted_ is set.
// Each call may only shrink the old-space limit toward actual usage (scaled
// by the average survival rate); the first time the computed limit would
// grow instead, adjustment is disabled and RecomputeLimits() takes over.
void Heap::AdjustOldSpaceLimit()
{
    if (oldSpaceLimitAdjusted_) {
        return;
    }
    size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
    size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
    size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
    if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
        GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
    } else {
        // The limit would grow — stop adjusting from now on.
        oldSpaceLimitAdjusted_ = true;
    }

    // Same shrink-only policy for the global allocation limit.
    size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
    if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
        globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
    }
    // temporarily regard the heap limit is the same as the native limit.
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
                                << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
}
581
// Forward an object-allocation event to the heap profiler, if one is attached.
// Compiles to a no-op unless ECMASCRIPT_SUPPORT_HEAPPROFILER is defined.
// @param address newly allocated object.
// @param size    allocation size in bytes.
void Heap::OnAllocateEvent([[maybe_unused]] TaggedObject* address, [[maybe_unused]] size_t size)
{
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
    if (profiler != nullptr) {
        // BlockHookScope: presumably suppresses re-entrant allocation hooks
        // while the profiler itself allocates — confirm in heap_profiler.h.
        BlockHookScope blockScope;
        profiler->AllocationEvent(address, size);
    }
#endif
}
592
// Forward an object-move (evacuation/compaction) event to the heap profiler,
// if one is attached. Compiles to a no-op unless
// ECMASCRIPT_SUPPORT_HEAPPROFILER is defined.
// @param address        old object address.
// @param forwardAddress new object location after the move.
// @param size           object size in bytes.
void Heap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
                       [[maybe_unused]] size_t size)
{
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
    if (profiler != nullptr) {
        // BlockHookScope: presumably suppresses re-entrant hooks while the
        // profiler runs — confirm in heap_profiler.h.
        BlockHookScope blockScope;
        profiler->MoveEvent(address, forwardAddress, size);
    }
#endif
}
604
AddToKeptObjects(JSHandle<JSTaggedValue> value) const605 void Heap::AddToKeptObjects(JSHandle<JSTaggedValue> value) const
606 {
607 JSHandle<GlobalEnv> env = ecmaVm_->GetGlobalEnv();
608 JSHandle<LinkedHashSet> linkedSet;
609 if (env->GetWeakRefKeepObjects()->IsUndefined()) {
610 linkedSet = LinkedHashSet::Create(thread_);
611 } else {
612 linkedSet =
613 JSHandle<LinkedHashSet>(thread_, LinkedHashSet::Cast(env->GetWeakRefKeepObjects()->GetTaggedObject()));
614 }
615 linkedSet = LinkedHashSet::Add(thread_, linkedSet, value);
616 env->SetWeakRefKeepObjects(thread_, linkedSet);
617 }
618
AdjustSpaceSizeForAppSpawn()619 void Heap::AdjustSpaceSizeForAppSpawn()
620 {
621 SetHeapMode(HeapMode::SPAWN);
622 auto &config = ecmaVm_->GetEcmaParamConfiguration();
623 size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
624 activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
625 auto committedSize = appSpawnSpace_->GetCommittedSize();
626 appSpawnSpace_->SetInitialCapacity(committedSize);
627 appSpawnSpace_->SetMaximumCapacity(committedSize);
628 oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
629 oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
630 }
631
// Register an allocation inspector with every space that supports one. Paired
// spaces (active/inactive semispace, old/compress space) register only the
// currently-allocating half; read-only and app-spawn spaces take none.
// @param inspector non-null inspector; ownership is not transferred.
void Heap::AddAllocationInspectorToAllSpaces(AllocationInspector *inspector)
{
    ASSERT(inspector != nullptr);
    // activeSemiSpace_/inactiveSemiSpace_:
    // only add an inspector to activeSemiSpace_, and while sweeping for gc, inspector need be swept.
    activeSemiSpace_->AddAllocationInspector(inspector);
    // oldSpace_/compressSpace_:
    // only add an inspector to oldSpace_, and while sweeping for gc, inspector need be swept.
    oldSpace_->AddAllocationInspector(inspector);
    // readOnlySpace_ need not allocationInspector.
    // appSpawnSpace_ need not allocationInspector.
    nonMovableSpace_->AddAllocationInspector(inspector);
    machineCodeSpace_->AddAllocationInspector(inspector);
    hugeObjectSpace_->AddAllocationInspector(inspector);
}
647
// Remove the allocation inspectors from every space that
// AddAllocationInspectorToAllSpaces() registered them with.
void Heap::ClearAllocationInspectorFromAllSpaces()
{
    activeSemiSpace_->ClearAllocationInspector();
    oldSpace_->ClearAllocationInspector();
    nonMovableSpace_->ClearAllocationInspector();
    machineCodeSpace_->ClearAllocationInspector();
    hugeObjectSpace_->ClearAllocationInspector();
}
656
// Drop the WeakRef kept-objects set by resetting the global environment's
// WeakRefKeepObjects slot to undefined (counterpart of AddToKeptObjects).
void Heap::ClearKeptObjects() const
{
    ecmaVm_->GetGlobalEnv()->SetWeakRefKeepObjects(thread_, JSTaggedValue::Undefined());
}
661
// Recompute old-space, global and native allocation limits after a full
// GC, using the measured GC speed vs. mutator allocation throughput to pick a
// growing factor. May also request a full mark when committed old-space
// memory is far larger than its live objects.
void Heap::RecomputeLimits()
{
    double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
    double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
    size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
    size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();

    double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
    // New-space capacity is reserved out of both caps below.
    size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
    size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
        maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
    size_t maxGlobalSize = ecmaVm_->GetEcmaParamConfiguration().GetMaxHeapSize() - newSpaceCapacity;
    size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
        maxGlobalSize, newSpaceCapacity, growingFactor);
    globalSpaceAllocLimit_ = newGlobalSpaceLimit;
    oldSpace_->SetInitialCapacity(newOldSpaceLimit);
    globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(GetGlobalNativeSize(), MIN_HEAP_SIZE,
        maxGlobalSize, newSpaceCapacity, growingFactor);
    OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
                                << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
                                << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
    // Too much committed-but-dead old-space memory: request a full mark next time.
    if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize() &&
        (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) { // 2: means half
        OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
                                    << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
                                    << " Committed Size: " << oldSpace_->GetCommittedSize();
        SetFullMarkRequestedState(true);
    }
}
691
CheckAndTriggerOldGC(size_t size)692 void Heap::CheckAndTriggerOldGC(size_t size)
693 {
694 if (OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) || GetHeapObjectSize() > globalSpaceAllocLimit_ ||
695 GlobalNativeSizeLargerThanLimit()) {
696 CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
697 }
698 }
699
// If a concurrent mark is in flight, help it finish (the main thread drains
// the mark stack) and wait for completion; records post-mark statistics.
// @return true when a concurrent mark was in progress and is now finished.
bool Heap::CheckOngoingConcurrentMarking()
{
    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark() &&
        concurrentMarker_->IsTriggeredConcurrentMark()) {
        TRACE_GC(GCStats::Scope::ScopeId::WaitConcurrentMarkFinished, GetEcmaVM()->GetEcmaGCStats());
        if (thread_->IsMarking()) {
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
            MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), WaitConcurrentMarkingFinished);
            // Main thread contributes to marking instead of just blocking.
            GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
            WaitConcurrentMarkingFinished();
        } else {
            WaitRunningTaskFinished();
        }
        memController_->RecordAfterConcurrentMark(IsFullMark(), concurrentMarker_);
        return true;
    }
    return false;
}
718
// Drop any pending idle-time GC task and record the completion timestamp
// (read elsewhere when scheduling the next idle task).
void Heap::ClearIdleTask()
{
    SetIdleTask(IdleTaskType::NO_TASK);
    idleTaskFinishTime_ = incrementalMarker_->GetCurrentTimeInMs();
}
724
// Schedule GC work for the next idle period: either finish an already-complete
// concurrent mark, or start a young GC when the new space is predicted to hit
// its limit soon. No-op when idle GC is disabled, marking is not ready, or
// another idle task is already pending.
void Heap::TryTriggerIdleCollection()
{
    if (idleTask_ != IdleTaskType::NO_TASK || !GetJSThread()->IsReadyToMark() || !enableIdleGC_) {
        return;
    }
    // Concurrent marking has finished: use idle time to wrap it up.
    if (thread_->IsMarkFinished() && concurrentMarker_->IsTriggeredConcurrentMark()) {
        SetIdleTask(IdleTaskType::FINISH_MARKING);
        EnableNotifyIdle();
        CalculateIdleDuration();
        return;
    }

    // Predict new-space head-room assuming a mark started now; trigger a
    // young GC while there is still room left.
    // NOTE(review): both speeds may be 0.0, making the durations inf/NaN; the
    // comparison below then evaluates false — confirm this is intended.
    double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
    double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
    double newSpaceAllocToLimitDuration =
        (activeSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetCommittedSize()) / newSpaceAllocSpeed;
    double newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
    double newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
    // 2 means double
    if (newSpaceRemainSize < 2 * DEFAULT_REGION_SIZE || activeSemiSpace_->NativeBindingSizeLargerThanLimit()) {
        SetIdleTask(IdleTaskType::YOUNG_GC);
        SetMarkType(MarkType::MARK_YOUNG);
        EnableNotifyIdle();
        CalculateIdleDuration();
        return;
    }
}
752
// Estimate the pause (idlePredictDuration_, in ms) the pending idle GC task
// will need, by summing per-phase estimates (work size / recorded speed).
// Phases whose speed statistic is still zero are skipped.
void Heap::CalculateIdleDuration()
{
    // update reference duration
    idlePredictDuration_ = 0.0f;
    size_t updateReferenceSpeed = markType_ == MarkType::MARK_YOUNG ?
        ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_UPDATE_REFERENCE_SPEED) :
        ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::UPDATE_REFERENCE_SPEED);
    if (updateReferenceSpeed != 0) {
        idlePredictDuration_ += (float)GetHeapObjectSize() / updateReferenceSpeed;
    }

    // clear native object duration
    size_t clearNativeObjSpeed = 0;
    if (markType_ == MarkType::MARK_YOUNG) {
        clearNativeObjSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_CLEAR_NATIVE_OBJ_SPEED);
    } else if (markType_ == MarkType::MARK_FULL) {
        clearNativeObjSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_CLEAR_NATIVE_OBJ_SPEED);
    }

    if (clearNativeObjSpeed != 0) {
        idlePredictDuration_ += (float)GetEcmaVM()->GetNativePointerListSize() / clearNativeObjSpeed;
    }

    // sweep and evacuate duration
    size_t youngEvacuateSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_EVACUATE_SPACE_SPEED);
    size_t sweepSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::SWEEP_SPEED);
    size_t oldEvacuateSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_EVACUATE_SPACE_SPEED);
    double survivalRate = ecmaVm_->GetEcmaGCStats()->GetAvgSurvivalRate();
    if (markType_ == MarkType::MARK_YOUNG && youngEvacuateSpeed != 0) {
        // Young GC only evacuates the surviving fraction of the new space.
        idlePredictDuration_ += survivalRate * activeSemiSpace_->GetHeapObjectSize() / youngEvacuateSpeed;
    } else if (markType_ == MarkType::MARK_FULL) {
        if (sweepSpeed != 0) {
            idlePredictDuration_ += (float)GetHeapObjectSize() / sweepSpeed;
        }
        if (oldEvacuateSpeed != 0) {
            // Full mark also evacuates the collect-region set recorded last GC.
            size_t collectRegionSetSize = GetEcmaVM()->GetEcmaGCStats()->GetRecordData(
                RecordData::COLLECT_REGION_SET_SIZE);
            idlePredictDuration_ += (survivalRate * activeSemiSpace_->GetHeapObjectSize() + collectRegionSetSize) /
                                    oldEvacuateSpeed;
        }
    }

    // Idle YoungGC mark duration
    size_t markSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::MARK_SPEED);
    if (idleTask_ == IdleTaskType::YOUNG_GC && markSpeed != 0) {
        idlePredictDuration_ += (float)activeSemiSpace_->GetHeapObjectSize() / markSpeed;
    }
    OPTIONAL_LOG(ecmaVm_, INFO) << "Predict idle gc pause: " << idlePredictDuration_ << "ms";
}
802
TryTriggerIncrementalMarking()803 void Heap::TryTriggerIncrementalMarking()
804 {
805 if (!GetJSThread()->IsReadyToMark() || idleTask_ != IdleTaskType::NO_TASK || !enableIdleGC_) {
806 return;
807 }
808 size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
809 size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
810 double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
811 double oldSpaceIncrementalMarkSpeed = incrementalMarker_->GetAverageIncrementalMarkingSpeed();
812 double oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
813 double oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceIncrementalMarkSpeed;
814
815 double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
816 // mark finished before allocate limit
817 if ((oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) ||
818 GetHeapObjectSize() >= globalSpaceAllocLimit_) {
819 // The object allocated in incremental marking should lower than limit,
820 // otherwise select trigger concurrent mark.
821 size_t allocateSize = oldSpaceAllocSpeed * oldSpaceMarkDuration;
822 if (allocateSize < ALLOCATE_SIZE_LIMIT) {
823 EnableNotifyIdle();
824 SetIdleTask(IdleTaskType::INCREMENTAL_MARK);
825 }
826 }
827 }
828
// Decide whether (and with which mark type) to start a concurrent mark.
// When concurrent marking is enabled, concurrent marking will be attempted to trigger.
// When the size of old space or global space reaches the limit, isFullMarkNeeded will be set to true.
// If the predicted duration of current full mark may not result in the new and old spaces reaching their limit,
// full mark will be triggered.
// In the same way, if the size of the new space reaches the capacity, and the predicted duration of current
// young mark may not result in the new space reaching its limit, young mark can be triggered.
// If it spends much time in full mark, the compress full GC will be requested when the spaces reach the limit.
// If the global space is larger than half max heap size, we will turn to use full mark and trigger partial GC.
void Heap::TryTriggerConcurrentMarking()
{
    // Bail out while marking is not possible: marker disabled, thread not ready to mark,
    // an incremental mark already in flight, or an incompatible idle task pending.
    if (!concurrentMarker_->IsEnabled() || !thread_->IsReadyToMark() ||
        incrementalMarker_->IsTriggeredIncrementalMark() ||
        !(idleTask_ == IdleTaskType::NO_TASK || idleTask_ == IdleTaskType::YOUNG_GC)) {
        return;
    }
    // An explicit full-mark request overrides all heuristics below.
    if (fullMarkRequested_) {
        markType_ = MarkType::MARK_FULL;
        OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
        TriggerConcurrentMarking();
        return;
    }
    bool isFullMarkNeeded = false;
    double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
           oldSpaceAllocToLimitDuration = 0;
    double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
    double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
    size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
    size_t globalHeapObjectSize = GetHeapObjectSize();
    size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
    // No speed samples yet: fall back to pure limit checks for the first full mark.
    if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
            GlobalNativeSizeLargerThanLimit()) {
            markType_ = MarkType::MARK_FULL;
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
            TriggerConcurrentMarking();
            return;
        }
    } else {
        // A full mark is needed either when a hard limit is already reached...
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
            GlobalNativeSizeLargerThanLimit()) {
            isFullMarkNeeded = true;
        }
        oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
        oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
        // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
        double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
        // ...or when marking is predicted to finish with less than one region of headroom left.
        if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
            isFullMarkNeeded = true;
        }
    }

    double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
    double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
    // No young-generation speed samples yet: trigger the first semi mark purely by committed size.
    if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
        auto &config = ecmaVm_->GetEcmaParamConfiguration();
        if (activeSemiSpace_->GetCommittedSize() >= config.GetSemiSpaceTriggerConcurrentMark()) {
            markType_ = MarkType::MARK_YOUNG;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark" << fullGCRequested_;
        }
        return;
    }
    newSpaceAllocToLimitDuration = (activeSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetCommittedSize()) /
        newSpaceAllocSpeed;
    newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
    // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
    newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;

    if (isFullMarkNeeded) {
        // Prefer the full mark only if it can finish before either generation fills up;
        // otherwise only start it once a hard limit is actually reached.
        if (oldSpaceMarkDuration < newSpaceAllocToLimitDuration &&
            oldSpaceMarkDuration < oldSpaceAllocToLimitDuration) {
            markType_ = MarkType::MARK_FULL;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark by speed";
        } else {
            if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
                GlobalNativeSizeLargerThanLimit()) {
                markType_ = MarkType::MARK_FULL;
                TriggerConcurrentMarking();
                OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark by limit";
            }
        }
    } else if (newSpaceRemainSize < DEFAULT_REGION_SIZE || activeSemiSpace_->NativeBindingSizeLargerThanLimit()) {
        markType_ = MarkType::MARK_YOUNG;
        TriggerConcurrentMarking();
        OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
    }
}
917
IncreaseNativeBindingSize(JSNativePointer * object)918 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
919 {
920 size_t size = object->GetBindingSize();
921 if (size == 0) {
922 return;
923 }
924 Region *region = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(object));
925 if (region->InYoungSpace()) {
926 activeSemiSpace_->IncreaseNativeBindingSize(size);
927 } else {
928 nonNewSpaceNativeBindingSize_ += size;
929 }
930 }
931
IncreaseNativeBindingSize(bool nonMovable,size_t size)932 void Heap::IncreaseNativeBindingSize(bool nonMovable, size_t size)
933 {
934 if (size == 0) {
935 return;
936 }
937 if (!nonMovable) {
938 activeSemiSpace_->IncreaseNativeBindingSize(size);
939 } else {
940 nonNewSpaceNativeBindingSize_ += size;
941 }
942 }
943
// Snapshot the current region set of each space so the upcoming reclaim phase can
// distinguish regions that existed before the GC from ones added afterwards.
void Heap::PrepareRecordRegionsForReclaim()
{
    activeSemiSpace_->SetRecordRegion();
    oldSpace_->SetRecordRegion();
    snapshotSpace_->SetRecordRegion();
    nonMovableSpace_->SetRecordRegion();
    hugeObjectSpace_->SetRecordRegion();
    machineCodeSpace_->SetRecordRegion();
}
953
TriggerConcurrentMarking()954 void Heap::TriggerConcurrentMarking()
955 {
956 if (idleTask_ == IdleTaskType::YOUNG_GC && IsFullMark()) {
957 ClearIdleTask();
958 DisableNotifyIdle();
959 }
960 if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
961 concurrentMarker_->Mark();
962 }
963 }
964
WaitRunningTaskFinished()965 void Heap::WaitRunningTaskFinished()
966 {
967 os::memory::LockHolder holder(waitTaskFinishedMutex_);
968 while (runningTaskCount_ > 0) {
969 waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
970 }
971 }
972
WaitClearTaskFinished()973 void Heap::WaitClearTaskFinished()
974 {
975 os::memory::LockHolder holder(waitClearTaskFinishedMutex_);
976 while (!clearTaskFinished_) {
977 waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
978 }
979 }
980
WaitAllTasksFinished()981 void Heap::WaitAllTasksFinished()
982 {
983 WaitRunningTaskFinished();
984 sweeper_->EnsureAllTaskFinished();
985 WaitClearTaskFinished();
986 if (concurrentMarker_->IsEnabled() && thread_->IsMarking() && concurrentMarker_->IsTriggeredConcurrentMark()) {
987 concurrentMarker_->WaitMarkingFinished();
988 }
989 }
990
// Thin forwarding helper: blocks until the concurrent marker reports marking done.
void Heap::WaitConcurrentMarkingFinished()
{
    concurrentMarker_->WaitMarkingFinished();
}
995
PostParallelGCTask(ParallelGCTaskPhase gcTask)996 void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
997 {
998 IncreaseTaskCount();
999 Taskpool::GetCurrentTaskpool()->PostTask(
1000 std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
1001 }
1002
IncreaseTaskCount()1003 void Heap::IncreaseTaskCount()
1004 {
1005 os::memory::LockHolder holder(waitTaskFinishedMutex_);
1006 runningTaskCount_++;
1007 }
1008
ChangeGCParams(bool inBackground)1009 void Heap::ChangeGCParams(bool inBackground)
1010 {
1011 inBackground_ = inBackground;
1012 if (inBackground) {
1013 LOG_GC(INFO) << "app is inBackground";
1014 if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT) {
1015 CollectGarbage(TriggerGCType::FULL_GC, GCReason::SWITCH_BACKGROUND);
1016 }
1017 if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
1018 SetMemGrowingType(MemGrowingType::CONSERVATIVE);
1019 LOG_GC(INFO) << "Heap Growing Type CONSERVATIVE";
1020 }
1021 concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
1022 sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::DISABLE);
1023 maxMarkTaskCount_ = 1;
1024 maxEvacuateTaskCount_ = 1;
1025 } else {
1026 LOG_GC(INFO) << "app is not inBackground";
1027 if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
1028 SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
1029 LOG_GC(INFO) << "Heap Growing Type HIGH_THROUGHPUT";
1030 }
1031 concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
1032 sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
1033 maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
1034 Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
1035 maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
1036 }
1037 }
1038
// Run the pending idle task (incremental mark step, young GC, or the final
// remark-and-collect) inside an idle slot of |idleMicroSec| reported by the host.
void Heap::TriggerIdleCollection(int idleMicroSec)
{
    if (idleTask_ == IdleTaskType::NO_TASK) {
        // Nothing to do; stop requesting idle notifications once the heap has been
        // quiet for longer than the maintenance window.
        if (incrementalMarker_->GetCurrentTimeInMs() - idleTaskFinishTime_ > IDLE_MAINTAIN_TIME) {
            DisableNotifyIdle();
        }
        return;
    }

    // Incremental mark initialize and process
    if (idleTask_ == IdleTaskType::INCREMENTAL_MARK &&
        incrementalMarker_->GetIncrementalGCStates() != IncrementalGCStates::REMARK) {
        incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
        // Once marking advances to REMARK, re-estimate how long the final pause will take.
        if (incrementalMarker_->GetIncrementalGCStates() == IncrementalGCStates::REMARK) {
            CalculateIdleDuration();
        }
        return;
    }

    // Defer the pause unless the idle slot can fit the predicted duration.
    // NOTE(review): idleMicroSec is compared against idlePredictDuration_, which is
    // accumulated and logged in milliseconds — confirm the two use the same unit.
    if (idleMicroSec < idlePredictDuration_ && idleMicroSec < IDLE_TIME_LIMIT) {
        return;
    }

    switch (idleTask_) {
        case IdleTaskType::FINISH_MARKING: {
            // Concurrent mark finished earlier; collect with the GC type matching the mark.
            if (markType_ == MarkType::MARK_FULL) {
                CollectGarbage(TriggerGCType::OLD_GC, GCReason::IDLE);
            } else {
                CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
            }
            break;
        }
        case IdleTaskType::YOUNG_GC:
            CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
            break;
        case IdleTaskType::INCREMENTAL_MARK:
            // Already at REMARK here (earlier states return above); this step finishes it.
            incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
            break;
        default:
            break;
    }
    ClearIdleTask();
}
1082
NotifyMemoryPressure(bool inHighMemoryPressure)1083 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
1084 {
1085 if (inHighMemoryPressure) {
1086 LOG_GC(INFO) << "app is inHighMemoryPressure";
1087 SetMemGrowingType(MemGrowingType::PRESSURE);
1088 } else {
1089 LOG_GC(INFO) << "app is not inHighMemoryPressure";
1090 SetMemGrowingType(MemGrowingType::CONSERVATIVE);
1091 }
1092 }
1093
CheckCanDistributeTask()1094 bool Heap::CheckCanDistributeTask()
1095 {
1096 os::memory::LockHolder holder(waitTaskFinishedMutex_);
1097 return runningTaskCount_ < maxMarkTaskCount_;
1098 }
1099
ReduceTaskCount()1100 void Heap::ReduceTaskCount()
1101 {
1102 os::memory::LockHolder holder(waitTaskFinishedMutex_);
1103 runningTaskCount_--;
1104 if (runningTaskCount_ == 0) {
1105 waitTaskFinishedCV_.SignalAll();
1106 }
1107 }
1108
// Task-pool entry point for one parallel GC work packet: dispatches on the phase
// recorded at post time, and always decrements the running-task counter on exit so
// WaitRunningTaskFinished can make progress.
bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
{
    switch (taskPhase_) {
        case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
            // Young GC: scan thread roots, then drain the resulting mark stack.
            heap_->GetSemiGCMarker()->MarkRoots(threadIndex);
            heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
            heap_->GetSemiGCMarker()->ProcessSnapshotRSet(threadIndex);
            break;
        case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
            // Concurrent mark: process old-to-new remembered sets.
            heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
            break;
        default:
            break;
    }
    heap_->ReduceTaskCount();
    return true;
}
1140
// Task-pool entry point: reclaim the regions released by the last GC off the JS thread,
// then signal completion (see WaitClearTaskFinished).
bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    heap_->ReclaimRegions(gcType_);
    return true;
}
1146
GetArrayBufferSize() const1147 size_t Heap::GetArrayBufferSize() const
1148 {
1149 size_t result = 0;
1150 sweeper_->EnsureAllTaskFinished();
1151 this->IterateOverObjects([&result](TaggedObject *obj) {
1152 JSHClass* jsClass = obj->GetClass();
1153 result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
1154 });
1155 return result;
1156 }
1157
IsAlive(TaggedObject * object) const1158 bool Heap::IsAlive(TaggedObject *object) const
1159 {
1160 if (!ContainObject(object)) {
1161 LOG_GC(ERROR) << "The region is already free";
1162 return false;
1163 }
1164
1165 bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
1166 if (isFree) {
1167 Region *region = Region::ObjectAddressToRange(object);
1168 LOG_GC(ERROR) << "The object " << object << " in "
1169 << region->GetSpaceTypeName()
1170 << " already free";
1171 }
1172 return !isFree;
1173 }
1174
ContainObject(TaggedObject * object) const1175 bool Heap::ContainObject(TaggedObject *object) const
1176 {
1177 /*
1178 * fixme: There's no absolutely safe appraoch to doing this, given that the region object is currently
1179 * allocated and maintained in the JS object heap. We cannot safely tell whether a region object
1180 * calculated from an object address is still valid or alive in a cheap way.
1181 * This will introduce inaccurate result to verify if an object is contained in the heap, and it may
1182 * introduce additional incorrect memory access issues.
1183 * Unless we can tolerate the performance impact of iterating the region list of each space and change
1184 * the implementation to that approach, don't rely on current implementation to get accurate result.
1185 */
1186 Region *region = Region::ObjectAddressToRange(object);
1187 return region->InHeapSpace();
1188 }
1189
InvokeWeakNodeNativeFinalizeCallback()1190 void Heap::InvokeWeakNodeNativeFinalizeCallback()
1191 {
1192 // the second callback may lead to another GC, if this, return directly;
1193 if (runningNativeFinalizeCallbacks_) {
1194 return;
1195 }
1196 runningNativeFinalizeCallbacks_ = true;
1197 auto weakNodeNativeFinalizeCallBacks = thread_->GetWeakNodeNativeFinalizeCallbacks();
1198 while (!weakNodeNativeFinalizeCallBacks->empty()) {
1199 auto callbackPair = weakNodeNativeFinalizeCallBacks->back();
1200 weakNodeNativeFinalizeCallBacks->pop_back();
1201 ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
1202 auto callback = callbackPair.first;
1203 (*callback)(callbackPair.second);
1204 }
1205 runningNativeFinalizeCallbacks_ = false;
1206 }
1207
// Emit a one-shot summary of the GC configuration and of every space
// (object size / committed size / capacity), gated behind the optional-log switch.
void Heap::PrintHeapInfo(TriggerGCType gcType) const
{
    OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
    OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
                                << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsFullMark() << ")";
    OPTIONAL_LOG(ecmaVm_, INFO) << "ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize()
                                << "/" << activeSemiSpace_->GetInitialCapacity() << "), NonMovable("
                                << nonMovableSpace_->GetHeapObjectSize() << "/" << nonMovableSpace_->GetCommittedSize()
                                << "/" << nonMovableSpace_->GetInitialCapacity() << "), Old("
                                << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize()
                                << "/" << oldSpace_->GetInitialCapacity() << "), HugeObject("
                                << hugeObjectSpace_->GetHeapObjectSize() << "/" << hugeObjectSpace_->GetCommittedSize()
                                << "/" << hugeObjectSpace_->GetInitialCapacity() << "), ReadOnlySpace("
                                << readOnlySpace_->GetCommittedSize() << "/" << readOnlySpace_->GetInitialCapacity()
                                << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize() << "/"
                                << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
                                << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
}
1226
StatisticHeapObject(TriggerGCType gcType) const1227 void Heap::StatisticHeapObject(TriggerGCType gcType) const
1228 {
1229 PrintHeapInfo(gcType);
1230 #if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
1231 static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
1232 int typeCount[JS_TYPE_LAST] = { 0 };
1233 static const int MIN_COUNT_THRESHOLD = 1000;
1234
1235 nonMovableSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1236 typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1237 });
1238 for (int i = 0; i < JS_TYPE_LAST; i++) {
1239 if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1240 LOG_ECMA(INFO) << "NonMovable space type " << JSHClass::DumpJSType(JSType(i))
1241 << " count:" << typeCount[i];
1242 }
1243 typeCount[i] = 0;
1244 }
1245
1246 oldSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1247 typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1248 });
1249 for (int i = 0; i < JS_TYPE_LAST; i++) {
1250 if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1251 LOG_ECMA(INFO) << "Old space type " << JSHClass::DumpJSType(JSType(i))
1252 << " count:" << typeCount[i];
1253 }
1254 typeCount[i] = 0;
1255 }
1256
1257 activeSemiSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1258 typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1259 });
1260 for (int i = 0; i < JS_TYPE_LAST; i++) {
1261 if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1262 LOG_ECMA(INFO) << "Active semi space type " << JSHClass::DumpJSType(JSType(i))
1263 << " count:" << typeCount[i];
1264 }
1265 typeCount[i] = 0;
1266 }
1267 #endif
1268 }
1269 } // namespace panda::ecmascript
1270