1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ecmascript/mem/heap-inl.h"
17
18 #include "ecmascript/ecma_vm.h"
19 #include "ecmascript/free_object.h"
20 #include "ecmascript/js_finalization_registry.h"
21 #include "ecmascript/js_native_pointer.h"
22 #include "ecmascript/linked_hash_table.h"
23 #include "ecmascript/mem/assert_scope.h"
24 #include "ecmascript/mem/concurrent_marker.h"
25 #include "ecmascript/mem/concurrent_sweeper.h"
26 #include "ecmascript/mem/full_gc.h"
27 #include "ecmascript/mem/mark_stack.h"
28 #include "ecmascript/mem/mem_controller.h"
29 #include "ecmascript/mem/partial_gc.h"
30 #include "ecmascript/mem/native_area_allocator.h"
31 #include "ecmascript/mem/parallel_evacuator.h"
32 #include "ecmascript/mem/parallel_marker-inl.h"
33 #include "ecmascript/mem/stw_young_gc.h"
34 #include "ecmascript/mem/verification.h"
35 #include "ecmascript/mem/work_manager.h"
36 #include "ecmascript/mem/gc_stats.h"
37 #include "ecmascript/ecma_string_table.h"
38 #include "ecmascript/runtime_call_id.h"
39
40 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
41 #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
42 #endif
43
44 namespace panda::ecmascript {
// Construct a Heap bound to the given VM. Only caches the VM, its JS thread and
// the two allocators; all spaces and GC workers are created later in Initialize().
Heap::Heap(EcmaVM *ecmaVm) : ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()),
                             nativeAreaAllocator_(ecmaVm->GetNativeAreaAllocator()),
                             heapRegionAllocator_(ecmaVm->GetHeapRegionAllocator()) {}
48
// Create every heap space and GC worker for this VM, sized from the VM's
// EcmaParamConfiguration. Must run once, before any allocation. Aborts (FATAL)
// if the configured max heap cannot hold the fixed-size spaces plus a minimal
// old space.
void Heap::Initialize()
{
    memController_ = new MemController(this);
    auto &config = ecmaVm_->GetEcmaParamConfiguration();
    size_t maxHeapSize = config.GetMaxHeapSize();
    size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
    size_t maxSemiSpaceCapacity = config.GetMaxSemiSpaceSize();
    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    activeSemiSpace_->Restart();
    activeSemiSpace_->SetWaterLine();
    // Hand the JS thread the semi space's top/end addresses so it can do
    // inline bump-pointer allocation in new space.
    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    // not set up from space

    size_t readOnlySpaceCapacity = config.GetDefaultReadOnlySpaceSize();
    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
    size_t nonmovableSpaceCapacity = config.GetDefaultNonMovableSpaceSize();
    // A command-line option may override the default non-movable capacity.
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
    nonMovableSpace_->Initialize();
    size_t snapshotSpaceCapacity = config.GetDefaultSnapshotSpaceSize();
    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
    size_t machineCodeSpaceCapacity = config.GetDefaultMachineCodeSpaceSize();
    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);

    // Whatever the fixed spaces don't consume (two semi spaces plus the
    // auxiliary spaces) becomes the old space budget.
    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) {
        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
    }
    size_t oldSpaceCapacity = maxHeapSize - capacities;
    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    // compressSpace_ mirrors oldSpace_ and is swapped in after a full GC.
    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    oldSpace_->Initialize();

    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    // Reserve one taskpool thread for evacuation when sizing the mark workers.
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);

    LOG_GC(INFO) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
                 << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
                 << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
                 << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
                 << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
                 << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
                 << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
                 << ", gcThreadNum = " << maxMarkTaskCount_;
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
    markType_ = MarkType::MARK_YOUNG;
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    // One work node per taskpool thread plus one for the main thread.
    workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
    stwYoungGC_ = new STWYoungGC(this, parallelGC_);
    fullGC_ = new FullGC(this);

    partialGC_ = new PartialGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
        EnableConcurrentMarkType::CONFIG_DISABLE);
    nonMovableMarker_ = new NonMovableMarker(this);
    semiGCMarker_ = new SemiGCMarker(this);
    compressGCMarker_ = new CompressGCMarker(this);
    evacuator_ = new ParallelEvacuator(this);
    idleData_ = new IdleData();
    enableIdleGC_ = ecmaVm_->GetJSOptions().EnableIdleGC();
}
126
// Tear down everything Initialize() created. Each member is deleted and nulled
// so Destroy() is idempotent. Spaces are released before the GC workers that
// reference them; the allocators are only un-referenced here (owned by the VM).
// NOTE(review): some spaces call Reset() and others Destroy() before delete —
// presumably matching each space type's reclamation contract; confirm against
// the space class definitions.
void Heap::Destroy()
{
    if (workManager_ != nullptr) {
        delete workManager_;
        workManager_ = nullptr;
    }
    if (activeSemiSpace_ != nullptr) {
        activeSemiSpace_->Destroy();
        delete activeSemiSpace_;
        activeSemiSpace_ = nullptr;
    }
    if (inactiveSemiSpace_ != nullptr) {
        inactiveSemiSpace_->Destroy();
        delete inactiveSemiSpace_;
        inactiveSemiSpace_ = nullptr;
    }
    if (oldSpace_ != nullptr) {
        oldSpace_->Reset();
        delete oldSpace_;
        oldSpace_ = nullptr;
    }
    if (compressSpace_ != nullptr) {
        compressSpace_->Destroy();
        delete compressSpace_;
        compressSpace_ = nullptr;
    }
    if (nonMovableSpace_ != nullptr) {
        nonMovableSpace_->Reset();
        delete nonMovableSpace_;
        nonMovableSpace_ = nullptr;
    }
    if (snapshotSpace_ != nullptr) {
        snapshotSpace_->Destroy();
        delete snapshotSpace_;
        snapshotSpace_ = nullptr;
    }
    if (machineCodeSpace_ != nullptr) {
        machineCodeSpace_->Reset();
        delete machineCodeSpace_;
        machineCodeSpace_ = nullptr;
    }
    if (hugeObjectSpace_ != nullptr) {
        hugeObjectSpace_->Destroy();
        delete hugeObjectSpace_;
        hugeObjectSpace_ = nullptr;
    }
    // A shared read-only space is owned elsewhere; only release it when this
    // heap is not in SHARE mode. Pages must be made writable before unmapping.
    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
        readOnlySpace_->ClearReadOnly();
        readOnlySpace_->Destroy();
        delete readOnlySpace_;
        readOnlySpace_ = nullptr;
    }
    if (appSpawnSpace_ != nullptr) {
        appSpawnSpace_->Reset();
        delete appSpawnSpace_;
        appSpawnSpace_ = nullptr;
    }
    if (stwYoungGC_ != nullptr) {
        delete stwYoungGC_;
        stwYoungGC_ = nullptr;
    }
    if (partialGC_ != nullptr) {
        delete partialGC_;
        partialGC_ = nullptr;
    }
    if (fullGC_ != nullptr) {
        delete fullGC_;
        fullGC_ = nullptr;
    }

    // Not owned by the heap — just drop the references.
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (memController_ != nullptr) {
        delete memController_;
        memController_ = nullptr;
    }
    if (sweeper_ != nullptr) {
        delete sweeper_;
        sweeper_ = nullptr;
    }
    if (concurrentMarker_ != nullptr) {
        delete concurrentMarker_;
        concurrentMarker_ = nullptr;
    }
    if (nonMovableMarker_ != nullptr) {
        delete nonMovableMarker_;
        nonMovableMarker_ = nullptr;
    }
    if (semiGCMarker_ != nullptr) {
        delete semiGCMarker_;
        semiGCMarker_ = nullptr;
    }
    if (compressGCMarker_ != nullptr) {
        delete compressGCMarker_;
        compressGCMarker_ = nullptr;
    }
    if (evacuator_ != nullptr) {
        delete evacuator_;
        evacuator_ = nullptr;
    }
    if (idleData_ != nullptr) {
        delete idleData_;
        idleData_ = nullptr;
    }
}
233
// Quiesce the heap before a collection: block until all in-flight parallel GC
// tasks, concurrent sweeping, and the async region-clearing task have finished.
void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}
241
Resume(TriggerGCType gcType)242 void Heap::Resume(TriggerGCType gcType)
243 {
244 if (gcType == TriggerGCType::FULL_GC) {
245 compressSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity());
246 auto *oldSpace = compressSpace_;
247 compressSpace_ = oldSpace_;
248 oldSpace_ = oldSpace;
249 }
250
251 if (mode_ != HeapMode::SPAWN &&
252 activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC())) {
253 // if activeSpace capacity changes, oldSpace maximumCapacity should change, too.
254 size_t multiple = 2;
255 size_t oldSpaceMaxLimit = 0;
256 if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
257 size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
258 oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
259 } else {
260 size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
261 oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
262 }
263 oldSpace_->SetMaximumCapacity(oldSpaceMaxLimit);
264 inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
265 }
266
267 activeSemiSpace_->SetWaterLine();
268 PrepareRecordRegionsForReclaim();
269 hugeObjectSpace_->ReclaimHugeRegion();
270 if (parallelGC_) {
271 clearTaskFinished_ = false;
272 Taskpool::GetCurrentTaskpool()->PostTask(
273 std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
274 } else {
275 ReclaimRegions(gcType);
276 }
277 }
278
ResumeForAppSpawn()279 void Heap::ResumeForAppSpawn()
280 {
281 sweeper_->WaitAllTaskFinished();
282 hugeObjectSpace_->ReclaimHugeRegion();
283 inactiveSemiSpace_->ReclaimRegions();
284 oldSpace_->Reset();
285 auto cb = [] (Region *region) {
286 region->ClearMarkGCBitset();
287 };
288 nonMovableSpace_->EnumerateRegions(cb);
289 machineCodeSpace_->EnumerateRegions(cb);
290 hugeObjectSpace_->EnumerateRegions(cb);
291 }
292
// Run the dedicated app-spawn full GC to compact the heap before the process
// forks, minimizing the pages the children will copy-on-write.
void Heap::CompactHeapBeforeFork()
{
    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
}
297
// Switch the heap to fully single-threaded GC: drain all outstanding tasks,
// zero the worker budgets, reconfigure each component, then shut down the
// shared taskpool.
void Heap::DisableParallelGC()
{
    // Must drain before destroying the taskpool the tasks run on.
    WaitAllTasksFinished();
    parallelGC_ = false;
    maxEvacuateTaskCount_ = 0;
    maxMarkTaskCount_ = 0;
    stwYoungGC_->ConfigParallelGC(false);
    sweeper_->ConfigConcurrentSweep(false);
    concurrentMarker_->ConfigConcurrentMark(false);
    Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
}
309
// Re-enable parallel/concurrent GC according to the VM's JS options: bring the
// taskpool back up, recompute the worker budgets, and reconfigure each
// component. Mirrors the setup done in Initialize().
void Heap::EnableParallelGC()
{
    Taskpool::GetCurrentTaskpool()->Initialize();
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    // Keep one taskpool thread free of marking work (reserved for evacuation).
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    stwYoungGC_->ConfigParallelGC(parallelGC_);
    sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
    concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
}
325
SelectGCType() const326 TriggerGCType Heap::SelectGCType() const
327 {
328 // If concurrent mark is enabled, the TryTriggerConcurrentMarking decide which GC to choose.
329 if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark()) {
330 return YOUNG_GC;
331 }
332 if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
333 GetHeapObjectSize() <= globalSpaceAllocLimit_) {
334 return YOUNG_GC;
335 }
336 return OLD_GC;
337 }
338
// Run one garbage collection of the requested type. This is the central GC
// driver: it optionally upgrades the type to FULL_GC, records allocation stats
// around the cycle, dispatches to the matching collector, then adjusts limits,
// runs deferred weak-node callbacks, and (in verify builds) checks heap
// integrity before and after.
void Heap::CollectGarbage(TriggerGCType gcType)
{
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
    [[maybe_unused]] GcStateScope scope(thread_);
#endif
    CHECK_NO_GC
#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    isVerifying_ = true;
    // pre gc heap verify
    sweeper_->EnsureAllTaskFinished();
    auto failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG_GC(FATAL) << "Before gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif

#if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
    gcType = TriggerGCType::FULL_GC;
#endif
    // A pending full-GC request upgrades this cycle, but only when the mutator
    // is ready to mark (no concurrent mark in flight).
    if (fullGCRequested_ && thread_->IsReadyToMark() && gcType != TriggerGCType::FULL_GC) {
        gcType = TriggerGCType::FULL_GC;
    }
    // Snapshot new-space sizes so survival rates can be computed after the GC.
    size_t originalNewSpaceSize = activeSemiSpace_->GetHeapObjectSize();
    size_t originalNewSpaceNativeSize = activeSemiSpace_->GetNativeBindingSize();
    memController_->StartCalculationBeforeGC();
    StatisticHeapObject(gcType);
    switch (gcType) {
        case TriggerGCType::YOUNG_GC:
            // Use partial GC for young generation.
            if (!concurrentMarker_->IsEnabled()) {
                SetMarkType(MarkType::MARK_YOUNG);
            }
            partialGC_->RunPhases();
            break;
        case TriggerGCType::OLD_GC:
            if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
                // Wait for existing concurrent marking tasks to be finished (if any),
                // and reset concurrent marker's status for full mark.
                bool concurrentMark = CheckOngoingConcurrentMarking();
                if (concurrentMark) {
                    concurrentMarker_->Reset();
                }
            }
            SetMarkType(MarkType::MARK_FULL);
            partialGC_->RunPhases();
            break;
        case TriggerGCType::FULL_GC:
            fullGC_->SetForAppSpawn(false);
            fullGC_->RunPhases();
            if (fullGCRequested_) {
                fullGCRequested_ = false;
            }
            break;
        case TriggerGCType::APPSPAWN_FULL_GC:
            fullGC_->SetForAppSpawn(true);
            fullGC_->RunPhasesForAppSpawn();
            break;
        default:
            UNREACHABLE();
            break;
    }

    // OOMError object is not allowed to be allocated during gc process, so throw OOMError after gc
    if (shouldThrowOOMError_) {
        ThrowOutOfMemoryError(oldSpace_->GetMergeSize(), " OldSpace::Merge");
        oldSpace_->ResetMergeSize();
        shouldThrowOOMError_ = false;
    }

    // Adjust the old space capacity and global limit for the first partial GC with full mark.
    // Trigger the full mark next time if the current survival rate is much less than half the average survival rates.
    AdjustBySurvivalRate(originalNewSpaceSize);
    activeSemiSpace_->AdjustNativeLimit(originalNewSpaceNativeSize);
    memController_->StopCalculationAfterGC(gcType);
    if (gcType == TriggerGCType::FULL_GC || IsFullMark()) {
        // Only when the gc type is not semiGC and after the old space sweeping has been finished,
        // the limits of old space and global space can be recomputed.
        RecomputeLimits();
        OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark" << IsFullMark()
                                    << " global object size " << GetHeapObjectSize()
                                    << " global committed size " << GetCommittedSize()
                                    << " global limit " << globalSpaceAllocLimit_;
        markType_ = MarkType::MARK_YOUNG;
    }
    // Honor a deferred request to turn concurrent marking off.
    if (concurrentMarker_->IsRequestDisabled()) {
        concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
    }
    ecmaVm_->GetEcmaGCStats()->CheckIfLongTimePause();
# if ECMASCRIPT_ENABLE_GC_LOG
    ecmaVm_->GetEcmaGCStats()->PrintStatisticResult();
#endif
    // weak node secondPassCallback may execute JS and change the weakNodeList status,
    // even lead to another GC, so this have to invoke after this GC process.
    InvokeWeakNodeSecondPassCallback();

#if ECMASCRIPT_ENABLE_HEAP_VERIFY
    // post gc heap verify
    isVerifying_ = true;
    sweeper_->EnsureAllTaskFinished();
    failCount = Verification(this).VerifyAll();
    if (failCount > 0) {
        LOG_GC(FATAL) << "After gc heap corrupted and " << failCount << " corruptions";
    }
    isVerifying_ = false;
#endif
    JSFinalizationRegistry::CheckAndCall(thread_);
}
447
ThrowOutOfMemoryError(size_t size,std::string functionName)448 void Heap::ThrowOutOfMemoryError(size_t size, std::string functionName)
449 {
450 GetEcmaVM()->GetEcmaGCStats()->PrintHeapStatisticResult(true);
451 std::ostringstream oss;
452 oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
453 LOG_ECMA_MEM(ERROR) << oss.str().c_str();
454 THROW_OOM_ERROR(thread_, oss.str().c_str());
455 }
456
// Log heap statistics and abort the process (FATAL log) on an unrecoverable
// allocation failure of `size` bytes in `functionName`. Unlike
// ThrowOutOfMemoryError, this never returns to JS.
void Heap::FatalOutOfMemoryError(size_t size, std::string functionName)
{
    GetEcmaVM()->GetEcmaGCStats()->PrintHeapStatisticResult(true);
    LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
                        << " function name: " << functionName.c_str();
}
463
AdjustBySurvivalRate(size_t originalNewSpaceSize)464 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
465 {
466 if (originalNewSpaceSize <= 0) {
467 return;
468 }
469 semiSpaceCopiedSize_ = activeSemiSpace_->GetHeapObjectSize();
470 double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
471 promotedSize_ = GetEvacuator()->GetPromotedSize();
472 double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
473 double survivalRate = std::min(copiedRate + promotedRate, 1.0);
474 OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
475 << " survivalRate: " << survivalRate;
476 if (!oldSpaceLimitAdjusted_) {
477 memController_->AddSurvivalRate(survivalRate);
478 AdjustOldSpaceLimit();
479 } else {
480 double averageSurvivalRate = memController_->GetAverageSurvivalRate();
481 if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
482 fullMarkRequested_ = true;
483 OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
484 << " is less than half the average survival rates: " << averageSurvivalRate
485 << ". Trigger full mark next time.";
486 // Survival rate of full mark is precise. Reset recorded survival rates.
487 memController_->ResetRecordedSurvivalRates();
488 }
489 memController_->AddSurvivalRate(survivalRate);
490 }
491 }
492
// Walk every object-holding space with a VerifyObjectVisitor and return the
// total number of corrupted objects found (0 means the heap is consistent).
// Each space gets its own visitor scope; all visitors accumulate into the
// same failCount.
size_t Heap::VerifyHeapObjects() const
{
    size_t failCount = 0;
    {
        VerifyObjectVisitor verifier(this, &failCount);
        activeSemiSpace_->IterateOverObjects(verifier);
    }

    {
        VerifyObjectVisitor verifier(this, &failCount);
        oldSpace_->IterateOverObjects(verifier);
    }

    {
        // App-spawn space only iterates marked objects.
        VerifyObjectVisitor verifier(this, &failCount);
        appSpawnSpace_->IterateOverMarkedObjects(verifier);
    }

    {
        VerifyObjectVisitor verifier(this, &failCount);
        nonMovableSpace_->IterateOverObjects(verifier);
    }

    {
        VerifyObjectVisitor verifier(this, &failCount);
        hugeObjectSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount);
        machineCodeSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount);
        snapshotSpace_->IterateOverObjects(verifier);
    }
    return failCount;
}
530
VerifyOldToNewRSet() const531 size_t Heap::VerifyOldToNewRSet() const
532 {
533 size_t failCount = 0;
534 VerifyObjectVisitor verifier(this, &failCount);
535 oldSpace_->IterateOldToNewOverObjects(verifier);
536 appSpawnSpace_->IterateOldToNewOverObjects(verifier);
537 nonMovableSpace_->IterateOldToNewOverObjects(verifier);
538 machineCodeSpace_->IterateOldToNewOverObjects(verifier);
539 return failCount;
540 }
541
// Calibration phase for the old-space/global limits: shrink them toward the
// observed survival rate after each GC until a computed limit would exceed
// the current one, at which point calibration is marked done
// (oldSpaceLimitAdjusted_) and RecomputeLimits() takes over.
void Heap::AdjustOldSpaceLimit()
{
    if (oldSpaceLimitAdjusted_) {
        return;
    }
    size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
    size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
    // Candidate limit: live size plus a minimum headroom, but no lower than
    // the survival-rate-scaled current limit.
    size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
    if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
        GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
    } else {
        // The limit would grow — calibration is finished.
        oldSpaceLimitAdjusted_ = true;
    }

    // The global limit is only ever shrunk here, never grown.
    size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
    if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
        globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
    }
    // temporarily regard the heap limit is the same as the native limit.
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
        << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
}
567
// Add `value` to the global WeakRef "kept objects" set so it stays strongly
// reachable until ClearKeptObjects() runs (per the WeakRef KeepDuringJob
// semantics). Lazily creates the LinkedHashSet on first use.
void Heap::AddToKeptObjects(JSHandle<JSTaggedValue> value) const
{
    JSHandle<GlobalEnv> env = ecmaVm_->GetGlobalEnv();
    JSHandle<LinkedHashSet> linkedSet;
    if (env->GetWeakRefKeepObjects()->IsUndefined()) {
        linkedSet = LinkedHashSet::Create(thread_);
    } else {
        linkedSet =
            JSHandle<LinkedHashSet>(thread_, LinkedHashSet::Cast(env->GetWeakRefKeepObjects()->GetTaggedObject()));
    }
    // Add may reallocate the set; store the (possibly new) handle back.
    linkedSet = LinkedHashSet::Add(thread_, linkedSet, value);
    env->SetWeakRefKeepObjects(thread_, linkedSet);
}
581
AdjustSpaceSizeForAppSpawn()582 void Heap::AdjustSpaceSizeForAppSpawn()
583 {
584 SetHeapMode(HeapMode::SPAWN);
585 auto &config = ecmaVm_->GetEcmaParamConfiguration();
586 size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
587 activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
588 auto committedSize = appSpawnSpace_->GetCommittedSize();
589 appSpawnSpace_->SetInitialCapacity(committedSize);
590 appSpawnSpace_->SetMaximumCapacity(committedSize);
591 oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
592 oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
593 }
594
// Drop the WeakRef "kept objects" set (counterpart of AddToKeptObjects),
// letting its entries become weakly reachable again.
void Heap::ClearKeptObjects() const
{
    ecmaVm_->GetGlobalEnv()->SetWeakRefKeepObjects(thread_, JSTaggedValue::Undefined());
}
599
// Recompute the old-space, global, and native allocation limits after a full
// mark / full GC, based on measured GC speed vs. mutator allocation throughput
// (via MemController's growing factor). Also requests a full mark when old
// space's live size is far below its committed size, to reclaim the slack.
void Heap::RecomputeLimits()
{
    double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
    double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
    size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
    size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();

    double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
    size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
    size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
        maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
    size_t maxGlobalSize = ecmaVm_->GetEcmaParamConfiguration().GetMaxHeapSize() - newSpaceCapacity;
    size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
        maxGlobalSize, newSpaceCapacity, growingFactor);
    globalSpaceAllocLimit_ = newGlobalSpaceLimit;
    oldSpace_->SetInitialCapacity(newOldSpaceLimit);
    // Native limit tracks total native memory bound to the heap: new-space
    // bindings + non-new-space bindings + the allocator's own usage.
    size_t globalSpaceNativeSize = activeSemiSpace_->GetNativeBindingSize() + nonNewSpaceNativeBindingSize_
                                   + nativeAreaAllocator_->GetNativeMemoryUsage();
    globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(globalSpaceNativeSize, MIN_HEAP_SIZE,
        maxGlobalSize, newSpaceCapacity, growingFactor);
    OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
        << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
        << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
    // Live size far below committed size: request a full mark to shrink.
    if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize()
        && (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) {
        OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
                                    << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
                                    << " Committed Size: " << oldSpace_->GetCommittedSize();
        SetFullMarkRequestedState(true);
    }
}
631
// Run an old-generation GC immediately if allocating `size` more bytes would
// breach the old-space limit/capacity or the global heap limit.
void Heap::CheckAndTriggerOldGC(size_t size)
{
    if (OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) || GetHeapObjectSize() > globalSpaceAllocLimit_) {
        CollectGarbage(TriggerGCType::OLD_GC);
    }
}
638
// If a concurrent mark is in progress, wait for it to finish (helping to drain
// the mark stack on the main thread when marking is active) and record its
// timing. Returns true when a concurrent mark was in fact ongoing.
bool Heap::CheckOngoingConcurrentMarking()
{
    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark()) {
        if (thread_->IsMarking()) {
            [[maybe_unused]] ClockScope clockScope;
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
            MEM_ALLOCATE_AND_GC_TRACE(GetEcmaVM(), WaitConcurrentMarkingFinished);
            // Help drain the mark stack on the main thread before waiting.
            GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
            WaitConcurrentMarkingFinished();
            ecmaVm_->GetEcmaGCStats()->StatisticConcurrentMarkWait(clockScope.GetPauseTime());
            LOG_GC(DEBUG) << "wait concurrent marking finish pause time " << clockScope.TotalSpentTime();
        } else {
            // Marking tasks were posted but marking hasn't started; just wait
            // for the running tasks to drain.
            WaitRunningTaskFinished();
        }
        memController_->RecordAfterConcurrentMark(IsFullMark(), concurrentMarker_);
        return true;
    }
    return false;
}
658
// Heuristically decide whether to start a concurrent mark now, and with which
// scope (young vs full), by comparing predicted mark durations against
// predicted time-to-limit for the old/global/native and new-space budgets.
void Heap::TryTriggerConcurrentMarking()
{
    // When concurrent marking is enabled, concurrent marking will be attempted to trigger.
    // When the size of old space or global space reaches the limit, isFullMarkNeeded will be set to true.
    // If the predicted duration of current full mark may not result in the new and old spaces reaching their limit,
    // full mark will be triggered.
    // In the same way, if the size of the new space reaches the capacity, and the predicted duration of current
    // young mark may not result in the new space reaching its limit, young mark can be triggered.
    // If it spends much time in full mark, the compress full GC will be requested when the spaces reach the limit.
    // If the global space is larger than half max heap size, we will turn to use full mark and trigger partial GC.
    if (!concurrentMarker_->IsEnabled() || !thread_->IsReadyToMark()) {
        return;
    }
    // An explicit full-mark request bypasses all heuristics.
    if (fullMarkRequested_) {
        markType_ = MarkType::MARK_FULL;
        OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
        TriggerConcurrentMarking();
        return;
    }
    bool isFullMarkNeeded = false;
    double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
           oldSpaceAllocToLimitDuration = 0;
    double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
    double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
    size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize();
    size_t globalHeapObjectSize = GetHeapObjectSize();
    size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
    size_t globalSpaceNativeSize = activeSemiSpace_->GetNativeBindingSize() + nonNewSpaceNativeBindingSize_
                                   + nativeAreaAllocator_->GetNativeMemoryUsage();
    if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
        // No speed samples yet: fall back to plain limit checks.
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_
            || globalSpaceNativeSize >= globalSpaceNativeLimit_) {
            markType_ = MarkType::MARK_FULL;
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
            TriggerConcurrentMarking();
            return;
        }
    } else {
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_
            || globalSpaceNativeSize >= globalSpaceNativeLimit_) {
            isFullMarkNeeded = true;
        }
        oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
        oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
        // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
        double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
        if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
            isFullMarkNeeded = true;
        }
    }

    double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
    double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();

    if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
        // No young-generation speed samples yet: trigger on committed size.
        auto &config = ecmaVm_->GetEcmaParamConfiguration();
        if (activeSemiSpace_->GetCommittedSize() >= config.GetSemiSpaceTriggerConcurrentMark()) {
            markType_ = MarkType::MARK_YOUNG;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark" << fullGCRequested_;
        }
        return;
    }
    newSpaceAllocToLimitDuration = (activeSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetCommittedSize())
                                   / newSpaceAllocSpeed;
    newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
    // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
    newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;

    if (isFullMarkNeeded) {
        if (oldSpaceMarkDuration < newSpaceAllocToLimitDuration
            && oldSpaceMarkDuration < oldSpaceAllocToLimitDuration) {
            // The full mark can finish before either space hits its limit.
            markType_ = MarkType::MARK_FULL;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark by speed";
        } else {
            // Too slow to finish in time — only trigger once a limit is hit.
            if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_
                || globalSpaceNativeSize >= globalSpaceNativeLimit_) {
                markType_ = MarkType::MARK_FULL;
                TriggerConcurrentMarking();
                OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark by limit";
            }
        }
    } else if (newSpaceRemainSize < DEFAULT_REGION_SIZE || activeSemiSpace_->NativeBindingSizeLargerThanLimit()) {
        markType_ = MarkType::MARK_YOUNG;
        TriggerConcurrentMarking();
        OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
    }
}
748
IncreaseNativeBindingSize(JSNativePointer * object)749 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
750 {
751 size_t size = object->GetBindingSize();
752 if (size == 0) {
753 return;
754 }
755 Region *region = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(object));
756 if (region->InYoungSpace()) {
757 activeSemiSpace_->IncreaseNativeBindingSize(size);
758 } else {
759 nonNewSpaceNativeBindingSize_ += size;
760 }
761 }
762
IncreaseNativeBindingSize(bool nonMovable,size_t size)763 void Heap::IncreaseNativeBindingSize(bool nonMovable, size_t size)
764 {
765 if (size == 0) {
766 return;
767 }
768 if (!nonMovable) {
769 activeSemiSpace_->IncreaseNativeBindingSize(size);
770 } else {
771 nonNewSpaceNativeBindingSize_ += size;
772 }
773 }
774
PrepareRecordRegionsForReclaim()775 void Heap::PrepareRecordRegionsForReclaim()
776 {
777 activeSemiSpace_->SetRecordRegion();
778 oldSpace_->SetRecordRegion();
779 snapshotSpace_->SetRecordRegion();
780 nonMovableSpace_->SetRecordRegion();
781 hugeObjectSpace_->SetRecordRegion();
782 machineCodeSpace_->SetRecordRegion();
783 }
784
TriggerConcurrentMarking()785 void Heap::TriggerConcurrentMarking()
786 {
787 if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
788 concurrentMarker_->Mark();
789 }
790 }
791
// Blocks the caller until every posted parallel GC task has completed.
// runningTaskCount_ is decremented by ReduceTaskCount(), which signals
// waitTaskFinishedCV_ when the count drops to zero; the loop also guards
// against spurious wakeups.
void Heap::WaitRunningTaskFinished()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    while (runningTaskCount_ > 0) {
        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
    }
}
799
// Blocks until the asynchronous region-clear task has set clearTaskFinished_.
// The while-loop (rather than a single Wait) guards against spurious wakeups.
void Heap::WaitClearTaskFinished()
{
    os::memory::LockHolder holder(waitClearTaskFinishedMutex_);
    while (!clearTaskFinished_) {
        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
    }
}
807
// Drains every outstanding GC-related background activity, in order:
// parallel GC tasks, concurrent sweeping, the async region-clear task, and
// finally any in-flight concurrent marking.
void Heap::WaitAllTasksFinished()
{
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
    // Only wait for the marker when marking is actually in progress;
    // WaitMarkingFinished() would otherwise be a needless rendezvous.
    if (concurrentMarker_->IsEnabled() && thread_->IsMarking()) {
        concurrentMarker_->WaitMarkingFinished();
    }
}
817
// Blocks until the concurrent marker finishes its current marking cycle.
void Heap::WaitConcurrentMarkingFinished()
{
    concurrentMarker_->WaitMarkingFinished();
}
822
// Posts a parallel GC task for the given phase to the shared task pool.
// The running-task counter is bumped *before* posting so that
// WaitRunningTaskFinished() cannot miss a task that is about to start.
void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
{
    IncreaseTaskCount();
    Taskpool::GetCurrentTaskpool()->PostTask(
        std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
}
829
// Registers one more in-flight parallel GC task under the task mutex.
// Paired with ReduceTaskCount(), which decrements and signals waiters.
void Heap::IncreaseTaskCount()
{
    os::memory::LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_++;
}
835
ChangeGCParams(bool inBackground)836 void Heap::ChangeGCParams(bool inBackground)
837 {
838 if (inBackground) {
839 LOG_GC(INFO) << "app is inBackground";
840 if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
841 SetMemGrowingType(MemGrowingType::CONSERVATIVE);
842 LOG_GC(INFO) << "Heap Growing Type CONSERVATIVE";
843 }
844 concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
845 sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::DISABLE);
846 maxMarkTaskCount_ = 1;
847 maxEvacuateTaskCount_ = 1;
848 } else {
849 LOG_GC(INFO) << "app is not inBackground";
850 if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
851 SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
852 LOG_GC(INFO) << "Heap Growing Type HIGH_THROUGHPUT";
853 }
854 concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
855 sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
856 maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
857 Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
858 maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
859 }
860 }
861
TriggerIdleCollection(int idleMicroSec)862 void Heap::TriggerIdleCollection([[maybe_unused]] int idleMicroSec)
863 {
864 if (!enableIdleGC_) {
865 return;
866 }
867 int64_t curTime = 0;
868 if (waitForStartUp_) {
869 curTime = static_cast<int64_t>(JSDate::Now().GetDouble());
870 if (idleTime_ == 0) {
871 idleTime_ = curTime;
872 }
873 if (curTime - idleTime_ > WAIT_FOR_APP_START_UP) {
874 waitForStartUp_ = false;
875 }
876 return;
877 }
878
879 if (idleMicroSec >= IDLE_TIME_REMARK && thread_->IsMarkFinished()) {
880 concurrentMarker_->HandleMarkingFinished();
881 return;
882 }
883
884 if (idleMicroSec >= IDLE_TIME_LIMIT) {
885 curTime = static_cast<int64_t>(JSDate::Now().GetDouble());
886 size_t oldCommitSize = oldSpace_->GetCommittedSize();
887 // rest
888 if (curTime - idleTime_ > MIN_OLD_GC_LIMIT) {
889 size_t heapObjectSize = GetHeapObjectSize();
890 idleData_->SetNextValue(heapObjectSize);
891 idleTime_ = curTime;
892 if (idleData_->CheckIsRest() && heapObjectSize > triggerRestIdleSize_) {
893 CollectGarbage(TriggerGCType::FULL_GC);
894 couldIdleGC_ = false;
895 triggerRestIdleSize_ = GetHeapObjectSize() + REST_HEAP_GROWTH_LIMIT;
896 return;
897 }
898 couldIdleGC_ = true;
899 idleHeapObjectSize_ = GetHeapObjectSize();
900 }
901
902 // sparse space over limit
903 if (couldIdleGC_ && oldCommitSize + nonMovableSpace_->GetCommittedSize() > idleOldSpace_) {
904 CollectGarbage(TriggerGCType::OLD_GC);
905 idleTime_ = curTime;
906 couldIdleGC_ = false;
907 idleOldSpace_ = oldSpace_->GetInitialCapacity();
908 return;
909 }
910
911 if (activeSemiSpace_->GetHeapObjectSize() > IDLE_GC_YOUNG_SPACE) {
912 CollectGarbage(TriggerGCType::YOUNG_GC);
913 return;
914 }
915 }
916 }
917
NotifyMemoryPressure(bool inHighMemoryPressure)918 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
919 {
920 if (inHighMemoryPressure) {
921 LOG_GC(INFO) << "app is inHighMemoryPressure";
922 SetMemGrowingType(MemGrowingType::PRESSURE);
923 } else {
924 LOG_GC(INFO) << "app is not inHighMemoryPressure";
925 SetMemGrowingType(MemGrowingType::CONSERVATIVE);
926 }
927 }
928
CheckCanDistributeTask()929 bool Heap::CheckCanDistributeTask()
930 {
931 os::memory::LockHolder holder(waitTaskFinishedMutex_);
932 return runningTaskCount_ < maxMarkTaskCount_;
933 }
934
ReduceTaskCount()935 void Heap::ReduceTaskCount()
936 {
937 os::memory::LockHolder holder(waitTaskFinishedMutex_);
938 runningTaskCount_--;
939 if (runningTaskCount_ == 0) {
940 waitTaskFinishedCV_.SignalAll();
941 }
942 }
943
// Entry point executed on a task-pool worker thread. Dispatches to the
// marker routine matching the GC phase this task was created for, then
// reports completion so waiters in WaitRunningTaskFinished() can proceed.
bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
{
    switch (taskPhase_) {
        case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
            // Young GC: mark from thread roots, then drain the mark stack.
            heap_->GetSemiGCMarker()->MarkRoots(threadIndex);
            heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
            heap_->GetSemiGCMarker()->ProcessSnapshotRSet(threadIndex);
            break;
        case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
            heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
            break;
        default:
            break;
    }
    // Must run on every path (including unknown phases): it balances the
    // IncreaseTaskCount() performed in PostParallelGCTask().
    heap_->ReduceTaskCount();
    return true;
}
975
// Reclaims regions left over from the given GC type asynchronously on a
// task-pool worker thread; WaitClearTaskFinished() observes its completion.
bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    heap_->ReclaimRegions(gcType_);
    return true;
}
981
GetArrayBufferSize() const982 size_t Heap::GetArrayBufferSize() const
983 {
984 size_t result = 0;
985 sweeper_->EnsureAllTaskFinished();
986 this->IterateOverObjects([&result](TaggedObject *obj) {
987 JSHClass* jsClass = obj->GetClass();
988 result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
989 });
990 return result;
991 }
992
IsAlive(TaggedObject * object) const993 bool Heap::IsAlive(TaggedObject *object) const
994 {
995 if (!ContainObject(object)) {
996 LOG_GC(ERROR) << "The region is already free";
997 return false;
998 }
999
1000 bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
1001 if (isFree) {
1002 Region *region = Region::ObjectAddressToRange(object);
1003 LOG_GC(ERROR) << "The object " << object << " in "
1004 << region->GetSpaceTypeName()
1005 << " already free";
1006 }
1007 return !isFree;
1008 }
1009
ContainObject(TaggedObject * object) const1010 bool Heap::ContainObject(TaggedObject *object) const
1011 {
1012 /*
1013 * fixme: There's no absolutely safe appraoch to doing this, given that the region object is currently
1014 * allocated and maintained in the JS object heap. We cannot safely tell whether a region object
1015 * calculated from an object address is still valid or alive in a cheap way.
1016 * This will introduce inaccurate result to verify if an object is contained in the heap, and it may
1017 * introduce additional incorrect memory access issues.
1018 * Unless we can tolerate the performance impact of iterating the region list of each space and change
1019 * the implementation to that approach, don't rely on current implementation to get accurate result.
1020 */
1021 Region *region = Region::ObjectAddressToRange(object);
1022 return region->InHeapSpace();
1023 }
1024
InvokeWeakNodeSecondPassCallback()1025 void Heap::InvokeWeakNodeSecondPassCallback()
1026 {
1027 // the second callback may lead to another GC, if this, return directly;
1028 if (runningSecondPassCallbacks_) {
1029 return;
1030 }
1031 runningSecondPassCallbacks_ = true;
1032 auto weakNodesSecondCallbacks = thread_->GetWeakNodeSecondPassCallbacks();
1033 while (!weakNodesSecondCallbacks->empty()) {
1034 auto callbackPair = weakNodesSecondCallbacks->back();
1035 weakNodesSecondCallbacks->pop_back();
1036 ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
1037 auto callback = callbackPair.first;
1038 (*callback)(callbackPair.second);
1039 }
1040 runningSecondPassCallbacks_ = false;
1041 }
1042
// Logs a one-shot heap summary at GC time: the GC type, whether concurrent /
// full marking is active, and per-space "objectSize/committed/capacity"
// figures for every space plus the global allocation limit.
void Heap::PrintHeapInfo(TriggerGCType gcType) const
{
    OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
    OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
                                << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsFullMark() << ")";
    OPTIONAL_LOG(ecmaVm_, INFO) << "ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize()
                   << "/" << activeSemiSpace_->GetInitialCapacity() << "), NonMovable("
                   << nonMovableSpace_->GetHeapObjectSize() << "/" << nonMovableSpace_->GetCommittedSize()
                   << "/" << nonMovableSpace_->GetInitialCapacity() << "), Old("
                   << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize()
                   << "/" << oldSpace_->GetInitialCapacity() << "), HugeObject("
                   << hugeObjectSpace_->GetHeapObjectSize() << "/" << hugeObjectSpace_->GetCommittedSize()
                   << "/" << hugeObjectSpace_->GetInitialCapacity() << "), ReadOnlySpace("
                   << readOnlySpace_->GetCommittedSize() << "/" << readOnlySpace_->GetInitialCapacity()
                   << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize() << "/"
                   << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
                   << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
}
1061
StatisticHeapObject(TriggerGCType gcType) const1062 void Heap::StatisticHeapObject(TriggerGCType gcType) const
1063 {
1064 PrintHeapInfo(gcType);
1065 #if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
1066 static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
1067 int typeCount[JS_TYPE_LAST] = { 0 };
1068 static const int MIN_COUNT_THRESHOLD = 1000;
1069
1070 nonMovableSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1071 typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1072 });
1073 for (int i = 0; i < JS_TYPE_LAST; i++) {
1074 if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1075 LOG_ECMA(INFO) << "NonMovable space type " << JSHClass::DumpJSType(JSType(i))
1076 << " count:" << typeCount[i];
1077 }
1078 typeCount[i] = 0;
1079 }
1080
1081 oldSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1082 typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1083 });
1084 for (int i = 0; i < JS_TYPE_LAST; i++) {
1085 if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1086 LOG_ECMA(INFO) << "Old space type " << JSHClass::DumpJSType(JSType(i))
1087 << " count:" << typeCount[i];
1088 }
1089 typeCount[i] = 0;
1090 }
1091
1092 activeSemiSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1093 typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1094 });
1095 for (int i = 0; i < JS_TYPE_LAST; i++) {
1096 if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1097 LOG_ECMA(INFO) << "Active semi space type " << JSHClass::DumpJSType(JSType(i))
1098 << " count:" << typeCount[i];
1099 }
1100 typeCount[i] = 0;
1101 }
1102 #endif
1103 }
1104 } // namespace panda::ecmascript
1105