1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ecmascript/mem/heap-inl.h"
17
18 #include <chrono>
19 #include <thread>
20
21 #include "ecmascript/base/block_hook_scope.h"
22 #include "ecmascript/ecma_vm.h"
23 #include "ecmascript/free_object.h"
24 #include "ecmascript/js_finalization_registry.h"
25 #include "ecmascript/js_native_pointer.h"
26 #include "ecmascript/linked_hash_table.h"
27 #include "ecmascript/mem/assert_scope.h"
28 #include "ecmascript/mem/concurrent_marker.h"
29 #include "ecmascript/mem/concurrent_sweeper.h"
30 #include "ecmascript/mem/full_gc.h"
31 #include "ecmascript/mem/incremental_marker.h"
32 #include "ecmascript/mem/mark_stack.h"
33 #include "ecmascript/mem/mem_controller.h"
34 #include "ecmascript/mem/partial_gc.h"
35 #include "ecmascript/mem/native_area_allocator.h"
36 #include "ecmascript/mem/parallel_evacuator.h"
37 #include "ecmascript/mem/parallel_marker-inl.h"
38 #include "ecmascript/mem/stw_young_gc.h"
39 #include "ecmascript/mem/verification.h"
40 #include "ecmascript/mem/work_manager.h"
41 #include "ecmascript/mem/gc_stats.h"
42 #include "ecmascript/ecma_string_table.h"
43 #include "ecmascript/runtime_call_id.h"
44 #if !WIN_OR_MAC_OR_IOS_PLATFORM
45 #include "ecmascript/dfx/hprof/heap_profiler_interface.h"
46 #include "ecmascript/dfx/hprof/heap_profiler.h"
47 #endif
48 #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
49 #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
50 #endif
51 #include "ecmascript/dfx/tracing/tracing.h"
52 #if defined(ENABLE_DUMP_IN_FAULTLOG)
53 #include "syspara/parameter.h"
54 #endif
55
56 namespace panda::ecmascript {
// Heap is created per EcmaVM. The constructor only caches the VM, its JS
// thread and the two allocators used by every space; all real setup (space
// creation, GC workers) happens later in Initialize().
Heap::Heap(EcmaVM *ecmaVm) : ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()),
                             nativeAreaAllocator_(ecmaVm->GetNativeAreaAllocator()),
                             heapRegionAllocator_(ecmaVm->GetHeapRegionAllocator()) {}
60
// One-time heap construction: creates every space sized from the VM's
// EcmaParamConfiguration, binds the JS thread's inline allocation pointers to
// the active semispace, and wires up the GC components (work manager,
// collectors, sweeper, markers, evacuator). Must run before any allocation.
// Aborts (FATAL) if the configured max heap size cannot fit the fixed spaces
// plus a minimum old space.
void Heap::Initialize()
{
    memController_ = new MemController(this);
    auto &config = ecmaVm_->GetEcmaParamConfiguration();
    size_t maxHeapSize = config.GetMaxHeapSize();
    size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
    size_t maxSemiSpaceCapacity = config.GetMaxSemiSpaceSize();
    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
    activeSemiSpace_->Restart();
    activeSemiSpace_->SetWaterLine();
    // Let the JS thread bump-allocate straight out of the active semispace.
    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);

    // Whether the heap should be verified during GC (pre/post-GC checks).
    shouldVerifyHeap_ = ecmaVm_->GetJSOptions().EnableHeapVerify();
    // not set up from space

    size_t readOnlySpaceCapacity = config.GetDefaultReadOnlySpaceSize();
    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
    size_t nonmovableSpaceCapacity = config.GetDefaultNonMovableSpaceSize();
    // Command-line override takes precedence over the default capacity.
    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
    }
    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
    nonMovableSpace_->Initialize();
    size_t snapshotSpaceCapacity = config.GetDefaultSnapshotSpaceSize();
    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
    size_t machineCodeSpaceCapacity = config.GetDefaultMachineCodeSpaceSize();
    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);

    // Old space gets whatever is left after both semispaces (hence * 2) and
    // the fixed-size spaces are carved out of the max heap size.
    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
        machineCodeSpaceCapacity + readOnlySpaceCapacity;
    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) {
        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
    }
    size_t oldSpaceCapacity = maxHeapSize - capacities;
    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    // compressSpace_ is the evacuation target for compress GC; same sizing as old space.
    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
    oldSpace_->Initialize();

    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    hugeMachineCodeSpace_ = new HugeMachineCodeSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
    // Task counts derive from the shared taskpool; one thread is reserved
    // (hence maxEvacuateTaskCount_ - 1) when capping mark tasks.
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);

    LOG_GC(DEBUG) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
                  << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
                  << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
                  << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
                  << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
                  << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
                  << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
                  << ", gcThreadNum = " << maxMarkTaskCount_;
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
    markType_ = MarkType::MARK_YOUNG;
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    // +1: the main thread participates alongside the taskpool threads.
    workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
    stwYoungGC_ = new STWYoungGC(this, parallelGC_);
    fullGC_ = new FullGC(this);

    partialGC_ = new PartialGC(this);
    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
        EnableConcurrentMarkType::CONFIG_DISABLE);
    nonMovableMarker_ = new NonMovableMarker(this);
    semiGCMarker_ = new SemiGCMarker(this);
    compressGCMarker_ = new CompressGCMarker(this);
    evacuator_ = new ParallelEvacuator(this);
    incrementalMarker_ = new IncrementalMarker(this);
}
141
// Tears down everything Initialize() created, in a fixed order: the work
// manager first, then the spaces, then the GC components. Each space is
// Reset()/Destroy()ed before delete as its type requires. All pointers are
// nulled so a double Destroy() is harmless. Note the allocators are merely
// nulled, not deleted — they are owned by the EcmaVM (see the constructor).
void Heap::Destroy()
{
    if (workManager_ != nullptr) {
        delete workManager_;
        workManager_ = nullptr;
    }
    if (activeSemiSpace_ != nullptr) {
        activeSemiSpace_->Destroy();
        delete activeSemiSpace_;
        activeSemiSpace_ = nullptr;
    }
    if (inactiveSemiSpace_ != nullptr) {
        inactiveSemiSpace_->Destroy();
        delete inactiveSemiSpace_;
        inactiveSemiSpace_ = nullptr;
    }
    if (oldSpace_ != nullptr) {
        oldSpace_->Reset();
        delete oldSpace_;
        oldSpace_ = nullptr;
    }
    if (compressSpace_ != nullptr) {
        compressSpace_->Destroy();
        delete compressSpace_;
        compressSpace_ = nullptr;
    }
    if (nonMovableSpace_ != nullptr) {
        nonMovableSpace_->Reset();
        delete nonMovableSpace_;
        nonMovableSpace_ = nullptr;
    }
    if (snapshotSpace_ != nullptr) {
        snapshotSpace_->Destroy();
        delete snapshotSpace_;
        snapshotSpace_ = nullptr;
    }
    if (machineCodeSpace_ != nullptr) {
        machineCodeSpace_->Reset();
        delete machineCodeSpace_;
        machineCodeSpace_ = nullptr;
    }
    if (hugeObjectSpace_ != nullptr) {
        hugeObjectSpace_->Destroy();
        delete hugeObjectSpace_;
        hugeObjectSpace_ = nullptr;
    }
    if (hugeMachineCodeSpace_ != nullptr) {
        hugeMachineCodeSpace_->Destroy();
        delete hugeMachineCodeSpace_;
        hugeMachineCodeSpace_ = nullptr;
    }
    // A SHARE-mode heap does not own its read-only space, so skip it there;
    // the pages must be made writable again before they can be released.
    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
        readOnlySpace_->ClearReadOnly();
        readOnlySpace_->Destroy();
        delete readOnlySpace_;
        readOnlySpace_ = nullptr;
    }
    if (appSpawnSpace_ != nullptr) {
        appSpawnSpace_->Reset();
        delete appSpawnSpace_;
        appSpawnSpace_ = nullptr;
    }
    if (stwYoungGC_ != nullptr) {
        delete stwYoungGC_;
        stwYoungGC_ = nullptr;
    }
    if (partialGC_ != nullptr) {
        delete partialGC_;
        partialGC_ = nullptr;
    }
    if (fullGC_ != nullptr) {
        delete fullGC_;
        fullGC_ = nullptr;
    }

    // Not owned by Heap — owned by the EcmaVM; just drop the references.
    nativeAreaAllocator_ = nullptr;
    heapRegionAllocator_ = nullptr;

    if (memController_ != nullptr) {
        delete memController_;
        memController_ = nullptr;
    }
    if (sweeper_ != nullptr) {
        delete sweeper_;
        sweeper_ = nullptr;
    }
    if (concurrentMarker_ != nullptr) {
        delete concurrentMarker_;
        concurrentMarker_ = nullptr;
    }
    if (incrementalMarker_ != nullptr) {
        delete incrementalMarker_;
        incrementalMarker_ = nullptr;
    }
    if (nonMovableMarker_ != nullptr) {
        delete nonMovableMarker_;
        nonMovableMarker_ = nullptr;
    }
    if (semiGCMarker_ != nullptr) {
        delete semiGCMarker_;
        semiGCMarker_ = nullptr;
    }
    if (compressGCMarker_ != nullptr) {
        delete compressGCMarker_;
        compressGCMarker_ = nullptr;
    }
    if (evacuator_ != nullptr) {
        delete evacuator_;
        evacuator_ = nullptr;
    }
}
253
// Quiesces background GC work before a collection starts: waits for running
// GC tasks, ensures concurrent sweeping is complete, and waits for the async
// region-clearing task (posted by a previous Resume()) to finish.
void Heap::Prepare()
{
    MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, HeapPrepare);
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
}
261
// Post-GC resume: optionally grows/shrinks the semispaces based on how much
// was allocated since the last GC, re-establishes the survivor water line,
// and reclaims evacuated regions (asynchronously when parallel GC is on).
void Heap::Resume(TriggerGCType gcType)
{
    if (mode_ != HeapMode::SPAWN &&
        activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC())) {
        // if activeSpace capacity changes, oldSpace maximumCapacity should change, too.
        size_t multiple = 2;
        size_t oldSpaceMaxLimit = 0;
        if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
            size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
        } else {
            size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
        }
        // NOTE(review): oldSpaceMaxLimit is computed above but never applied
        // (e.g. via oldSpace_->SetMaximumCapacity) — the comment says the old
        // space maximum should change, yet only the inactive semispace is
        // updated. Confirm whether this is intentional dead code or a missing
        // call before changing it.
        inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
    }

    activeSemiSpace_->SetWaterLine();
    PrepareRecordRegionsForReclaim();
    hugeObjectSpace_->ReclaimHugeRegion();
    hugeMachineCodeSpace_->ReclaimHugeRegion();
    if (parallelGC_) {
        // Reclaim regions off-thread; Prepare() waits on clearTaskFinished_.
        clearTaskFinished_ = false;
        Taskpool::GetCurrentTaskpool()->PostTask(
            std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
    } else {
        ReclaimRegions(gcType);
    }
}
291
ResumeForAppSpawn()292 void Heap::ResumeForAppSpawn()
293 {
294 sweeper_->WaitAllTaskFinished();
295 hugeObjectSpace_->ReclaimHugeRegion();
296 hugeMachineCodeSpace_->ReclaimHugeRegion();
297 inactiveSemiSpace_->ReclaimRegions();
298 oldSpace_->Reset();
299 auto cb = [] (Region *region) {
300 region->ClearMarkGCBitset();
301 };
302 nonMovableSpace_->EnumerateRegions(cb);
303 machineCodeSpace_->EnumerateRegions(cb);
304 hugeObjectSpace_->EnumerateRegions(cb);
305 hugeMachineCodeSpace_->EnumerateRegions(cb);
306 }
307
// Runs an APPSPAWN_FULL_GC (a compacting full collection — see CollectGarbage)
// so the heap is as dense as possible; per the name, intended to be invoked
// before the app-spawn process forks children.
void Heap::CompactHeapBeforeFork()
{
    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
}
312
// Switches the heap to fully serial GC: waits for all outstanding tasks
// first, zeroes the task budgets, turns off parallel/concurrent modes on each
// component, and finally destroys this thread's taskpool association.
void Heap::DisableParallelGC()
{
    WaitAllTasksFinished();
    parallelGC_ = false;
    maxEvacuateTaskCount_ = 0;
    maxMarkTaskCount_ = 0;
    stwYoungGC_->ConfigParallelGC(false);
    sweeper_->ConfigConcurrentSweep(false);
    concurrentMarker_->ConfigConcurrentMark(false);
    Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
}
324
// Re-enables parallel/concurrent GC from the JS options: re-initializes the
// taskpool, recomputes task budgets, and rebuilds the WorkManager if the
// taskpool's thread count changed while parallel GC was disabled.
void Heap::EnableParallelGC()
{
    Taskpool::GetCurrentTaskpool()->Initialize();
    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
    // WorkManager was sized for taskpool threads + main thread; if that no
    // longer matches, recreate it and tell dependents about the new instance.
    if (auto totalThreadNum = workManager_->GetTotalThreadNum();
        totalThreadNum != maxEvacuateTaskCount_ + 1) {
        LOG_ECMA_MEM(WARN) << "TheadNum mismatch, totalThreadNum(workerManager): " << totalThreadNum << ", "
                           << "totalThreadNum(taskpool): " << (maxEvacuateTaskCount_ + 1);
        delete workManager_;
        workManager_ = new WorkManager(this, maxEvacuateTaskCount_ + 1);
        UpdateWorkManager(workManager_);
    }
    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
        maxEvacuateTaskCount_ - 1);
    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
    concurrentMarkerEnabled = false;
#endif
    stwYoungGC_->ConfigParallelGC(parallelGC_);
    sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
    concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
}
348
SelectGCType() const349 TriggerGCType Heap::SelectGCType() const
350 {
351 // If concurrent mark is enabled, the TryTriggerConcurrentMarking decide which GC to choose.
352 if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark()) {
353 return YOUNG_GC;
354 }
355 if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
356 GetHeapObjectSize() <= globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize() &&
357 !GlobalNativeSizeLargerThanLimit()) {
358 return YOUNG_GC;
359 }
360 return OLD_GC;
361 }
362
// Central GC driver. Selects/overrides the GC type from pending requests,
// runs the matching collector, then performs post-GC bookkeeping: survival
// rate adjustment, limit recomputation, OOM checks, weak-node finalizers and
// optional heap verification. The inner scope holds a RecursionScope so the
// post-GC tail (from CheckNonMovableSpaceOOM on) runs outside it — weak-node
// callbacks there may execute JS and even trigger another GC.
void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
{
    {
        RecursionScope recurScope(this);
        // Skip GC entirely during cross-thread execution, or in sensitive
        // status while the heap still fits.
        if (thread_->IsCrossThreadExecutionEnable() || (InSensitiveStatus() && !ObjectExceedMaxHeapSize())) {
            return;
        }
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
        [[maybe_unused]] GcStateScope scope(thread_);
#endif
        CHECK_NO_GC

        if (UNLIKELY(ShouldVerifyHeap())) {
            // pre gc heap verify
            LOG_ECMA(DEBUG) << "pre gc heap verify";
            Verification(this, VerifyKind::VERIFY_PRE_GC).VerifyAll();
        }

        // Pending requests escalate the GC type: full GC wins over old GC.
#if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
        gcType = TriggerGCType::FULL_GC;
#endif
        if (fullGCRequested_ && thread_->IsReadyToMark() && gcType != TriggerGCType::FULL_GC) {
            gcType = TriggerGCType::FULL_GC;
        }
        if (oldGCRequested_ && gcType != TriggerGCType::FULL_GC) {
            gcType = TriggerGCType::OLD_GC;
        }
        oldGCRequested_ = false;
        oldSpace_->AdjustOvershootSize();

        // Snapshot young-generation size now; AdjustBySurvivalRate compares
        // against it after collection.
        size_t originalNewSpaceSize = activeSemiSpace_->GetHeapObjectSize();
        memController_->StartCalculationBeforeGC();
        StatisticHeapObject(gcType);
        if (!GetJSThread()->IsReadyToMark() && markType_ == MarkType::MARK_FULL) {
            ecmaVm_->GetEcmaGCStats()->SetGCReason(reason);
        } else {
            ecmaVm_->GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, reason);
        }
        gcType_ = gcType;
        // Pause PGO dumping for the duration of the collection.
        GetEcmaVM()->GetPGOProfiler()->WaitPGODumpPause();
        switch (gcType) {
            case TriggerGCType::YOUNG_GC:
                // Use partial GC for young generation.
                if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
                    SetMarkType(MarkType::MARK_YOUNG);
                }
                if (markType_ == MarkType::MARK_FULL) {
                    // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
                    gcType_ = TriggerGCType::OLD_GC;
                }
                partialGC_->RunPhases();
                break;
            case TriggerGCType::OLD_GC: {
                bool fullConcurrentMarkRequested = false;
                // Check whether it's needed to trigger full concurrent mark instead of trigger old gc
                if (concurrentMarker_->IsEnabled() && (thread_->IsReadyToMark() || markType_ == MarkType::MARK_YOUNG) &&
                    reason == GCReason::ALLOCATION_LIMIT) {
                    fullConcurrentMarkRequested = true;
                }
                if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
                    // Wait for existing concurrent marking tasks to be finished (if any),
                    // and reset concurrent marker's status for full mark.
                    bool concurrentMark = CheckOngoingConcurrentMarking();
                    if (concurrentMark) {
                        concurrentMarker_->Reset();
                    }
                }
                SetMarkType(MarkType::MARK_FULL);
                if (fullConcurrentMarkRequested && idleTask_ == IdleTaskType::NO_TASK) {
                    LOG_ECMA(INFO) << "Trigger old gc here may cost long time, trigger full concurrent mark instead";
                    oldSpace_->SetOvershootSize(GetEcmaVM()->GetEcmaParamConfiguration().GetOldSpaceOvershootSize());
                    TriggerConcurrentMarking();
                    // Remember that an old GC is still owed once marking ends.
                    oldGCRequested_ = true;
                    return;
                }
                partialGC_->RunPhases();
                break;
            }
            case TriggerGCType::FULL_GC:
                fullGC_->SetForAppSpawn(false);
                fullGC_->RunPhases();
                if (fullGCRequested_) {
                    fullGCRequested_ = false;
                }
                break;
            case TriggerGCType::APPSPAWN_FULL_GC:
                fullGC_->SetForAppSpawn(true);
                fullGC_->RunPhasesForAppSpawn();
                break;
            default:
                LOG_ECMA(FATAL) << "this branch is unreachable";
                UNREACHABLE();
                break;
        }
        GetEcmaVM()->GetPGOProfiler()->WaitPGODumpResume();

        // OOMError object is not allowed to be allocated during gc process, so throw OOMError after gc
        if (shouldThrowOOMError_) {
            sweeper_->EnsureAllTaskFinished();
            DumpHeapSnapshotBeforeOOM(false);
            StatisticHeapDetail();
            ThrowOutOfMemoryError(oldSpace_->GetMergeSize(), " OldSpace::Merge");
            oldSpace_->ResetMergeSize();
            shouldThrowOOMError_ = false;
        }

        ClearIdleTask();
        // Adjust the old space capacity and global limit for the first partial GC with full mark.
        // Trigger full mark next time if the current survival rate is much less than half the average survival rates.
        AdjustBySurvivalRate(originalNewSpaceSize);
        memController_->StopCalculationAfterGC(gcType);
        if (gcType == TriggerGCType::FULL_GC || IsConcurrentFullMark()) {
            // Only when the gc type is not semiGC and after the old space sweeping has been finished,
            // the limits of old space and global space can be recomputed.
            RecomputeLimits();
            OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark" << IsConcurrentFullMark()
                                        << " global object size " << GetHeapObjectSize()
                                        << " global committed size " << GetCommittedSize()
                                        << " global limit " << globalSpaceAllocLimit_;
            markType_ = MarkType::MARK_YOUNG;
        }
        if (concurrentMarker_->IsRequestDisabled()) {
            concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
        }
        // GC log
        ecmaVm_->GetEcmaGCStats()->RecordStatisticAfterGC();
        ecmaVm_->GetEcmaGCStats()->PrintGCStatistic();
    }

    if (gcType_ == TriggerGCType::OLD_GC) {
        // During full concurrent mark, non movable space can have 2M overshoot size temporarily, which means non
        // movable space max heap size can reach to 18M temporarily, but after partial old gc, the size must retract to
        // below 16M, Otherwise, old GC will be triggered frequently. Non-concurrent mark period, non movable space max
        // heap size is 16M, if exceeded, an OOM exception will be thrown, this check is to do this.
        CheckNonMovableSpaceOOM();
    }
    // Weak node nativeFinalizeCallback may execute JS and change the weakNodeList status,
    // even lead to another GC, so this have to invoke after this GC process.
    InvokeWeakNodeNativeFinalizeCallback();

    if (UNLIKELY(ShouldVerifyHeap())) {
        // verify post gc heap verify
        LOG_ECMA(DEBUG) << "post gc heap verify";
        Verification(this, VerifyKind::VERIFY_POST_GC).VerifyAll();
    }
    JSFinalizationRegistry::CheckAndCall(thread_);
#if defined(ECMASCRIPT_SUPPORT_TRACING)
    auto tracing = GetEcmaVM()->GetTracing();
    if (tracing != nullptr) {
        tracing->TraceEventRecordMemory();
    }
#endif
    ASSERT(thread_->IsPropertyCacheCleared());
}
517
ThrowOutOfMemoryError(size_t size,std::string functionName,bool NonMovableObjNearOOM)518 void Heap::ThrowOutOfMemoryError(size_t size, std::string functionName, bool NonMovableObjNearOOM)
519 {
520 ecmaVm_->GetEcmaGCStats()->PrintGCMemoryStatistic();
521 std::ostringstream oss;
522 if (NonMovableObjNearOOM) {
523 oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
524 << " function name: " << functionName.c_str();
525 } else {
526 oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
527 }
528 LOG_ECMA_MEM(ERROR) << oss.str().c_str();
529 THROW_OOM_ERROR(thread_, oss.str().c_str());
530 }
531
// Same message as ThrowOutOfMemoryError, but instead of allocating a fresh
// error via THROW_OOM_ERROR it installs the preallocated OOMError object from
// the GlobalEnv as the pending exception and then routes the VM through its
// uncatchable-error handler — for use when allocating a new error object is
// itself unsafe.
void Heap::ThrowOutOfMemoryErrorForDefault(size_t size, std::string functionName, bool NonMovableObjNearOOM)
{
    ecmaVm_->GetEcmaGCStats()->PrintGCMemoryStatistic();
    std::ostringstream oss;
    if (NonMovableObjNearOOM) {
        oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
            << " function name: " << functionName.c_str();
    } else {
        oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
    }
    LOG_ECMA_MEM(ERROR) << oss.str().c_str();
    EcmaVM *ecmaVm = (thread_)->GetEcmaVM();
    JSHandle<GlobalEnv> env = ecmaVm->GetGlobalEnv();
    JSHandle<JSObject> error = JSHandle<JSObject>::Cast(env->GetOOMErrorObject());

    (thread_)->SetException(error.GetTaggedValue());
    ecmaVm->HandleUncatchableError();
}
550
// Unrecoverable OOM: print GC memory statistics, then log at FATAL severity
// (which does not return — presumably aborts the process; confirm with the
// logging macro's definition).
void Heap::FatalOutOfMemoryError(size_t size, std::string functionName)
{
    ecmaVm_->GetEcmaGCStats()->PrintGCMemoryStatistic();
    LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
                        << " function name: " << functionName.c_str();
}
557
CheckNonMovableSpaceOOM()558 void Heap::CheckNonMovableSpaceOOM()
559 {
560 if (nonMovableSpace_->GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE) {
561 sweeper_->EnsureAllTaskFinished();
562 DumpHeapSnapshotBeforeOOM(false);
563 StatisticHeapDetail();
564 ThrowOutOfMemoryError(nonMovableSpace_->GetHeapObjectSize(), "Heap::CheckNonMovableSpaceOOM", true);
565 }
566 }
567
AdjustBySurvivalRate(size_t originalNewSpaceSize)568 void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
569 {
570 if (originalNewSpaceSize <= 0) {
571 return;
572 }
573 semiSpaceCopiedSize_ = activeSemiSpace_->GetHeapObjectSize();
574 double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
575 promotedSize_ = GetEvacuator()->GetPromotedSize();
576 double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
577 double survivalRate = std::min(copiedRate + promotedRate, 1.0);
578 OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
579 << " survivalRate: " << survivalRate;
580 if (!oldSpaceLimitAdjusted_) {
581 memController_->AddSurvivalRate(survivalRate);
582 AdjustOldSpaceLimit();
583 } else {
584 double averageSurvivalRate = memController_->GetAverageSurvivalRate();
585 // 2 means half
586 if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
587 SetFullMarkRequestedState(true);
588 OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
589 << " is less than half the average survival rates: " << averageSurvivalRate
590 << ". Trigger full mark next time.";
591 // Survival rate of full mark is precise. Reset recorded survival rates.
592 memController_->ResetRecordedSurvivalRates();
593 }
594 memController_->AddSurvivalRate(survivalRate);
595 }
596 }
597
// Walks every space with a VerifyObjectVisitor and returns the total number
// of verification failures. A fresh visitor is created per space, all sharing
// the same failCount accumulator.
size_t Heap::VerifyHeapObjects(VerifyKind verifyKind) const
{
    size_t failCount = 0;
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        activeSemiSpace_->IterateOverObjects(verifier);
    }

    {
        // For evacuation verification, additionally check the marked objects
        // left behind in the inactive (from) semispace. The bound callable
        // passes this heap as the first argument of the static visitor
        // VerifyObjectVisitor::VerifyInactiveSemiSpaceMarkedObject.
        if (verifyKind == VerifyKind::VERIFY_EVACUATE_YOUNG ||
            verifyKind == VerifyKind::VERIFY_EVACUATE_OLD ||
            verifyKind == VerifyKind::VERIFY_EVACUATE_FULL) {
            inactiveSemiSpace_->EnumerateRegions([this](Region *region) {
                region->IterateAllMarkedBits(std::bind(&VerifyObjectVisitor::VerifyInactiveSemiSpaceMarkedObject,
                    this, std::placeholders::_1));
            });
        }
    }

    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        oldSpace_->IterateOverObjects(verifier);
    }

    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        appSpawnSpace_->IterateOverMarkedObjects(verifier);
    }

    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        nonMovableSpace_->IterateOverObjects(verifier);
    }

    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        hugeObjectSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        hugeMachineCodeSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        machineCodeSpace_->IterateOverObjects(verifier);
    }
    {
        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
        snapshotSpace_->IterateOverObjects(verifier);
    }
    return failCount;
}
650
// Verifies the old-to-new remembered sets of every space that can hold such
// references; returns the number of failures found by the shared visitor.
size_t Heap::VerifyOldToNewRSet(VerifyKind verifyKind) const
{
    size_t failCount = 0;
    VerifyObjectVisitor verifier(this, &failCount, verifyKind);
    oldSpace_->IterateOldToNewOverObjects(verifier);
    appSpawnSpace_->IterateOldToNewOverObjects(verifier);
    nonMovableSpace_->IterateOldToNewOverObjects(verifier);
    machineCodeSpace_->IterateOldToNewOverObjects(verifier);
    return failCount;
}
661
// Early-phase limit tuning (used until oldSpaceLimitAdjusted_ flips): shrinks
// the old-space and global allocation limits toward live size scaled by the
// average survival rate. Once the computed old-space limit would grow instead
// of shrink, the flag is set and RecomputeLimits takes over from then on.
void Heap::AdjustOldSpaceLimit()
{
    if (oldSpaceLimitAdjusted_) {
        return;
    }
    size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
    size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
    // Candidate limit: live size plus a minimum growth step, but no lower
    // than the survival-rate-scaled current limit.
    size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
    if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
        GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
    } else {
        // The limit stopped shrinking; freeze this adjustment path.
        oldSpaceLimitAdjusted_ = true;
    }

    // Same shrink-only adjustment for the global allocation limit.
    size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
        static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
    if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
        globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
    }
    // temporarily regard the heap limit is the same as the native limit.
    globalSpaceNativeLimit_ = globalSpaceAllocLimit_;
    OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
                                << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
}
687
OnAllocateEvent(TaggedObject * address,size_t size)688 void Heap::OnAllocateEvent([[maybe_unused]] TaggedObject* address, [[maybe_unused]] size_t size)
689 {
690 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
691 HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
692 if (profiler != nullptr) {
693 base::BlockHookScope blockScope;
694 profiler->AllocationEvent(address, size);
695 }
696 #endif
697 }
698
// Extracts the executable base name from a /proc/<pid>/cmdline buffer:
// everything after the last '/' that precedes the first NUL byte (arguments
// in cmdline are NUL-separated, so the first NUL ends argv[0]).
std::string FormatCmdLine(const std::string& cmdLine)
{
    std::string::size_type nulPos = cmdLine.find('\0');
    std::string argv0 = (nulPos == std::string::npos) ? cmdLine : cmdLine.substr(0, nulPos);
    std::string::size_type slashPos = argv0.rfind('/');
    if (slashPos == std::string::npos) {
        return argv0;
    }
    return argv0.substr(slashPos + 1);
}
713
GetProcessName(int32_t pid)714 std::string GetProcessName(int32_t pid)
715 {
716 std::ifstream cmdLineFile("/proc/" + std::to_string(pid) + "/cmdline");
717 std::string processName;
718 if (cmdLineFile) {
719 std::getline(cmdLineFile, processName);
720 cmdLineFile.close();
721 processName = FormatCmdLine(processName);
722 return processName;
723 } else {
724 LOG_ECMA(ERROR) << " GetProcessName failed";
725 return "";
726 }
727 }
728
// Dumps a JSON heap snapshot into the fault log just before an OOM is raised.
// Compiled in only with ECMASCRIPT_SUPPORT_SNAPSHOT + ENABLE_DUMP_IN_FAULTLOG;
// otherwise a no-op. Skips the dump when a heap profiler is already attached.
void Heap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] bool isFullGC)
{
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    if (ecmaVm_->GetHeapProfile() != nullptr) {
        return;
    }
    // Filter appfreeze when dump.
    LOG_ECMA(INFO) << " DumpHeapSnapshotBeforeOOM, isFullGC" << isFullGC;
    base::BlockHookScope blockScope;
    HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
    // Register this process under the freeze filter so the (slow) dump does
    // not get flagged as an app freeze; SetParameter's failure is only logged.
    int32_t pid = getpid();
    std::string propertyName = "hiviewdfx.freeze.filter." + GetProcessName(pid);
    if (!SetParameter(propertyName.c_str(), std::to_string(pid).c_str())) {
        LOG_ECMA(INFO) << " DumpHeapSnapshotBeforeOOM, propertyName:" << propertyName
                       << " value:" << std::to_string(pid);
    }
    // Vm should always allocate young space successfully. Really OOM will occur in the non-young spaces.
    heapProfile->DumpHeapSnapshot(DumpFormat::JSON, true, false, false, isFullGC);
    HeapProfilerInterface::Destroy(ecmaVm_);
#endif // ENABLE_DUMP_IN_FAULTLOG
#endif // ECMASCRIPT_SUPPORT_SNAPSHOT
}
752
OnMoveEvent(uintptr_t address,TaggedObject * forwardAddress,size_t size)753 void Heap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
754 [[maybe_unused]] size_t size)
755 {
756 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
757 HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
758 if (profiler != nullptr) {
759 base::BlockHookScope blockScope;
760 profiler->MoveEvent(address, forwardAddress, size);
761 }
762 #endif
763 }
764
AddToKeptObjects(JSHandle<JSTaggedValue> value) const765 void Heap::AddToKeptObjects(JSHandle<JSTaggedValue> value) const
766 {
767 JSHandle<GlobalEnv> env = ecmaVm_->GetGlobalEnv();
768 JSHandle<LinkedHashSet> linkedSet;
769 if (env->GetWeakRefKeepObjects()->IsUndefined()) {
770 linkedSet = LinkedHashSet::Create(thread_);
771 } else {
772 linkedSet =
773 JSHandle<LinkedHashSet>(thread_, LinkedHashSet::Cast(env->GetWeakRefKeepObjects()->GetTaggedObject()));
774 }
775 linkedSet = LinkedHashSet::Add(thread_, linkedSet, value);
776 env->SetWeakRefKeepObjects(thread_, linkedSet);
777 }
778
AdjustSpaceSizeForAppSpawn()779 void Heap::AdjustSpaceSizeForAppSpawn()
780 {
781 SetHeapMode(HeapMode::SPAWN);
782 auto &config = ecmaVm_->GetEcmaParamConfiguration();
783 size_t minSemiSpaceCapacity = config.GetMinSemiSpaceSize();
784 activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
785 auto committedSize = appSpawnSpace_->GetCommittedSize();
786 appSpawnSpace_->SetInitialCapacity(committedSize);
787 appSpawnSpace_->SetMaximumCapacity(committedSize);
788 oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
789 oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
790 }
791
// Registers an allocation inspector with every space that performs new
// allocations. Paired spaces and read-only/app-spawn spaces are skipped as
// explained inline. inspector must be non-null; ownership is not taken here.
void Heap::AddAllocationInspectorToAllSpaces(AllocationInspector *inspector)
{
    ASSERT(inspector != nullptr);
    // activeSemiSpace_/inactiveSemiSpace_:
    // only add an inspector to activeSemiSpace_, and while sweeping for gc, inspector need be swept.
    activeSemiSpace_->AddAllocationInspector(inspector);
    // oldSpace_/compressSpace_:
    // only add an inspector to oldSpace_, and while sweeping for gc, inspector need be swept.
    oldSpace_->AddAllocationInspector(inspector);
    // readOnlySpace_ need not allocationInspector.
    // appSpawnSpace_ need not allocationInspector.
    nonMovableSpace_->AddAllocationInspector(inspector);
    machineCodeSpace_->AddAllocationInspector(inspector);
    hugeObjectSpace_->AddAllocationInspector(inspector);
    hugeMachineCodeSpace_->AddAllocationInspector(inspector);
}
808
// Removes allocation inspectors from exactly the spaces that
// AddAllocationInspectorToAllSpaces registered them on.
void Heap::ClearAllocationInspectorFromAllSpaces()
{
    activeSemiSpace_->ClearAllocationInspector();
    oldSpace_->ClearAllocationInspector();
    nonMovableSpace_->ClearAllocationInspector();
    machineCodeSpace_->ClearAllocationInspector();
    hugeObjectSpace_->ClearAllocationInspector();
    hugeMachineCodeSpace_->ClearAllocationInspector();
}
818
// Drops the whole WeakRefKeepObjects set (see AddToKeptObjects), releasing the
// strong references that kept WeakRef targets alive.
void Heap::ClearKeptObjects() const
{
    ecmaVm_->GetGlobalEnv()->SetWeakRefKeepObjects(thread_, JSTaggedValue::Undefined());
}
823
// Recomputes allocation limits after a GC from measured GC speed and mutator
// allocation throughput: sets a new old-space initial capacity, the global heap
// allocation limit and the global native allocation limit. If the old space's
// committed size greatly exceeds its live size, requests a full mark so a
// subsequent GC can shrink it.
void Heap::RecomputeLimits()
{
    double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
    double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
    // "Old space" here includes both huge spaces, which are promoted-like storage.
    size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
        hugeMachineCodeSpace_->GetHeapObjectSize();
    size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();

    double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
    // The semi space's share is reserved out of both the old-space and global caps.
    size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
    size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
                                                                  maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
    size_t maxGlobalSize = ecmaVm_->GetEcmaParamConfiguration().GetMaxHeapSize() - newSpaceCapacity;
    size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
                                                                     maxGlobalSize, newSpaceCapacity, growingFactor);
    globalSpaceAllocLimit_ = newGlobalSpaceLimit;
    oldSpace_->SetInitialCapacity(newOldSpaceLimit);
    globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(GetGlobalNativeSize(), MIN_HEAP_SIZE,
                                                                  MAX_GLOBAL_NATIVE_LIMIT, newSpaceCapacity,
                                                                  growingFactor);
    OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
        << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
        << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
    // Live size far below committed size means heavy fragmentation/over-commit:
    // ask for a full (compacting) mark to give memory back.
    if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize() &&
        (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) { // 2: means half
        OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
                                    << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
                                    << " Committed Size: " << oldSpace_->GetCommittedSize();
        SetFullMarkRequestedState(true);
    }
}
855
// Triggers an old-generation GC when any allocation limit is exceeded (native
// size, old-space limit/capacity for an extra `size` bytes, or the global heap
// limit plus the current overshoot). Returns true when a GC actually ran here,
// false when no GC was needed, collection was suppressed, or an old GC was
// already requested elsewhere.
bool Heap::CheckAndTriggerOldGC(size_t size)
{
    bool isFullMarking = IsConcurrentFullMark() && GetJSThread()->IsMarking();
    // During an active full mark the native-size trigger is suppressed; the
    // ongoing mark will handle it.
    bool isNativeSizeLargeTrigger = isFullMarking ? false : GlobalNativeSizeLargerThanLimit();
    if (isFullMarking && oldSpace_->GetOvershootSize() == 0) {
        // Grant temporary headroom so allocation can proceed while marking runs.
        oldSpace_->SetOvershootSize(GetEcmaVM()->GetEcmaParamConfiguration().GetOldSpaceOvershootSize());
    }
    if ((isNativeSizeLargeTrigger || OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) ||
        GetHeapObjectSize() > globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize()) &&
        !NeedStopCollection()) {
        CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
        if (!oldGCRequested_) {
            return true;
        }
    }
    return false;
}
873
// Handles an externally hinted GC request. In background a full GC is always
// honored; in a sensitive (user-interaction) state the hint is ignored; in the
// foreground a full GC runs only when the predicted survival rate is low enough
// that collecting is likely worthwhile. Returns whether a GC was performed.
bool Heap::CheckAndTriggerHintGC()
{
    if (IsInBackground()) {
        CollectGarbage(TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER);
        return true;
    }
    if (InSensitiveStatus()) {
        return false;
    }
    if (memController_->GetPredictedSurvivalRate() < SURVIVAL_RATE_THRESHOLD) {
        CollectGarbage(TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER);
        return true;
    }
    return false;
}
889
// If a concurrent mark has been triggered and the mutator is not ready to start
// a new one, finishes that mark: drains the mark stack on the main thread,
// waits for marker tasks, and records the post-mark statistics. Returns true
// when an ongoing concurrent mark was waited for, false if there was none.
bool Heap::CheckOngoingConcurrentMarking()
{
    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToMark() &&
        concurrentMarker_->IsTriggeredConcurrentMark()) {
        TRACE_GC(GCStats::Scope::ScopeId::WaitConcurrentMarkFinished, GetEcmaVM()->GetEcmaGCStats());
        if (thread_->IsMarking()) {
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
            MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, WaitConcurrentMarkingFinished);
            // Help finish marking on the main thread before blocking on workers.
            GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
            WaitConcurrentMarkingFinished();
        }
        WaitRunningTaskFinished();
        memController_->RecordAfterConcurrentMark(IsConcurrentFullMark(), concurrentMarker_);
        return true;
    }
    return false;
}
907
// Clears the pending idle-time GC task and timestamps its completion so
// TriggerIdleCollection can later decide when to stop requesting idle callbacks.
void Heap::ClearIdleTask()
{
    SetIdleTask(IdleTaskType::NO_TASK);
    idleTaskFinishTime_ = incrementalMarker_->GetCurrentTimeInMs();
}
913
// Schedules idle-time GC work when idle GC is enabled and no idle task is
// pending: either finishing an already-completed concurrent mark, or a young
// GC when the semi space is predicted to fill before a concurrent mark could
// complete.
void Heap::TryTriggerIdleCollection()
{
    if (idleTask_ != IdleTaskType::NO_TASK || !GetJSThread()->IsReadyToMark() || !enableIdleGC_) {
        return;
    }
    if (thread_->IsMarkFinished() && concurrentMarker_->IsTriggeredConcurrentMark()) {
        // Marking is done; only the (remark + evacuate) phase remains for idle time.
        SetIdleTask(IdleTaskType::FINISH_MARKING);
        EnableNotifyIdle();
        CalculateIdleDuration();
        return;
    }

    // NOTE(review): the divisions below have no zero-speed guard, unlike
    // TryTriggerConcurrentMarking; presumably throughput is non-zero once idle
    // GC is enabled — TODO confirm.
    double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
    double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
    double newSpaceAllocToLimitDuration = (static_cast<double>(activeSemiSpace_->GetInitialCapacity()) -
                                           static_cast<double>(activeSemiSpace_->GetCommittedSize())) /
                                           newSpaceAllocSpeed;
    double newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
    // Predicted bytes still allocatable after a mark would finish.
    double newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
    // 2 means double
    if (newSpaceRemainSize < 2 * DEFAULT_REGION_SIZE) {
        SetIdleTask(IdleTaskType::YOUNG_GC);
        SetMarkType(MarkType::MARK_YOUNG);
        EnableNotifyIdle();
        CalculateIdleDuration();
        return;
    }
}
942
// Estimates how long the pending idle GC task will pause, by summing predicted
// durations of its phases (update references, clear native objects, sweep /
// evacuate, and — for an idle young GC — marking), each derived from recorded
// per-phase speeds. Speeds of zero (no history yet) simply skip that term.
// Result is stored in idlePredictDuration_ (milliseconds, from per-MS speeds).
void Heap::CalculateIdleDuration()
{
    // update reference duration
    idlePredictDuration_ = 0.0f;
    size_t updateReferenceSpeed = markType_ == MarkType::MARK_YOUNG ?
        ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_UPDATE_REFERENCE_SPEED) :
        ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::UPDATE_REFERENCE_SPEED);
    if (updateReferenceSpeed != 0) {
        idlePredictDuration_ += (float)GetHeapObjectSize() / updateReferenceSpeed;
    }

    // clear native object duration
    size_t clearNativeObjSpeed = 0;
    if (markType_ == MarkType::MARK_YOUNG) {
        clearNativeObjSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_CLEAR_NATIVE_OBJ_SPEED);
    } else if (markType_ == MarkType::MARK_FULL) {
        clearNativeObjSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_CLEAR_NATIVE_OBJ_SPEED);
    }

    if (clearNativeObjSpeed != 0) {
        idlePredictDuration_ += (float)GetEcmaVM()->GetNativePointerListSize() / clearNativeObjSpeed;
    }

    // sweep and evacuate duration
    size_t youngEvacuateSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_EVACUATE_SPACE_SPEED);
    size_t sweepSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::SWEEP_SPEED);
    size_t oldEvacuateSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_EVACUATE_SPACE_SPEED);
    double survivalRate = ecmaVm_->GetEcmaGCStats()->GetAvgSurvivalRate();
    if (markType_ == MarkType::MARK_YOUNG && youngEvacuateSpeed != 0) {
        // Only surviving young objects get evacuated, hence the survival-rate scaling.
        idlePredictDuration_ += survivalRate * activeSemiSpace_->GetHeapObjectSize() / youngEvacuateSpeed;
    } else if (markType_ == MarkType::MARK_FULL) {
        if (sweepSpeed != 0) {
            idlePredictDuration_ += (float)GetHeapObjectSize() / sweepSpeed;
        }
        if (oldEvacuateSpeed != 0) {
            size_t collectRegionSetSize = GetEcmaVM()->GetEcmaGCStats()->GetRecordData(
                RecordData::COLLECT_REGION_SET_SIZE);
            idlePredictDuration_ += (survivalRate * activeSemiSpace_->GetHeapObjectSize() + collectRegionSetSize) /
                                    oldEvacuateSpeed;
        }
    }

    // Idle YoungGC mark duration
    size_t markSpeed = ecmaVm_->GetEcmaGCStats()->GetGCSpeed(SpeedData::MARK_SPEED);
    if (idleTask_ == IdleTaskType::YOUNG_GC && markSpeed != 0) {
        idlePredictDuration_ += (float)activeSemiSpace_->GetHeapObjectSize() / markSpeed;
    }
    OPTIONAL_LOG(ecmaVm_, INFO) << "Predict idle gc pause: " << idlePredictDuration_ << "ms";
}
992
TryTriggerIncrementalMarking()993 void Heap::TryTriggerIncrementalMarking()
994 {
995 if (!GetJSThread()->IsReadyToMark() || idleTask_ != IdleTaskType::NO_TASK || !enableIdleGC_) {
996 return;
997 }
998 size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
999 size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1000 hugeMachineCodeSpace_->GetHeapObjectSize();
1001 double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1002 double oldSpaceIncrementalMarkSpeed = incrementalMarker_->GetAverageIncrementalMarkingSpeed();
1003 double oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1004 double oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceIncrementalMarkSpeed;
1005
1006 double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1007 // mark finished before allocate limit
1008 if ((oldSpaceRemainSize < DEFAULT_REGION_SIZE) || GetHeapObjectSize() >= globalSpaceAllocLimit_) {
1009 // The object allocated in incremental marking should lower than limit,
1010 // otherwise select trigger concurrent mark.
1011 size_t allocateSize = oldSpaceAllocSpeed * oldSpaceMarkDuration;
1012 if (allocateSize < ALLOCATE_SIZE_LIMIT) {
1013 EnableNotifyIdle();
1014 SetIdleTask(IdleTaskType::INCREMENTAL_MARK);
1015 }
1016 }
1017 }
1018
CheckCanTriggerConcurrentMarking()1019 bool Heap::CheckCanTriggerConcurrentMarking()
1020 {
1021 return concurrentMarker_->IsEnabled() && thread_->IsReadyToMark() &&
1022 !incrementalMarker_->IsTriggeredIncrementalMark() &&
1023 (idleTask_ == IdleTaskType::NO_TASK || idleTask_ == IdleTaskType::YOUNG_GC);
1024 }
1025
// Decides whether (and with which mark type) to start a concurrent mark.
// Ordering: explicit full-mark request first; then full mark when old/global/
// native limits are reached or predicted to be reached before a full mark
// could finish; then young mark when the semi space is full or predicted to
// fill before a young mark could finish. When speed data is missing, falls
// back to pure size thresholds ("first" marks).
void Heap::TryTriggerConcurrentMarking()
{
    // When concurrent marking is enabled, concurrent marking will be attempted to trigger.
    // When the size of old space or global space reaches the limit, isFullMarkNeeded will be set to true.
    // If the predicted duration of current full mark may not result in the new and old spaces reaching their limit,
    // full mark will be triggered.
    // In the same way, if the size of the new space reaches the capacity, and the predicted duration of current
    // young mark may not result in the new space reaching its limit, young mark can be triggered.
    // If it spends much time in full mark, the compress full GC will be requested when the spaces reach the limit.
    // If the global space is larger than half max heap size, we will turn to use full mark and trigger partial GC.
    if (!CheckCanTriggerConcurrentMarking()) {
        return;
    }
    if (fullMarkRequested_) {
        markType_ = MarkType::MARK_FULL;
        OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
        TriggerConcurrentMarking();
        return;
    }
    double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
           oldSpaceAllocToLimitDuration = 0;
    double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
    double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
    size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
                                    hugeMachineCodeSpace_->GetHeapObjectSize();
    size_t globalHeapObjectSize = GetHeapObjectSize();
    size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
    if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
        // No speed history yet: trigger purely on size thresholds.
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
            GlobalNativeSizeLargerThanLimit()) {
            markType_ = MarkType::MARK_FULL;
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
            TriggerConcurrentMarking();
            return;
        }
    } else {
        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
            GlobalNativeSizeLargerThanLimit()) {
            markType_ = MarkType::MARK_FULL;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
            return;
        }
        oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
        oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
        // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
        double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
        if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
            markType_ = MarkType::MARK_FULL;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
            return;
        }
    }

    double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
    double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
    if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
        // No young-space speed history: trigger on committed-size threshold only.
        auto &config = ecmaVm_->GetEcmaParamConfiguration();
        if (activeSemiSpace_->GetCommittedSize() >= config.GetSemiSpaceTriggerConcurrentMark()) {
            markType_ = MarkType::MARK_YOUNG;
            TriggerConcurrentMarking();
            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark" << fullGCRequested_;
        }
        return;
    }
    size_t semiSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
    size_t semiSpaceCommittedSize = activeSemiSpace_->GetCommittedSize();
    bool triggerMark = semiSpaceCapacity <= semiSpaceCommittedSize;
    if (!triggerMark) {
        newSpaceAllocToLimitDuration = (semiSpaceCapacity - semiSpaceCommittedSize) / newSpaceAllocSpeed;
        newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
        // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
        newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
        triggerMark = newSpaceRemainSize < DEFAULT_REGION_SIZE;
    }

    if (triggerMark) {
        markType_ = MarkType::MARK_YOUNG;
        TriggerConcurrentMarking();
        OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
    }
}
1109
TryTriggerFullMarkByNativeSize()1110 void Heap::TryTriggerFullMarkByNativeSize()
1111 {
1112 if (GlobalNativeSizeLargerThanLimit()) {
1113 if (concurrentMarker_->IsEnabled()) {
1114 SetFullMarkRequestedState(true);
1115 TryTriggerConcurrentMarking();
1116 } else {
1117 CheckAndTriggerOldGC();
1118 }
1119 }
1120 }
1121
IncreaseNativeBindingSize(JSNativePointer * object)1122 void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
1123 {
1124 size_t size = object->GetBindingSize();
1125 if (size == 0) {
1126 return;
1127 }
1128 nativeBindingSize_ += size;
1129 }
1130
IncreaseNativeBindingSize(size_t size)1131 void Heap::IncreaseNativeBindingSize(size_t size)
1132 {
1133 if (size == 0) {
1134 return;
1135 }
1136 nativeBindingSize_ += size;
1137 }
1138
// Snapshots the current region lists of every space so that regions created
// afterwards can be distinguished during reclamation.
void Heap::PrepareRecordRegionsForReclaim()
{
    activeSemiSpace_->SetRecordRegion();
    oldSpace_->SetRecordRegion();
    snapshotSpace_->SetRecordRegion();
    nonMovableSpace_->SetRecordRegion();
    hugeObjectSpace_->SetRecordRegion();
    machineCodeSpace_->SetRecordRegion();
    hugeMachineCodeSpace_->SetRecordRegion();
}
1149
// Actually starts the concurrent marker (mark type must already be set by the
// caller). A pending idle young GC is cancelled when a full mark is about to
// run, since the full mark subsumes it. The mark is skipped when the marker is
// disabled, a full GC is already requested, or the task pool is saturated.
void Heap::TriggerConcurrentMarking()
{
    ASSERT(idleTask_ != IdleTaskType::INCREMENTAL_MARK);
    if (idleTask_ == IdleTaskType::YOUNG_GC && IsConcurrentFullMark()) {
        ClearIdleTask();
        DisableNotifyIdle();
    }
    if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
        concurrentMarker_->Mark();
    }
}
1161
// Blocks until every posted parallel GC task has finished (runningTaskCount_
// reaches zero). Standard condition-variable wait loop guarded against
// spurious wakeups; ReduceTaskCount signals the CV.
void Heap::WaitRunningTaskFinished()
{
    LockHolder holder(waitTaskFinishedMutex_);
    while (runningTaskCount_ > 0) {
        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
    }
}
1169
// Blocks until the asynchronous region-clearing task (AsyncClearTask) has
// completed, guarding against spurious condition-variable wakeups.
void Heap::WaitClearTaskFinished()
{
    LockHolder holder(waitClearTaskFinishedMutex_);
    while (!clearTaskFinished_) {
        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
    }
}
1177
// Quiesces all background GC activity: parallel tasks, concurrent sweeping,
// the async clear task, and — if one is in flight — the concurrent mark.
void Heap::WaitAllTasksFinished()
{
    WaitRunningTaskFinished();
    sweeper_->EnsureAllTaskFinished();
    WaitClearTaskFinished();
    if (concurrentMarker_->IsEnabled() && thread_->IsMarking() && concurrentMarker_->IsTriggeredConcurrentMark()) {
        concurrentMarker_->WaitMarkingFinished();
    }
}
1187
// Blocks until the concurrent marker reports marking complete.
void Heap::WaitConcurrentMarkingFinished()
{
    concurrentMarker_->WaitMarkingFinished();
}
1192
// Posts one parallel GC task for the given phase to the shared task pool.
// The running-task counter is bumped first so WaitRunningTaskFinished can't
// miss a task that is posted but not yet started.
void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
{
    IncreaseTaskCount();
    Taskpool::GetCurrentTaskpool()->PostTask(
        std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
}
1199
// Increments the running parallel-task counter under the same mutex the
// waiters use; paired with ReduceTaskCount.
void Heap::IncreaseTaskCount()
{
    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_++;
}
1205
// Switches GC tuning between foreground and background profiles. Background:
// optionally compact via full GC if the heap grew much since the last GC,
// conservative growing, concurrent mark/sweep disabled, single-threaded
// mark/evacuate, lower task-pool priority. Foreground: the inverse.
void Heap::ChangeGCParams(bool inBackground)
{
    inBackground_ = inBackground;
    if (inBackground) {
        LOG_GC(INFO) << "app is inBackground";
        // Reclaim memory eagerly if the heap grew noticeably since the last GC.
        if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT) {
            CollectGarbage(TriggerGCType::FULL_GC, GCReason::SWITCH_BACKGROUND);
        }
        if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
            SetMemGrowingType(MemGrowingType::CONSERVATIVE);
            LOG_GC(INFO) << "Heap Growing Type CONSERVATIVE";
        }
        concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
        sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::DISABLE);
        maxMarkTaskCount_ = 1;
        maxEvacuateTaskCount_ = 1;
        Taskpool::GetCurrentTaskpool()->SetThreadPriority(false);
    } else {
        LOG_GC(INFO) << "app is not inBackground";
        if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
            SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
            LOG_GC(INFO) << "Heap Growing Type HIGH_THROUGHPUT";
        }
        concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
        sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
        // Leave one pool thread free for non-marking work.
        maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
            Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
        maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
        Taskpool::GetCurrentTaskpool()->SetThreadPriority(true);
    }
}
1237
// Entry point invoked from the host's idle callback with the available idle
// budget. Advances incremental marking in slices, and runs the pending idle GC
// task (finish-marking or young GC) only when the budget covers the predicted
// pause or exceeds the hard IDLE_TIME_LIMIT. Stops requesting idle callbacks
// once no task has been pending for IDLE_MAINTAIN_TIME.
void Heap::TriggerIdleCollection(int idleMicroSec)
{
    if (idleTask_ == IdleTaskType::NO_TASK) {
        if (incrementalMarker_->GetCurrentTimeInMs() - idleTaskFinishTime_ > IDLE_MAINTAIN_TIME) {
            DisableNotifyIdle();
        }
        return;
    }

    // Incremental mark initialize and process
    if (idleTask_ == IdleTaskType::INCREMENTAL_MARK &&
        incrementalMarker_->GetIncrementalGCStates() != IncrementalGCStates::REMARK) {
        incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
        if (incrementalMarker_->GetIncrementalGCStates() == IncrementalGCStates::REMARK) {
            // Marking just reached REMARK; re-estimate the remaining pause.
            CalculateIdleDuration();
        }
        return;
    }

    // Budget too small for the predicted pause: wait for a longer idle period.
    if (idleMicroSec < idlePredictDuration_ && idleMicroSec < IDLE_TIME_LIMIT) {
        return;
    }

    switch (idleTask_) {
        case IdleTaskType::FINISH_MARKING: {
            if (markType_ == MarkType::MARK_FULL) {
                CollectGarbage(TriggerGCType::OLD_GC, GCReason::IDLE);
            } else {
                CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
            }
            break;
        }
        case IdleTaskType::YOUNG_GC:
            CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
            break;
        case IdleTaskType::INCREMENTAL_MARK:
            // Only reached in the REMARK state (earlier states returned above).
            incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
            break;
        default:
            break;
    }
    ClearIdleTask();
}
1281
NotifyMemoryPressure(bool inHighMemoryPressure)1282 void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
1283 {
1284 if (inHighMemoryPressure) {
1285 LOG_GC(INFO) << "app is inHighMemoryPressure";
1286 SetMemGrowingType(MemGrowingType::PRESSURE);
1287 } else {
1288 LOG_GC(INFO) << "app is not inHighMemoryPressure";
1289 SetMemGrowingType(MemGrowingType::CONSERVATIVE);
1290 }
1291 }
1292
// Ends the cold-start SmartGC window (idempotent via onStartupEvent_ under the
// mutex), grants the new space overshoot headroom so GC thresholds sit about
// one overshoot above the current usage, and — from the main thread only —
// kicks off a full concurrent mark to clean up start-up garbage.
void Heap::NotifyFinishColdStart(bool isMainThread)
{
    {
        LockHolder holder(finishColdStartMutex_);
        if (!onStartupEvent_) {
            return;
        }
        onStartupEvent_ = false;
        LOG_GC(INFO) << "SmartGC: finish app cold start";

        // set overshoot size to increase gc threashold larger 8MB than current heap size.
        int64_t semiRemainSize =
            static_cast<int64_t>(GetNewSpace()->GetInitialCapacity() - GetNewSpace()->GetCommittedSize());
        int64_t overshootSize =
            static_cast<int64_t>(GetEcmaVM()->GetEcmaParamConfiguration().GetOldSpaceOvershootSize()) - semiRemainSize;
        // overshoot size should be larger than 0.
        GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
        GetNewSpace()->SetWaterLineWithoutGC();
    }

    // Trigger the mark outside the lock; only the main thread may start it.
    if (isMainThread && CheckCanTriggerConcurrentMarking()) {
        markType_ = MarkType::MARK_FULL;
        TriggerConcurrentMarking();
    }
}
1318
// Schedules cold-start termination on a worker: posts a FinishColdStartTask
// which sleeps ~2s and then calls NotifyFinishColdStart(false). No-op if the
// cold-start window is already closed.
void Heap::NotifyFinishColdStartSoon()
{
    if (!onStartupEvent_) {
        return;
    }

    // post 2s task
    Taskpool::GetCurrentTaskpool()->PostTask(
        std::make_unique<FinishColdStartTask>(GetJSThread()->GetThreadId(), this));
}
1329
NotifyHighSensitive(bool isStart)1330 void Heap::NotifyHighSensitive(bool isStart)
1331 {
1332 isStart ? SetSensitiveStatus(AppSensitiveStatus::ENTER_HIGH_SENSITIVE)
1333 : SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE);
1334 LOG_GC(DEBUG) << "SmartGC: set high sensitive status: " << isStart;
1335 }
1336
// Completes the transition out of a high-sensitive scene. The CAS guarantees
// only one caller performs the transition to NORMAL_SCENE; that caller grants
// the new space overshoot headroom (same computation as NotifyFinishColdStart)
// and then re-evaluates the deferred GC triggers.
void Heap::HandleExitHighSensitiveEvent()
{
    AppSensitiveStatus status = GetSensitiveStatus();
    if (status == AppSensitiveStatus::EXIT_HIGH_SENSITIVE
        && CASSensitiveStatus(status, AppSensitiveStatus::NORMAL_SCENE)) {
        // set overshoot size to increase gc threashold larger 8MB than current heap size.
        int64_t semiRemainSize =
            static_cast<int64_t>(GetNewSpace()->GetInitialCapacity() - GetNewSpace()->GetCommittedSize());
        int64_t overshootSize =
            static_cast<int64_t>(GetEcmaVM()->GetEcmaParamConfiguration().GetOldSpaceOvershootSize()) - semiRemainSize;
        // overshoot size should be larger than 0.
        GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
        GetNewSpace()->SetWaterLineWithoutGC();

        // fixme: IncrementalMarking and IdleCollection is currently not enabled
        TryTriggerIncrementalMarking();
        TryTriggerIdleCollection();
        TryTriggerConcurrentMarking();
    }
}
1357
1358 // On high sensitive scene, heap object size can reach to MaxHeapSize - 8M temporarily, 8M is reserved for
1359 // concurrent mark
ObjectExceedMaxHeapSize() const1360 bool Heap::ObjectExceedMaxHeapSize() const
1361 {
1362 size_t configMaxHeapSize = ecmaVm_->GetEcmaParamConfiguration().GetMaxHeapSize();
1363 size_t overshootSize = ecmaVm_->GetEcmaParamConfiguration().GetOldSpaceOvershootSize();
1364 return GetHeapObjectSize() > configMaxHeapSize - overshootSize;
1365 }
1366
// Decides whether a requested collection must be suppressed right now: always
// during value serialization; during a sensitive scene as long as the heap has
// headroom. Once the heap nears the configured maximum even a sensitive scene
// cannot defer GC — overshoot is extended and collection proceeds.
bool Heap::NeedStopCollection()
{
    // gc is not allowed during value serialize
    if (onSerializeEvent_) {
        return true;
    }

    if (!InSensitiveStatus()) {
        return false;
    }

    if (!ObjectExceedMaxHeapSize()) {
        return true;
    }
    LOG_GC(INFO) << "SmartGC: force expand will cause OOM, have to trigger gc";
    GetNewSpace()->SetOverShootSize(
        GetNewSpace()->GetCommittedSize() - GetNewSpace()->GetInitialCapacity() +
        ecmaVm_->GetEcmaParamConfiguration().GetOldSpaceOvershootSize());
    return false;
}
1387
// True while another parallel mark task may be posted (current running count is
// below the configured per-mode maximum).
bool Heap::CheckCanDistributeTask()
{
    LockHolder holder(waitTaskFinishedMutex_);
    return runningTaskCount_ < maxMarkTaskCount_;
}
1393
// Decrements the running parallel-task counter and wakes all
// WaitRunningTaskFinished waiters when the last task completes.
void Heap::ReduceTaskCount()
{
    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_--;
    if (runningTaskCount_ == 0) {
        waitTaskFinishedCV_.SignalAll();
    }
}
1402
// Worker-thread body for one parallel GC phase. Spins until WorkManager is
// initialized (a deliberate busy-wait: the comment below relies on it for a
// synchronizes-with edge; presumably HasInitialized is an atomic load — TODO
// confirm), dispatches to the marker matching the phase, then decrements the
// running-task counter so waiters can make progress. Always returns true.
bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
{
    // Synchronizes-with. Ensure that WorkManager::Initialize must be seen by MarkerThreads.
    while (!heap_->GetWorkManager()->HasInitialized());
    switch (taskPhase_) {
        case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
            heap_->GetSemiGCMarker()->MarkRoots(threadIndex);
            heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
            heap_->GetSemiGCMarker()->ProcessSnapshotRSet(threadIndex);
            break;
        case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
            break;
        case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
            heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
            break;
        default:
            break;
    }
    heap_->ReduceTaskCount();
    return true;
}
1436
// Background task that reclaims regions left over from the given GC type;
// WaitClearTaskFinished synchronizes with its completion. Always returns true.
bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    heap_->ReclaimRegions(gcType_);
    return true;
}
1442
Run(uint32_t threadIndex)1443 bool Heap::FinishColdStartTask::Run([[maybe_unused]] uint32_t threadIndex)
1444 {
1445 std::this_thread::sleep_for(std::chrono::microseconds(2000000)); // 2000000 means 2s
1446 heap_->NotifyFinishColdStart(false);
1447 return true;
1448 }
1449
GetArrayBufferSize() const1450 size_t Heap::GetArrayBufferSize() const
1451 {
1452 size_t result = 0;
1453 sweeper_->EnsureAllTaskFinished();
1454 this->IterateOverObjects([&result](TaggedObject *obj) {
1455 JSHClass* jsClass = obj->GetClass();
1456 result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
1457 });
1458 return result;
1459 }
1460
GetLiveObjectSize() const1461 size_t Heap::GetLiveObjectSize() const
1462 {
1463 size_t objectSize = 0;
1464 sweeper_->EnsureAllTaskFinished();
1465 this->IterateOverObjects([&objectSize]([[maybe_unused]] TaggedObject *obj) {
1466 objectSize += obj->GetClass()->SizeFromJSHClass(obj);
1467 });
1468 return objectSize;
1469 }
1470
GetHeapLimitSize() const1471 size_t Heap::GetHeapLimitSize() const
1472 {
1473 // Obtains the theoretical upper limit of space that can be allocated to JS heap.
1474 auto &config = ecmaVm_->GetEcmaParamConfiguration();
1475 return config.GetMaxHeapSize();
1476 }
1477
// Best-effort liveness check used by debugging/verification paths: the object
// must lie in a heap region (see ContainObject's caveats) and must not be a
// free-list filler. Logs diagnostics for both failure modes.
bool Heap::IsAlive(TaggedObject *object) const
{
    if (!ContainObject(object)) {
        LOG_GC(ERROR) << "The region is already free";
        return false;
    }

    bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
    if (isFree) {
        Region *region = Region::ObjectAddressToRange(object);
        LOG_GC(ERROR) << "The object " << object << " in "
                      << region->GetSpaceTypeName()
                      << " already free";
    }
    return !isFree;
}
1494
// Approximate membership test: derives the region from the object's address
// and asks whether that region belongs to the heap. See the caveats below —
// the result is not guaranteed accurate and may touch invalid memory.
bool Heap::ContainObject(TaggedObject *object) const
{
    /*
     * fixme: There's no absolutely safe appraoch to doing this, given that the region object is currently
     * allocated and maintained in the JS object heap. We cannot safely tell whether a region object
     * calculated from an object address is still valid or alive in a cheap way.
     * This will introduce inaccurate result to verify if an object is contained in the heap, and it may
     * introduce additional incorrect memory access issues.
     * Unless we can tolerate the performance impact of iterating the region list of each space and change
     * the implementation to that approach, don't rely on current implementation to get accurate result.
     */
    Region *region = Region::ObjectAddressToRange(object);
    return region->InHeapSpace();
}
1509
// Runs all queued weak-node native finalize callbacks (LIFO, draining the
// thread's list). The runningNativeFinalizeCallbacks_ flag makes the function
// non-reentrant: a callback that triggers another GC won't re-enter and
// double-invoke callbacks.
void Heap::InvokeWeakNodeNativeFinalizeCallback()
{
    // the second callback may lead to another GC, if this, return directly;
    if (runningNativeFinalizeCallbacks_) {
        return;
    }
    runningNativeFinalizeCallbacks_ = true;
    auto weakNodeNativeFinalizeCallBacks = thread_->GetWeakNodeNativeFinalizeCallbacks();
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "InvokeNativeFinalizeCallbacks num:"
        + std::to_string(weakNodeNativeFinalizeCallBacks->size()));
    while (!weakNodeNativeFinalizeCallBacks->empty()) {
        auto callbackPair = weakNodeNativeFinalizeCallBacks->back();
        weakNodeNativeFinalizeCallBacks->pop_back();
        ASSERT(callbackPair.first != nullptr && callbackPair.second != nullptr);
        auto callback = callbackPair.first;
        // callbackPair.second is the opaque native data passed back to the callback.
        (*callback)(callbackPair.second);
    }
    runningNativeFinalizeCallbacks_ = false;
}
1529
// Logs a pre-GC summary: reason/status flags, the GC type about to run, and
// per-space "live/committed/capacity" figures. Output goes through
// OPTIONAL_LOG, so it is emitted only when that logging is enabled.
void Heap::PrintHeapInfo(TriggerGCType gcType) const
{
    OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
    OPTIONAL_LOG(ecmaVm_, INFO) << "GC Reason:" << ecmaVm_->GetEcmaGCStats()->GCReasonToString()
                                << ";OnStartUp:" << onStartUpEvent()
                                << ";OnHighSensitive:" << static_cast<int>(GetSensitiveStatus())
                                << ";ConcurrentMark Status:" << static_cast<int>(thread_->GetMarkStatus());
    OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
                                << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsConcurrentFullMark() << ")";
    OPTIONAL_LOG(ecmaVm_, INFO) << "ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize()
                                << "/" << activeSemiSpace_->GetInitialCapacity() << "), NonMovable("
                                << nonMovableSpace_->GetHeapObjectSize() << "/" << nonMovableSpace_->GetCommittedSize()
                                << "/" << nonMovableSpace_->GetInitialCapacity() << "), Old("
                                << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize()
                                << "/" << oldSpace_->GetInitialCapacity() << "), HugeObject("
                                << hugeObjectSpace_->GetHeapObjectSize() << "/" << hugeObjectSpace_->GetCommittedSize()
                                << "/" << hugeObjectSpace_->GetInitialCapacity() << "), ReadOnlySpace("
                                << readOnlySpace_->GetCommittedSize() << "/" << readOnlySpace_->GetInitialCapacity()
                                << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize() << "/"
                                << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
                                << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
}
1552
// Logs the heap summary for the given GC type; when detail statistics are
// compiled in (ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS), additionally dumps
// per-JSType object counts for the major spaces.
void Heap::StatisticHeapObject(TriggerGCType gcType) const
{
    PrintHeapInfo(gcType);
#if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
    StatisticHeapDetail();
#endif
}
1560
StatisticHeapDetail() const1561 void Heap::StatisticHeapDetail() const
1562 {
1563 static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
1564 int typeCount[JS_TYPE_LAST] = { 0 };
1565 static const int MIN_COUNT_THRESHOLD = 1000;
1566
1567 nonMovableSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1568 typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1569 });
1570 for (int i = 0; i < JS_TYPE_LAST; i++) {
1571 if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1572 LOG_ECMA(INFO) << "NonMovable space type " << JSHClass::DumpJSType(JSType(i))
1573 << " count:" << typeCount[i];
1574 }
1575 typeCount[i] = 0;
1576 }
1577
1578 oldSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1579 typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1580 });
1581 for (int i = 0; i < JS_TYPE_LAST; i++) {
1582 if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1583 LOG_ECMA(INFO) << "Old space type " << JSHClass::DumpJSType(JSType(i))
1584 << " count:" << typeCount[i];
1585 }
1586 typeCount[i] = 0;
1587 }
1588
1589 activeSemiSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
1590 typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
1591 });
1592 for (int i = 0; i < JS_TYPE_LAST; i++) {
1593 if (typeCount[i] > MIN_COUNT_THRESHOLD) {
1594 LOG_ECMA(INFO) << "Active semi space type " << JSHClass::DumpJSType(JSType(i))
1595 << " count:" << typeCount[i];
1596 }
1597 typeCount[i] = 0;
1598 }
1599 }
1600
// Repoints the cached WorkManager of every GC component (markers, collectors,
// sweeper drivers) to the given instance, so all of them share the same work
// queues after the work manager is replaced.
// NOTE(review): no null check is performed on workManager — assumes the caller
// always passes a valid instance; confirm at call sites.
void Heap::UpdateWorkManager(WorkManager *workManager)
{
    concurrentMarker_->workManager_ = workManager;
    fullGC_->workManager_ = workManager;
    stwYoungGC_->workManager_ = workManager;
    incrementalMarker_->workManager_ = workManager;
    nonMovableMarker_->workManager_ = workManager;
    semiGCMarker_->workManager_ = workManager;
    compressGCMarker_->workManager_ = workManager;
    partialGC_->workManager_ = workManager;
}
1612
CalCallSiteInfo(uintptr_t retAddr) const1613 std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> Heap::CalCallSiteInfo(uintptr_t retAddr) const
1614 {
1615 MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
1616 MachineCode *code = nullptr;
1617 // 1. find return
1618 // 2. gc
1619 machineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
1620 if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
1621 return;
1622 }
1623 if (MachineCode::Cast(obj)->IsInText(retAddr)) {
1624 code = MachineCode::Cast(obj);
1625 return;
1626 }
1627 });
1628 if (code == nullptr) {
1629 HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
1630 hugeMachineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
1631 if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
1632 return;
1633 }
1634 if (MachineCode::Cast(obj)->IsInText(retAddr)) {
1635 code = MachineCode::Cast(obj);
1636 return;
1637 }
1638 });
1639 }
1640
1641 if (code == nullptr) {
1642 return {};
1643 }
1644 return code->CalCallSiteInfo(retAddr);
1645 };
1646 } // namespace panda::ecmascript
1647