1 /*
2  * Copyright (c) 2021 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef ECMASCRIPT_MEM_HEAP_INL_H
17 #define ECMASCRIPT_MEM_HEAP_INL_H
18 
19 #include "clang.h"
20 #include "common_components/heap/heap_allocator-inl.h"
21 #include "common_interfaces/base_runtime.h"
22 #include "ecmascript/base/config.h"
23 #include "ecmascript/mem/heap.h"
24 
25 #include "ecmascript/base/block_hook_scope.h"
26 #include "ecmascript/cross_vm/daemon_task_hybrid-inl.h"
27 #include "ecmascript/daemon/daemon_task-inl.h"
28 #include "ecmascript/dfx/hprof/heap_tracker.h"
29 #include "ecmascript/ecma_vm.h"
30 #include "ecmascript/js_native_pointer.h"
31 #include "ecmascript/js_runtime_options.h"
32 #include "ecmascript/mem/allocator-inl.h"
33 #include "ecmascript/mem/concurrent_sweeper.h"
34 #include "ecmascript/mem/linear_space.h"
35 #include "ecmascript/mem/mem.h"
36 #include "ecmascript/mem/mem_controller.h"
37 #include "ecmascript/mem/shared_mem_controller.h"
38 #include "ecmascript/mem/sparse_space.h"
39 #include "ecmascript/mem/tagged_object.h"
40 #include "ecmascript/mem/thread_local_allocation_buffer.h"
41 #include "ecmascript/mem/barriers-inl.h"
42 #include "ecmascript/mem/mem_map_allocator.h"
43 #include "ecmascript/runtime.h"
44 #include "libpandabase/macros.h"
45 
46 namespace panda::ecmascript {
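// Note on the CHECK_*_OOM_ERROR macros below: they share one recovery pattern. When an allocation returns
// nullptr, they grow the target space by the configured out-of-memory overshoot, optionally dump a heap
// snapshot and heap statistics, throw (or set) an OutOfMemoryError on the JS thread, and retry the allocation once.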
47 #define CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, space, message)                                         \
48     if (UNLIKELY((object) == nullptr)) {                                                                    \
49         EcmaVM *vm = GetEcmaVM();                                                                           \
50         size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
51         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
52         if ((space)->IsOOMDumpSpace()) {                                                                    \
53             DumpHeapSnapshotBeforeOOM();                                                               \
54         }                                                                                                   \
55         StatisticHeapDetail();                                                                              \
56         ThrowOutOfMemoryError(GetJSThread(), size, message);                                                \
57         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size));                               \
58     }
59 
60 #define CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, space, message)                                \
61     if (UNLIKELY((object) == nullptr)) {                                                                    \
62         size_t oomOvershootSize = GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();                \
63         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
64         DumpHeapSnapshotBeforeOOM(thread, SharedHeapOOMSource::NORMAL_ALLOCATION);                   \
65         ThrowOutOfMemoryError(thread, size, message);                                                       \
66         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(thread, size));                       \
67     }
68 
69 #define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, space, desc, message)                   \
70     if (UNLIKELY((object) == nullptr)) {                                                                    \
71         EcmaVM *vm = GetEcmaVM();                                                                           \
72         size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
73         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
74         SetMachineCodeOutOfMemoryError(GetJSThread(), size, message);                                       \
75         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size, desc));                         \
76     }
77 
78 #define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, space, message)                              \
79     if (UNLIKELY((object) == nullptr)) {                                                                    \
80         EcmaVM *vm = GetEcmaVM();                                                                           \
81         size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
82         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
83         SetMachineCodeOutOfMemoryError(GetJSThread(), size, message);                                       \
84         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size));                               \
85     }
86 
87 template<class Callback>
88 void SharedHeap::EnumerateOldSpaceRegions(const Callback &cb) const
89 {
90     sOldSpace_->EnumerateRegions(cb);
91     sNonMovableSpace_->EnumerateRegions(cb);
92     sHugeObjectSpace_->EnumerateRegions(cb);
93     sAppSpawnSpace_->EnumerateRegions(cb);
94 }
95 
96 template<class Callback>
97 void SharedHeap::EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const
98 {
99     sOldSpace_->EnumerateRegionsWithRecord(cb);
100     sNonMovableSpace_->EnumerateRegionsWithRecord(cb);
101     sHugeObjectSpace_->EnumerateRegionsWithRecord(cb);
102 }
103 
104 template<class Callback>
105 void SharedHeap::IterateOverObjects(const Callback &cb) const
106 {
107     sOldSpace_->IterateOverObjects(cb);
108     sNonMovableSpace_->IterateOverObjects(cb);
109     sHugeObjectSpace_->IterateOverObjects(cb);
110     sAppSpawnSpace_->IterateOverMarkedObjects(cb);
111 }
112 
113 template<class Callback>
114 void Heap::EnumerateOldSpaceRegions(const Callback &cb, Region *region) const
115 {
116     oldSpace_->EnumerateRegions(cb, region);
117     appSpawnSpace_->EnumerateRegions(cb);
118     nonMovableSpace_->EnumerateRegions(cb);
119     hugeObjectSpace_->EnumerateRegions(cb);
120     machineCodeSpace_->EnumerateRegions(cb);
121     hugeMachineCodeSpace_->EnumerateRegions(cb);
122 }
123 
124 template<class Callback>
125 void Heap::EnumerateSnapshotSpaceRegions(const Callback &cb) const
126 {
127     snapshotSpace_->EnumerateRegions(cb);
128 }
129 
130 template<class Callback>
131 void Heap::EnumerateNonNewSpaceRegions(const Callback &cb) const
132 {
133     oldSpace_->EnumerateRegions(cb);
134     if (!isCSetClearing_.load(std::memory_order_acquire)) {
135         oldSpace_->EnumerateCollectRegionSet(cb);
136     }
137     appSpawnSpace_->EnumerateRegions(cb);
138     snapshotSpace_->EnumerateRegions(cb);
139     nonMovableSpace_->EnumerateRegions(cb);
140     hugeObjectSpace_->EnumerateRegions(cb);
141     machineCodeSpace_->EnumerateRegions(cb);
142     hugeMachineCodeSpace_->EnumerateRegions(cb);
143 }
144 
145 template<class Callback>
146 void Heap::EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const
147 {
148     oldSpace_->EnumerateRegionsWithRecord(cb);
149     snapshotSpace_->EnumerateRegionsWithRecord(cb);
150     nonMovableSpace_->EnumerateRegionsWithRecord(cb);
151     hugeObjectSpace_->EnumerateRegionsWithRecord(cb);
152     machineCodeSpace_->EnumerateRegionsWithRecord(cb);
153     hugeMachineCodeSpace_->EnumerateRegionsWithRecord(cb);
154 }
155 
156 template<class Callback>
157 void Heap::EnumerateNewSpaceRegions(const Callback &cb) const
158 {
159     activeSemiSpace_->EnumerateRegions(cb);
160 }
161 
162 template<class Callback>
163 void Heap::EnumerateNonMovableRegions(const Callback &cb) const
164 {
165     snapshotSpace_->EnumerateRegions(cb);
166     appSpawnSpace_->EnumerateRegions(cb);
167     nonMovableSpace_->EnumerateRegions(cb);
168     hugeObjectSpace_->EnumerateRegions(cb);
169     machineCodeSpace_->EnumerateRegions(cb);
170     hugeMachineCodeSpace_->EnumerateRegions(cb);
171 }
172 
173 template<class Callback>
174 void Heap::EnumerateRegions(const Callback &cb) const
175 {
176     activeSemiSpace_->EnumerateRegions(cb);
177     oldSpace_->EnumerateRegions(cb);
178     if (!isCSetClearing_.load(std::memory_order_acquire)) {
179         oldSpace_->EnumerateCollectRegionSet(cb);
180     }
181     appSpawnSpace_->EnumerateRegions(cb);
182     snapshotSpace_->EnumerateRegions(cb);
183     nonMovableSpace_->EnumerateRegions(cb);
184     hugeObjectSpace_->EnumerateRegions(cb);
185     machineCodeSpace_->EnumerateRegions(cb);
186     hugeMachineCodeSpace_->EnumerateRegions(cb);
187 }
188 
189 template<class Callback>
190 void Heap::IterateOverObjects(const Callback &cb, bool isSimplify) const
191 {
192     activeSemiSpace_->IterateOverObjects(cb);
193     oldSpace_->IterateOverObjects(cb);
194     nonMovableSpace_->IterateOverObjects(cb);
195     hugeObjectSpace_->IterateOverObjects(cb);
196     machineCodeSpace_->IterateOverObjects(cb);
197     hugeMachineCodeSpace_->IterateOverObjects(cb);
198     snapshotSpace_->IterateOverObjects(cb);
199     if (!isSimplify) {
200         readOnlySpace_->IterateOverObjects(cb);
201         appSpawnSpace_->IterateOverMarkedObjects(cb);
202     }
203 }
204 
205 TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass)
206 {
207     size_t size = hclass->GetObjectSize();
208     return AllocateYoungOrHugeObject(hclass, size);
209 }
210 
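// Regular-sized young allocations go to the active semispace; oversized requests fall back to the huge
// object space. On failure the allocation is retried after up to two garbage collections before the
// OOM handling in CHECK_OBJ_AND_THROW_OOM_ERROR takes over.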
211 TaggedObject *Heap::AllocateYoungOrHugeObject(size_t size)
212 {
213     if (UNLIKELY(g_isEnableCMCGC)) {
214         return AllocateYoungForCMC(thread_, size);
215     }
216     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
217     TaggedObject *object = nullptr;
218     if (size > g_maxRegularHeapObjectSize) {
219         object = AllocateHugeObject(size);
220     } else {
221         object = AllocateInYoungSpace(size);
222         if (object == nullptr) {
223             if (!HandleExitHighSensitiveEvent()) {
224                 CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
225             }
226             object = AllocateInYoungSpace(size);
227             if (object == nullptr) {
228                 CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
229                 object = AllocateInYoungSpace(size);
230                 CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, activeSemiSpace_, "Heap::AllocateYoungOrHugeObject");
231             }
232         }
233     }
234     return object;
235 }
236 
237 TaggedObject *Heap::AllocateInYoungSpace(size_t size)
238 {
239     ASSERT(!g_isEnableCMCGC);
240     return reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
241 }
242 
243 TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass, size_t size)
244 {
245     auto object = AllocateYoungOrHugeObject(size);
246     ASSERT(object != nullptr);
247     object->SetClass(thread_, hclass);
248 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
249     OnAllocateEvent(GetEcmaVM(), object, size);
250 #endif
251     return object;
252 }
253 
254 void BaseHeap::SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
255                                            [[maybe_unused]] size_t size)
256 {
257     ASSERT(object != nullptr);
258     object->SetClass(thread, hclass);
259 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
260     OnAllocateEvent(thread->GetEcmaVM(), object, size);
261 #endif
262 }
263 
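// CMC-GC fast paths: try to bump-allocate from the thread-local allocation buffer first, and return nullptr
// for oversized requests so the caller falls back to the shared CMC allocator.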
264 TaggedObject *BaseHeap::FastAllocateYoungInTlabForCMC(JSThread *thread, size_t size) const
265 {
266     ASSERT(g_isEnableCMCGC);
267     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
268     if (UNLIKELY(size > g_maxRegularHeapObjectSize)) {
269         return nullptr;
270     }
271     return reinterpret_cast<TaggedObject *>(common::AllocateYoungInAllocBuffer(thread->GetAllocBuffer(), size));
272 }
273 
274 TaggedObject *BaseHeap::FastAllocateOldInTlabForCMC(JSThread *thread, size_t size) const
275 {
276     ASSERT(g_isEnableCMCGC);
277     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
278     if (UNLIKELY(size > g_maxRegularHeapObjectSize)) {
279         return nullptr;
280     }
281     return reinterpret_cast<TaggedObject *>(common::AllocateOldInAllocBuffer(thread->GetAllocBuffer(), size));
282 }
283 
284 TaggedObject *BaseHeap::AllocateYoungForCMC(JSThread *thread, size_t size) const
285 {
286     ASSERT(g_isEnableCMCGC);
287     auto object = FastAllocateYoungInTlabForCMC(thread, size);
288     if (object != nullptr) {
289         object->SetLanguageType(common::LanguageType::DYNAMIC);
290         return object;
291     }
292     return reinterpret_cast<TaggedObject *>(
293         common::HeapAllocator::AllocateInYoungOrHuge(size, common::LanguageType::DYNAMIC));
294 }
295 
296 TaggedObject *BaseHeap::AllocateOldForCMC(JSThread *thread, size_t size) const
297 {
298     ASSERT(g_isEnableCMCGC);
299     auto object = FastAllocateOldInTlabForCMC(thread, size);
300     if (object != nullptr) {
301         object->SetLanguageType(common::LanguageType::DYNAMIC);
302         return object;
303     }
304     return reinterpret_cast<TaggedObject *>(
305         common::HeapAllocator::AllocateInOldOrHuge(size, common::LanguageType::DYNAMIC));
306 }
307 
308 uintptr_t Heap::AllocateYoungSync(size_t size)
309 {
310     ASSERT(!g_isEnableCMCGC);
311     return activeSemiSpace_->AllocateSync(size);
312 }
313 
314 bool Heap::MoveYoungRegion(Region *region)
315 {
316     return activeSemiSpace_->SwapRegion(region, inactiveSemiSpace_);
317 }
318 
319 bool Heap::MoveYoungRegionToOld(Region *region)
320 {
321     return oldSpace_->SwapRegion(region, inactiveSemiSpace_);
322 }
323 
324 void Heap::MergeToOldSpaceSync(LocalSpace *localSpace)
325 {
326     oldSpace_->Merge(localSpace);
327 }
328 
329 bool Heap::InHeapProfiler()
330 {
331 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
332     return GetEcmaVM()->GetHeapProfile() != nullptr;
333 #else
334     return false;
335 #endif
336 }
337 
338 void SharedHeap::MergeToOldSpaceSync(SharedLocalSpace *localSpace)
339 {
340     sOldSpace_->Merge(localSpace);
341 }
342 
343 TaggedObject *Heap::TryAllocateYoungGeneration(JSHClass *hclass, size_t size)
344 {
345     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
346     if (size > g_maxRegularHeapObjectSize) {
347         return nullptr;
348     }
349     TaggedObject *object = nullptr;
350     if (UNLIKELY(g_isEnableCMCGC)) {
351         object = AllocateYoungForCMC(thread_, size);
352     } else {
353         object = reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
354     }
355     if (object != nullptr) {
356         object->SetClass(thread_, hclass);
357     }
358 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
359     OnAllocateEvent(GetEcmaVM(), object, size);
360 #endif
361     return object;
362 }
363 
364 TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass)
365 {
366     size_t size = hclass->GetObjectSize();
367     return AllocateOldOrHugeObject(hclass, size);
368 }
369 
370 TaggedObject *Heap::AllocateOldOrHugeObject(size_t size)
371 {
372     TaggedObject *object = nullptr;
373 
374     if (UNLIKELY(g_isEnableCMCGC)) {
375         object = AllocateOldForCMC(thread_, size);
376     } else {
377         size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
378         if (size > g_maxRegularHeapObjectSize) {
379             object = AllocateHugeObject(size);
380         } else {
381             object = reinterpret_cast<TaggedObject *>(oldSpace_->AllocateFast(size));
382             if (object == nullptr) {
383                 bool gcSuccess = CheckAndTriggerOldGC();
384                 object = reinterpret_cast<TaggedObject *>(oldSpace_->AllocateSlow(size, gcSuccess));
385             }
386             if (object == nullptr) {
387                 CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
388                 object = reinterpret_cast<TaggedObject *>(oldSpace_->AllocateSlow(size, true));
389             }
390             CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, oldSpace_, "Heap::AllocateOldOrHugeObject");
391         }
392     }
393     return object;
394 }
395 
396 TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass, size_t size)
397 {
398     auto object = AllocateOldOrHugeObject(size);
399     object->SetClass(thread_, hclass);
400 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
401     OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject*>(object), size);
402 #endif
403     return object;
404 }
405 
406 TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass)
407 {
408     size_t size = hclass->GetObjectSize();
409     TaggedObject *object = AllocateReadOnlyOrHugeObject(hclass, size);
410 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
411     OnAllocateEvent(GetEcmaVM(), object, size);
412 #endif
413     return object;
414 }
415 
416 TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size)
417 {
418     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
419     TaggedObject *object = nullptr;
420 
421     if (size > g_maxRegularHeapObjectSize) {
422         object = AllocateHugeObject(hclass, size);
423     } else {
424         object = AllocateReadOnlyOrHugeObject(size);
425         CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, readOnlySpace_, "Heap::AllocateReadOnlyOrHugeObject");
426         ASSERT(object != nullptr);
427         object->SetClass(thread_, hclass);
428     }
429 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
430     OnAllocateEvent(GetEcmaVM(), object, size);
431 #endif
432     return object;
433 }
434 
435 TaggedObject* Heap::AllocateReadOnlyOrHugeObject(size_t size)
436 {
437     if (UNLIKELY(g_isEnableCMCGC)) {
438         return reinterpret_cast<TaggedObject *>(common::HeapAllocator::AllocateInReadOnly(
439             size, common::LanguageType::DYNAMIC));
440     }
441     return reinterpret_cast<TaggedObject *>(readOnlySpace_->Allocate(size));
442 }
443 
444 TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass)
445 {
446     size_t size = hclass->GetObjectSize();
447     TaggedObject *object = AllocateNonMovableOrHugeObject(hclass, size);
448     if (object == nullptr) {
449         LOG_ECMA(FATAL) << "Heap::AllocateNonMovableOrHugeObject:object is nullptr";
450     }
451 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
452     OnAllocateEvent(GetEcmaVM(), object, size);
453 #endif
454     return object;
455 }
456 
457 TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size)
458 {
459     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
460     TaggedObject *object = nullptr;
461     if (UNLIKELY(g_isEnableCMCGC)) {
462         object = reinterpret_cast<TaggedObject *>(common::HeapAllocator::AllocateInNonmoveOrHuge(
463             size, common::LanguageType::DYNAMIC));
464         object->SetClass(thread_, hclass);
465     } else {
466         if (size > g_maxRegularHeapObjectSize) {
467             object = AllocateHugeObject(hclass, size);
468         } else {
469             object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->CheckAndAllocate(size));
470             CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, nonMovableSpace_, "Heap::AllocateNonMovableOrHugeObject");
471             object->SetClass(thread_, hclass);
472         }
473     }
474 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
475     OnAllocateEvent(GetEcmaVM(), object, size);
476 #endif
477     return object;
478 }
479 
480 TaggedObject *Heap::AllocateClassClass(JSHClass *hclass, size_t size)
481 {
482     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
483     TaggedObject *object = nullptr;
484     if (UNLIKELY(g_isEnableCMCGC)) {
485         object = reinterpret_cast<TaggedObject *>(
486             common::HeapAllocator::AllocateInNonmoveOrHuge(size, common::LanguageType::DYNAMIC));
487     } else {
488         object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->Allocate(size));
489     }
490     if (UNLIKELY(object == nullptr)) {
491         LOG_ECMA_MEM(FATAL) << "Heap::AllocateClassClass can not allocate any space";
492         UNREACHABLE();
493     }
494     *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
495 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
496     OnAllocateEvent(GetEcmaVM(), object, size);
497 #endif
498     return object;
499 }
500 
501 TaggedObject *SharedHeap::AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size)
502 {
503     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
504     TaggedObject *object = nullptr;
505     if (UNLIKELY(g_isEnableCMCGC)) {
506         // check why the shared heap allocates this in read-only space
507         object = reinterpret_cast<TaggedObject *>(
508             common::HeapAllocator::AllocateInNonmoveOrHuge(size, common::LanguageType::DYNAMIC));
509     } else {
510         object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
511     }
512     if (UNLIKELY(object == nullptr)) {
513         LOG_ECMA_MEM(FATAL) << "Heap::AllocateClassClass can not allocate any space";
514         UNREACHABLE();
515     }
516     *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
517 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
518     OnAllocateEvent(thread->GetEcmaVM(), object, size);
519 #endif
520     return object;
521 }
522 
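// Huge objects bypass the regular spaces. The escalation below is: trigger an old GC hint if needed, allocate,
// collect garbage and retry on failure, then apply the overshoot/snapshot/throw sequence, and finally report a
// fatal OOM if even the last retry fails.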
523 TaggedObject *Heap::AllocateHugeObject(size_t size)
524 {
525     if (UNLIKELY(g_isEnableCMCGC)) {
526         return reinterpret_cast<TaggedObject *>(
527             common::HeapAllocator::AllocateInHuge(size, common::LanguageType::DYNAMIC));
528     }
529     // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
530     CheckAndTriggerOldGC(size);
531 
532     auto *object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
533     if (UNLIKELY(object == nullptr)) {
534         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
535         object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
536         if (UNLIKELY(object == nullptr)) {
537             // if huge object allocation fails with OOM, temporarily increase the space size to avoid a vm crash
538             size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
539             oldSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
540             DumpHeapSnapshotBeforeOOM();
541             StatisticHeapDetail();
542             object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
543             ThrowOutOfMemoryError(thread_, size, "Heap::AllocateHugeObject");
544             object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
545             if (UNLIKELY(object == nullptr)) {
546                 FatalOutOfMemoryError(size, "Heap::AllocateHugeObject");
547             }
548         }
549     }
550     return object;
551 }
552 
553 TaggedObject *Heap::AllocateHugeObject(JSHClass *hclass, size_t size)
554 {
555     // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
556     CheckAndTriggerOldGC(size);
557     auto object = AllocateHugeObject(size);
558     object->SetClass(thread_, hclass);
559 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
560     OnAllocateEvent(GetEcmaVM(), object, size);
561 #endif
562     return object;
563 }
564 
565 TaggedObject *Heap::AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc)
566 {
567     ASSERT(!g_isEnableCMCGC || (desc != nullptr && "in CMCGC, this path is always jitfort."));
568     TaggedObject *object;
569     if (desc) {
570         object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
571             size, thread_, reinterpret_cast<void *>(desc)));
572     } else {
573         object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
574             size, thread_));
575     }
576     return object;
577 }
578 
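// Machine code allocation has two layouts: without a MachineCodeDesc the code object goes straight into the
// (huge) machine code space; with a desc and Jit Fort enabled, fort memory may be reserved first (synchronous
// copy mode) and the code object is then allocated alongside it.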
579 TaggedObject *Heap::AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc)
580 {
581     TaggedObject *object;
582     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
583     if (!desc) {
584         // Jit Fort disabled
585         ASSERT(!GetEcmaVM()->GetJSOptions().GetEnableJitFort());
586         if (UNLIKELY(g_isEnableCMCGC)) {
587             object = reinterpret_cast<TaggedObject *>(common::HeapAllocator::AllocateInNonmoveOrHuge(
588                 size, common::LanguageType::DYNAMIC));
589         } else {
590             object = (size > g_maxRegularHeapObjectSize) ?
591                 reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size)) :
592                 reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size));
593         }
594         CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, machineCodeSpace_,
595             "Heap::AllocateMachineCodeObject");
596         object->SetClass(thread_, hclass);
597 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
598         OnAllocateEvent(GetEcmaVM(), object, size);
599 #endif
600         return object;
601     }
602 
603     // Jit Fort enabled
604     ASSERT(GetEcmaVM()->GetJSOptions().GetEnableJitFort());
605     if (!GetEcmaVM()->GetJSOptions().GetEnableAsyncCopyToFort()) {
606         desc->instructionsAddr = 0;
607         if (size <= g_maxRegularHeapObjectSize) {
608             // for a non-huge code cache object, allocate fort space before allocating the code object
609             uintptr_t mem = machineCodeSpace_->JitFortAllocate(desc);
610             if (mem == ToUintPtr(nullptr)) {
611                 return nullptr;
612             }
613             desc->instructionsAddr = mem;
614         }
615     }
616     if (UNLIKELY(g_isEnableCMCGC)) {
617         object = (size > g_maxRegularHeapObjectSize) ?
618             reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size, desc)) :
619             reinterpret_cast<TaggedObject *>(common::HeapAllocator::AllocateInNonmoveOrHuge(
620                 size, common::LanguageType::DYNAMIC));
621     } else {
622         object = (size > g_maxRegularHeapObjectSize) ?
623             reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size, desc)) :
624             reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size, desc, true));
625     }
626     CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, machineCodeSpace_, desc,
627         "Heap::AllocateMachineCodeObject");
628     object->SetClass(thread_, hclass);
629 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
630     OnAllocateEvent(GetEcmaVM(), object, size);
631 #endif
632     return object;
633 }
634 
635 uintptr_t Heap::AllocateSnapshotSpace(size_t size)
636 {
637     ASSERT(false);
638     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
639     uintptr_t object = snapshotSpace_->Allocate(size);
640     if (UNLIKELY(object == 0)) {
641         FatalOutOfMemoryError(size, "Heap::AllocateSnapshotSpaceObject");
642     }
643 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
644     OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject *>(object), size);
645 #endif
646     return object;
647 }
648 
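// The shared-space TLAB helpers below allocate from the calling thread's TLAB when possible; when the TLAB is
// exhausted they request a new one from the shared heap, reset its top/end, publish the new allocation addresses
// to the JSThread, and return nullptr so the caller falls back to the slow path if no new TLAB can be obtained.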
649 TaggedObject *Heap::AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size)
650 {
651     if (UNLIKELY(g_isEnableCMCGC)) {
652         return reinterpret_cast<TaggedObject*>(common::HeapAllocator::AllocateInNonmoveOrHuge(
653             size, common::LanguageType::DYNAMIC));
654     }
655 
656     ASSERT(!thread->IsJitThread());
657     if (GetEcmaVM()->GetThreadCheckStatus()) {
658         if (thread->IsJitThread()) {
659             LOG_ECMA(FATAL) << "jit thread not allowed";
660         }
661         if (thread->CheckMultiThread()) {
662             LOG_FULL(FATAL) << "Fatal: ecma_vm cannot run in multi-thread!"
663                             << "thread:" << thread->GetThreadId()
664                             << " currentThread:" << JSThread::GetCurrentThreadId();
665         }
666     }
667     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
668     TaggedObject *object = reinterpret_cast<TaggedObject*>(sNonMovableTlab_->Allocate(size));
669     if (object != nullptr) {
670         return object;
671     }
672     if (!sNonMovableTlab_->NeedNewTlab(size)) {
673         // slowpath
674         return nullptr;
675     }
676     size_t newTlabSize = sNonMovableTlab_->ComputeSize();
677     object = sHeap_->AllocateSNonMovableTlab(thread, newTlabSize);
678     if (object == nullptr) {
679         sNonMovableTlab_->DisableNewTlab();
680         return nullptr;
681     }
682     uintptr_t begin = reinterpret_cast<uintptr_t>(object);
683     sNonMovableTlab_->Reset(begin, begin + newTlabSize, begin + size);
684     auto topAddress = sNonMovableTlab_->GetTopAddress();
685     auto endAddress = sNonMovableTlab_->GetEndAddress();
686     thread->ReSetSNonMovableSpaceAllocationAddress(topAddress, endAddress);
687     sHeap_->TryTriggerConcurrentMarking(thread);
688     return object;
689 }
690 
691 TaggedObject *Heap::AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size)
692 {
693     if (UNLIKELY(g_isEnableCMCGC)) {
694         // will be invoked by the asm interpreter stub AllocateInSOld
695         return AllocateOldForCMC(thread, size);
696     }
697 
698     ASSERT(!thread->IsJitThread());
699     if (GetEcmaVM()->GetThreadCheckStatus()) {
700         if (thread->IsJitThread()) {
701             LOG_ECMA(FATAL) << "jit thread not allowed";
702         }
703         if (thread->CheckMultiThread()) {
704             LOG_FULL(FATAL) << "Fatal: ecma_vm cannot run in multi-thread!"
705                             << "thread:" << thread->GetThreadId()
706                             << " currentThread:" << JSThread::GetCurrentThreadId();
707         }
708     }
709     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
710     TaggedObject *object = reinterpret_cast<TaggedObject*>(sOldTlab_->Allocate(size));
711     if (object != nullptr) {
712         return object;
713     }
714     if (!sOldTlab_->NeedNewTlab(size)) {
715         // slowpath
716         return nullptr;
717     }
718     size_t newTlabSize = sOldTlab_->ComputeSize();
719     object = sHeap_->AllocateSOldTlab(thread, newTlabSize);
720     if (object == nullptr) {
721         sOldTlab_->DisableNewTlab();
722         return nullptr;
723     }
724     uintptr_t begin = reinterpret_cast<uintptr_t>(object);
725     sOldTlab_->Reset(begin, begin + newTlabSize, begin + size);
726     auto topAddress = sOldTlab_->GetTopAddress();
727     auto endAddress = sOldTlab_->GetEndAddress();
728     thread->ReSetSOldSpaceAllocationAddress(topAddress, endAddress);
729     sHeap_->TryTriggerConcurrentMarking(thread);
730     return object;
731 }
732 
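// Flips the semispaces after a young GC: the active space is stopped, the inactive one is restarted (optionally
// with an overshoot), and the thread's bump-pointer allocation addresses are re-pointed at the new active space.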
733 void Heap::SwapNewSpace()
734 {
735     activeSemiSpace_->Stop();
736     size_t newOverShootSize = 0;
737     if (!inBackground_ && gcType_ != TriggerGCType::FULL_GC && gcType_ != TriggerGCType::APPSPAWN_FULL_GC) {
738         newOverShootSize = activeSemiSpace_->CalculateNewOverShootSize();
739     }
740     inactiveSemiSpace_->Restart(newOverShootSize);
741 
742     SemiSpace *newSpace = inactiveSemiSpace_;
743     inactiveSemiSpace_ = activeSemiSpace_;
744     activeSemiSpace_ = newSpace;
745     if (UNLIKELY(ShouldVerifyHeap())) {
746         inactiveSemiSpace_->EnumerateRegions([](Region *region) {
747             region->SetInactiveSemiSpace();
748         });
749     }
750 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
751     activeSemiSpace_->SwapAllocationCounter(inactiveSemiSpace_);
752 #endif
753     auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
754     auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
755     thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
756 }
757 
758 void Heap::SwapOldSpace()
759 {
760     compressSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity());
761     auto *oldSpace = compressSpace_;
762     compressSpace_ = oldSpace_;
763     oldSpace_ = oldSpace;
764 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
765     oldSpace_->SwapAllocationCounter(compressSpace_);
766 #endif
767 }
768 
769 void Heap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
770                        [[maybe_unused]] size_t size)
771 {
772 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
773     HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
774     if (profiler != nullptr) {
775         base::BlockHookScope blockScope;
776         profiler->MoveEvent(address, forwardAddress, size);
777     }
778 #endif
779 }
780 
781 void SharedHeap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
782                              [[maybe_unused]] size_t size)
783 {
784 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
785     Runtime::GetInstance()->GCIterateThreadListWithoutLock([&](JSThread *thread) {
786         HeapProfilerInterface *profiler = thread->GetEcmaVM()->GetHeapProfile();
787         if (profiler != nullptr) {
788             base::BlockHookScope blockScope;
789             profiler->MoveEvent(address, forwardAddress, size);
790         }
791     });
792 #endif
793 }
794 
795 void SharedHeap::SwapOldSpace()
796 {
797     sCompressSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity());
798     auto *oldSpace = sCompressSpace_;
799     sCompressSpace_ = sOldSpace_;
800     sOldSpace_ = oldSpace;
801 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
802     sOldSpace_->SwapAllocationCounter(sCompressSpace_);
803 #endif
804 }
805 
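// Post-GC region reclamation: young regions have their GC metadata (bitsets, cross-region sets, alive counts)
// cleared, the collection set or compress space is reclaimed depending on the GC type, and the clear-task
// condition variable is signalled once everything is done.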
806 void Heap::ReclaimRegions(TriggerGCType gcType)
807 {
808     activeSemiSpace_->EnumerateRegionsWithRecord([] (Region *region) {
809         region->ResetRegionTypeFlag();
810         region->ClearMarkGCBitset();
811         region->ClearCrossRegionRSet();
812         region->ResetAliveObject();
813         region->ClearGCFlag(RegionGCFlags::IN_NEW_TO_NEW_SET);
814     });
815     size_t cachedSize = inactiveSemiSpace_->GetInitialCapacity();
816     if (gcType == TriggerGCType::FULL_GC) {
817         compressSpace_->Reset();
818         cachedSize = 0;
819     } else if (gcType == TriggerGCType::OLD_GC) {
820         oldSpace_->ReclaimCSet();
821         isCSetClearing_.store(false, std::memory_order_release);
822     }
823 
824     inactiveSemiSpace_->ReclaimRegions(cachedSize);
825     sweeper_->WaitAllTaskFinished();
826     EnumerateNonNewSpaceRegionsWithRecord([] (Region *region) {
827         region->ClearMarkGCBitset();
828         region->ClearCrossRegionRSet();
829     });
830     if (!clearTaskFinished_) {
831         LockHolder holder(waitClearTaskFinishedMutex_);
832         clearTaskFinished_ = true;
833         waitClearTaskFinishedCV_.SignalAll();
834     }
835 }
836 
837 // only called on the js thread
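// Clears remembered-set slots covering a freed range; non-young regions also clear their old-to-new and
// cross-region sets, using atomic variants where concurrent sweeping may race with this thread.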
838 void Heap::ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd)
839 {
840     if (UNLIKELY(g_isEnableCMCGC)) {
841         return;
842     }
843 
844     if (!current->InYoungSpace()) {
845         // This clear may race with concurrent sweeping, so use CAS
846         current->AtomicClearSweepingOldToNewRSetInRange(freeStart, freeEnd);
847         current->ClearOldToNewRSetInRange(freeStart, freeEnd);
848         current->AtomicClearCrossRegionRSetInRange(freeStart, freeEnd);
849     }
850     current->ClearLocalToShareRSetInRange(freeStart, freeEnd);
851     current->AtomicClearSweepingLocalToShareRSetInRange(freeStart, freeEnd);
852 }
853 
854 size_t Heap::GetCommittedSize() const
855 {
856     size_t result = activeSemiSpace_->GetCommittedSize() +
857                     oldSpace_->GetCommittedSize() +
858                     hugeObjectSpace_->GetCommittedSize() +
859                     nonMovableSpace_->GetCommittedSize() +
860                     machineCodeSpace_->GetCommittedSize() +
861                     hugeMachineCodeSpace_->GetCommittedSize() +
862                     readOnlySpace_->GetCommittedSize() +
863                     appSpawnSpace_->GetCommittedSize() +
864                     snapshotSpace_->GetCommittedSize();
865     return result;
866 }
867 
868 size_t Heap::GetHeapObjectSize() const
869 {
870     size_t result = activeSemiSpace_->GetHeapObjectSize() +
871                     oldSpace_->GetHeapObjectSize() +
872                     hugeObjectSpace_->GetHeapObjectSize() +
873                     nonMovableSpace_->GetHeapObjectSize() +
874                     machineCodeSpace_->GetCommittedSize() +
875                     hugeMachineCodeSpace_->GetCommittedSize() +
876                     readOnlySpace_->GetCommittedSize() +
877                     appSpawnSpace_->GetHeapObjectSize() +
878                     snapshotSpace_->GetHeapObjectSize();
879     return result;
880 }
881 
882 void Heap::NotifyRecordMemorySize()
883 {
884     if (GetRecordObjectSize() == 0) {
885         RecordOrResetObjectSize(GetHeapObjectSize());
886     }
887     if (GetRecordNativeSize() == 0) {
888         RecordOrResetNativeSize(GetNativeBindingSize());
889     }
890 }
891 
892 size_t Heap::GetRegionCount() const
893 {
894     size_t result = activeSemiSpace_->GetRegionCount() +
895         oldSpace_->GetRegionCount() +
896         oldSpace_->GetCollectSetRegionCount() +
897         appSpawnSpace_->GetRegionCount() +
898         snapshotSpace_->GetRegionCount() +
899         nonMovableSpace_->GetRegionCount() +
900         hugeObjectSpace_->GetRegionCount() +
901         machineCodeSpace_->GetRegionCount() +
902         hugeMachineCodeSpace_->GetRegionCount();
903     return result;
904 }
905 
906 uint32_t Heap::GetHeapObjectCount() const
907 {
908     uint32_t count = 0;
909     sweeper_->EnsureAllTaskFinished();
910     this->IterateOverObjects([&count]([[maybe_unused]] TaggedObject *obj) {
911         ++count;
912     });
913     return count;
914 }
915 
916 void Heap::InitializeIdleStatusControl(std::function<void(bool)> callback)
917 {
918     notifyIdleStatusCallback = callback;
919     if (callback != nullptr) {
920         OPTIONAL_LOG(ecmaVm_, INFO) << "Received idle status control call back";
921         enableIdleGC_ = ecmaVm_->GetJSOptions().EnableIdleGC();
922     }
923 }
924 
925 void SharedHeap::TryTriggerConcurrentMarking(JSThread *thread)
926 {
927     if (UNLIKELY(g_isEnableCMCGC)) {
928         return;
929     }
930     if (!CheckCanTriggerConcurrentMarking(thread)) {
931         return;
932     }
933     bool triggerConcurrentMark = (GetHeapObjectSize() >= globalSpaceConcurrentMarkLimit_);
934     if (triggerConcurrentMark && (OnStartupEvent() || IsJustFinishStartup())) {
935         triggerConcurrentMark = ObjectExceedJustFinishStartupThresholdForCM();
936     }
937     if (triggerConcurrentMark) {
938         // currently, SharedHeap::TryTriggerConcurrentMarking is called only when allocating an object in the SharedHeap
939         TriggerConcurrentMarking<TriggerGCType::SHARED_GC, MarkReason::ALLOCATION_LIMIT>(thread);
940     }
941 }
942 
943 TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass)
944 {
945     size_t size = hclass->GetObjectSize();
946     return AllocateNonMovableOrHugeObject(thread, hclass, size);
947 }
948 
949 TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
950 {
951     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
952     if (size > g_maxRegularHeapObjectSize) {
953         return AllocateHugeObject(thread, hclass, size);
954     }
955 
956     TaggedObject *object = nullptr;
957     if (UNLIKELY(g_isEnableCMCGC)) {
958         object = reinterpret_cast<TaggedObject *>(
959             common::HeapAllocator::AllocateInNonmoveOrHuge(size, common::LanguageType::DYNAMIC));
960         object->SetClass(thread, hclass);
961     } else {
962         object = thread->IsJitThread() ? nullptr :
963             const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
964         if (object == nullptr) {
965             object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
966             CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
967                 "SharedHeap::AllocateNonMovableOrHugeObject");
968             object->SetClass(thread, hclass);
969             TryTriggerConcurrentMarking(thread);
970         } else {
971             object->SetClass(thread, hclass);
972         }
973 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
974         OnAllocateEvent(thread->GetEcmaVM(), object, size);
975 #endif
976     }
977     return object;
978 }
979 
980 TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, size_t size)
981 {
982     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
983     if (size > g_maxRegularHeapObjectSize) {
984         return AllocateHugeObject(thread, size);
985     }
986 
987     TaggedObject *object = nullptr;
988     if (UNLIKELY(g_isEnableCMCGC)) {
989         object = reinterpret_cast<TaggedObject *>(
990             common::HeapAllocator::AllocateInNonmoveOrHuge(size, common::LanguageType::DYNAMIC));
991     } else {
992         object = thread->IsJitThread() ? nullptr :
993             const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
994         if (object == nullptr) {
995             object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
996             CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
997                 "SharedHeap::AllocateNonMovableOrHugeObject");
998             TryTriggerConcurrentMarking(thread);
999         }
1000 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1001         OnAllocateEvent(thread->GetEcmaVM(), object, size);
1002 #endif
1003     }
1004     return object;
1005 }
1006 
1007 TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass)
1008 {
1009     size_t size = hclass->GetObjectSize();
1010     return AllocateOldOrHugeObject(thread, hclass, size);
1011 }
1012 
1013 TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
1014 {
1015     TaggedObject *object = nullptr;
1016     if (UNLIKELY(g_isEnableCMCGC)) {
1017         object = AllocateOldForCMC(thread, size);
1018         object->SetClass(thread, hclass);
1019     } else {
1020         size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
1021         if (size > g_maxRegularHeapObjectSize) {
1022             return AllocateHugeObject(thread, hclass, size);
1023         }
1024         object = thread->IsJitThread() ? nullptr :
1025             const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
1026         if (object == nullptr) {
1027             object = AllocateInSOldSpace(thread, size);
1028             CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
1029             object->SetClass(thread, hclass);
1030             TryTriggerConcurrentMarking(thread);
1031         } else {
1032             object->SetClass(thread, hclass);
1033         }
1034 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1035         OnAllocateEvent(thread->GetEcmaVM(), object, size);
1036 #endif
1037     }
1038     return object;
1039 }
1040 
1041 TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, size_t size)
1042 {
1043     TaggedObject *object = nullptr;
1044     if (UNLIKELY(g_isEnableCMCGC)) {
1045         object = AllocateOldForCMC(thread, size);
1046     } else {
1047         size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
1048         if (size > g_maxRegularHeapObjectSize) {
1049             return AllocateHugeObject(thread, size);
1050         }
1051 
1052         object = thread->IsJitThread() ? nullptr :
1053             const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
1054         if (object == nullptr) {
1055             object = AllocateInSOldSpace(thread, size);
1056             CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
1057             TryTriggerConcurrentMarking(thread);
1058         }
1059     }
1060     return object;
1061 }
1062 
1063 TaggedObject *SharedHeap::AllocateOldOrHugeObjectNoGC(JSThread *thread, size_t size)
1064 {
1065     if (UNLIKELY(g_isEnableCMCGC)) {
1066         size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
1067         TaggedObject *object = reinterpret_cast<TaggedObject*>(common::HeapAllocator::AllocateOldOrLargeNoGC(size));
1068         return object;
1069     } else {
1070         std::abort();
1071     }
1072 }
1073 
1074 TaggedObject *SharedHeap::AllocateInSOldSpace(JSThread *thread, size_t size)
1075 {
1076     if (UNLIKELY(g_isEnableCMCGC)) {
1077         (void)thread;
1078         ASSERT(!thread->IsJitThread());
1079         return AllocateOldForCMC(thread, size);
1080     }
1081     // the jit thread has no local heap, so it must not trigger GC
1082     bool allowGC = !thread->IsJitThread();
1083     if (allowGC) {
1084         auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
1085         localHeap->TryTriggerFullMarkBySharedSize(size);
1086     }
1087     TaggedObject *object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, false));
1088     // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
1089     if (object == nullptr) {
1090         if (allowGC) {
1091             CheckAndTriggerSharedGC(thread);
1092         }
1093         object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
1094         if (object == nullptr) {
1095             if (allowGC) {
1096                 CollectGarbageNearOOM(thread);
1097             }
1098             object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
1099         }
1100     }
1101     return object;
1102 }
1103 
1104 TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
1105 {
1106     auto object = AllocateHugeObject(thread, size);
1107     object->SetClass(thread, hclass);
1108 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1109     OnAllocateEvent(thread->GetEcmaVM(), object, size);
1110 #endif
1111     return object;
1112 }
1113 
1114 TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, size_t size)
1115 {
1116     (void)thread;
1117     if (UNLIKELY(g_isEnableCMCGC)) {
1118         return reinterpret_cast<TaggedObject *>(
1119             common::HeapAllocator::AllocateInHuge(size, common::LanguageType::DYNAMIC));
1120     }
1121     // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
1122     CheckHugeAndTriggerSharedGC(thread, size);
1123     auto *object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
1124     if (UNLIKELY(object == nullptr)) {
1125         CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
1126         object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
1127         if (UNLIKELY(object == nullptr)) {
1128             // if huge object allocation fails with OOM, temporarily increase the space size to avoid a vm crash
1129             size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
1130             sHugeObjectSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
1131             DumpHeapSnapshotBeforeOOM(thread, SharedHeapOOMSource::NORMAL_ALLOCATION);
1132             ThrowOutOfMemoryError(thread, size, "SharedHeap::AllocateHugeObject");
1133             object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
1134             if (UNLIKELY(object == nullptr)) {
1135                 FatalOutOfMemoryError(size, "SharedHeap::AllocateHugeObject");
1136             }
1137         }
1138     }
1139     TryTriggerConcurrentMarking(thread);
1140     return object;
1141 }
1142 
1143 TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass)
1144 {
1145     size_t size = hclass->GetObjectSize();
1146     return AllocateReadOnlyOrHugeObject(thread, hclass, size);
1147 }
1148 
1149 TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
1150 {
1151     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
1152     if (size > g_maxRegularHeapObjectSize) {
1153         return AllocateHugeObject(thread, hclass, size);
1154     }
1155 
1156     TaggedObject *object = nullptr;
1157     if (UNLIKELY(g_isEnableCMCGC)) {
1158         object = reinterpret_cast<TaggedObject *>(
1159             common::HeapAllocator::AllocateInReadOnly(size, common::LanguageType::DYNAMIC));
1160     } else {
1161         object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
1162         CHECK_SOBJ_AND_THROW_OOM_ERROR(
1163             thread, object, size, sReadOnlySpace_, "SharedHeap::AllocateReadOnlyOrHugeObject");
1164     }
1165     ASSERT(object != nullptr);
1166     object->SetClass(thread, hclass);
1167     return object;
1168 }
1169 
1170 TaggedObject *SharedHeap::AllocateSOldTlab(JSThread *thread, size_t size)
1171 {
1172     ASSERT(!g_isEnableCMCGC);
1173 
1174     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
1175     if (size > g_maxRegularHeapObjectSize) {
1176         return nullptr;
1177     }
1178     TaggedObject *object = nullptr;
1179     if (sOldSpace_->GetCommittedSize() > sOldSpace_->GetInitialCapacity() / 2) { // 2: half
1180         object = reinterpret_cast<TaggedObject *>(sOldSpace_->AllocateNoGCAndExpand(thread, size));
1181     } else {
1182         object = AllocateInSOldSpace(thread, size);
1183     }
1184     return object;
1185 }
1186 
1187 TaggedObject *SharedHeap::AllocateSNonMovableTlab(JSThread *thread, size_t size)
1188 {
1189     ASSERT(!g_isEnableCMCGC);
1190 
1191     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
1192     if (size > g_maxRegularHeapObjectSize) {
1193         return nullptr;
1194     }
1195     TaggedObject *object = nullptr;
1196     object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
1197     return object;
1198 }
1199 
1200 template<TriggerGCType gcType, MarkReason markReason>
1201 void SharedHeap::TriggerConcurrentMarking(JSThread *thread)
1202 {
1203     ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_PARTIAL_GC);
1204     // the lock is taken outside to prevent an extreme case; updating gcFinished_ could perhaps be moved into
1205     // CheckAndPostTask instead of locking outside.
1206     LockHolder lock(waitGCFinishedMutex_);
1207     if (dThread_->CheckAndPostTask(TriggerConcurrentMarkTask<gcType, markReason>(thread))) {
1208         ASSERT(gcFinished_);
1209         gcFinished_ = false;
1210     }
1211 }
1212 
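// With CMC-GC enabled the request is delegated to BaseRuntime; otherwise a collect-garbage task is posted to
// the daemon thread, and gcFinished_ is cleared under the lock so the subsequent WaitGCFinished cannot miss it.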
1213 template<TriggerGCType gcType, GCReason gcReason>
1214 void SharedHeap::CollectGarbage(JSThread *thread)
1215 {
1216     if (UNLIKELY(g_isEnableCMCGC)) {
1217         common::GCReason cmcReason = common::GC_REASON_USER;
1218         bool async = true;
1219         if constexpr (gcType == TriggerGCType::FULL_GC || gcType == TriggerGCType::SHARED_FULL_GC ||
1220             gcType == TriggerGCType::APPSPAWN_FULL_GC || gcType == TriggerGCType::APPSPAWN_SHARED_FULL_GC ||
1221             gcReason == GCReason::ALLOCATION_FAILED) {
1222             cmcReason = common::GC_REASON_BACKUP;
1223             async = false;
1224         }
1225         common::BaseRuntime::RequestGC(cmcReason, async, common::GC_TYPE_FULL);
1226         return;
1227     }
1228     ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_PARTIAL_GC ||
1229         gcType == TriggerGCType::SHARED_FULL_GC);
1230 #ifndef NDEBUG
1231     ASSERT(!thread->HasLaunchedSuspendAll());
1232 #endif
1233     if (UNLIKELY(!dThread_->IsRunning())) {
1234         // Hopefully this will not happen, unless AppSpawn runs something after PostFork
1235         LOG_GC(ERROR) << "Try to collect garbage in shared heap, but daemon thread is not running.";
1236         ForceCollectGarbageWithoutDaemonThread(gcType, gcReason, thread);
1237         return;
1238     }
1239     {
1240         // lock here is outside post task to prevent the extreme case: another js thread succeeed posting a
1241         // concurrentmark task, so here will directly go into WaitGCFinished, but gcFinished_ is somehow
1242         // not set by that js thread before the WaitGCFinished done, and maybe cause an unexpected OOM
1243         LockHolder lock(waitGCFinishedMutex_);
1244         if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<gcType, gcReason>(thread))) {
1245             ASSERT(gcFinished_);
1246             gcFinished_ = false;
1247         }
1248     }
1249     ASSERT(!gcFinished_);
1250     SetForceGC(true);
1251     WaitGCFinished(thread);
1252 }
1253 
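// Unlike CollectGarbage above, the following method only posts a shared full GC task and does
// not block in WaitGCFinished.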
// This method is used only in the idle state and background switchover state.
template<GCReason gcReason>
void SharedHeap::CompressCollectGarbageNotWaiting(JSThread *thread)
{
    {
        // The lock is taken outside the post-task call to prevent an extreme case: another js thread
        // succeeds in posting a concurrent mark task, so this thread would go straight into WaitGCFinished,
        // but gcFinished_ has somehow not been cleared by that js thread before WaitGCFinished is done,
        // which may cause an unexpected OOM.
        LockHolder lock(waitGCFinishedMutex_);
        if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<TriggerGCType::SHARED_FULL_GC, gcReason>(thread))) {
            ASSERT(gcFinished_);
            gcFinished_ = false;
        }
    }
    ASSERT(!gcFinished_);
    SetForceGC(true);
}

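// Test-only helper: posts a shared GC task to the daemon thread when it is running, without
// waiting for the collection to finish.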
template<TriggerGCType gcType, GCReason gcReason>
void SharedHeap::PostGCTaskForTest(JSThread *thread)
{
    ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_PARTIAL_GC ||
        gcType == TriggerGCType::SHARED_FULL_GC);
#ifndef NDEBUG
    ASSERT(!thread->HasLaunchedSuspendAll());
#endif
    if (dThread_->IsRunning()) {
        // Some unit tests may run without the daemon thread.
        LockHolder lock(waitGCFinishedMutex_);
        if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<gcType, gcReason>(thread))) {
            ASSERT(gcFinished_);
            gcFinished_ = false;
        }
        ASSERT(!gcFinished_);
    }
}

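// Removes the element referenced by |iter| in O(1) by overwriting it with the last element and
// popping the back; element order is not preserved. When the last element itself is removed,
// |iter| is reset to end() because pop_back() invalidates iterators to the removed element.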
static void SwapBackAndPop(NativePointerList &vec, NativePointerList::iterator &iter)
{
    *iter = vec.back();
    if (iter + 1 == vec.end()) {
        vec.pop_back();
        iter = vec.end();
    } else {
        vec.pop_back();
    }
}

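// Releases surplus capacity once the vector has dropped below half of its capacity.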
static void ShrinkWithFactor(NativePointerList &vec)
{
    constexpr size_t SHRINK_FACTOR = 2;
    if (vec.size() < vec.capacity() / SHRINK_FACTOR) {
        vec.shrink_to_fit();
    }
}

void SharedHeap::InvokeSharedNativePointerCallbacks()
{
    Runtime *runtime = Runtime::GetInstance();
    if (!runtime->GetSharedNativePointerCallbacks().empty()) {
        runtime->InvokeSharedNativePointerCallbacks();
    }
}

void SharedHeap::PushToSharedNativePointerList(JSNativePointer* pointer)
{
    ASSERT(JSTaggedValue(pointer).IsInSharedHeap());
    if (g_isEnableCMCGC) {
        // Note: CMC GC assumes JSNativePointer is always a non-young object and tries to optimize it out in young GC
        ASSERT_LOGF(common::Heap::GetHeap().InRecentSpace(pointer) == false,
                    "Violate CMC-GC assumption: should not be young object");
        common::BaseRuntime::NotifyNativeAllocation(pointer->GetBindingSize());
    }
    std::lock_guard<std::mutex> lock(sNativePointerListMutex_);
    sharedNativePointerList_.emplace_back(JSTaggedValue(pointer));
}

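// Visits every entry of the shared native pointer list as a weak root. Entries whose objects are
// no longer alive have their deleters queued into the runtime-wide shared native pointer callbacks
// and are removed from the list with SwapBackAndPop.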
void SharedHeap::IteratorNativePointerList(WeakVisitor &visitor)
{
    auto& sharedNativePointerCallbacks = Runtime::GetInstance()->GetSharedNativePointerCallbacks();
    auto sharedIter = sharedNativePointerList_.begin();
    while (sharedIter != sharedNativePointerList_.end()) {
        ObjectSlot slot(reinterpret_cast<uintptr_t>(&(*sharedIter)));
        bool isAlive = visitor.VisitRoot(Root::ROOT_VM, slot);
        if (!isAlive) {
            JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*sharedIter).GetTaggedObject());
            sharedNativePointerCallbacks.emplace_back(
                object->GetDeleter(), std::make_pair(object->GetExternalPointer(), object->GetData()));
            common::BaseRuntime::NotifyNativeFree(object->GetBindingSize());
            SwapBackAndPop(sharedNativePointerList_, sharedIter);
        } else {
            ++sharedIter;
        }
    }
    ShrinkWithFactor(sharedNativePointerList_);
}

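// Same cleanup as above, but driven by a WeakRootVisitor that returns the forwarded address:
// dead entries queue their deleters, surviving entries are rewritten to the forwarded object.
// Expected to run with SuspendAll launched (see the debug assert).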
void SharedHeap::ProcessSharedNativeDelete(const WeakRootVisitor& visitor)
{
#ifndef NDEBUG
    ASSERT(JSThread::GetCurrent()->HasLaunchedSuspendAll());
#endif
    auto& sharedNativePointerCallbacks = Runtime::GetInstance()->GetSharedNativePointerCallbacks();
    auto sharedIter = sharedNativePointerList_.begin();
    while (sharedIter != sharedNativePointerList_.end()) {
        JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*sharedIter).GetTaggedObject());
        auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
        if (fwd == nullptr) {
            sharedNativePointerCallbacks.emplace_back(
                object->GetDeleter(), std::make_pair(object->GetExternalPointer(), object->GetData()));
            SwapBackAndPop(sharedNativePointerList_, sharedIter);
        } else {
            if (fwd != reinterpret_cast<TaggedObject*>(object)) {
                *sharedIter = JSTaggedValue(fwd);
            }
            ++sharedIter;
        }
    }
    ShrinkWithFactor(sharedNativePointerList_);
}

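// Local-heap counterpart: for non-young GCs, dead JSNativePointer entries are moved out of
// nativePointerList_ into the async callback pack and out of concurrentNativePointerList_ into
// the concurrent callback list, with the native size statistics decreased accordingly.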
void Heap::ProcessNativeDelete(const WeakRootVisitor& visitor)
{
    // ProcessNativeDelete should be limited to OldGC or FullGC only
    if (!IsYoungGC()) {
        auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
        auto iter = nativePointerList_.begin();
        ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK,
            ("ProcessNativeDeleteNum:" + std::to_string(nativePointerList_.size())).c_str(), "");
        while (iter != nativePointerList_.end()) {
            JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*iter).GetTaggedObject());
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                size_t bindingSize = object->GetBindingSize();
                asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
                nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
                SwapBackAndPop(nativePointerList_, iter);
            } else {
                ++iter;
            }
        }
        ShrinkWithFactor(nativePointerList_);

        auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
        auto newIter = concurrentNativePointerList_.begin();
        while (newIter != concurrentNativePointerList_.end()) {
            JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*newIter).GetTaggedObject());
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
                concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
                SwapBackAndPop(concurrentNativePointerList_, newIter);
            } else {
                ++newIter;
            }
        }
        ShrinkWithFactor(concurrentNativePointerList_);
    }
}

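// Like ProcessNativeDelete, but also keeps the lists up to date for surviving objects: the
// native binding size is re-accumulated and entries are rewritten to the forwarded address
// when the object has moved.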
void Heap::ProcessReferences(const WeakRootVisitor& visitor)
{
    // Processing native references should be limited to OldGC or FullGC only
    if (!IsYoungGC()) {
        auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
        ResetNativeBindingSize();
        // array buffer
        auto iter = nativePointerList_.begin();
        ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK,
            ("ProcessReferencesNum:" + std::to_string(nativePointerList_.size())).c_str(), "");
        while (iter != nativePointerList_.end()) {
            JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*iter).GetTaggedObject());
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                size_t bindingSize = object->GetBindingSize();
                asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
                nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
                SwapBackAndPop(nativePointerList_, iter);
                continue;
            }
            IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
            if (fwd != reinterpret_cast<TaggedObject*>(object)) {
                *iter = JSTaggedValue(fwd);
            }
            ++iter;
        }
        ShrinkWithFactor(nativePointerList_);

        auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
        auto newIter = concurrentNativePointerList_.begin();
        while (newIter != concurrentNativePointerList_.end()) {
            JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*newIter).GetTaggedObject());
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
                concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
                SwapBackAndPop(concurrentNativePointerList_, newIter);
                continue;
            }
            IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
            if (fwd != reinterpret_cast<TaggedObject*>(object)) {
                *newIter = JSTaggedValue(fwd);
            }
            ++newIter;
        }
        ShrinkWithFactor(concurrentNativePointerList_);
    }
}

void Heap::PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent)
{
    ASSERT(!JSTaggedValue(pointer).IsInSharedHeap());
    if (g_isEnableCMCGC) {
        common::BaseRuntime::NotifyNativeAllocation(pointer->GetBindingSize());
    }
    if (isConcurrent) {
        concurrentNativePointerList_.emplace_back(JSTaggedValue(pointer));
    } else {
        nativePointerList_.emplace_back(JSTaggedValue(pointer));
    }
}

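// WeakVisitor-based walk over both local native pointer lists; dead entries are queued for
// asynchronous or concurrent deletion and the freed native memory is reported to the common
// runtime via NotifyNativeFree.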
void Heap::IteratorNativePointerList(WeakVisitor &visitor)
{
    auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
    auto iter = nativePointerList_.begin();
    ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK,
        ("ProcessNativeDeleteNum:" + std::to_string(nativePointerList_.size())).c_str(), "");
    while (iter != nativePointerList_.end()) {
        ObjectSlot slot(reinterpret_cast<uintptr_t>(&(*iter)));
        bool isAlive = visitor.VisitRoot(Root::ROOT_VM, slot);
        if (!isAlive) {
            JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*iter).GetTaggedObject());
            size_t bindingSize = object->GetBindingSize();
            asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
                std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
            nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
            common::BaseRuntime::NotifyNativeFree(bindingSize);
            SwapBackAndPop(nativePointerList_, iter);
        } else {
            ++iter;
        }
    }
    ShrinkWithFactor(nativePointerList_);

    auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
    auto concurrentIter = concurrentNativePointerList_.begin();
    while (concurrentIter != concurrentNativePointerList_.end()) {
        ObjectSlot slot(reinterpret_cast<uintptr_t>(&(*concurrentIter)));
        bool isAlive = visitor.VisitRoot(Root::ROOT_VM, slot);
        if (!isAlive) {
            JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*concurrentIter).GetTaggedObject());
            nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
            concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
                std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
            common::BaseRuntime::NotifyNativeFree(object->GetBindingSize());
            SwapBackAndPop(concurrentNativePointerList_, concurrentIter);
        } else {
            ++concurrentIter;
        }
    }
    ShrinkWithFactor(concurrentNativePointerList_);
}

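// Eagerly destroys a single native pointer and removes it from whichever list currently holds
// it, keeping the native size statistics in sync.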
void Heap::RemoveFromNativePointerList(const JSNativePointer* pointer)
{
    auto iter = std::find_if(nativePointerList_.begin(), nativePointerList_.end(),
                             [pointer](JSTaggedValue item) { return item.GetTaggedObject() == pointer; });
    if (iter != nativePointerList_.end()) {
        JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*iter).GetTaggedObject());
        nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
        object->Destroy(thread_);
        SwapBackAndPop(nativePointerList_, iter);
    }
    auto newIter = std::find_if(concurrentNativePointerList_.begin(), concurrentNativePointerList_.end(),
                                [pointer](JSTaggedValue item) { return item.GetTaggedObject() == pointer; });
    if (newIter != concurrentNativePointerList_.end()) {
        JSNativePointer *object = reinterpret_cast<JSNativePointer *>((*newIter).GetTaggedObject());
        nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
        object->Destroy(thread_);
        SwapBackAndPop(concurrentNativePointerList_, newIter);
    }
}

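// Destroys every registered native pointer and empties both lists.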
void Heap::ClearNativePointerList()
{
    for (auto iter : nativePointerList_) {
        reinterpret_cast<JSNativePointer *>(iter.GetTaggedObject())->Destroy(thread_);
    }
    for (auto iter : concurrentNativePointerList_) {
        reinterpret_cast<JSNativePointer *>(iter.GetTaggedObject())->Destroy(thread_);
    }
    nativePointerList_.clear();
    concurrentNativePointerList_.clear();
}

}  // namespace panda::ecmascript

#endif  // ECMASCRIPT_MEM_HEAP_INL_H