1 /*
2  * Copyright (c) 2021 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef ECMASCRIPT_MEM_HEAP_INL_H
17 #define ECMASCRIPT_MEM_HEAP_INL_H
18 
19 #include "ecmascript/mem/heap.h"
20 
21 #include "ecmascript/base/block_hook_scope.h"
22 #include "ecmascript/js_native_pointer.h"
23 #include "ecmascript/daemon/daemon_task-inl.h"
24 #include "ecmascript/dfx/hprof/heap_tracker.h"
25 #include "ecmascript/ecma_vm.h"
26 #include "ecmascript/mem/allocator-inl.h"
27 #include "ecmascript/mem/concurrent_sweeper.h"
28 #include "ecmascript/mem/linear_space.h"
29 #include "ecmascript/mem/mem_controller.h"
30 #include "ecmascript/mem/shared_mem_controller.h"
31 #include "ecmascript/mem/sparse_space.h"
32 #include "ecmascript/mem/tagged_object.h"
33 #include "ecmascript/mem/thread_local_allocation_buffer.h"
34 #include "ecmascript/mem/barriers-inl.h"
35 #include "ecmascript/mem/mem_map_allocator.h"
36 #include "ecmascript/runtime.h"
37 
38 namespace panda::ecmascript {
39 #define CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, space, message)                                         \
40     if (UNLIKELY((object) == nullptr)) {                                                                    \
41         EcmaVM *vm = GetEcmaVM();                                                                           \
42         size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
43         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
44         if ((space)->IsOOMDumpSpace()) {                                                                    \
45             DumpHeapSnapshotBeforeOOM(false);                                                               \
46         }                                                                                                   \
47         StatisticHeapDetail();                                                                              \
48         ThrowOutOfMemoryError(GetJSThread(), size, message);                                                \
49         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size));                               \
50     }
51 
52 #define CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, space, message)                                \
53     if (UNLIKELY((object) == nullptr)) {                                                                    \
54         size_t oomOvershootSize = GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();                \
55         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
56         DumpHeapSnapshotBeforeOOM(false, thread, SharedHeapOOMSource::NORMAL_ALLOCATION);                   \
57         ThrowOutOfMemoryError(thread, size, message);                                                       \
58         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(thread, size));                       \
59     }
60 
61 #define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, space, desc, message)                   \
62     if (UNLIKELY((object) == nullptr)) {                                                                    \
63         EcmaVM *vm = GetEcmaVM();                                                                           \
64         size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
65         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
66         SetMachineCodeOutOfMemoryError(GetJSThread(), size, message);                                       \
67         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size, desc));                         \
68     }
69 
70 #define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, space, message)                              \
71     if (UNLIKELY((object) == nullptr)) {                                                                    \
72         EcmaVM *vm = GetEcmaVM();                                                                           \
73         size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
74         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
75         SetMachineCodeOutOfMemoryError(GetJSThread(), size, message);                                       \
76         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size));                               \
77     }
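// NOTE (editorial, not part of the original source): the four macros above share one last-resort
// pattern -- when an allocation returns nullptr, grant the space a temporary out-of-memory
// overshoot budget, record/report the OOM, and retry the allocation once inside the enlarged
// limit. A typical expansion site, taken from Heap::AllocateOldOrHugeObject later in this file,
// looks like:
//
//     CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, oldSpace_, "Heap::AllocateOldOrHugeObject");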
78 
79 template<class Callback>
80 void SharedHeap::EnumerateOldSpaceRegions(const Callback &cb) const
81 {
82     sOldSpace_->EnumerateRegions(cb);
83     sNonMovableSpace_->EnumerateRegions(cb);
84     sHugeObjectSpace_->EnumerateRegions(cb);
85     sAppSpawnSpace_->EnumerateRegions(cb);
86 }
87 
88 template<class Callback>
89 void SharedHeap::EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const
90 {
91     sOldSpace_->EnumerateRegionsWithRecord(cb);
92     sNonMovableSpace_->EnumerateRegionsWithRecord(cb);
93     sHugeObjectSpace_->EnumerateRegionsWithRecord(cb);
94 }
95 
96 template<class Callback>
97 void SharedHeap::IterateOverObjects(const Callback &cb) const
98 {
99     sOldSpace_->IterateOverObjects(cb);
100     sNonMovableSpace_->IterateOverObjects(cb);
101     sHugeObjectSpace_->IterateOverObjects(cb);
102     sAppSpawnSpace_->IterateOverMarkedObjects(cb);
103 }
104 
105 template<class Callback>
106 void Heap::EnumerateOldSpaceRegions(const Callback &cb, Region *region) const
107 {
108     oldSpace_->EnumerateRegions(cb, region);
109     appSpawnSpace_->EnumerateRegions(cb);
110     nonMovableSpace_->EnumerateRegions(cb);
111     hugeObjectSpace_->EnumerateRegions(cb);
112     machineCodeSpace_->EnumerateRegions(cb);
113     hugeMachineCodeSpace_->EnumerateRegions(cb);
114 }
115 
116 template<class Callback>
117 void Heap::EnumerateSnapshotSpaceRegions(const Callback &cb) const
118 {
119     snapshotSpace_->EnumerateRegions(cb);
120 }
121 
122 template<class Callback>
123 void Heap::EnumerateNonNewSpaceRegions(const Callback &cb) const
124 {
125     oldSpace_->EnumerateRegions(cb);
126     if (!isCSetClearing_.load(std::memory_order_acquire)) {
127         oldSpace_->EnumerateCollectRegionSet(cb);
128     }
129     appSpawnSpace_->EnumerateRegions(cb);
130     snapshotSpace_->EnumerateRegions(cb);
131     nonMovableSpace_->EnumerateRegions(cb);
132     hugeObjectSpace_->EnumerateRegions(cb);
133     machineCodeSpace_->EnumerateRegions(cb);
134     hugeMachineCodeSpace_->EnumerateRegions(cb);
135 }
136 
137 template<class Callback>
138 void Heap::EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const
139 {
140     oldSpace_->EnumerateRegionsWithRecord(cb);
141     snapshotSpace_->EnumerateRegionsWithRecord(cb);
142     nonMovableSpace_->EnumerateRegionsWithRecord(cb);
143     hugeObjectSpace_->EnumerateRegionsWithRecord(cb);
144     machineCodeSpace_->EnumerateRegionsWithRecord(cb);
145     hugeMachineCodeSpace_->EnumerateRegionsWithRecord(cb);
146 }
147 
148 template<class Callback>
149 void Heap::EnumerateNewSpaceRegions(const Callback &cb) const
150 {
151     activeSemiSpace_->EnumerateRegions(cb);
152 }
153 
154 template<class Callback>
155 void Heap::EnumerateNonMovableRegions(const Callback &cb) const
156 {
157     snapshotSpace_->EnumerateRegions(cb);
158     appSpawnSpace_->EnumerateRegions(cb);
159     nonMovableSpace_->EnumerateRegions(cb);
160     hugeObjectSpace_->EnumerateRegions(cb);
161     machineCodeSpace_->EnumerateRegions(cb);
162     hugeMachineCodeSpace_->EnumerateRegions(cb);
163 }
164 
165 template<class Callback>
166 void Heap::EnumerateRegions(const Callback &cb) const
167 {
168     activeSemiSpace_->EnumerateRegions(cb);
169     oldSpace_->EnumerateRegions(cb);
170     if (!isCSetClearing_.load(std::memory_order_acquire)) {
171         oldSpace_->EnumerateCollectRegionSet(cb);
172     }
173     appSpawnSpace_->EnumerateRegions(cb);
174     snapshotSpace_->EnumerateRegions(cb);
175     nonMovableSpace_->EnumerateRegions(cb);
176     hugeObjectSpace_->EnumerateRegions(cb);
177     machineCodeSpace_->EnumerateRegions(cb);
178     hugeMachineCodeSpace_->EnumerateRegions(cb);
179 }
180 
181 template<class Callback>
182 void Heap::IterateOverObjects(const Callback &cb, bool isSimplify) const
183 {
184     activeSemiSpace_->IterateOverObjects(cb);
185     oldSpace_->IterateOverObjects(cb);
186     nonMovableSpace_->IterateOverObjects(cb);
187     hugeObjectSpace_->IterateOverObjects(cb);
188     machineCodeSpace_->IterateOverObjects(cb);
189     hugeMachineCodeSpace_->IterateOverObjects(cb);
190     snapshotSpace_->IterateOverObjects(cb);
191     if (!isSimplify) {
192         readOnlySpace_->IterateOverObjects(cb);
193         appSpawnSpace_->IterateOverMarkedObjects(cb);
194     }
195 }
196 
197 TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass)
198 {
199     size_t size = hclass->GetObjectSize();
200     return AllocateYoungOrHugeObject(hclass, size);
201 }
202 
203 TaggedObject *Heap::AllocateYoungOrHugeObject(size_t size)
204 {
205     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
206     TaggedObject *object = nullptr;
207     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
208         object = AllocateHugeObject(size);
209     } else {
210         object = AllocateInYoungSpace(size);
211         if (object == nullptr) {
212             if (!HandleExitHighSensitiveEvent()) {
213                 CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
214             }
215             object = AllocateInYoungSpace(size);
216             if (object == nullptr) {
217                 CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
218                 object = AllocateInYoungSpace(size);
219                 CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, activeSemiSpace_, "Heap::AllocateYoungOrHugeObject");
220             }
221         }
222     }
223     return object;
224 }
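// NOTE (editorial sketch, not part of the original source): the allocation above escalates in
// stages -- bump allocation in the active semi-space; then either leaving the high-sensitive
// state or a GC chosen by SelectGCType(), plus a retry; then one more GC and retry; and finally
// the overshoot/OOM macro. A hypothetical call site ("ExampleAllocateYoung" is illustrative, not
// an ArkCompiler API) simply delegates the regular-vs-huge decision to the heap:
static inline TaggedObject *ExampleAllocateYoung(Heap *heap, JSHClass *hclass)
{
    // The heap compares hclass->GetObjectSize() against MAX_REGULAR_HEAP_OBJECT_SIZE itself,
    // so callers do not need to special-case huge objects.
    return heap->AllocateYoungOrHugeObject(hclass);
}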
225 
226 TaggedObject *Heap::AllocateInYoungSpace(size_t size)
227 {
228     return reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
229 }
230 
231 TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass, size_t size)
232 {
233     auto object = AllocateYoungOrHugeObject(size);
234     ASSERT(object != nullptr);
235     object->SetClass(thread_, hclass);
236 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
237     OnAllocateEvent(GetEcmaVM(), object, size);
238 #endif
239     return object;
240 }
241 
242 void BaseHeap::SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
243                                            [[maybe_unused]] size_t size)
244 {
245     ASSERT(object != nullptr);
246     object->SetClass(thread, hclass);
247 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
248     OnAllocateEvent(thread->GetEcmaVM(), object, size);
249 #endif
250 }
251 
252 uintptr_t Heap::AllocateYoungSync(size_t size)
253 {
254     return activeSemiSpace_->AllocateSync(size);
255 }
256 
257 bool Heap::MoveYoungRegion(Region *region)
258 {
259     return activeSemiSpace_->SwapRegion(region, inactiveSemiSpace_);
260 }
261 
262 bool Heap::MoveYoungRegionToOld(Region *region)
263 {
264     return oldSpace_->SwapRegion(region, inactiveSemiSpace_);
265 }
266 
267 void Heap::MergeToOldSpaceSync(LocalSpace *localSpace)
268 {
269     oldSpace_->Merge(localSpace);
270 }
271 
272 bool Heap::InHeapProfiler()
273 {
274 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
275     return GetEcmaVM()->GetHeapProfile() != nullptr;
276 #else
277     return false;
278 #endif
279 }
280 
281 void SharedHeap::MergeToOldSpaceSync(SharedLocalSpace *localSpace)
282 {
283     sOldSpace_->Merge(localSpace);
284 }
285 
286 TaggedObject *Heap::TryAllocateYoungGeneration(JSHClass *hclass, size_t size)
287 {
288     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
289     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
290         return nullptr;
291     }
292     auto object = reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
293     if (object != nullptr) {
294         object->SetClass(thread_, hclass);
295     }
296 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
297     OnAllocateEvent(GetEcmaVM(), object, size);
298 #endif
299     return object;
300 }
301 
302 TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass)
303 {
304     size_t size = hclass->GetObjectSize();
305     return AllocateOldOrHugeObject(hclass, size);
306 }
307 
308 TaggedObject *Heap::AllocateOldOrHugeObject(size_t size)
309 {
310     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
311     TaggedObject *object = nullptr;
312     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
313         object = AllocateHugeObject(size);
314     } else {
315         object = reinterpret_cast<TaggedObject *>(oldSpace_->AllocateFast(size));
316         if (object == nullptr) {
317             bool gcSuccess = CheckAndTriggerOldGC();
318             object = reinterpret_cast<TaggedObject *>(oldSpace_->AllocateSlow(size, gcSuccess));
319         }
320         if (object == nullptr) {
321             CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
322             object = reinterpret_cast<TaggedObject *>(oldSpace_->AllocateSlow(size, true));
323         }
324         CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, oldSpace_, "Heap::AllocateOldOrHugeObject");
325     }
326     return object;
327 }
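// NOTE (editorial sketch, not part of the original source): old-space allocation above tries a
// fast path first, then a slow path whose behaviour depends on whether CheckAndTriggerOldGC()
// actually ran a collection, and only forces an OLD_GC when both fail. A hypothetical helper
// that picks the generation from a caller-side lifetime hint ("longLived" is an assumed
// parameter, not an engine flag):
static inline TaggedObject *ExampleAllocateByLifetime(Heap *heap, JSHClass *hclass, bool longLived)
{
    // Long-lived objects go straight to the old generation to avoid a later promotion copy.
    return longLived ? heap->AllocateOldOrHugeObject(hclass)
                     : heap->AllocateYoungOrHugeObject(hclass);
}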
328 
329 TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass, size_t size)
330 {
331     auto object = AllocateOldOrHugeObject(size);
332     object->SetClass(thread_, hclass);
333 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
334     OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject*>(object), size);
335 #endif
336     return object;
337 }
338 
339 TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass)
340 {
341     size_t size = hclass->GetObjectSize();
342     TaggedObject *object = AllocateReadOnlyOrHugeObject(hclass, size);
343 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
344     OnAllocateEvent(GetEcmaVM(), object, size);
345 #endif
346     return object;
347 }
348 
349 TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size)
350 {
351     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
352     TaggedObject *object = nullptr;
353     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
354         object = AllocateHugeObject(hclass, size);
355     } else {
356         object = reinterpret_cast<TaggedObject *>(readOnlySpace_->Allocate(size));
357         CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, readOnlySpace_, "Heap::AllocateReadOnlyOrHugeObject");
358         ASSERT(object != nullptr);
359         object->SetClass(thread_, hclass);
360     }
361 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
362     OnAllocateEvent(GetEcmaVM(), object, size);
363 #endif
364     return object;
365 }
366 
367 TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass)
368 {
369     size_t size = hclass->GetObjectSize();
370     TaggedObject *object = AllocateNonMovableOrHugeObject(hclass, size);
371     if (object == nullptr) {
372         LOG_ECMA(FATAL) << "Heap::AllocateNonMovableOrHugeObject:object is nullptr";
373     }
374 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
375     OnAllocateEvent(GetEcmaVM(), object, size);
376 #endif
377     return object;
378 }
379 
380 TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size)
381 {
382     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
383     TaggedObject *object = nullptr;
384     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
385         object = AllocateHugeObject(hclass, size);
386     } else {
387         object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->CheckAndAllocate(size));
388         CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, nonMovableSpace_, "Heap::AllocateNonMovableOrHugeObject");
389         object->SetClass(thread_, hclass);
390     }
391 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
392     OnAllocateEvent(GetEcmaVM(), object, size);
393 #endif
394     return object;
395 }
396 
397 TaggedObject *Heap::AllocateClassClass(JSHClass *hclass, size_t size)
398 {
399     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
400     auto object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->Allocate(size));
401     if (UNLIKELY(object == nullptr)) {
402         LOG_ECMA_MEM(FATAL) << "Heap::AllocateClassClass can not allocate any space";
403         UNREACHABLE();
404     }
405     *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
406 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
407     OnAllocateEvent(GetEcmaVM(), object, size);
408 #endif
409     return object;
410 }
411 
412 TaggedObject *SharedHeap::AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size)
413 {
414     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
415     auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
416     if (UNLIKELY(object == nullptr)) {
417         LOG_ECMA_MEM(FATAL) << "SharedHeap::AllocateClassClass can not allocate any space";
418         UNREACHABLE();
419     }
420     *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
421 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
422     OnAllocateEvent(thread->GetEcmaVM(), object, size);
423 #endif
424     return object;
425 }
426 
427 TaggedObject *Heap::AllocateHugeObject(size_t size)
428 {
429     // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
430     CheckAndTriggerOldGC(size);
431 
432     auto *object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
433     if (UNLIKELY(object == nullptr)) {
434         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
435         object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
436         if (UNLIKELY(object == nullptr)) {
437             // If allocating a huge object hits OOM, temporarily increase the space size to avoid a VM crash
438             size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
439             oldSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
440             DumpHeapSnapshotBeforeOOM(false);
441             StatisticHeapDetail();
442             object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
443             ThrowOutOfMemoryError(thread_, size, "Heap::AllocateHugeObject");
444             object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
445             if (UNLIKELY(object == nullptr)) {
446                 FatalOutOfMemoryError(size, "Heap::AllocateHugeObject");
447             }
448         }
449     }
450     return object;
451 }
452 
453 TaggedObject *Heap::AllocateHugeObject(JSHClass *hclass, size_t size)
454 {
455     // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
456     CheckAndTriggerOldGC(size);
457     auto object = AllocateHugeObject(size);
458     object->SetClass(thread_, hclass);
459 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
460     OnAllocateEvent(GetEcmaVM(), object, size);
461 #endif
462     return object;
463 }
464 
465 TaggedObject *Heap::AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc)
466 {
467     TaggedObject *object;
468     if (desc) {
469         object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
470             size, thread_, reinterpret_cast<void *>(desc)));
471     } else {
472         object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
473             size, thread_));
474     }
475     return object;
476 }
477 
478 TaggedObject *Heap::AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc)
479 {
480     TaggedObject *object;
481     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
482     if (!desc) {
483         // Jit Fort disabled
484         ASSERT(!GetEcmaVM()->GetJSOptions().GetEnableJitFort());
485         object = (size > MAX_REGULAR_HEAP_OBJECT_SIZE) ?
486             reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size)) :
487             reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size));
488         CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, machineCodeSpace_,
489             "Heap::AllocateMachineCodeObject");
490         object->SetClass(thread_, hclass);
491 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
492         OnAllocateEvent(GetEcmaVM(), object, size);
493 #endif
494         return object;
495     }
496 
497     // Jit Fort enabled
498     ASSERT(GetEcmaVM()->GetJSOptions().GetEnableJitFort());
499     if (!GetEcmaVM()->GetJSOptions().GetEnableAsyncCopyToFort()) {
500         desc->instructionsAddr = 0;
501         if (size <= MAX_REGULAR_HEAP_OBJECT_SIZE) {
502             // For a non-huge machine code object, allocate Jit Fort space before allocating the code object
503             uintptr_t mem = machineCodeSpace_->JitFortAllocate(desc);
504             if (mem == ToUintPtr(nullptr)) {
505                 return nullptr;
506             }
507             desc->instructionsAddr = mem;
508         }
509     }
510     object = (size > MAX_REGULAR_HEAP_OBJECT_SIZE) ?
511         reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size, desc)) :
512         reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size, desc, true));
513     CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, machineCodeSpace_, desc,
514         "Heap::AllocateMachineCodeObject");
515     object->SetClass(thread_, hclass);
516 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
517     OnAllocateEvent(GetEcmaVM(), object, size);
518 #endif
519     return object;
520 }
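// NOTE (editorial, not part of the original source): the function above has two modes. With
// Jit Fort disabled the caller passes desc == nullptr and the code object is placed directly in
// the (huge) machine-code space. With Jit Fort enabled and synchronous copying, the executable
// "fort" memory is reserved first via JitFortAllocate() and recorded in desc->instructionsAddr,
// and only then is the wrapping MachineCode object allocated; huge code objects instead hand the
// descriptor down to the huge machine-code space.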
521 
522 uintptr_t Heap::AllocateSnapshotSpace(size_t size)
523 {
524     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
525     uintptr_t object = snapshotSpace_->Allocate(size);
526     if (UNLIKELY(object == 0)) {
527         FatalOutOfMemoryError(size, "Heap::AllocateSnapshotSpaceObject");
528     }
529 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
530     OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject *>(object), size);
531 #endif
532     return object;
533 }
534 
535 TaggedObject *Heap::AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size)
536 {
537     ASSERT(!thread->IsJitThread());
538     if (GetEcmaVM()->GetThreadCheckStatus()) {
539         if (thread->IsJitThread()) {
540             LOG_ECMA(FATAL) << "jit thread not allowed";
541         }
542         if (thread->CheckMultiThread()) {
543             LOG_FULL(FATAL) << "Fatal: ecma_vm cannot run in multi-thread!"
544                             << "thread:" << thread->GetThreadId()
545                             << " currentThread:" << JSThread::GetCurrentThreadId();
546         }
547     }
548     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
549     TaggedObject *object = reinterpret_cast<TaggedObject*>(sNonMovableTlab_->Allocate(size));
550     if (object != nullptr) {
551         return object;
552     }
553     if (!sNonMovableTlab_->NeedNewTlab(size)) {
554         // slowpath
555         return nullptr;
556     }
557     size_t newTlabSize = sNonMovableTlab_->ComputeSize();
558     object = sHeap_->AllocateSNonMovableTlab(thread, newTlabSize);
559     if (object == nullptr) {
560         sNonMovableTlab_->DisableNewTlab();
561         return nullptr;
562     }
563     uintptr_t begin = reinterpret_cast<uintptr_t>(object);
564     sNonMovableTlab_->Reset(begin, begin + newTlabSize, begin + size);
565     auto topAddress = sNonMovableTlab_->GetTopAddress();
566     auto endAddress = sNonMovableTlab_->GetEndAddress();
567     thread->ReSetSNonMovableSpaceAllocationAddress(topAddress, endAddress);
568     sHeap_->TryTriggerConcurrentMarking(thread);
569     return object;
570 }
571 
572 TaggedObject *Heap::AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size)
573 {
574     ASSERT(!thread->IsJitThread());
575     if (GetEcmaVM()->GetThreadCheckStatus()) {
576         if (thread->IsJitThread()) {
577             LOG_ECMA(FATAL) << "jit thread not allowed";
578         }
579         if (thread->CheckMultiThread()) {
580             LOG_FULL(FATAL) << "Fatal: ecma_vm cannot run in multi-thread!"
581                             << "thread:" << thread->GetThreadId()
582                             << " currentThread:" << JSThread::GetCurrentThreadId();
583         }
584     }
585     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
586     TaggedObject *object = reinterpret_cast<TaggedObject*>(sOldTlab_->Allocate(size));
587     if (object != nullptr) {
588         return object;
589     }
590     if (!sOldTlab_->NeedNewTlab(size)) {
591         // slowpath
592         return nullptr;
593     }
594     size_t newTlabSize = sOldTlab_->ComputeSize();
595     object = sHeap_->AllocateSOldTlab(thread, newTlabSize);
596     if (object == nullptr) {
597         sOldTlab_->DisableNewTlab();
598         return nullptr;
599     }
600     uintptr_t begin = reinterpret_cast<uintptr_t>(object);
601     sOldTlab_->Reset(begin, begin + newTlabSize, begin + size);
602     auto topAddress = sOldTlab_->GetTopAddress();
603     auto endAddress = sOldTlab_->GetEndAddress();
604     thread->ReSetSOldSpaceAllocationAddress(topAddress, endAddress);
605     sHeap_->TryTriggerConcurrentMarking(thread);
606     return object;
607 }
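// NOTE (editorial sketch, not part of the original source): both TLAB helpers above follow the
// same shape -- bump-allocate from a thread-local [top, end) range without locking, and when the
// range is exhausted either fall back to the shared slow path or fetch a fresh buffer from the
// shared heap, reset top/end, and re-publish those addresses to the JSThread so inline allocation
// keeps working. A minimal, generic illustration of the bump step ("ExampleTlabBump" is
// illustrative only; the real logic lives in thread_local_allocation_buffer.h):
static inline uintptr_t ExampleTlabBump(uintptr_t &top, uintptr_t end, size_t size)
{
    if (size > end - top) {
        return 0;  // Buffer exhausted: the caller must refill the TLAB or take the slow path.
    }
    uintptr_t result = top;
    top += size;   // Lock-free because the buffer is owned by exactly one thread.
    return result;
}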
608 
609 void Heap::SwapNewSpace()
610 {
611     activeSemiSpace_->Stop();
612     size_t newOverShootSize = 0;
613     if (!inBackground_ && gcType_ != TriggerGCType::FULL_GC && gcType_ != TriggerGCType::APPSPAWN_FULL_GC) {
614         newOverShootSize = activeSemiSpace_->CalculateNewOverShootSize();
615     }
616     inactiveSemiSpace_->Restart(newOverShootSize);
617 
618     SemiSpace *newSpace = inactiveSemiSpace_;
619     inactiveSemiSpace_ = activeSemiSpace_;
620     activeSemiSpace_ = newSpace;
621     if (UNLIKELY(ShouldVerifyHeap())) {
622         inactiveSemiSpace_->EnumerateRegions([](Region *region) {
623             region->SetInactiveSemiSpace();
624         });
625     }
626 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
627     activeSemiSpace_->SwapAllocationCounter(inactiveSemiSpace_);
628 #endif
629     auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
630     auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
631     thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
632 }
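// NOTE (editorial, not part of the original source): SwapNewSpace() flips the roles of the two
// semi-spaces after evacuation -- the previously active space is stopped, the other one is
// restarted with a recomputed overshoot budget, and the JSThread's cached allocation top/end
// pointers are re-pointed at the new active space so inline (fast-path) allocation continues.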
633 
634 void Heap::SwapOldSpace()
635 {
636     compressSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity());
637     auto *oldSpace = compressSpace_;
638     compressSpace_ = oldSpace_;
639     oldSpace_ = oldSpace;
640 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
641     oldSpace_->SwapAllocationCounter(compressSpace_);
642 #endif
643 }
644 
645 void Heap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
646                        [[maybe_unused]] size_t size)
647 {
648 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
649     HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
650     if (profiler != nullptr) {
651         base::BlockHookScope blockScope;
652         profiler->MoveEvent(address, forwardAddress, size);
653     }
654 #endif
655 }
656 
657 void SharedHeap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
658                              [[maybe_unused]] size_t size)
659 {
660 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
661     Runtime::GetInstance()->GCIterateThreadListWithoutLock([&](JSThread *thread) {
662         HeapProfilerInterface *profiler = thread->GetEcmaVM()->GetHeapProfile();
663         if (profiler != nullptr) {
664             base::BlockHookScope blockScope;
665             profiler->MoveEvent(address, forwardAddress, size);
666         }
667     });
668 #endif
669 }
670 
671 void SharedHeap::SwapOldSpace()
672 {
673     sCompressSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity());
674     auto *oldSpace = sCompressSpace_;
675     sCompressSpace_ = sOldSpace_;
676     sOldSpace_ = oldSpace;
677 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
678     sOldSpace_->SwapAllocationCounter(sCompressSpace_);
679 #endif
680 }
681 
682 void Heap::ReclaimRegions(TriggerGCType gcType)
683 {
684     activeSemiSpace_->EnumerateRegionsWithRecord([] (Region *region) {
685         region->ResetRegionTypeFlag();
686         region->ClearMarkGCBitset();
687         region->ClearCrossRegionRSet();
688         region->ResetAliveObject();
689         region->ClearGCFlag(RegionGCFlags::IN_NEW_TO_NEW_SET);
690     });
691     size_t cachedSize = inactiveSemiSpace_->GetInitialCapacity();
692     if (gcType == TriggerGCType::FULL_GC) {
693         compressSpace_->Reset();
694         cachedSize = 0;
695     } else if (gcType == TriggerGCType::OLD_GC) {
696         oldSpace_->ReclaimCSet();
697         isCSetClearing_.store(false, std::memory_order_release);
698     }
699 
700     inactiveSemiSpace_->ReclaimRegions(cachedSize);
701     sweeper_->WaitAllTaskFinished();
702     EnumerateNonNewSpaceRegionsWithRecord([] (Region *region) {
703         region->ClearMarkGCBitset();
704         region->ClearCrossRegionRSet();
705     });
706     if (!clearTaskFinished_) {
707         LockHolder holder(waitClearTaskFinishedMutex_);
708         clearTaskFinished_ = true;
709         waitClearTaskFinishedCV_.SignalAll();
710     }
711 }
712 
713 // Only call this on the JS thread
714 void Heap::ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd)
715 {
716     if (!current->InYoungSpace()) {
717         // This clear may race with concurrent sweeping, so use CAS
718         current->AtomicClearSweepingOldToNewRSetInRange(freeStart, freeEnd);
719         current->ClearOldToNewRSetInRange(freeStart, freeEnd);
720         current->AtomicClearCrossRegionRSetInRange(freeStart, freeEnd);
721     }
722     current->ClearLocalToShareRSetInRange(freeStart, freeEnd);
723     current->AtomicClearSweepingLocalToShareRSetInRange(freeStart, freeEnd);
724 }
725 
726 size_t Heap::GetCommittedSize() const
727 {
728     size_t result = activeSemiSpace_->GetCommittedSize() +
729                     oldSpace_->GetCommittedSize() +
730                     hugeObjectSpace_->GetCommittedSize() +
731                     nonMovableSpace_->GetCommittedSize() +
732                     machineCodeSpace_->GetCommittedSize() +
733                     hugeMachineCodeSpace_->GetCommittedSize() +
734                     readOnlySpace_->GetCommittedSize() +
735                     appSpawnSpace_->GetCommittedSize() +
736                     snapshotSpace_->GetCommittedSize();
737     return result;
738 }
739 
740 size_t Heap::GetHeapObjectSize() const
741 {
742     size_t result = activeSemiSpace_->GetHeapObjectSize() +
743                     oldSpace_->GetHeapObjectSize() +
744                     hugeObjectSpace_->GetHeapObjectSize() +
745                     nonMovableSpace_->GetHeapObjectSize() +
746                     machineCodeSpace_->GetCommittedSize() +
747                     hugeMachineCodeSpace_->GetCommittedSize() +
748                     readOnlySpace_->GetCommittedSize() +
749                     appSpawnSpace_->GetHeapObjectSize() +
750                     snapshotSpace_->GetHeapObjectSize();
751     return result;
752 }
753 
754 void Heap::NotifyRecordMemorySize()
755 {
756     if (GetRecordObjectSize() == 0) {
757         RecordOrResetObjectSize(GetHeapObjectSize());
758     }
759     if (GetRecordNativeSize() == 0) {
760         RecordOrResetNativeSize(GetNativeBindingSize());
761     }
762 }
763 
764 size_t Heap::GetRegionCount() const
765 {
766     size_t result = activeSemiSpace_->GetRegionCount() +
767         oldSpace_->GetRegionCount() +
768         oldSpace_->GetCollectSetRegionCount() +
769         appSpawnSpace_->GetRegionCount() +
770         snapshotSpace_->GetRegionCount() +
771         nonMovableSpace_->GetRegionCount() +
772         hugeObjectSpace_->GetRegionCount() +
773         machineCodeSpace_->GetRegionCount() +
774         hugeMachineCodeSpace_->GetRegionCount();
775     return result;
776 }
777 
778 uint32_t Heap::GetHeapObjectCount() const
779 {
780     uint32_t count = 0;
781     sweeper_->EnsureAllTaskFinished();
782     this->IterateOverObjects([&count]([[maybe_unused]] TaggedObject *obj) {
783         ++count;
784     });
785     return count;
786 }
787 
788 void Heap::InitializeIdleStatusControl(std::function<void(bool)> callback)
789 {
790     notifyIdleStatusCallback = callback;
791     if (callback != nullptr) {
792         OPTIONAL_LOG(ecmaVm_, INFO) << "Received idle status control call back";
793         enableIdleGC_ = ecmaVm_->GetJSOptions().EnableIdleGC();
794     }
795 }
796 
797 void SharedHeap::TryTriggerConcurrentMarking(JSThread *thread)
798 {
799     if (!CheckCanTriggerConcurrentMarking(thread)) {
800         return;
801     }
802     bool triggerConcurrentMark = (GetHeapObjectSize() >= globalSpaceConcurrentMarkLimit_);
803     if (triggerConcurrentMark && (OnStartupEvent() || IsJustFinishStartup())) {
804         triggerConcurrentMark = ObjectExceedJustFinishStartupThresholdForCM();
805     }
806     if (triggerConcurrentMark) {
807         TriggerConcurrentMarking<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
808     }
809 }
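// NOTE (editorial, not part of the original source): the shared heap arms concurrent marking once
// its object size crosses globalSpaceConcurrentMarkLimit_, with a stricter threshold while the
// app is starting up or has just finished starting up. The actual trigger is the templated call
// used above:
//
//     TriggerConcurrentMarking<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);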
810 
811 TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass)
812 {
813     size_t size = hclass->GetObjectSize();
814     return AllocateNonMovableOrHugeObject(thread, hclass, size);
815 }
816 
817 TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
818 {
819     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
820     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
821         return AllocateHugeObject(thread, hclass, size);
822     }
823     TaggedObject *object = thread->IsJitThread() ? nullptr :
824         const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
825     if (object == nullptr) {
826         object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
827         CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
828             "SharedHeap::AllocateNonMovableOrHugeObject");
829         object->SetClass(thread, hclass);
830         TryTriggerConcurrentMarking(thread);
831     } else {
832         object->SetClass(thread, hclass);
833     }
834 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
835     OnAllocateEvent(thread->GetEcmaVM(), object, size);
836 #endif
837     return object;
838 }
839 
840 TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, size_t size)
841 {
842     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
843     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
844         return AllocateHugeObject(thread, size);
845     }
846     TaggedObject *object = thread->IsJitThread() ? nullptr :
847         const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
848     if (object == nullptr) {
849         object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
850         CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
851             "SharedHeap::AllocateNonMovableOrHugeObject");
852         TryTriggerConcurrentMarking(thread);
853     }
854 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
855     OnAllocateEvent(thread->GetEcmaVM(), object, size);
856 #endif
857     return object;
858 }
859 
860 TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass)
861 {
862     size_t size = hclass->GetObjectSize();
863     return AllocateOldOrHugeObject(thread, hclass, size);
864 }
865 
866 TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
867 {
868     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
869     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
870         return AllocateHugeObject(thread, hclass, size);
871     }
872     TaggedObject *object = thread->IsJitThread() ? nullptr :
873         const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
874     if (object == nullptr) {
875         object = AllocateInSOldSpace(thread, size);
876         CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
877         object->SetClass(thread, hclass);
878         TryTriggerConcurrentMarking(thread);
879     } else {
880         object->SetClass(thread, hclass);
881     }
882 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
883     OnAllocateEvent(thread->GetEcmaVM(), object, size);
884 #endif
885     return object;
886 }
887 
888 TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, size_t size)
889 {
890     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
891     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
892         return AllocateHugeObject(thread, size);
893     }
894     TaggedObject *object = thread->IsJitThread() ? nullptr :
895         const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
896     if (object == nullptr) {
897         object = AllocateInSOldSpace(thread, size);
898         CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
899         TryTriggerConcurrentMarking(thread);
900     }
901     return object;
902 }
903 
904 TaggedObject *SharedHeap::AllocateInSOldSpace(JSThread *thread, size_t size)
905 {
906     // The JIT thread has no local heap
907     bool allowGC = !thread->IsJitThread();
908     if (allowGC) {
909         auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
910         localHeap->TryTriggerFullMarkBySharedSize(size);
911     }
912     TaggedObject *object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, false));
913     // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
914     if (object == nullptr) {
915         if (allowGC) {
916             CheckAndTriggerSharedGC(thread);
917         }
918         object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
919         if (object == nullptr) {
920             if (allowGC) {
921                 CollectGarbageNearOOM(thread);
922             }
923             object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
924         }
925     }
926     return object;
927 }
928 
929 TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
930 {
931     auto object = AllocateHugeObject(thread, size);
932     object->SetClass(thread, hclass);
933 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
934     OnAllocateEvent(thread->GetEcmaVM(), object, size);
935 #endif
936     return object;
937 }
938 
939 TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, size_t size)
940 {
941     // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
942     CheckHugeAndTriggerSharedGC(thread, size);
943     auto *object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
944     if (UNLIKELY(object == nullptr)) {
945         CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
946         object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
947         if (UNLIKELY(object == nullptr)) {
948             // If allocating a huge object hits OOM, temporarily increase the space size to avoid a VM crash
949             size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
950             sHugeObjectSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
951             DumpHeapSnapshotBeforeOOM(false, thread, SharedHeapOOMSource::NORMAL_ALLOCATION);
952             ThrowOutOfMemoryError(thread, size, "SharedHeap::AllocateHugeObject");
953             object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
954             if (UNLIKELY(object == nullptr)) {
955                 FatalOutOfMemoryError(size, "SharedHeap::AllocateHugeObject");
956             }
957         }
958     }
959     TryTriggerConcurrentMarking(thread);
960     return object;
961 }
962 
963 TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass)
964 {
965     size_t size = hclass->GetObjectSize();
966     return AllocateReadOnlyOrHugeObject(thread, hclass, size);
967 }
968 
969 TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
970 {
971     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
972     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
973         return AllocateHugeObject(thread, hclass, size);
974     }
975     auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
976     CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sReadOnlySpace_, "SharedHeap::AllocateReadOnlyOrHugeObject");
977     ASSERT(object != nullptr);
978     object->SetClass(thread, hclass);
979     return object;
980 }
981 
982 TaggedObject *SharedHeap::AllocateSOldTlab(JSThread *thread, size_t size)
983 {
984     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
985     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
986         return nullptr;
987     }
988     TaggedObject *object = nullptr;
989     if (sOldSpace_->GetCommittedSize() > sOldSpace_->GetInitialCapacity() / 2) { // 2: half
990         object = reinterpret_cast<TaggedObject *>(sOldSpace_->AllocateNoGCAndExpand(thread, size));
991     } else {
992         object = AllocateInSOldSpace(thread, size);
993     }
994     return object;
995 }
996 
997 TaggedObject *SharedHeap::AllocateSNonMovableTlab(JSThread *thread, size_t size)
998 {
999     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
1000     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
1001         return nullptr;
1002     }
1003     TaggedObject *object = nullptr;
1004     object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
1005     return object;
1006 }
1007 
1008 template<TriggerGCType gcType, GCReason gcReason>
1009 void SharedHeap::TriggerConcurrentMarking(JSThread *thread)
1010 {
1011     ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_PARTIAL_GC);
1012     // The lock is taken outside to guard against an extreme case; updating gcFinished_ could perhaps be
1013     // moved into CheckAndPostTask instead of locking outside here.
1014     LockHolder lock(waitGCFinishedMutex_);
1015     if (dThread_->CheckAndPostTask(TriggerConcurrentMarkTask<gcType, gcReason>(thread))) {
1016         ASSERT(gcFinished_);
1017         gcFinished_ = false;
1018     }
1019 }
1020 
1021 template<TriggerGCType gcType, GCReason gcReason>
1022 void SharedHeap::CollectGarbage(JSThread *thread)
1023 {
1024     ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_PARTIAL_GC ||
1025         gcType == TriggerGCType::SHARED_FULL_GC);
1026 #ifndef NDEBUG
1027     ASSERT(!thread->HasLaunchedSuspendAll());
1028 #endif
1029     if (UNLIKELY(!dThread_->IsRunning())) {
1030         // This should not happen unless AppSpawn runs something after PostFork
1031         LOG_GC(ERROR) << "Try to collect garbage in shared heap, but daemon thread is not running.";
1032         ForceCollectGarbageWithoutDaemonThread(gcType, gcReason, thread);
1033         return;
1034     }
1035     {
1036         // The lock is taken outside the post-task call to prevent an extreme case: another JS thread succeeds
1037         // in posting a concurrent-mark task, so this thread would go straight into WaitGCFinished, but that
1038         // thread has somehow not yet cleared gcFinished_ before WaitGCFinished completes, which may cause an unexpected OOM
1039         LockHolder lock(waitGCFinishedMutex_);
1040         if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<gcType, gcReason>(thread))) {
1041             ASSERT(gcFinished_);
1042             gcFinished_ = false;
1043         }
1044     }
1045     ASSERT(!gcFinished_);
1046     SetForceGC(true);
1047     WaitGCFinished(thread);
1048 }
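// NOTE (editorial, not part of the original source): callers request a shared collection by
// instantiating the template with a GC type and reason, as SharedHeap::AllocateHugeObject does
// earlier in this file:
//
//     CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
//
// Holding waitGCFinishedMutex_ across both the post and the gcFinished_ update ensures a thread
// whose post fails cannot reach WaitGCFinished() before the winning thread has cleared the flag,
// which is the race described in the comment above.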
1049 
1050 // This method is used only in the idle state and background switchover state.
1051 template<GCReason gcReason>
1052 void SharedHeap::CompressCollectGarbageNotWaiting(JSThread *thread)
1053 {
1054     {
1055         // The lock is taken outside the post-task call to prevent an extreme case: another JS thread succeeds
1056         // in posting a concurrent-mark task, so this thread would go straight into WaitGCFinished, but that
1057         // thread has somehow not yet cleared gcFinished_ before WaitGCFinished completes, which may cause an unexpected OOM
1058         LockHolder lock(waitGCFinishedMutex_);
1059         if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<TriggerGCType::SHARED_FULL_GC, gcReason>(thread))) {
1060             ASSERT(gcFinished_);
1061             gcFinished_ = false;
1062         }
1063     }
1064     ASSERT(!gcFinished_);
1065     SetForceGC(true);
1066 }
1067 
1068 template<TriggerGCType gcType, GCReason gcReason>
1069 void SharedHeap::PostGCTaskForTest(JSThread *thread)
1070 {
1071     ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_PARTIAL_GC ||
1072         gcType == TriggerGCType::SHARED_FULL_GC);
1073 #ifndef NDEBUG
1074     ASSERT(!thread->HasLaunchedSuspendAll());
1075 #endif
1076     if (dThread_->IsRunning()) {
1077         // Some UT may run without Daemon Thread.
1078         LockHolder lock(waitGCFinishedMutex_);
1079         if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<gcType, gcReason>(thread))) {
1080             ASSERT(gcFinished_);
1081             gcFinished_ = false;
1082         }
1083         ASSERT(!gcFinished_);
1084     }
1085 }
1086 
1087 static void SwapBackAndPop(CVector<JSNativePointer*>& vec, CVector<JSNativePointer*>::iterator& iter)
1088 {
1089     *iter = vec.back();
1090     if (iter + 1 == vec.end()) {
1091         vec.pop_back();
1092         iter = vec.end();
1093     } else {
1094         vec.pop_back();
1095     }
1096 }
1097 
1098 static void ShrinkWithFactor(CVector<JSNativePointer*>& vec)
1099 {
1100     constexpr size_t SHRINK_FACTOR = 2;
1101     if (vec.size() < vec.capacity() / SHRINK_FACTOR) {
1102         vec.shrink_to_fit();
1103     }
1104 }
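// NOTE (editorial sketch, not part of the original source): SwapBackAndPop is the classic O(1)
// "swap with the last element, then pop_back" erase, which is why the traversal loops below only
// advance their iterator when the current element is kept. A hypothetical filtering loop using
// the two helpers above ("KeepPointer" is an assumed predicate, not an engine API):
//
//     auto it = list.begin();
//     while (it != list.end()) {
//         if (!KeepPointer(*it)) {
//             SwapBackAndPop(list, it);  // removes *it; it now refers to the moved-in element
//         } else {
//             ++it;
//         }
//     }
//     ShrinkWithFactor(list);            // release spare capacity once usage drops below half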
1105 
1106 void SharedHeap::InvokeSharedNativePointerCallbacks()
1107 {
1108     Runtime *runtime = Runtime::GetInstance();
1109     if (!runtime->GetSharedNativePointerCallbacks().empty()) {
1110         runtime->InvokeSharedNativePointerCallbacks();
1111     }
1112 }
1113 
1114 void SharedHeap::PushToSharedNativePointerList(JSNativePointer* pointer)
1115 {
1116     ASSERT(JSTaggedValue(pointer).IsInSharedHeap());
1117     std::lock_guard<std::mutex> lock(sNativePointerListMutex_);
1118     sharedNativePointerList_.emplace_back(pointer);
1119 }
1120 
1121 void SharedHeap::ProcessSharedNativeDelete(const WeakRootVisitor& visitor)
1122 {
1123 #ifndef NDEBUG
1124     ASSERT(JSThread::GetCurrent()->HasLaunchedSuspendAll());
1125 #endif
1126     auto& sharedNativePointerCallbacks = Runtime::GetInstance()->GetSharedNativePointerCallbacks();
1127     auto sharedIter = sharedNativePointerList_.begin();
1128     while (sharedIter != sharedNativePointerList_.end()) {
1129         JSNativePointer* object = *sharedIter;
1130         auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1131         if (fwd == nullptr) {
1132             sharedNativePointerCallbacks.emplace_back(
1133                 object->GetDeleter(), std::make_pair(object->GetExternalPointer(), object->GetData()));
1134             SwapBackAndPop(sharedNativePointerList_, sharedIter);
1135         } else {
1136             if (fwd != reinterpret_cast<TaggedObject*>(object)) {
1137                 *sharedIter = reinterpret_cast<JSNativePointer*>(fwd);
1138             }
1139             ++sharedIter;
1140         }
1141     }
1142     ShrinkWithFactor(sharedNativePointerList_);
1143 }
1144 
1145 void Heap::ProcessNativeDelete(const WeakRootVisitor& visitor)
1146 {
1147     // ProcessNativeDelete should be limited to OldGC or FullGC only
1148     if (!IsYoungGC()) {
1149         auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
1150         auto iter = nativePointerList_.begin();
1151         ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ProcessNativeDeleteNum:" + std::to_string(nativePointerList_.size()));
1152         while (iter != nativePointerList_.end()) {
1153             JSNativePointer* object = *iter;
1154             auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1155             if (fwd == nullptr) {
1156                 size_t bindingSize = object->GetBindingSize();
1157                 asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
1158                     std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
1159                 nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
1160                 SwapBackAndPop(nativePointerList_, iter);
1161             } else {
1162                 ++iter;
1163             }
1164         }
1165         ShrinkWithFactor(nativePointerList_);
1166 
1167         auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
1168         auto newIter = concurrentNativePointerList_.begin();
1169         while (newIter != concurrentNativePointerList_.end()) {
1170             JSNativePointer* object = *newIter;
1171             auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1172             if (fwd == nullptr) {
1173                 nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
1174                 concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
1175                     std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
1176                 SwapBackAndPop(concurrentNativePointerList_, newIter);
1177             } else {
1178                 ++newIter;
1179             }
1180         }
1181         ShrinkWithFactor(concurrentNativePointerList_);
1182     }
1183 }
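// NOTE (editorial, not part of the original source): in the functions above and below, the
// WeakRootVisitor returns nullptr when the referenced JSNativePointer died in this GC (its
// deleter is queued and the entry removed by SwapBackAndPop) and a forwarding address when the
// object merely moved (the slot is updated in place). Young GCs skip this work entirely: native
// pointers are only processed during old and full collections.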
1184 
1185 void Heap::ProcessReferences(const WeakRootVisitor& visitor)
1186 {
1187     // Processing native references should be limited to OldGC or FullGC only
1188     if (!IsYoungGC()) {
1189         auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
1190         ResetNativeBindingSize();
1191         // array buffer
1192         auto iter = nativePointerList_.begin();
1193         ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ProcessReferencesNum:" + std::to_string(nativePointerList_.size()));
1194         while (iter != nativePointerList_.end()) {
1195             JSNativePointer* object = *iter;
1196             auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1197             if (fwd == nullptr) {
1198                 size_t bindingSize = object->GetBindingSize();
1199                 asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
1200                     std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
1201                 nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
1202                 SwapBackAndPop(nativePointerList_, iter);
1203                 continue;
1204             }
1205             IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
1206             if (fwd != reinterpret_cast<TaggedObject*>(object)) {
1207                 *iter = JSNativePointer::Cast(fwd);
1208             }
1209             ++iter;
1210         }
1211         ShrinkWithFactor(nativePointerList_);
1212 
1213         auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
1214         auto newIter = concurrentNativePointerList_.begin();
1215         while (newIter != concurrentNativePointerList_.end()) {
1216             JSNativePointer* object = *newIter;
1217             auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1218             if (fwd == nullptr) {
1219                 nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
1220                 concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
1221                     std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
1222                 SwapBackAndPop(concurrentNativePointerList_, newIter);
1223                 continue;
1224             }
1225             IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
1226             if (fwd != reinterpret_cast<TaggedObject*>(object)) {
1227                 *newIter = JSNativePointer::Cast(fwd);
1228             }
1229             ++newIter;
1230         }
1231         ShrinkWithFactor(concurrentNativePointerList_);
1232     }
1233 }
1234 
1235 void Heap::PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent)
1236 {
1237     ASSERT(!JSTaggedValue(pointer).IsInSharedHeap());
1238     if (isConcurrent) {
1239         concurrentNativePointerList_.emplace_back(pointer);
1240     } else {
1241         nativePointerList_.emplace_back(pointer);
1242     }
1243 }
1244 
1245 void Heap::RemoveFromNativePointerList(const JSNativePointer* pointer)
1246 {
1247     auto iter = std::find(nativePointerList_.begin(), nativePointerList_.end(), pointer);
1248     if (iter != nativePointerList_.end()) {
1249         JSNativePointer* object = *iter;
1250         nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
1251         object->Destroy(thread_);
1252         SwapBackAndPop(nativePointerList_, iter);
1253     }
1254     auto newIter = std::find(concurrentNativePointerList_.begin(), concurrentNativePointerList_.end(), pointer);
1255     if (newIter != concurrentNativePointerList_.end()) {
1256         JSNativePointer* object = *newIter;
1257         nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
1258         object->Destroy(thread_);
1259         SwapBackAndPop(concurrentNativePointerList_, newIter);
1260     }
1261 }
1262 
1263 void Heap::ClearNativePointerList()
1264 {
1265     for (auto iter : nativePointerList_) {
1266         iter->Destroy(thread_);
1267     }
1268     for (auto iter : concurrentNativePointerList_) {
1269         iter->Destroy(thread_);
1270     }
1271     nativePointerList_.clear();
1272 }
1273 
1274 }  // namespace panda::ecmascript
1275 
1276 #endif  // ECMASCRIPT_MEM_HEAP_INL_H
1277