1 /*
2  * Copyright (c) 2021 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef ECMASCRIPT_MEM_HEAP_INL_H
17 #define ECMASCRIPT_MEM_HEAP_INL_H
18 
19 #include "ecmascript/mem/heap.h"
20 
21 #include "ecmascript/js_native_pointer.h"
22 #include "ecmascript/daemon/daemon_task-inl.h"
23 #include "ecmascript/dfx/hprof/heap_tracker.h"
24 #include "ecmascript/ecma_vm.h"
25 #include "ecmascript/mem/allocator-inl.h"
26 #include "ecmascript/mem/concurrent_sweeper.h"
27 #include "ecmascript/mem/linear_space.h"
28 #include "ecmascript/mem/mem_controller.h"
29 #include "ecmascript/mem/sparse_space.h"
30 #include "ecmascript/mem/tagged_object.h"
31 #include "ecmascript/mem/thread_local_allocation_buffer.h"
32 #include "ecmascript/mem/barriers-inl.h"
33 #include "ecmascript/mem/mem_map_allocator.h"
34 
35 namespace panda::ecmascript {
36 #define CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, space, message)                                         \
37     if (UNLIKELY((object) == nullptr)) {                                                                    \
38         EcmaVM *vm = GetEcmaVM();                                                                           \
39         size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
40         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
41         if ((space)->IsOOMDumpSpace()) {                                                                    \
42             DumpHeapSnapshotBeforeOOM(false);                                                               \
43         }                                                                                                   \
44         StatisticHeapDetail();                                                                              \
45         ThrowOutOfMemoryError(GetJSThread(), size, message);                                                \
46         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size));                               \
47     }
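// Typical use of the macro above (a sketch taken from the allocators later in this file): after a first
// allocation attempt returns nullptr, the macro grows the space by the configured OOM overshoot, optionally
// dumps a heap snapshot, throws an OutOfMemoryError on the JS thread, and then retries the allocation once:
//     object = reinterpret_cast<TaggedObject *>(oldSpace_->Allocate(size));
//     CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, oldSpace_, "Heap::AllocateOldOrHugeObject");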
48 
49 #define CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, space, message)                                \
50     if (UNLIKELY((object) == nullptr)) {                                                                    \
51         size_t oomOvershootSize = GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();                \
52         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
53         DumpHeapSnapshotBeforeOOM(false, thread);                                                           \
54         ThrowOutOfMemoryError(thread, size, message);                                                       \
55         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(thread, size));                       \
56     }
57 
58 #define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, space, desc, message)                   \
59     if (UNLIKELY((object) == nullptr)) {                                                                    \
60         EcmaVM *vm = GetEcmaVM();                                                                           \
61         size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
62         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
63         SetMachineCodeOutOfMemoryError(GetJSThread(), size, message);                                       \
64         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size, desc));                         \
65     }
66 
67 #define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, space, message)                              \
68     if (UNLIKELY((object) == nullptr)) {                                                                    \
69         EcmaVM *vm = GetEcmaVM();                                                                           \
70         size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
71         (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
72         SetMachineCodeOutOfMemoryError(GetJSThread(), size, message);                                       \
73         (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size));                               \
74     }
75 
76 template<class Callback>
77 void SharedHeap::EnumerateOldSpaceRegions(const Callback &cb) const
78 {
79     sOldSpace_->EnumerateRegions(cb);
80     sNonMovableSpace_->EnumerateRegions(cb);
81     sHugeObjectSpace_->EnumerateRegions(cb);
82     sAppSpawnSpace_->EnumerateRegions(cb);
83 }
84 
85 template<class Callback>
86 void SharedHeap::EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const
87 {
88     sOldSpace_->EnumerateRegionsWithRecord(cb);
89     sNonMovableSpace_->EnumerateRegionsWithRecord(cb);
90     sHugeObjectSpace_->EnumerateRegionsWithRecord(cb);
91 }
92 
93 template<class Callback>
94 void SharedHeap::IterateOverObjects(const Callback &cb) const
95 {
96     sOldSpace_->IterateOverObjects(cb);
97     sNonMovableSpace_->IterateOverObjects(cb);
98     sHugeObjectSpace_->IterateOverObjects(cb);
99     sAppSpawnSpace_->IterateOverMarkedObjects(cb);
100 }
101 
102 template<class Callback>
103 void Heap::EnumerateOldSpaceRegions(const Callback &cb, Region *region) const
104 {
105     oldSpace_->EnumerateRegions(cb, region);
106     appSpawnSpace_->EnumerateRegions(cb);
107     nonMovableSpace_->EnumerateRegions(cb);
108     hugeObjectSpace_->EnumerateRegions(cb);
109     machineCodeSpace_->EnumerateRegions(cb);
110     hugeMachineCodeSpace_->EnumerateRegions(cb);
111 }
112 
113 template<class Callback>
114 void Heap::EnumerateSnapshotSpaceRegions(const Callback &cb) const
115 {
116     snapshotSpace_->EnumerateRegions(cb);
117 }
118 
119 template<class Callback>
120 void Heap::EnumerateNonNewSpaceRegions(const Callback &cb) const
121 {
122     oldSpace_->EnumerateRegions(cb);
123     if (!isCSetClearing_.load(std::memory_order_acquire)) {
124         oldSpace_->EnumerateCollectRegionSet(cb);
125     }
126     appSpawnSpace_->EnumerateRegions(cb);
127     snapshotSpace_->EnumerateRegions(cb);
128     nonMovableSpace_->EnumerateRegions(cb);
129     hugeObjectSpace_->EnumerateRegions(cb);
130     machineCodeSpace_->EnumerateRegions(cb);
131     hugeMachineCodeSpace_->EnumerateRegions(cb);
132 }
133 
134 template<class Callback>
135 void Heap::EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const
136 {
137     oldSpace_->EnumerateRegionsWithRecord(cb);
138     snapshotSpace_->EnumerateRegionsWithRecord(cb);
139     nonMovableSpace_->EnumerateRegionsWithRecord(cb);
140     hugeObjectSpace_->EnumerateRegionsWithRecord(cb);
141     machineCodeSpace_->EnumerateRegionsWithRecord(cb);
142     hugeMachineCodeSpace_->EnumerateRegionsWithRecord(cb);
143 }
144 
145 template<class Callback>
146 void Heap::EnumerateEdenSpaceRegions(const Callback &cb) const
147 {
148     edenSpace_->EnumerateRegions(cb);
149 }
150 
151 template<class Callback>
152 void Heap::EnumerateNewSpaceRegions(const Callback &cb) const
153 {
154     activeSemiSpace_->EnumerateRegions(cb);
155 }
156 
157 template<class Callback>
158 void Heap::EnumerateNonMovableRegions(const Callback &cb) const
159 {
160     snapshotSpace_->EnumerateRegions(cb);
161     appSpawnSpace_->EnumerateRegions(cb);
162     nonMovableSpace_->EnumerateRegions(cb);
163     hugeObjectSpace_->EnumerateRegions(cb);
164     machineCodeSpace_->EnumerateRegions(cb);
165     hugeMachineCodeSpace_->EnumerateRegions(cb);
166 }
167 
168 template<class Callback>
169 void Heap::EnumerateRegions(const Callback &cb) const
170 {
171     edenSpace_->EnumerateRegions(cb);
172     activeSemiSpace_->EnumerateRegions(cb);
173     oldSpace_->EnumerateRegions(cb);
174     if (!isCSetClearing_.load(std::memory_order_acquire)) {
175         oldSpace_->EnumerateCollectRegionSet(cb);
176     }
177     appSpawnSpace_->EnumerateRegions(cb);
178     snapshotSpace_->EnumerateRegions(cb);
179     nonMovableSpace_->EnumerateRegions(cb);
180     hugeObjectSpace_->EnumerateRegions(cb);
181     machineCodeSpace_->EnumerateRegions(cb);
182     hugeMachineCodeSpace_->EnumerateRegions(cb);
183 }
184 
185 template<class Callback>
186 void Heap::IterateOverObjects(const Callback &cb, bool isSimplify) const
187 {
188     edenSpace_->IterateOverObjects(cb);
189     activeSemiSpace_->IterateOverObjects(cb);
190     oldSpace_->IterateOverObjects(cb);
191     nonMovableSpace_->IterateOverObjects(cb);
192     hugeObjectSpace_->IterateOverObjects(cb);
193     machineCodeSpace_->IterateOverObjects(cb);
194     hugeMachineCodeSpace_->IterateOverObjects(cb);
195     snapshotSpace_->IterateOverObjects(cb);
196     if (!isSimplify) {
197         readOnlySpace_->IterateOverObjects(cb);
198         appSpawnSpace_->IterateOverMarkedObjects(cb);
199     }
200 }
201 
202 TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass)
203 {
204     size_t size = hclass->GetObjectSize();
205     return AllocateYoungOrHugeObject(hclass, size);
206 }
207 
208 TaggedObject *Heap::AllocateYoungOrHugeObject(size_t size)
209 {
210     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
211     TaggedObject *object = nullptr;
212     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
213         object = AllocateHugeObject(size);
214     } else {
215         object = AllocateInGeneralNewSpace(size);
216         if (object == nullptr) {
217             if (!HandleExitHighSensitiveEvent()) {
218                 CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
219             }
220             object = AllocateInGeneralNewSpace(size);
221             if (object == nullptr) {
222                 CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
223                 object = AllocateInGeneralNewSpace(size);
224                 CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, activeSemiSpace_, "Heap::AllocateYoungOrHugeObject");
225             }
226         }
227     }
228     return object;
229 }
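// Note on the allocation path above: it degrades gracefully. It first tries the general new space, triggers a
// GC on failure (the first GC is skipped once if a high-sensitive scene was just exited), retries, triggers a
// second GC, retries again, and only then falls back to the OOM handling in CHECK_OBJ_AND_THROW_OOM_ERROR
// against the active semispace. Oversized requests go straight to the huge object space.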
230 
231 TaggedObject *Heap::AllocateInGeneralNewSpace(size_t size)
232 {
233     if (enableEdenGC_) {
234         auto object = reinterpret_cast<TaggedObject *>(edenSpace_->Allocate(size));
235         if (object != nullptr) {
236             return object;
237         }
238     }
239     return reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
240 }
241 
242 TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass, size_t size)
243 {
244     auto object = AllocateYoungOrHugeObject(size);
245     ASSERT(object != nullptr);
246     object->SetClass(thread_, hclass);
247 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
248     OnAllocateEvent(GetEcmaVM(), object, size);
249 #endif
250     return object;
251 }
252 
253 void BaseHeap::SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
254                                            [[maybe_unused]] size_t size)
255 {
256     ASSERT(object != nullptr);
257     object->SetClass(thread, hclass);
258 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
259     OnAllocateEvent(thread->GetEcmaVM(), object, size);
260 #endif
261 }
262 
263 uintptr_t Heap::AllocateYoungSync(size_t size)
264 {
265     return activeSemiSpace_->AllocateSync(size);
266 }
267 
268 bool Heap::MoveYoungRegionSync(Region *region)
269 {
270     return activeSemiSpace_->SwapRegion(region, inactiveSemiSpace_);
271 }
272 
273 void Heap::MergeToOldSpaceSync(LocalSpace *localSpace)
274 {
275     oldSpace_->Merge(localSpace);
276 }
277 
278 bool Heap::InHeapProfiler()
279 {
280 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
281     return GetEcmaVM()->GetHeapProfile() != nullptr;
282 #else
283     return false;
284 #endif
285 }
286 
287 void SharedHeap::MergeToOldSpaceSync(SharedLocalSpace *localSpace)
288 {
289     sOldSpace_->Merge(localSpace);
290 }
291 
292 TaggedObject *Heap::TryAllocateYoungGeneration(JSHClass *hclass, size_t size)
293 {
294     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
295     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
296         return nullptr;
297     }
298     auto object = reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
299     if (object != nullptr) {
300         object->SetClass(thread_, hclass);
301     }
302 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
303     OnAllocateEvent(GetEcmaVM(), object, size);
304 #endif
305     return object;
306 }
307 
308 TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass)
309 {
310     size_t size = hclass->GetObjectSize();
311     TaggedObject *object = AllocateOldOrHugeObject(hclass, size);
312     if (object == nullptr) {
313         LOG_ECMA(FATAL) << "Heap::AllocateOldOrHugeObject:object is nullptr";
314     }
315 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
316     OnAllocateEvent(GetEcmaVM(), object, size);
317 #endif
318     return object;
319 }
320 
321 TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass, size_t size)
322 {
323     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
324     TaggedObject *object = nullptr;
325     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
326         object = AllocateHugeObject(hclass, size);
327     } else {
328         object = reinterpret_cast<TaggedObject *>(oldSpace_->Allocate(size));
329         CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, oldSpace_, "Heap::AllocateOldOrHugeObject");
330         object->SetClass(thread_, hclass);
331     }
332 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
333     OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject*>(object), size);
334 #endif
335     return object;
336 }
337 
338 TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass)
339 {
340     size_t size = hclass->GetObjectSize();
341     TaggedObject *object = AllocateReadOnlyOrHugeObject(hclass, size);
342 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
343     OnAllocateEvent(GetEcmaVM(), object, size);
344 #endif
345     return object;
346 }
347 
348 TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size)
349 {
350     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
351     TaggedObject *object = nullptr;
352     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
353         object = AllocateHugeObject(hclass, size);
354     } else {
355         object = reinterpret_cast<TaggedObject *>(readOnlySpace_->Allocate(size));
356         CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, readOnlySpace_, "Heap::AllocateReadOnlyOrHugeObject");
357         ASSERT(object != nullptr);
358         object->SetClass(thread_, hclass);
359     }
360 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
361     OnAllocateEvent(GetEcmaVM(), object, size);
362 #endif
363     return object;
364 }
365 
366 TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass)
367 {
368     size_t size = hclass->GetObjectSize();
369     TaggedObject *object = AllocateNonMovableOrHugeObject(hclass, size);
370     if (object == nullptr) {
371         LOG_ECMA(FATAL) << "Heap::AllocateNonMovableOrHugeObject:object is nullptr";
372     }
373 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
374     OnAllocateEvent(GetEcmaVM(), object, size);
375 #endif
376     return object;
377 }
378 
379 TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size)
380 {
381     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
382     TaggedObject *object = nullptr;
383     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
384         object = AllocateHugeObject(hclass, size);
385     } else {
386         object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->CheckAndAllocate(size));
387         CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, nonMovableSpace_, "Heap::AllocateNonMovableOrHugeObject");
388         object->SetClass(thread_, hclass);
389     }
390 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
391     OnAllocateEvent(GetEcmaVM(), object, size);
392 #endif
393     return object;
394 }
395 
396 TaggedObject *Heap::AllocateClassClass(JSHClass *hclass, size_t size)
397 {
398     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
399     auto object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->Allocate(size));
400     if (UNLIKELY(object == nullptr)) {
401         LOG_ECMA_MEM(FATAL) << "Heap::AllocateClassClass can not allocate any space";
402         UNREACHABLE();
403     }
404     *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
405 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
406     OnAllocateEvent(GetEcmaVM(), object, size);
407 #endif
408     return object;
409 }
410 
411 TaggedObject *SharedHeap::AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size)
412 {
413     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
414     auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
415     if (UNLIKELY(object == nullptr)) {
416         LOG_ECMA_MEM(FATAL) << "SharedHeap::AllocateClassClass can not allocate any space";
417         UNREACHABLE();
418     }
419     *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
420 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
421     OnAllocateEvent(thread->GetEcmaVM(), object, size);
422 #endif
423     return object;
424 }
425 
426 TaggedObject *Heap::AllocateHugeObject(size_t size)
427 {
428     // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
429     CheckAndTriggerOldGC(size);
430 
431     auto *object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
432     if (UNLIKELY(object == nullptr)) {
433         CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
434         object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
435         if (UNLIKELY(object == nullptr)) {
436             // If huge object allocation fails with OOM, temporarily increase the space size to avoid a VM crash
437             size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
438             oldSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
439             DumpHeapSnapshotBeforeOOM(false);
440             StatisticHeapDetail();
441             object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
442             ThrowOutOfMemoryError(thread_, size, "Heap::AllocateHugeObject");
443             object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
444             if (UNLIKELY(object == nullptr)) {
445                 FatalOutOfMemoryError(size, "Heap::AllocateHugeObject");
446             }
447         }
448     }
449     return object;
450 }
451 
452 TaggedObject *Heap::AllocateHugeObject(JSHClass *hclass, size_t size)
453 {
454     // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
455     CheckAndTriggerOldGC(size);
456     auto object = AllocateHugeObject(size);
457     object->SetClass(thread_, hclass);
458 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
459     OnAllocateEvent(GetEcmaVM(), object, size);
460 #endif
461     return object;
462 }
463 
464 TaggedObject *Heap::AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc)
465 {
466     TaggedObject *object;
467     if (desc) {
468         object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
469             size, thread_, reinterpret_cast<void *>(desc)));
470     } else {
471         object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
472             size, thread_));
473     }
474     return object;
475 }
476 
477 TaggedObject *Heap::AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc)
478 {
479     TaggedObject *object;
480     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
481     if (!desc) {
482         // Jit Fort disabled
483         ASSERT(!GetEcmaVM()->GetJSOptions().GetEnableJitFort());
484         object = (size > MAX_REGULAR_HEAP_OBJECT_SIZE) ?
485             reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size)) :
486             reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size));
487         CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, machineCodeSpace_,
488             "Heap::AllocateMachineCodeObject");
489         object->SetClass(thread_, hclass);
490 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
491         OnAllocateEvent(GetEcmaVM(), object, size);
492 #endif
493         return object;
494     }
495 
496     // Jit Fort enabled
497     ASSERT(GetEcmaVM()->GetJSOptions().GetEnableJitFort());
498     if (!GetEcmaVM()->GetJSOptions().GetEnableAsyncCopyToFort() || !desc->isAsyncCompileMode) {
499         desc->instructionsAddr = 0;
500         if (size <= MAX_REGULAR_HEAP_OBJECT_SIZE) {
501             // For non-huge code cache objects, allocate fort space before allocating the code object
502             uintptr_t mem = machineCodeSpace_->JitFortAllocate(desc);
503             if (mem == ToUintPtr(nullptr)) {
504                 return nullptr;
505             }
506             desc->instructionsAddr = mem;
507         }
508     }
509     object = (size > MAX_REGULAR_HEAP_OBJECT_SIZE) ?
510         reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size, desc)) :
511         reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size, desc, true));
512     CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, machineCodeSpace_, desc,
513         "Heap::AllocateMachineCodeObject");
514     object->SetClass(thread_, hclass);
515 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
516     OnAllocateEvent(GetEcmaVM(), object, size);
517 #endif
518     return object;
519 }
520 
521 uintptr_t Heap::AllocateSnapshotSpace(size_t size)
522 {
523     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
524     uintptr_t object = snapshotSpace_->Allocate(size);
525     if (UNLIKELY(object == 0)) {
526         FatalOutOfMemoryError(size, "Heap::AllocateSnapshotSpaceObject");
527     }
528 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
529     OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject *>(object), size);
530 #endif
531     return object;
532 }
533 
534 TaggedObject *Heap::AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size)
535 {
536     ASSERT(!thread->IsJitThread());
537     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
538     TaggedObject *object = reinterpret_cast<TaggedObject*>(sNonMovableTlab_->Allocate(size));
539     if (object != nullptr) {
540         return object;
541     }
542     if (!sNonMovableTlab_->NeedNewTlab(size)) {
543         // slowpath
544         return nullptr;
545     }
546     size_t newTlabSize = sNonMovableTlab_->ComputeSize();
547     object = sHeap_->AllocateSNonMovableTlab(thread, newTlabSize);
548     if (object == nullptr) {
549         sNonMovableTlab_->DisableNewTlab();
550         return nullptr;
551     }
552     uintptr_t begin = reinterpret_cast<uintptr_t>(object);
553     sNonMovableTlab_->Reset(begin, begin + newTlabSize, begin + size);
554     auto topAddress = sNonMovableTlab_->GetTopAddress();
555     auto endAddress = sNonMovableTlab_->GetEndAddress();
556     thread->ReSetSNonMovableSpaceAllocationAddress(topAddress, endAddress);
557     sHeap_->TryTriggerConcurrentMarking(thread);
558     return object;
559 }
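// The TLAB refill above follows the usual bump-pointer pattern: serve the request from the current
// thread-local buffer, and when it does not fit, either fall back to the shared-space slow path or fetch a
// new buffer from the shared heap, reset top/end, and re-publish the addresses to the JSThread so subsequent
// allocations can again be served inline. The old-space variant below mirrors the same scheme.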
560 
561 TaggedObject *Heap::AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size)
562 {
563     ASSERT(!thread->IsJitThread());
564     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
565     TaggedObject *object = reinterpret_cast<TaggedObject*>(sOldTlab_->Allocate(size));
566     if (object != nullptr) {
567         return object;
568     }
569     if (!sOldTlab_->NeedNewTlab(size)) {
570         // slowpath
571         return nullptr;
572     }
573     size_t newTlabSize = sOldTlab_->ComputeSize();
574     object = sHeap_->AllocateSOldTlab(thread, newTlabSize);
575     if (object == nullptr) {
576         sOldTlab_->DisableNewTlab();
577         return nullptr;
578     }
579     uintptr_t begin = reinterpret_cast<uintptr_t>(object);
580     sOldTlab_->Reset(begin, begin + newTlabSize, begin + size);
581     auto topAddress = sOldTlab_->GetTopAddress();
582     auto endAddress = sOldTlab_->GetEndAddress();
583     thread->ReSetSOldSpaceAllocationAddress(topAddress, endAddress);
584     sHeap_->TryTriggerConcurrentMarking(thread);
585     return object;
586 }
587 
588 void Heap::SwapNewSpace()
589 {
590     activeSemiSpace_->Stop();
591     size_t newOverShootSize = 0;
592     if (!inBackground_ && gcType_ != TriggerGCType::FULL_GC && gcType_ != TriggerGCType::APPSPAWN_FULL_GC) {
593         newOverShootSize = activeSemiSpace_->CalculateNewOverShootSize();
594     }
595     inactiveSemiSpace_->Restart(newOverShootSize);
596 
597     SemiSpace *newSpace = inactiveSemiSpace_;
598     inactiveSemiSpace_ = activeSemiSpace_;
599     activeSemiSpace_ = newSpace;
600     if (UNLIKELY(ShouldVerifyHeap())) {
601         inactiveSemiSpace_->EnumerateRegions([](Region *region) {
602             region->SetInactiveSemiSpace();
603         });
604     }
605 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
606     activeSemiSpace_->SwapAllocationCounter(inactiveSemiSpace_);
607 #endif
608     auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
609     auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
610     thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
611 }
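// The flip above is the classic semispace swap: stop the active space, restart the inactive one with an
// overshoot budget (skipped for full/appspawn-full GC or when in background), exchange the two pointers, and
// re-publish the allocation top/end addresses so inline allocation continues in the new active semispace.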
612 
613 void Heap::SwapOldSpace()
614 {
615     compressSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity());
616     auto *oldSpace = compressSpace_;
617     compressSpace_ = oldSpace_;
618     oldSpace_ = oldSpace;
619 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
620     oldSpace_->SwapAllocationCounter(compressSpace_);
621 #endif
622 }
623 
624 void SharedHeap::SwapOldSpace()
625 {
626     sCompressSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity());
627     auto *oldSpace = sCompressSpace_;
628     sCompressSpace_ = sOldSpace_;
629     sOldSpace_ = oldSpace;
630 #ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
631     sOldSpace_->SwapAllocationCounter(sCompressSpace_);
632 #endif
633 }
634 
635 void Heap::ReclaimRegions(TriggerGCType gcType)
636 {
637     activeSemiSpace_->EnumerateRegionsWithRecord([] (Region *region) {
638         region->ResetRegionTypeFlag();
639         region->ClearMarkGCBitset();
640         region->ClearCrossRegionRSet();
641         region->ResetAliveObject();
642         region->DeleteNewToEdenRSet();
643         region->ClearGCFlag(RegionGCFlags::IN_NEW_TO_NEW_SET);
644     });
645     size_t cachedSize = inactiveSemiSpace_->GetInitialCapacity();
646     if (gcType == TriggerGCType::FULL_GC) {
647         compressSpace_->Reset();
648         cachedSize = 0;
649     } else if (gcType == TriggerGCType::OLD_GC) {
650         oldSpace_->ReclaimCSet();
651         isCSetClearing_.store(false, std::memory_order_release);
652     }
653 
654     inactiveSemiSpace_->ReclaimRegions(cachedSize);
655     sweeper_->WaitAllTaskFinished();
656     // machine code space is not swept in young GC
657     if (ecmaVm_->GetJSOptions().GetEnableJitFort()) {
658         if (machineCodeSpace_->sweepState_ != SweepState::NO_SWEEP) {
659             if (machineCodeSpace_->GetJitFort() &&
660                 machineCodeSpace_->GetJitFort()->IsMachineCodeGC()) {
661                 machineCodeSpace_->UpdateFortSpace();
662             }
663         }
664     }
665     EnumerateNonNewSpaceRegionsWithRecord([] (Region *region) {
666         region->ClearMarkGCBitset();
667         region->ClearCrossRegionRSet();
668     });
669     if (!clearTaskFinished_) {
670         LockHolder holder(waitClearTaskFinishedMutex_);
671         clearTaskFinished_ = true;
672         waitClearTaskFinishedCV_.SignalAll();
673     }
674 }
675 
676 // only called on the js-thread
677 void Heap::ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd)
678 {
679     if (!current->InGeneralNewSpace()) {
680         // This clear may exist data race with concurrent sweeping, so use CAS
681         current->AtomicClearSweepingOldToNewRSetInRange(freeStart, freeEnd);
682         current->ClearOldToNewRSetInRange(freeStart, freeEnd);
683         current->AtomicClearCrossRegionRSetInRange(freeStart, freeEnd);
684     }
685     current->ClearLocalToShareRSetInRange(freeStart, freeEnd);
686     current->AtomicClearSweepingLocalToShareRSetInRange(freeStart, freeEnd);
687 }
688 
689 size_t Heap::GetCommittedSize() const
690 {
691     size_t result = edenSpace_->GetCommittedSize() +
692                     activeSemiSpace_->GetCommittedSize() +
693                     oldSpace_->GetCommittedSize() +
694                     hugeObjectSpace_->GetCommittedSize() +
695                     nonMovableSpace_->GetCommittedSize() +
696                     machineCodeSpace_->GetCommittedSize() +
697                     hugeMachineCodeSpace_->GetCommittedSize() +
698                     readOnlySpace_->GetCommittedSize() +
699                     appSpawnSpace_->GetCommittedSize() +
700                     snapshotSpace_->GetCommittedSize();
701     return result;
702 }
703 
704 size_t Heap::GetHeapObjectSize() const
705 {
706     size_t result = edenSpace_->GetHeapObjectSize() +
707                     activeSemiSpace_->GetHeapObjectSize() +
708                     oldSpace_->GetHeapObjectSize() +
709                     hugeObjectSpace_->GetHeapObjectSize() +
710                     nonMovableSpace_->GetHeapObjectSize() +
711                     machineCodeSpace_->GetCommittedSize() +
712                     hugeMachineCodeSpace_->GetCommittedSize() +
713                     readOnlySpace_->GetCommittedSize() +
714                     appSpawnSpace_->GetHeapObjectSize() +
715                     snapshotSpace_->GetHeapObjectSize();
716     return result;
717 }
718 
719 void Heap::NotifyRecordMemorySize()
720 {
721     if (GetRecordObjectSize() == 0) {
722         RecordOrResetObjectSize(GetHeapObjectSize());
723     }
724     if (GetRecordNativeSize() == 0) {
725         RecordOrResetNativeSize(GetNativeBindingSize());
726     }
727 }
728 
729 size_t Heap::GetRegionCount() const
730 {
731     size_t result = edenSpace_->GetRegionCount() +
732         activeSemiSpace_->GetRegionCount() +
733         oldSpace_->GetRegionCount() +
734         oldSpace_->GetCollectSetRegionCount() +
735         appSpawnSpace_->GetRegionCount() +
736         snapshotSpace_->GetRegionCount() +
737         nonMovableSpace_->GetRegionCount() +
738         hugeObjectSpace_->GetRegionCount() +
739         machineCodeSpace_->GetRegionCount() +
740         hugeMachineCodeSpace_->GetRegionCount();
741     return result;
742 }
743 
744 uint32_t Heap::GetHeapObjectCount() const
745 {
746     uint32_t count = 0;
747     sweeper_->EnsureAllTaskFinished();
748     this->IterateOverObjects([&count]([[maybe_unused]] TaggedObject *obj) {
749         ++count;
750     });
751     return count;
752 }
753 
754 void Heap::InitializeIdleStatusControl(std::function<void(bool)> callback)
755 {
756     notifyIdleStatusCallback = callback;
757     if (callback != nullptr) {
758         OPTIONAL_LOG(ecmaVm_, INFO) << "Received idle status control call back";
759         enableIdleGC_ = ecmaVm_->GetJSOptions().EnableIdleGC();
760     }
761 }
762 
763 void SharedHeap::TryTriggerConcurrentMarking(JSThread *thread)
764 {
765     if (!CheckCanTriggerConcurrentMarking(thread)) {
766         return;
767     }
768     bool triggerConcurrentMark = (GetHeapObjectSize() >= globalSpaceConcurrentMarkLimit_);
769     if (triggerConcurrentMark && (OnStartupEvent() || IsJustFinishStartup())) {
770         triggerConcurrentMark = ObjectExceedJustFinishStartupThresholdForCM();
771     }
772     if (triggerConcurrentMark) {
773         TriggerConcurrentMarking<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
774     }
775 }
776 
777 void SharedHeap::CollectGarbageFinish(bool inDaemon, TriggerGCType gcType)
778 {
779     if (inDaemon) {
780         ASSERT(JSThread::GetCurrent() == dThread_);
781 #ifndef NDEBUG
782         ASSERT(dThread_->HasLaunchedSuspendAll());
783 #endif
784         dThread_->FinishRunningTask();
785         NotifyGCCompleted();
786         // The update to forceGC_ happens in DaemonSuspendAll and is protected by Runtime::mutatorLock_,
787         // so no lock is needed here.
788         smartGCStats_.forceGC_ = false;
789     }
790     localFullMarkTriggered_ = false;
791     // Record alive object size after shared gc and other stats
792     UpdateHeapStatsAfterGC(gcType);
793     // Adjust shared gc trigger threshold
794     AdjustGlobalSpaceAllocLimit();
795     GetEcmaGCStats()->RecordStatisticAfterGC();
796     GetEcmaGCStats()->PrintGCStatistic();
797     ProcessAllGCListeners();
798 }
799 
800 TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass)
801 {
802     size_t size = hclass->GetObjectSize();
803     return AllocateNonMovableOrHugeObject(thread, hclass, size);
804 }
805 
806 TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
807 {
808     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
809     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
810         return AllocateHugeObject(thread, hclass, size);
811     }
812     TaggedObject *object = thread->IsJitThread() ? nullptr :
813         const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
814     if (object == nullptr) {
815         object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
816         CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
817             "SharedHeap::AllocateNonMovableOrHugeObject");
818         object->SetClass(thread, hclass);
819         TryTriggerConcurrentMarking(thread);
820     } else {
821         object->SetClass(thread, hclass);
822     }
823 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
824     OnAllocateEvent(thread->GetEcmaVM(), object, size);
825 #endif
826     return object;
827 }
828 
829 TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, size_t size)
830 {
831     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
832     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
833         return AllocateHugeObject(thread, size);
834     }
835     TaggedObject *object = thread->IsJitThread() ? nullptr :
836         const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
837     if (object == nullptr) {
838         object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
839         CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
840             "SharedHeap::AllocateNonMovableOrHugeObject");
841         TryTriggerConcurrentMarking(thread);
842     }
843 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
844     OnAllocateEvent(thread->GetEcmaVM(), object, size);
845 #endif
846     return object;
847 }
848 
849 TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass)
850 {
851     size_t size = hclass->GetObjectSize();
852     return AllocateOldOrHugeObject(thread, hclass, size);
853 }
854 
855 TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
856 {
857     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
858     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
859         return AllocateHugeObject(thread, hclass, size);
860     }
861     TaggedObject *object = thread->IsJitThread() ? nullptr :
862         const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
863     if (object == nullptr) {
864         object = AllocateInSOldSpace(thread, size);
865         CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
866         object->SetClass(thread, hclass);
867         TryTriggerConcurrentMarking(thread);
868     } else {
869         object->SetClass(thread, hclass);
870     }
871 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
872     OnAllocateEvent(thread->GetEcmaVM(), object, size);
873 #endif
874     return object;
875 }
876 
877 TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, size_t size)
878 {
879     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
880     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
881         return AllocateHugeObject(thread, size);
882     }
883     TaggedObject *object = thread->IsJitThread() ? nullptr :
884         const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
885     if (object == nullptr) {
886         object = AllocateInSOldSpace(thread, size);
887         CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
888         TryTriggerConcurrentMarking(thread);
889     }
890     return object;
891 }
892 
893 TaggedObject *SharedHeap::AllocateInSOldSpace(JSThread *thread, size_t size)
894 {
895     // JIT threads have no local heap, so do not trigger GC for them
896     bool allowGC = !thread->IsJitThread();
897     if (allowGC) {
898         auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
899         localHeap->TryTriggerFullMarkBySharedSize(size);
900     }
901     TaggedObject *object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, false));
902     // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
903     if (object == nullptr) {
904         if (allowGC) {
905             CheckAndTriggerSharedGC(thread);
906         }
907         object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
908         if (object == nullptr) {
909             if (allowGC) {
910                 CollectGarbageNearOOM(thread);
911             }
912             object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
913         }
914     }
915     return object;
916 }
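// The shared old-space path above escalates in steps: an initial TryAllocateAndExpand, then (when GC is
// allowed) CheckAndTriggerSharedGC followed by a retry, and finally CollectGarbageNearOOM before the last
// retry. JIT threads skip the GC triggers entirely, since they have no local heap to collect against.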
917 
918 TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
919 {
920     auto object = AllocateHugeObject(thread, size);
921     object->SetClass(thread, hclass);
922 #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
923     OnAllocateEvent(thread->GetEcmaVM(), object, size);
924 #endif
925     return object;
926 }
927 
928 TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, size_t size)
929 {
930     // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
931     CheckHugeAndTriggerSharedGC(thread, size);
932     auto *object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
933     if (UNLIKELY(object == nullptr)) {
934         CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
935         object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
936         if (UNLIKELY(object == nullptr)) {
937             // If huge object allocation fails with OOM, temporarily increase the space size to avoid a VM crash
938             size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
939             sHugeObjectSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
940             DumpHeapSnapshotBeforeOOM(false, thread);
941             ThrowOutOfMemoryError(thread, size, "SharedHeap::AllocateHugeObject");
942             object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
943             if (UNLIKELY(object == nullptr)) {
944                 FatalOutOfMemoryError(size, "SharedHeap::AllocateHugeObject");
945             }
946         }
947     }
948     TryTriggerConcurrentMarking(thread);
949     return object;
950 }
951 
952 TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass)
953 {
954     size_t size = hclass->GetObjectSize();
955     return AllocateReadOnlyOrHugeObject(thread, hclass, size);
956 }
957 
958 TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
959 {
960     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
961     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
962         return AllocateHugeObject(thread, hclass, size);
963     }
964     auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
965     CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sReadOnlySpace_, "SharedHeap::AllocateReadOnlyOrHugeObject");
966     ASSERT(object != nullptr);
967     object->SetClass(thread, hclass);
968     return object;
969 }
970 
971 TaggedObject *SharedHeap::AllocateSOldTlab(JSThread *thread, size_t size)
972 {
973     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
974     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
975         return nullptr;
976     }
977     TaggedObject *object = nullptr;
978     if (sOldSpace_->GetCommittedSize() > sOldSpace_->GetInitialCapacity() / 2) { // 2: half
979         object = reinterpret_cast<TaggedObject *>(sOldSpace_->AllocateNoGCAndExpand(thread, size));
980     } else {
981         object = AllocateInSOldSpace(thread, size);
982     }
983     return object;
984 }
985 
986 TaggedObject *SharedHeap::AllocateSNonMovableTlab(JSThread *thread, size_t size)
987 {
988     size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
989     if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
990         return nullptr;
991     }
992     TaggedObject *object = nullptr;
993     object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
994     return object;
995 }
996 
997 template<TriggerGCType gcType, GCReason gcReason>
998 void SharedHeap::TriggerConcurrentMarking(JSThread *thread)
999 {
1000     ASSERT(gcType == TriggerGCType::SHARED_GC);
1001     // The lock is taken outside to prevent an extreme case; updating gcFinished_ could perhaps be moved
1002     // into CheckAndPostTask instead of locking outside.
1003     LockHolder lock(waitGCFinishedMutex_);
1004     if (dThread_->CheckAndPostTask(TriggerConcurrentMarkTask<gcType, gcReason>(thread))) {
1005         ASSERT(gcFinished_);
1006         gcFinished_ = false;
1007     }
1008 }
1009 
1010 template<TriggerGCType gcType, GCReason gcReason>
1011 void SharedHeap::CollectGarbage(JSThread *thread)
1012 {
1013     ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
1014 #ifndef NDEBUG
1015     ASSERT(!thread->HasLaunchedSuspendAll());
1016 #endif
1017     if (UNLIKELY(!dThread_->IsRunning())) {
1018         // This should not happen unless AppSpawn runs something after PostFork.
1019         LOG_GC(ERROR) << "Try to collect garbage in shared heap, but daemon thread is not running.";
1020         ForceCollectGarbageWithoutDaemonThread(gcType, gcReason, thread);
1021         return;
1022     }
1023     {
1024         // The lock is taken outside the post-task to prevent an extreme case: another js thread succeeds in
1025         // posting a concurrent mark task, so this thread goes directly into WaitGCFinished, but gcFinished_ is
1026         // somehow not set by that js thread before WaitGCFinished completes, which may cause an unexpected OOM
1027         LockHolder lock(waitGCFinishedMutex_);
1028         if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<gcType, gcReason>(thread))) {
1029             ASSERT(gcFinished_);
1030             gcFinished_ = false;
1031         }
1032     }
1033     ASSERT(!gcFinished_);
1034     SetForceGC(true);
1035     WaitGCFinished(thread);
1036 }
1037 
1038 template<TriggerGCType gcType, GCReason gcReason>
1039 void SharedHeap::PostGCTaskForTest(JSThread *thread)
1040 {
1041     ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
1042 #ifndef NDEBUG
1043     ASSERT(!thread->HasLaunchedSuspendAll());
1044 #endif
1045     if (dThread_->IsRunning()) {
1046         // Some UT may run without Daemon Thread.
1047         LockHolder lock(waitGCFinishedMutex_);
1048         if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<gcType, gcReason>(thread))) {
1049             ASSERT(gcFinished_);
1050             gcFinished_ = false;
1051         }
1052         ASSERT(!gcFinished_);
1053     }
1054 }
1055 
1056 static void SwapBackAndPop(CVector<JSNativePointer*>& vec, CVector<JSNativePointer*>::iterator& iter)
1057 {
1058     *iter = vec.back();
1059     if (iter + 1 == vec.end()) {
1060         vec.pop_back();
1061         iter = vec.end();
1062     } else {
1063         vec.pop_back();
1064     }
1065 }
1066 
1067 static void ShrinkWithFactor(CVector<JSNativePointer*>& vec)
1068 {
1069     constexpr size_t SHRINK_FACTOR = 2;
1070     if (vec.size() < vec.capacity() / SHRINK_FACTOR) {
1071         vec.shrink_to_fit();
1072     }
1073 }
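// Sketch of the unordered-erase idiom used with SwapBackAndPop above (names are illustrative): removal during
// iteration overwrites the current slot with the last element and pops the back, so the caller must not
// advance the iterator after a removal, e.g.:
//     auto it = vec.begin();
//     while (it != vec.end()) {
//         if (ShouldRemove(*it)) {        // ShouldRemove is a hypothetical predicate
//             SwapBackAndPop(vec, it);    // 'it' now points at the swapped-in element (or end())
//         } else {
//             ++it;
//         }
//     }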
1074 
1075 void Heap::ProcessNativeDelete(const WeakRootVisitor& visitor)
1076 {
1077     // ProcessNativeDelete should be limited to OldGC or FullGC only
1078     if (!IsGeneralYoungGC()) {
1079         auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
1080         auto iter = nativePointerList_.begin();
1081         ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ProcessNativeDeleteNum:" + std::to_string(nativePointerList_.size()));
1082         while (iter != nativePointerList_.end()) {
1083             JSNativePointer* object = *iter;
1084             auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1085             if (fwd == nullptr) {
1086                 size_t bindingSize = object->GetBindingSize();
1087                 asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
1088                     std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
1089                 nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
1090                 SwapBackAndPop(nativePointerList_, iter);
1091             } else {
1092                 ++iter;
1093             }
1094         }
1095         ShrinkWithFactor(nativePointerList_);
1096 
1097         auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
1098         auto newIter = concurrentNativePointerList_.begin();
1099         while (newIter != concurrentNativePointerList_.end()) {
1100             JSNativePointer* object = *newIter;
1101             auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1102             if (fwd == nullptr) {
1103                 nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
1104                 concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
1105                     std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
1106                 SwapBackAndPop(concurrentNativePointerList_, newIter);
1107             } else {
1108                 ++newIter;
1109             }
1110         }
1111         ShrinkWithFactor(concurrentNativePointerList_);
1112     }
1113 }
1114 
1115 void Heap::ProcessSharedNativeDelete(const WeakRootVisitor& visitor)
1116 {
1117     auto& sharedNativePointerCallbacks = GetEcmaVM()->GetSharedNativePointerCallbacks();
1118     auto sharedIter = sharedNativePointerList_.begin();
1119     while (sharedIter != sharedNativePointerList_.end()) {
1120         JSNativePointer* object = *sharedIter;
1121         auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1122         if (fwd == nullptr) {
1123             sharedNativePointerCallbacks.emplace_back(
1124                 object->GetDeleter(), std::make_pair(object->GetExternalPointer(), object->GetData()));
1125             SwapBackAndPop(sharedNativePointerList_, sharedIter);
1126         } else {
1127             if (fwd != reinterpret_cast<TaggedObject*>(object)) {
1128                 *sharedIter = reinterpret_cast<JSNativePointer*>(fwd);
1129             }
1130             ++sharedIter;
1131         }
1132     }
1133     ShrinkWithFactor(sharedNativePointerList_);
1134 }
1135 
1136 void Heap::ProcessReferences(const WeakRootVisitor& visitor)
1137 {
1138     // Processing native references should be limited to OldGC or FullGC only
1139     if (!IsGeneralYoungGC()) {
1140         auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
1141         ResetNativeBindingSize();
1142         // array buffer
1143         auto iter = nativePointerList_.begin();
1144         ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ProcessReferencesNum:" + std::to_string(nativePointerList_.size()));
1145         while (iter != nativePointerList_.end()) {
1146             JSNativePointer* object = *iter;
1147             auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1148             if (fwd == nullptr) {
1149                 size_t bindingSize = object->GetBindingSize();
1150                 asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
1151                     std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
1152                 nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
1153                 SwapBackAndPop(nativePointerList_, iter);
1154                 continue;
1155             }
1156             IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
1157             if (fwd != reinterpret_cast<TaggedObject*>(object)) {
1158                 *iter = JSNativePointer::Cast(fwd);
1159             }
1160             ++iter;
1161         }
1162         ShrinkWithFactor(nativePointerList_);
1163 
1164         auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
1165         auto newIter = concurrentNativePointerList_.begin();
1166         while (newIter != concurrentNativePointerList_.end()) {
1167             JSNativePointer* object = *newIter;
1168             auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
1169             if (fwd == nullptr) {
1170                 nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
1171                 concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
1172                     std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
1173                 SwapBackAndPop(concurrentNativePointerList_, newIter);
1174                 continue;
1175             }
1176             IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
1177             if (fwd != reinterpret_cast<TaggedObject*>(object)) {
1178                 *newIter = JSNativePointer::Cast(fwd);
1179             }
1180             ++newIter;
1181         }
1182         ShrinkWithFactor(concurrentNativePointerList_);
1183     }
1184 }
1185 
1186 void Heap::PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent)
1187 {
1188     ASSERT(!JSTaggedValue(pointer).IsInSharedHeap());
1189     if (isConcurrent) {
1190         concurrentNativePointerList_.emplace_back(pointer);
1191     } else {
1192         nativePointerList_.emplace_back(pointer);
1193     }
1194 }
1195 
1196 void Heap::PushToSharedNativePointerList(JSNativePointer* pointer)
1197 {
1198     ASSERT(JSTaggedValue(pointer).IsInSharedHeap());
1199     sharedNativePointerList_.emplace_back(pointer);
1200 }
1201 
1202 void Heap::RemoveFromNativePointerList(const JSNativePointer* pointer)
1203 {
1204     auto iter = std::find(nativePointerList_.begin(), nativePointerList_.end(), pointer);
1205     if (iter != nativePointerList_.end()) {
1206         JSNativePointer* object = *iter;
1207         nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
1208         object->Destroy(thread_);
1209         SwapBackAndPop(nativePointerList_, iter);
1210     }
1211     auto newIter = std::find(concurrentNativePointerList_.begin(), concurrentNativePointerList_.end(), pointer);
1212     if (newIter != concurrentNativePointerList_.end()) {
1213         JSNativePointer* object = *newIter;
1214         nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
1215         object->Destroy(thread_);
1216         SwapBackAndPop(concurrentNativePointerList_, newIter);
1217     }
1218 }
1219 
1220 void Heap::ClearNativePointerList()
1221 {
1222     for (auto iter : nativePointerList_) {
1223         iter->Destroy(thread_);
1224     }
1225     for (auto iter : concurrentNativePointerList_) {
1226         iter->Destroy(thread_);
1227     }
1228     nativePointerList_.clear();
1229 }
1230 
1231 }  // namespace panda::ecmascript
1232 
1233 #endif  // ECMASCRIPT_MEM_HEAP_INL_H
1234