/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_HEAP_INL_H
#define ECMASCRIPT_MEM_HEAP_INL_H

#include "ecmascript/mem/heap.h"

#include "ecmascript/js_native_pointer.h"
#include "ecmascript/daemon/daemon_task-inl.h"
#include "ecmascript/dfx/hprof/heap_tracker.h"
#include "ecmascript/ecma_vm.h"
#include "ecmascript/mem/allocator-inl.h"
#include "ecmascript/mem/concurrent_sweeper.h"
#include "ecmascript/mem/linear_space.h"
#include "ecmascript/mem/mem_controller.h"
#include "ecmascript/mem/sparse_space.h"
#include "ecmascript/mem/tagged_object.h"
#include "ecmascript/mem/thread_local_allocation_buffer.h"
#include "ecmascript/mem/barriers-inl.h"
#include "ecmascript/mem/mem_map_allocator.h"

namespace panda::ecmascript {
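// On allocation failure these helpers temporarily raise the space's OOM overshoot limit,
// optionally dump a heap snapshot, throw (or set) an OutOfMemory error on the JS thread,
// and then retry the allocation once within the enlarged limit. Typical usage in this file:
//     CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, oldSpace_, "Heap::AllocateOldOrHugeObject");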
#define CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, space, message) \
    if (UNLIKELY((object) == nullptr)) { \
        EcmaVM *vm = GetEcmaVM(); \
        size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize(); \
        (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize); \
        if ((space)->IsOOMDumpSpace()) { \
            DumpHeapSnapshotBeforeOOM(false); \
        } \
        StatisticHeapDetail(); \
        ThrowOutOfMemoryError(GetJSThread(), size, message); \
        (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size)); \
    }

#define CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, space, message) \
    if (UNLIKELY((object) == nullptr)) { \
        size_t oomOvershootSize = GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize(); \
        (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize); \
        DumpHeapSnapshotBeforeOOM(false, thread); \
        ThrowOutOfMemoryError(thread, size, message); \
        (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(thread, size)); \
    }

#define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, space, desc, message) \
    if (UNLIKELY((object) == nullptr)) { \
        EcmaVM *vm = GetEcmaVM(); \
        size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize(); \
        (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize); \
        SetMachineCodeOutOfMemoryError(GetJSThread(), size, message); \
        (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size, desc)); \
    }

#define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, space, message) \
    if (UNLIKELY((object) == nullptr)) { \
        EcmaVM *vm = GetEcmaVM(); \
        size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize(); \
        (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize); \
        SetMachineCodeOutOfMemoryError(GetJSThread(), size, message); \
        (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size)); \
    }

template<class Callback>
void SharedHeap::EnumerateOldSpaceRegions(const Callback &cb) const
{
    sOldSpace_->EnumerateRegions(cb);
    sNonMovableSpace_->EnumerateRegions(cb);
    sHugeObjectSpace_->EnumerateRegions(cb);
    sAppSpawnSpace_->EnumerateRegions(cb);
}

template<class Callback>
void SharedHeap::EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const
{
    sOldSpace_->EnumerateRegionsWithRecord(cb);
    sNonMovableSpace_->EnumerateRegionsWithRecord(cb);
    sHugeObjectSpace_->EnumerateRegionsWithRecord(cb);
}

template<class Callback>
void SharedHeap::IterateOverObjects(const Callback &cb) const
{
    sOldSpace_->IterateOverObjects(cb);
    sNonMovableSpace_->IterateOverObjects(cb);
    sHugeObjectSpace_->IterateOverObjects(cb);
    sAppSpawnSpace_->IterateOverMarkedObjects(cb);
}

template<class Callback>
void Heap::EnumerateOldSpaceRegions(const Callback &cb, Region *region) const
{
    oldSpace_->EnumerateRegions(cb, region);
    appSpawnSpace_->EnumerateRegions(cb);
    nonMovableSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateSnapshotSpaceRegions(const Callback &cb) const
{
    snapshotSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateNonNewSpaceRegions(const Callback &cb) const
{
    oldSpace_->EnumerateRegions(cb);
    if (!isCSetClearing_.load(std::memory_order_acquire)) {
        oldSpace_->EnumerateCollectRegionSet(cb);
    }
    appSpawnSpace_->EnumerateRegions(cb);
    snapshotSpace_->EnumerateRegions(cb);
    nonMovableSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const
{
    oldSpace_->EnumerateRegionsWithRecord(cb);
    snapshotSpace_->EnumerateRegionsWithRecord(cb);
    nonMovableSpace_->EnumerateRegionsWithRecord(cb);
    hugeObjectSpace_->EnumerateRegionsWithRecord(cb);
    machineCodeSpace_->EnumerateRegionsWithRecord(cb);
    hugeMachineCodeSpace_->EnumerateRegionsWithRecord(cb);
}

template<class Callback>
void Heap::EnumerateEdenSpaceRegions(const Callback &cb) const
{
    edenSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateNewSpaceRegions(const Callback &cb) const
{
    activeSemiSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateNonMovableRegions(const Callback &cb) const
{
    snapshotSpace_->EnumerateRegions(cb);
    appSpawnSpace_->EnumerateRegions(cb);
    nonMovableSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateRegions(const Callback &cb) const
{
    edenSpace_->EnumerateRegions(cb);
    activeSemiSpace_->EnumerateRegions(cb);
    oldSpace_->EnumerateRegions(cb);
    if (!isCSetClearing_.load(std::memory_order_acquire)) {
        oldSpace_->EnumerateCollectRegionSet(cb);
    }
    appSpawnSpace_->EnumerateRegions(cb);
    snapshotSpace_->EnumerateRegions(cb);
    nonMovableSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::IterateOverObjects(const Callback &cb, bool isSimplify) const
{
    edenSpace_->IterateOverObjects(cb);
    activeSemiSpace_->IterateOverObjects(cb);
    oldSpace_->IterateOverObjects(cb);
    nonMovableSpace_->IterateOverObjects(cb);
    hugeObjectSpace_->IterateOverObjects(cb);
    machineCodeSpace_->IterateOverObjects(cb);
    hugeMachineCodeSpace_->IterateOverObjects(cb);
    snapshotSpace_->IterateOverObjects(cb);
    if (!isSimplify) {
        readOnlySpace_->IterateOverObjects(cb);
        appSpawnSpace_->IterateOverMarkedObjects(cb);
    }
}

TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    return AllocateYoungOrHugeObject(hclass, size);
}

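// Allocation fallback path for regular-sized young objects: try the general new space first; on
// failure, collect garbage (unless HandleExitHighSensitiveEvent() already took action), retry,
// collect once more, and finally let CHECK_OBJ_AND_THROW_OOM_ERROR throw and retry within the
// temporarily enlarged overshoot limit. Objects above MAX_REGULAR_HEAP_OBJECT_SIZE go to huge space.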
TaggedObject *Heap::AllocateYoungOrHugeObject(size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = nullptr;
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        object = AllocateHugeObject(size);
    } else {
        object = AllocateInGeneralNewSpace(size);
        if (object == nullptr) {
            if (!HandleExitHighSensitiveEvent()) {
                CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
            }
            object = AllocateInGeneralNewSpace(size);
            if (object == nullptr) {
                CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
                object = AllocateInGeneralNewSpace(size);
                CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, activeSemiSpace_, "Heap::AllocateYoungOrHugeObject");
            }
        }
    }
    return object;
}

TaggedObject *Heap::AllocateInGeneralNewSpace(size_t size)
{
    if (enableEdenGC_) {
        auto object = reinterpret_cast<TaggedObject *>(edenSpace_->Allocate(size));
        if (object != nullptr) {
            return object;
        }
    }
    return reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
}

TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass, size_t size)
{
    auto object = AllocateYoungOrHugeObject(size);
    ASSERT(object != nullptr);
    object->SetClass(thread_, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

void BaseHeap::SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
                                           [[maybe_unused]] size_t size)
{
    ASSERT(object != nullptr);
    object->SetClass(thread, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
}

uintptr_t Heap::AllocateYoungSync(size_t size)
{
    return activeSemiSpace_->AllocateSync(size);
}

bool Heap::MoveYoungRegionSync(Region *region)
{
    return activeSemiSpace_->SwapRegion(region, inactiveSemiSpace_);
}

void Heap::MergeToOldSpaceSync(LocalSpace *localSpace)
{
    oldSpace_->Merge(localSpace);
}

bool Heap::InHeapProfiler()
{
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    return GetEcmaVM()->GetHeapProfile() != nullptr;
#else
    return false;
#endif
}

void SharedHeap::MergeToOldSpaceSync(SharedLocalSpace *localSpace)
{
    sOldSpace_->Merge(localSpace);
}

TaggedObject *Heap::TryAllocateYoungGeneration(JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return nullptr;
    }
    auto object = reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
    if (object != nullptr) {
        object->SetClass(thread_, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    TaggedObject *object = AllocateOldOrHugeObject(hclass, size);
    if (object == nullptr) {
        LOG_ECMA(FATAL) << "Heap::AllocateOldOrHugeObject:object is nullptr";
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = nullptr;
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        object = AllocateHugeObject(hclass, size);
    } else {
        object = reinterpret_cast<TaggedObject *>(oldSpace_->Allocate(size));
        CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, oldSpace_, "Heap::AllocateOldOrHugeObject");
        object->SetClass(thread_, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject*>(object), size);
#endif
    return object;
}

TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    TaggedObject *object = AllocateReadOnlyOrHugeObject(hclass, size);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = nullptr;
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        object = AllocateHugeObject(hclass, size);
    } else {
        object = reinterpret_cast<TaggedObject *>(readOnlySpace_->Allocate(size));
        CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, readOnlySpace_, "Heap::AllocateReadOnlyOrHugeObject");
        ASSERT(object != nullptr);
        object->SetClass(thread_, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    TaggedObject *object = AllocateNonMovableOrHugeObject(hclass, size);
    if (object == nullptr) {
        LOG_ECMA(FATAL) << "Heap::AllocateNonMovableOrHugeObject:object is nullptr";
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = nullptr;
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        object = AllocateHugeObject(hclass, size);
    } else {
        object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->CheckAndAllocate(size));
        CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, nonMovableSpace_, "Heap::AllocateNonMovableOrHugeObject");
        object->SetClass(thread_, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateClassClass(JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    auto object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->Allocate(size));
    if (UNLIKELY(object == nullptr)) {
        LOG_ECMA_MEM(FATAL) << "Heap::AllocateClassClass can not allocate any space";
        UNREACHABLE();
    }
    *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
    if (UNLIKELY(object == nullptr)) {
        LOG_ECMA_MEM(FATAL) << "Heap::AllocateClassClass can not allocate any space";
        UNREACHABLE();
    }
    *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateHugeObject(size_t size)
{
    // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
    CheckAndTriggerOldGC(size);

    auto *object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
    if (UNLIKELY(object == nullptr)) {
        CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
        if (UNLIKELY(object == nullptr)) {
            // If allocating a huge object fails with OOM, temporarily increase the space size to avoid a VM crash.
            size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
            oldSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
            DumpHeapSnapshotBeforeOOM(false);
            StatisticHeapDetail();
            object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
            ThrowOutOfMemoryError(thread_, size, "Heap::AllocateHugeObject");
            object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
            if (UNLIKELY(object == nullptr)) {
                FatalOutOfMemoryError(size, "Heap::AllocateHugeObject");
            }
        }
    }
    return object;
}

TaggedObject *Heap::AllocateHugeObject(JSHClass *hclass, size_t size)
{
    // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
    CheckAndTriggerOldGC(size);
    auto object = AllocateHugeObject(size);
    object->SetClass(thread_, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc)
{
    TaggedObject *object;
    if (desc) {
        object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
            size, thread_, reinterpret_cast<void *>(desc)));
    } else {
        object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
            size, thread_));
    }
    return object;
}

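// Allocates a MachineCode object. With JIT Fort disabled (desc == nullptr) the object goes straight
// into the machine code space (or huge machine code space). With JIT Fort enabled, for the
// synchronous copy path the Fort memory for the instructions is reserved first via JitFortAllocate
// and recorded in desc->instructionsAddr before the code object itself is allocated.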
TaggedObject *Heap::AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc)
{
    TaggedObject *object;
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (!desc) {
        // Jit Fort disabled
        ASSERT(!GetEcmaVM()->GetJSOptions().GetEnableJitFort());
        object = (size > MAX_REGULAR_HEAP_OBJECT_SIZE) ?
            reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size)) :
            reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size));
        CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, machineCodeSpace_,
                                                 "Heap::AllocateMachineCodeObject");
        object->SetClass(thread_, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
        OnAllocateEvent(GetEcmaVM(), object, size);
#endif
        return object;
    }

    // Jit Fort enabled
    ASSERT(GetEcmaVM()->GetJSOptions().GetEnableJitFort());
    if (!GetEcmaVM()->GetJSOptions().GetEnableAsyncCopyToFort() || !desc->isAsyncCompileMode) {
        desc->instructionsAddr = 0;
        if (size <= MAX_REGULAR_HEAP_OBJECT_SIZE) {
            // For a non-huge code cache object, allocate Fort space before allocating the code object.
            uintptr_t mem = machineCodeSpace_->JitFortAllocate(desc);
            if (mem == ToUintPtr(nullptr)) {
                return nullptr;
            }
            desc->instructionsAddr = mem;
        }
    }
    object = (size > MAX_REGULAR_HEAP_OBJECT_SIZE) ?
        reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size, desc)) :
        reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size, desc, true));
    CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, machineCodeSpace_, desc,
                                                  "Heap::AllocateMachineCodeObject");
    object->SetClass(thread_, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

uintptr_t Heap::AllocateSnapshotSpace(size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    uintptr_t object = snapshotSpace_->Allocate(size);
    if (UNLIKELY(object == 0)) {
        FatalOutOfMemoryError(size, "Heap::AllocateSnapshotSpaceObject");
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject *>(object), size);
#endif
    return object;
}

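// Fast-path allocation in the shared non-movable space through a thread-local allocation buffer
// (TLAB): bump-allocate from the current TLAB; if it is exhausted and a new TLAB is allowed,
// request one from the shared heap, reset the buffer over it, and publish the new top/end
// addresses to the thread so inline allocation can continue.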
TaggedObject *Heap::AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size)
{
    ASSERT(!thread->IsJitThread());
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = reinterpret_cast<TaggedObject*>(sNonMovableTlab_->Allocate(size));
    if (object != nullptr) {
        return object;
    }
    if (!sNonMovableTlab_->NeedNewTlab(size)) {
        // slowpath
        return nullptr;
    }
    size_t newTlabSize = sNonMovableTlab_->ComputeSize();
    object = sHeap_->AllocateSNonMovableTlab(thread, newTlabSize);
    if (object == nullptr) {
        sNonMovableTlab_->DisableNewTlab();
        return nullptr;
    }
    uintptr_t begin = reinterpret_cast<uintptr_t>(object);
    sNonMovableTlab_->Reset(begin, begin + newTlabSize, begin + size);
    auto topAddress = sNonMovableTlab_->GetTopAddress();
    auto endAddress = sNonMovableTlab_->GetEndAddress();
    thread->ReSetSNonMovableSpaceAllocationAddress(topAddress, endAddress);
    sHeap_->TryTriggerConcurrentMarking(thread);
    return object;
}

TaggedObject *Heap::AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size)
{
    ASSERT(!thread->IsJitThread());
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = reinterpret_cast<TaggedObject*>(sOldTlab_->Allocate(size));
    if (object != nullptr) {
        return object;
    }
    if (!sOldTlab_->NeedNewTlab(size)) {
        // slowpath
        return nullptr;
    }
    size_t newTlabSize = sOldTlab_->ComputeSize();
    object = sHeap_->AllocateSOldTlab(thread, newTlabSize);
    if (object == nullptr) {
        sOldTlab_->DisableNewTlab();
        return nullptr;
    }
    uintptr_t begin = reinterpret_cast<uintptr_t>(object);
    sOldTlab_->Reset(begin, begin + newTlabSize, begin + size);
    auto topAddress = sOldTlab_->GetTopAddress();
    auto endAddress = sOldTlab_->GetEndAddress();
    thread->ReSetSOldSpaceAllocationAddress(topAddress, endAddress);
    sHeap_->TryTriggerConcurrentMarking(thread);
    return object;
}

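// Flips the semispaces after a young GC: stop the active semispace, restart the inactive one
// (with an overshoot allowance when not in background and not doing a full GC), swap the two
// pointers, and republish the allocation top/end addresses to the JS thread.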
void Heap::SwapNewSpace()
{
    activeSemiSpace_->Stop();
    size_t newOverShootSize = 0;
    if (!inBackground_ && gcType_ != TriggerGCType::FULL_GC && gcType_ != TriggerGCType::APPSPAWN_FULL_GC) {
        newOverShootSize = activeSemiSpace_->CalculateNewOverShootSize();
    }
    inactiveSemiSpace_->Restart(newOverShootSize);

    SemiSpace *newSpace = inactiveSemiSpace_;
    inactiveSemiSpace_ = activeSemiSpace_;
    activeSemiSpace_ = newSpace;
    if (UNLIKELY(ShouldVerifyHeap())) {
        inactiveSemiSpace_->EnumerateRegions([](Region *region) {
            region->SetInactiveSemiSpace();
        });
    }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    activeSemiSpace_->SwapAllocationCounter(inactiveSemiSpace_);
#endif
    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
}

void Heap::SwapOldSpace()
{
    compressSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity());
    auto *oldSpace = compressSpace_;
    compressSpace_ = oldSpace_;
    oldSpace_ = oldSpace;
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    oldSpace_->SwapAllocationCounter(compressSpace_);
#endif
}

void SharedHeap::SwapOldSpace()
{
    sCompressSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity());
    auto *oldSpace = sCompressSpace_;
    sCompressSpace_ = sOldSpace_;
    sOldSpace_ = oldSpace;
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    sOldSpace_->SwapAllocationCounter(sCompressSpace_);
#endif
}

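// Post-GC cleanup: reset region metadata (mark bitsets, remembered sets, alive-object counters)
// in the surviving semispace, reclaim the collection set or compress space depending on the GC
// type, wait for concurrent sweeping, update the JIT Fort space if machine code was collected,
// and finally signal any thread waiting for the clear task to finish.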
void Heap::ReclaimRegions(TriggerGCType gcType)
{
    activeSemiSpace_->EnumerateRegionsWithRecord([] (Region *region) {
        region->ResetRegionTypeFlag();
        region->ClearMarkGCBitset();
        region->ClearCrossRegionRSet();
        region->ResetAliveObject();
        region->DeleteNewToEdenRSet();
        region->ClearGCFlag(RegionGCFlags::IN_NEW_TO_NEW_SET);
    });
    size_t cachedSize = inactiveSemiSpace_->GetInitialCapacity();
    if (gcType == TriggerGCType::FULL_GC) {
        compressSpace_->Reset();
        cachedSize = 0;
    } else if (gcType == TriggerGCType::OLD_GC) {
        oldSpace_->ReclaimCSet();
        isCSetClearing_.store(false, std::memory_order_release);
    }

    inactiveSemiSpace_->ReclaimRegions(cachedSize);
    sweeper_->WaitAllTaskFinished();
    // The machine code space is not swept in young GC.
    if (ecmaVm_->GetJSOptions().GetEnableJitFort()) {
        if (machineCodeSpace_->sweepState_ != SweepState::NO_SWEEP) {
            if (machineCodeSpace_->GetJitFort() &&
                machineCodeSpace_->GetJitFort()->IsMachineCodeGC()) {
                machineCodeSpace_->UpdateFortSpace();
            }
        }
    }
    EnumerateNonNewSpaceRegionsWithRecord([] (Region *region) {
        region->ClearMarkGCBitset();
        region->ClearCrossRegionRSet();
    });
    if (!clearTaskFinished_) {
        LockHolder holder(waitClearTaskFinishedMutex_);
        clearTaskFinished_ = true;
        waitClearTaskFinishedCV_.SignalAll();
    }
}

// Only called on the JS thread.
void Heap::ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd)
{
    if (!current->InGeneralNewSpace()) {
        // This clear may race with concurrent sweeping, so use CAS.
        current->AtomicClearSweepingOldToNewRSetInRange(freeStart, freeEnd);
        current->ClearOldToNewRSetInRange(freeStart, freeEnd);
        current->AtomicClearCrossRegionRSetInRange(freeStart, freeEnd);
    }
    current->ClearLocalToShareRSetInRange(freeStart, freeEnd);
    current->AtomicClearSweepingLocalToShareRSetInRange(freeStart, freeEnd);
}

size_t Heap::GetCommittedSize() const
{
    size_t result = edenSpace_->GetCommittedSize() +
                    activeSemiSpace_->GetCommittedSize() +
                    oldSpace_->GetCommittedSize() +
                    hugeObjectSpace_->GetCommittedSize() +
                    nonMovableSpace_->GetCommittedSize() +
                    machineCodeSpace_->GetCommittedSize() +
                    hugeMachineCodeSpace_->GetCommittedSize() +
                    readOnlySpace_->GetCommittedSize() +
                    appSpawnSpace_->GetCommittedSize() +
                    snapshotSpace_->GetCommittedSize();
    return result;
}

size_t Heap::GetHeapObjectSize() const
{
    size_t result = edenSpace_->GetHeapObjectSize() +
                    activeSemiSpace_->GetHeapObjectSize() +
                    oldSpace_->GetHeapObjectSize() +
                    hugeObjectSpace_->GetHeapObjectSize() +
                    nonMovableSpace_->GetHeapObjectSize() +
                    machineCodeSpace_->GetCommittedSize() +
                    hugeMachineCodeSpace_->GetCommittedSize() +
                    readOnlySpace_->GetCommittedSize() +
                    appSpawnSpace_->GetHeapObjectSize() +
                    snapshotSpace_->GetHeapObjectSize();
    return result;
}

void Heap::NotifyRecordMemorySize()
{
    if (GetRecordObjectSize() == 0) {
        RecordOrResetObjectSize(GetHeapObjectSize());
    }
    if (GetRecordNativeSize() == 0) {
        RecordOrResetNativeSize(GetNativeBindingSize());
    }
}

size_t Heap::GetRegionCount() const
{
    size_t result = edenSpace_->GetRegionCount() +
                    activeSemiSpace_->GetRegionCount() +
                    oldSpace_->GetRegionCount() +
                    oldSpace_->GetCollectSetRegionCount() +
                    appSpawnSpace_->GetRegionCount() +
                    snapshotSpace_->GetRegionCount() +
                    nonMovableSpace_->GetRegionCount() +
                    hugeObjectSpace_->GetRegionCount() +
                    machineCodeSpace_->GetRegionCount() +
                    hugeMachineCodeSpace_->GetRegionCount();
    return result;
}

uint32_t Heap::GetHeapObjectCount() const
{
    uint32_t count = 0;
    sweeper_->EnsureAllTaskFinished();
    this->IterateOverObjects([&count]([[maybe_unused]] TaggedObject *obj) {
        ++count;
    });
    return count;
}

void Heap::InitializeIdleStatusControl(std::function<void(bool)> callback)
{
    notifyIdleStatusCallback = callback;
    if (callback != nullptr) {
        OPTIONAL_LOG(ecmaVm_, INFO) << "Received idle status control call back";
        enableIdleGC_ = ecmaVm_->GetJSOptions().EnableIdleGC();
    }
}

void SharedHeap::TryTriggerConcurrentMarking(JSThread *thread)
{
    if (!CheckCanTriggerConcurrentMarking(thread)) {
        return;
    }
    if (GetHeapObjectSize() >= globalSpaceConcurrentMarkLimit_) {
        TriggerConcurrentMarking<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
    }
}

void SharedHeap::CollectGarbageFinish(bool inDaemon, TriggerGCType gcType)
{
    if (inDaemon) {
        ASSERT(JSThread::GetCurrent() == dThread_);
#ifndef NDEBUG
        ASSERT(dThread_->HasLaunchedSuspendAll());
#endif
        dThread_->FinishRunningTask();
        NotifyGCCompleted();
        // The update to forceGC_ happens in the daemon thread's SuspendAll and is protected by
        // Runtime::mutatorLock_, so no extra lock is needed here.
        smartGCStats_.forceGC_ = false;
    }
    localFullMarkTriggered_ = false;
    // Record the alive object size and other stats after shared GC.
    UpdateHeapStatsAfterGC(gcType);
    // Adjust the shared GC trigger threshold.
    AdjustGlobalSpaceAllocLimit();
    GetEcmaGCStats()->RecordStatisticAfterGC();
    GetEcmaGCStats()->PrintGCStatistic();
    ProcessAllGCListeners();
}

TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    return AllocateNonMovableOrHugeObject(thread, hclass, size);
}

TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, hclass, size);
    }
    TaggedObject *object = thread->IsJitThread() ? nullptr :
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
    if (object == nullptr) {
        object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
        CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
                                       "SharedHeap::AllocateNonMovableOrHugeObject");
        object->SetClass(thread, hclass);
        TryTriggerConcurrentMarking(thread);
    } else {
        object->SetClass(thread, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, size);
    }
    TaggedObject *object = thread->IsJitThread() ? nullptr :
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
    if (object == nullptr) {
        object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
        CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
                                       "SharedHeap::AllocateNonMovableOrHugeObject");
        TryTriggerConcurrentMarking(thread);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    return AllocateOldOrHugeObject(thread, hclass, size);
}

TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, hclass, size);
    }
    TaggedObject *object = thread->IsJitThread() ? nullptr :
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
    if (object == nullptr) {
        object = AllocateInSOldSpace(thread, size);
        CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
        object->SetClass(thread, hclass);
        TryTriggerConcurrentMarking(thread);
    } else {
        object->SetClass(thread, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, size);
    }
    TaggedObject *object = thread->IsJitThread() ? nullptr :
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
    if (object == nullptr) {
        object = AllocateInSOldSpace(thread, size);
        CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
        TryTriggerConcurrentMarking(thread);
    }
    return object;
}

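// Slow-path allocation in the shared old space: attempt allocation first; on failure, trigger a
// shared GC when the caller is a mutator thread (the JIT thread must not GC), retry, and as a last
// resort collect garbage near OOM before a final attempt.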
TaggedObject *SharedHeap::AllocateInSOldSpace(JSThread *thread, size_t size)
{
    // The JIT thread has no local heap.
    bool allowGC = !thread->IsJitThread();
    if (allowGC) {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        localHeap->TryTriggerFullMarkBySharedSize(size);
    }
    TaggedObject *object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, false));
    // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
    if (object == nullptr) {
        if (allowGC) {
            CheckAndTriggerSharedGC(thread);
        }
        object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
        if (object == nullptr) {
            if (allowGC) {
                CollectGarbageNearOOM(thread);
            }
            object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
        }
    }
    return object;
}

TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{
    auto object = AllocateHugeObject(thread, size);
    object->SetClass(thread, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, size_t size)
{
    // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
    CheckHugeAndTriggerSharedGC(thread, size);
    auto *object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
    if (UNLIKELY(object == nullptr)) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
        if (UNLIKELY(object == nullptr)) {
            // If allocating a huge object fails with OOM, temporarily increase the space size to avoid a VM crash.
            size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
            sHugeObjectSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
            DumpHeapSnapshotBeforeOOM(false, thread);
            ThrowOutOfMemoryError(thread, size, "SharedHeap::AllocateHugeObject");
            object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
            if (UNLIKELY(object == nullptr)) {
                FatalOutOfMemoryError(size, "SharedHeap::AllocateHugeObject");
            }
        }
    }
    TryTriggerConcurrentMarking(thread);
    return object;
}

TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    return AllocateReadOnlyOrHugeObject(thread, hclass, size);
}

TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, hclass, size);
    }
    auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
    CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sReadOnlySpace_, "SharedHeap::AllocateReadOnlyOrHugeObject");
    ASSERT(object != nullptr);
    object->SetClass(thread, hclass);
    return object;
}

TaggedObject *SharedHeap::AllocateSOldTlab(JSThread *thread, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return nullptr;
    }
    TaggedObject *object = nullptr;
    if (sOldSpace_->GetCommittedSize() > sOldSpace_->GetInitialCapacity() / 2) { // 2: half
        object = reinterpret_cast<TaggedObject *>(sOldSpace_->AllocateNoGCAndExpand(thread, size));
    } else {
        object = AllocateInSOldSpace(thread, size);
    }
    return object;
}

TaggedObject *SharedHeap::AllocateSNonMovableTlab(JSThread *thread, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return nullptr;
    }
    TaggedObject *object = nullptr;
    object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
    return object;
}

template<TriggerGCType gcType, GCReason gcReason>
void SharedHeap::TriggerConcurrentMarking(JSThread *thread)
{
    ASSERT(gcType == TriggerGCType::SHARED_GC);
    // The lock is taken outside CheckAndPostTask to avoid a race in the extreme case; alternatively,
    // the update of gcFinished_ could be moved into CheckAndPostTask instead of locking here.
    LockHolder lock(waitGCFinishedMutex_);
    if (dThread_->CheckAndPostTask(TriggerConcurrentMarkTask<gcType, gcReason>(thread))) {
        ASSERT(gcFinished_);
        gcFinished_ = false;
    }
}

template<TriggerGCType gcType, GCReason gcReason>
void SharedHeap::CollectGarbage(JSThread *thread)
{
    ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
#ifndef NDEBUG
    ASSERT(!thread->HasLaunchedSuspendAll());
#endif
    if (UNLIKELY(!dThread_->IsRunning())) {
        // This should not happen unless AppSpawn runs something after PostFork.
        LOG_GC(ERROR) << "Try to collect garbage in shared heap, but daemon thread is not running.";
        ForceCollectGarbageWithoutDaemonThread(gcType, gcReason, thread);
        return;
    }
    {
        // The lock is taken outside the post-task call to prevent an extreme case: another JS thread
        // succeeds in posting a concurrent-mark task, so this thread would go directly into
        // WaitGCFinished, but gcFinished_ might not yet have been cleared by that thread before
        // WaitGCFinished completes, which could cause an unexpected OOM.
        LockHolder lock(waitGCFinishedMutex_);
        if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<gcType, gcReason>(thread))) {
            ASSERT(gcFinished_);
            gcFinished_ = false;
        }
    }
    ASSERT(!gcFinished_);
    SetForceGC(true);
    WaitGCFinished(thread);
}

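// Helpers for the native pointer lists below: SwapBackAndPop removes the element at `iter` in O(1)
// by overwriting it with the last element and popping the back (the iterator stays valid unless it
// pointed at the last slot); ShrinkWithFactor releases excess capacity once the vector is less than
// half full.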
static void SwapBackAndPop(CVector<JSNativePointer*>& vec, CVector<JSNativePointer*>::iterator& iter)
{
    *iter = vec.back();
    if (iter + 1 == vec.end()) {
        vec.pop_back();
        iter = vec.end();
    } else {
        vec.pop_back();
    }
}

static void ShrinkWithFactor(CVector<JSNativePointer*>& vec)
{
    constexpr size_t SHRINK_FACTOR = 2;
    if (vec.size() < vec.capacity() / SHRINK_FACTOR) {
        vec.shrink_to_fit();
    }
}

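// Sweeps the native pointer lists after a non-young GC: for every JSNativePointer whose referent
// was not kept alive (the weak-root visitor returns nullptr), queue its deleter callback (async or
// concurrent, depending on the list), decrease the native size stats, and remove the entry with
// SwapBackAndPop; surviving entries are left in place.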
void Heap::ProcessNativeDelete(const WeakRootVisitor& visitor)
{
    // ProcessNativeDelete should be limited to OldGC or FullGC only
    if (!IsGeneralYoungGC()) {
        auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
        auto iter = nativePointerList_.begin();
        ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ProcessNativeDeleteNum:" + std::to_string(nativePointerList_.size()));
        while (iter != nativePointerList_.end()) {
            JSNativePointer* object = *iter;
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                size_t bindingSize = object->GetBindingSize();
                asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
                nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
                SwapBackAndPop(nativePointerList_, iter);
            } else {
                ++iter;
            }
        }
        ShrinkWithFactor(nativePointerList_);

        auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
        auto newIter = concurrentNativePointerList_.begin();
        while (newIter != concurrentNativePointerList_.end()) {
            JSNativePointer* object = *newIter;
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
                concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
                SwapBackAndPop(concurrentNativePointerList_, newIter);
            } else {
                ++newIter;
            }
        }
        ShrinkWithFactor(concurrentNativePointerList_);
    }
}

void Heap::ProcessSharedNativeDelete(const WeakRootVisitor& visitor)
{
    auto& sharedNativePointerCallbacks = GetEcmaVM()->GetSharedNativePointerCallbacks();
    auto sharedIter = sharedNativePointerList_.begin();
    while (sharedIter != sharedNativePointerList_.end()) {
        JSNativePointer* object = *sharedIter;
        auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
        if (fwd == nullptr) {
            sharedNativePointerCallbacks.emplace_back(
                object->GetDeleter(), std::make_pair(object->GetExternalPointer(), object->GetData()));
            SwapBackAndPop(sharedNativePointerList_, sharedIter);
        } else {
            if (fwd != reinterpret_cast<TaggedObject*>(object)) {
                *sharedIter = reinterpret_cast<JSNativePointer*>(fwd);
            }
            ++sharedIter;
        }
    }
    ShrinkWithFactor(sharedNativePointerList_);
}

void Heap::ProcessReferences(const WeakRootVisitor& visitor)
{
    // process native ref should be limited to OldGC or FullGC only
    if (!IsGeneralYoungGC()) {
        auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
        ResetNativeBindingSize();
        // array buffer
        auto iter = nativePointerList_.begin();
        ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ProcessReferencesNum:" + std::to_string(nativePointerList_.size()));
        while (iter != nativePointerList_.end()) {
            JSNativePointer* object = *iter;
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                size_t bindingSize = object->GetBindingSize();
                asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
                nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
                SwapBackAndPop(nativePointerList_, iter);
                continue;
            }
            IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
            if (fwd != reinterpret_cast<TaggedObject*>(object)) {
                *iter = JSNativePointer::Cast(fwd);
            }
            ++iter;
        }
        ShrinkWithFactor(nativePointerList_);

        auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
        auto newIter = concurrentNativePointerList_.begin();
        while (newIter != concurrentNativePointerList_.end()) {
            JSNativePointer* object = *newIter;
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
                concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
                SwapBackAndPop(concurrentNativePointerList_, newIter);
                continue;
            }
            IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
            if (fwd != reinterpret_cast<TaggedObject*>(object)) {
                *newIter = JSNativePointer::Cast(fwd);
            }
            ++newIter;
        }
        ShrinkWithFactor(concurrentNativePointerList_);
    }
}

void Heap::PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent)
{
    ASSERT(!JSTaggedValue(pointer).IsInSharedHeap());
    if (isConcurrent) {
        concurrentNativePointerList_.emplace_back(pointer);
    } else {
        nativePointerList_.emplace_back(pointer);
    }
}

void Heap::PushToSharedNativePointerList(JSNativePointer* pointer)
{
    ASSERT(JSTaggedValue(pointer).IsInSharedHeap());
    sharedNativePointerList_.emplace_back(pointer);
}

void Heap::RemoveFromNativePointerList(const JSNativePointer* pointer)
{
    auto iter = std::find(nativePointerList_.begin(), nativePointerList_.end(), pointer);
    if (iter != nativePointerList_.end()) {
        JSNativePointer* object = *iter;
        nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
        object->Destroy(thread_);
        SwapBackAndPop(nativePointerList_, iter);
    }
    auto newIter = std::find(concurrentNativePointerList_.begin(), concurrentNativePointerList_.end(), pointer);
    if (newIter != concurrentNativePointerList_.end()) {
        JSNativePointer* object = *newIter;
        nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
        object->Destroy(thread_);
        SwapBackAndPop(concurrentNativePointerList_, newIter);
    }
}

void Heap::ClearNativePointerList()
{
    for (auto iter : nativePointerList_) {
        iter->Destroy(thread_);
    }
    for (auto iter : concurrentNativePointerList_) {
        iter->Destroy(thread_);
    }
    nativePointerList_.clear();
    // Also clear the concurrent list so it does not keep dangling pointers to destroyed objects.
    concurrentNativePointerList_.clear();
}

} // namespace panda::ecmascript

#endif // ECMASCRIPT_MEM_HEAP_INL_H