/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/sparse_space.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/allocator-inl.h"
#include "ecmascript/mem/concurrent_sweeper.h"
#include "ecmascript/mem/free_object_set.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/mem_controller.h"
#include "ecmascript/runtime_call_id.h"

namespace panda::ecmascript {
SparseSpace::SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      liveObjectSize_(0)
{
    allocator_ = new FreeListAllocator(heap);
}

void SparseSpace::Initialize()
{
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE,
                                                                 heap_->GetJSThread());
    region->InitializeFreeObjectSets();
    AddRegion(region);

    allocator_->Initialize(region);
}

void SparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}

void SparseSpace::ResetTopPointer(uintptr_t top)
{
    allocator_->ResetTopPointer(top);
}

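// Allocation with a chain of fallbacks: try the free list; if a concurrent sweep is in
// progress, wait for swept regions; then optionally trigger an old GC, try to expand the
// space, and finally fall back to a blocking OLD_GC before one last non-GC retry.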
uintptr_t SparseSpace::Allocate(size_t size, bool allowGC)
{
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
    if (allowGC && heap_->CheckAndTriggerOldGC()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (allowGC) {
        heap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = Allocate(size, false);
        // The object size has already been incremented by the recursive call.
    }
    return object;
}

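// Commits one more region to this space, unless the committed-size limit is exceeded.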
bool SparseSpace::Expand()
{
    if (CommittedSizeExceed()) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }

    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, heap_->GetJSThread());
    region->InitializeFreeObjectSets();
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}

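// Called while a concurrent sweep is in flight: first try to refill the free list from
// regions the sweeper has already finished; if that is not enough, block until the sweep
// task for this space completes and retry the allocation.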
uintptr_t SparseSpace::AllocateAfterSweepingCompleted(size_t size)
{
    ASSERT(sweepState_ == SweepState::SWEEPING);
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ConcurrentSweepingWait);
    if (TryFillSweptRegion()) {
        auto object = allocator_->Allocate(size);
        if (object != 0) {
            return object;
        }
    }
    // Parallel sweep and fill
    heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    return allocator_->Allocate(size);
}

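// Entry point of a concurrent sweeping cycle: queues every region outside the collect set
// for the sweeper, swaps the remembered sets for concurrent access, and rebuilds the free
// list, which will be refilled region by region as sweeping finishes.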
void SparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            current->SwapRSetForConcurrentSweeping();
            AddSweepingRegion(current);
        }
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}

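// Drains the sweeping queue on either the main thread or a sweeper thread.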
void SparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Regions swept off the main thread are queued on the swept list;
        // the main thread merges the RSet in place instead.
        if (!isMain) {
            AddSweptRegionSafe(current);
            current->SetSwept();
        } else {
            current->MergeRSetForConcurrentSweeping();
        }
        current = GetSweepingRegionSafe();
    }
}

void SparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            FreeRegion(current);
        }
    });
}

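// Returns swept regions' free object sets to the allocator so allocation can proceed
// while the rest of the space is still being swept; false means nothing was swept yet.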
bool SparseSpace::TryFillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
        region->ResetSwept();
        region->MergeRSetForConcurrentSweeping();
    }
    return true;
}

bool SparseSpace::FinishFillSweptRegion()
{
    bool ret = TryFillSweptRegion();
    sweepState_ = SweepState::SWEPT;
    return ret;
}

void SparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}

void SparseSpace::SortSweepingRegion()
{
    // Sweep regions with a low alive object size first
    std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });
}

Region *SparseSpace::GetSweepingRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();
        sweepingList_.pop_back();
    }
    return region;
}

void SparseSpace::AddSweptRegionSafe(Region *region)
{
    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
}

Region *SparseSpace::GetSweptRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();
        sweptList_.pop_back();
    }
    return region;
}

void SparseSpace::FreeRegionFromSpace(Region *region)
{
    region->ResetSwept();
    region->MergeRSetForConcurrentSweeping();
    RemoveRegion(region);
    DecreaseLiveObjectSize(region->AliveObject());
}

Region *SparseSpace::TryToGetSuitableSweptRegion(size_t size)
{
    if (sweepState_ != SweepState::SWEEPING) {
        return nullptr;
    }
    if (sweptList_.empty()) {
        return nullptr;
    }
    LockHolder holder(lock_);
    for (auto iter = sweptList_.begin(); iter != sweptList_.end(); iter++) {
        if (allocator_->MatchFreeObjectSet(*iter, size)) {
            Region *region = *iter;
            FreeRegionFromSpace(region);
            sweptList_.erase(iter);
            return region;
        }
    }
    return nullptr;
}

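// Frees the dead space of a region: every gap between two live (marked) objects, plus the
// tail after the last live object, is handed back to the free-list allocator.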
void SparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &current, &freeStart, isMain](void *mem) {
        ASSERT(current->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(current, freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(current, freeStart, freeEnd, isMain);
    }
}

void SparseSpace::FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    heap_->GetSweeper()->ClearRSetInRange(current, freeStart, freeEnd);
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}

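// Linear scan over every object in the space: live objects are passed to the visitor,
// FreeObject fillers are skipped, and memory is briefly unpoisoned for ASan builds.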
void SparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned before it is read.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

void SparseSpace::IterateOldToNewOverObjects(
    const std::function<void(TaggedObject *object, JSTaggedValue value)> &visitor) const
{
    auto cb = [visitor](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        visitor(reinterpret_cast<TaggedObject *>(mem), JSTaggedValue(slot.GetTaggedType()));
        return true;
    };
    EnumerateRegions([cb] (Region *region) {
        region->IterateAllSweepingRSetBits(cb);
        region->IterateAllOldToNewBits(cb);
    });
}

size_t SparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}

void SparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}

size_t SparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}

void SparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}

void SparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
    ASSERT(size <= alignedSize);
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (alignedSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
    }
    allocationCounter_.AdvanceAllocationInspector(alignedSize);
}

OldSpace::OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, OLD_SPACE, initialCapacity, maximumCapacity) {}

Region *OldSpace::TrySweepToGetSuitableRegion(size_t size)
{
    // Try sweeping a region to get space for the allocation;
    // since sweepingList_ is ordered, we just need to check once.
    Region *availableRegion = GetSweepingRegionSafe();
    if (availableRegion != nullptr) {
        FreeRegion(availableRegion, false);
        // If the region now has enough free space for the requested size, detach it from
        // the current space and hand it over for the local space to use;
        // otherwise, add it to sweptList_.
        if (allocator_->MatchFreeObjectSet(availableRegion, size)) {
            FreeRegionFromSpace(availableRegion);
            return availableRegion;
        } else {
            AddSweptRegionSafe(availableRegion);
            availableRegion->SetSwept();
        }
    }
    return nullptr;
}

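// Tries to detach a whole region able to satisfy an allocation of `size` from the global
// old space: first via the free lists, then via already-swept regions, and finally by
// sweeping one more region on demand.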
Region *OldSpace::TryToGetExclusiveRegion(size_t size)
{
    LockHolder lock(lock_);
    uintptr_t result = allocator_->LookupSuitableFreeObject(size);
    if (result != 0) {
        // Remove region from global old space
        Region *region = Region::ObjectAddressToRange(result);
        RemoveRegion(region);
        allocator_->DetachFreeObjectSet(region);
        DecreaseLiveObjectSize(region->AliveObject());
        return region;
    }
    if (sweepState_ == SweepState::SWEEPING) {
        Region *availableRegion = nullptr;
        availableRegion = TryToGetSuitableSweptRegion(size);
        if (availableRegion != nullptr) {
            return availableRegion;
        }
        return TrySweepToGetSuitableRegion(size);
    }
    return nullptr;
}

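// Folds all regions of a LocalSpace back into this old space, moving their free object
// sets and live-size accounting; if the merge overshoots the capacity limit, an OOM error
// is flagged and the limit is raised temporarily rather than crashing the VM.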
void OldSpace::Merge(LocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    size_t hugeSpaceCommitSize = heap_->GetHugeObjectSpace()->GetCommittedSize();
    if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        heap_->ShouldThrowOOMError(true);
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        // If an OOM error is about to be thrown, temporarily increase the space size to avoid a VM crash.
        IncreaseOutOfMemoryOvershootSize(committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity());
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}

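// Builds the collect set (CSet) for partial GC: regions with little live data are chosen
// as candidates (with a broader test in background mode), sorted so the emptiest come
// first, and the set is capped by an evacuation size budget.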
void OldSpace::SelectCSet()
{
    if (heap_->GetJSThread()->IsMarking()) {
        heap_->GetEcmaVM()->GetEcmaGCStats()->RecordStatisticBeforeGC(TriggerGCType::OLD_GC, GCReason::OTHER);
    }
    CheckRegionSize();
    // 1. Select regions whose alive object size is below the threshold.
    int64_t evacuateSizeLimit = 0;
    if (!heap_->IsInBackground()) {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_FOREGROUND;
        EnumerateRegions([this](Region *region) {
            if (!region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    } else {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_BACKGROUND;
        EnumerateRegions([this](Region *region) {
            if (region->BelowCompressThreasholdAlive() || !region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    }
    if (collectRegionSet_.size() < PARTIAL_GC_MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Select CSet failure: number is too few";
        collectRegionSet_.clear();
        return;
    }
    // 2. Sort the candidates by ascending alive object size.
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });

    // 3. Limit the CSet size by the evacuation budget.
    unsigned long selectedRegionNumber = 0;
    int64_t expectFreeSize = static_cast<int64_t>(heap_->GetCommittedSize() - heap_->GetHeapAliveSizeAfterGC());
    int64_t evacuateSize = std::min(evacuateSizeLimit, expectFreeSize);
    EnumerateCollectRegionSet([&](Region *current) {
        if (evacuateSize > 0) {
            selectedRegionNumber++;
            evacuateSize -= current->AliveObject();
        } else {
            return;
        }
    });
    OPTIONAL_LOG(heap_->GetEcmaVM(), INFO) << "Max evacuation size is 6_MB. The CSet region number: "
        << selectedRegionNumber;
    selectedRegionNumber = std::max(selectedRegionNumber, GetSelectedRegionNumber());
    if (collectRegionSet_.size() > selectedRegionNumber) {
        collectRegionSet_.resize(selectedRegionNumber);
    }

    heap_->GetEcmaVM()->GetEcmaGCStats()->SetRecordData(
        RecordData::COLLECT_REGION_SET_SIZE, collectRegionSet_.size() * Region::AVERAGE_REGION_EVACUATE_SIZE);
    EnumerateCollectRegionSet([&](Region *current) {
        RemoveRegion(current);
        DecreaseLiveObjectSize(current->AliveObject());
        allocator_->DetachFreeObjectSet(current);
        current->SetGCFlag(RegionGCFlags::IN_COLLECT_SET);
    });
    sweepState_ = SweepState::NO_SWEEP;
    OPTIONAL_LOG(heap_->GetEcmaVM(), INFO) << "Select CSet success: number is " << collectRegionSet_.size();
}

void OldSpace::CheckRegionSize()
{
#ifndef NDEBUG
    if (sweepState_ == SweepState::SWEEPING) {
        heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    }
    size_t available = allocator_->GetAvailableSize();
    size_t wasted = allocator_->GetWastedSize();
    if (GetHeapObjectSize() + wasted + available != objectSize_) {
        LOG_GC(DEBUG) << "Actual live object size:" << GetHeapObjectSize()
                      << ", free object size:" << available
                      << ", wasted size:" << wasted
                      << ", but expected total size:" << objectSize_;
    }
#endif
}

void OldSpace::RevertCSet()
{
    EnumerateCollectRegionSet([&](Region *region) {
        region->ClearGCFlag(RegionGCFlags::IN_COLLECT_SET);
        AddRegion(region);
        allocator_->CollectFreeObjectSet(region);
        IncreaseLiveObjectSize(region->AliveObject());
    });
    collectRegionSet_.clear();
}

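// Releases every region of the collect set after evacuation: all remembered sets and the
// free object sets are destroyed before the memory is returned to the region allocator.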
void OldSpace::ReclaimCSet()
{
    size_t cachedSize = heap_->GetNewSpace()->GetInitialCapacity();
    EnumerateCollectRegionSet([this, &cachedSize](Region *region) {
        region->DeleteCrossRegionRSet();
        region->DeleteOldToNewRSet();
        region->DeleteSweepingRSet();
        region->DestroyFreeObjectSets();
        heapRegionAllocator_->FreeRegion(region, cachedSize);
    });
    collectRegionSet_.clear();
}

LocalSpace::LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, LOCAL_SPACE, initialCapacity, maximumCapacity) {}

bool LocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) {
        LOG_ECMA_MEM(FATAL) << "AddRegionToList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

void LocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}

void LocalSpace::Stop()
{
    if (GetCurrentRegion() != nullptr) {
        GetCurrentRegion()->SetHighWaterMark(allocator_->GetTop());
    }
}

uintptr_t NonMovableSpace::CheckAndAllocate(size_t size)
{
    if (maximumCapacity_ == committedSize_ && GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE &&
        !heap_->GetOldGCRequested()) {
        heap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
    }
    return Allocate(size);
}

NonMovableSpace::NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::NON_MOVABLE, initialCapacity, maximumCapacity)
{
}

AppSpawnSpace::AppSpawnSpace(Heap *heap, size_t initialCapacity)
    : SparseSpace(heap, MemSpaceType::APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}

void AppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}

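// LocalSpace allocation: try the free list, expand once if allowed, and account the
// allocated bytes to the owning region's alive object size.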
uintptr_t LocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0) {
        if (isExpand && Expand()) {
            object = allocator_->Allocate(size);
        }
    }
    if (object != 0) {
        Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
    }
    return object;
}

MachineCodeSpace::MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::MACHINE_CODE_SPACE, initialCapacity, maximumCapacity)
{
}
}  // namespace panda::ecmascript