• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "ecmascript/mem/sparse_space.h"
17 
18 #include "ecmascript/js_hclass-inl.h"
19 #include "ecmascript/mem/concurrent_sweeper.h"
20 #include "ecmascript/mem/free_object_set.h"
21 #include "ecmascript/mem/heap.h"
22 #include "ecmascript/mem/mem_controller.h"
23 #include "ecmascript/runtime_call_id.h"
24 
25 namespace panda::ecmascript {
// Constructs a sparse (free-list managed) space bound to |heap|.
// Starts with no sweep in progress and zero live bytes; the backing
// free-list allocator is created here and initialized later via Initialize().
SparseSpace::SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      heap_(heap),
      liveObjectSize_(0)
{
    // NOTE(review): raw new — ownership/release of allocator_ is presumably
    // handled by the space's destructor elsewhere; confirm in the header.
    allocator_ = new FreeListAllocator(heap);
}
34 
// Allocates the space's first region, prepares its free-object sets,
// registers it with the space, and points the allocator at it.
void SparseSpace::Initialize()
{
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE,
                                                                 heap_->GetJSThread());
    // Free-object sets must exist before the allocator can carve free ranges
    // out of the region.
    region->InitializeFreeObjectSets();
    AddRegion(region);

    allocator_->Initialize(region);
}
44 
// Drops all allocation state: rebuilds an empty free list, returns the
// space's regions to the region allocator, and zeroes the live-object count.
void SparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}
51 
// Allocates |size| bytes from this space. Fallback chain: free list ->
// completing/consuming the concurrent sweep -> expanding the space ->
// (if allowGC) collecting garbage and retrying once without GC.
// NOTE(review): CHECK_OBJECT_AND_INC_OBJ_SIZE appears to return |object|
// from this function when the allocation succeeded, which is why each
// fallback below is only reached on failure — confirm against the macro
// definition in the header.
uintptr_t SparseSpace::Allocate(size_t size, bool allowGC)
{
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (allowGC) {
        // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
        heap_->CheckAndTriggerOldGC();
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
        return object;
    }

    if (allowGC) {
        heap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
        // Retry with allowGC=false to avoid recursive GC; the recursive call
        // performs its own live-size accounting on success.
        object = Allocate(size, false);
        // Size has already been incremented by the recursive call.
    }
    return object;
}
80 
// Grows the space by one region and hands its free range to the allocator.
// Returns false when the committed size already reaches the maximum capacity
// plus the temporary out-of-memory overshoot allowance.
bool SparseSpace::Expand()
{
    if (committedSize_ >= maximumCapacity_ + outOfMemoryOvershootSize_) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }

    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, heap_->GetJSThread());
    region->InitializeFreeObjectSets();
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}
94 
// Allocation slow path used while a concurrent sweep is in flight: first try
// to satisfy the request from regions the sweeper has already finished; if
// that fails, block until the sweeper task completes and try once more.
uintptr_t SparseSpace::AllocateAfterSweepingCompleted(size_t size)
{
    ASSERT(sweepState_ == SweepState::SWEEPING);
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ConcurrentSweepingWait);
    if (TryFillSweptRegion()) {
        auto object = allocator_->Allocate(size);
        if (object != 0) {
            return object;
        }
    }
    // Parallel sweep and fill
    heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    return allocator_->Allocate(size);
}
109 
// Transitions the space into the SWEEPING state: every region not selected
// for evacuation is queued for the concurrent sweeper, the live-object size
// is recomputed from region liveness, and the free list is reset so it can
// be repopulated as regions are swept.
void SparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            // Swap the remembered set so mutator updates during the sweep go
            // to a separate set that is merged back afterwards.
            current->SwapRSetForConcurrentSweeping();
            AddSweepingRegion(current);
        }
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}
125 
AsyncSweep(bool isMain)126 void SparseSpace::AsyncSweep(bool isMain)
127 {
128     Region *current = GetSweepingRegionSafe();
129     while (current != nullptr) {
130         FreeRegion(current, isMain);
131         // Main thread sweeping region is added;
132         if (!isMain) {
133             AddSweptRegionSafe(current);
134             current->SetSwept();
135         } else {
136             current->MergeRSetForConcurrentSweeping();
137         }
138         current = GetSweepingRegionSafe();
139     }
140 }
141 
// Synchronous (non-concurrent) sweep: rebuilds the free list and immediately
// frees dead ranges in every region outside the collect set, recomputing the
// live-object size as it goes.
void SparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            FreeRegion(current);
        }
    });
}
154 
// Folds all regions the concurrent sweeper has finished back into the
// allocator's free list. Returns true if at least a probe found work to do.
// NOTE(review): the unlocked sweptList_.empty() probe races with sweeper
// threads that append under lock_ in AddSweptRegionSafe; it looks like a
// deliberate fast-path (GetSweptRegionSafe re-checks under the lock), but
// confirm this is benign for the project's memory model.
bool SparseSpace::TryFillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
        region->ResetSwept();
        region->MergeRSetForConcurrentSweeping();
    }
    return true;
}
168 
FinishFillSweptRegion()169 bool SparseSpace::FinishFillSweptRegion()
170 {
171     bool ret = TryFillSweptRegion();
172     sweepState_ = SweepState::SWEPT;
173     return ret;
174 }
175 
AddSweepingRegion(Region * region)176 void SparseSpace::AddSweepingRegion(Region *region)
177 {
178     sweepingList_.emplace_back(region);
179 }
180 
SortSweepingRegion()181 void SparseSpace::SortSweepingRegion()
182 {
183     // Sweep low alive object size at first
184     std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
185         return first->AliveObject() < second->AliveObject();
186     });
187 }
188 
GetSweepingRegionSafe()189 Region *SparseSpace::GetSweepingRegionSafe()
190 {
191     os::memory::LockHolder holder(lock_);
192     Region *region = nullptr;
193     if (!sweepingList_.empty()) {
194         region = sweepingList_.back();
195         sweepingList_.pop_back();
196     }
197     return region;
198 }
199 
AddSweptRegionSafe(Region * region)200 void SparseSpace::AddSweptRegionSafe(Region *region)
201 {
202     os::memory::LockHolder holder(lock_);
203     sweptList_.emplace_back(region);
204 }
205 
GetSweptRegionSafe()206 Region *SparseSpace::GetSweptRegionSafe()
207 {
208     os::memory::LockHolder holder(lock_);
209     Region *region = nullptr;
210     if (!sweptList_.empty()) {
211         region = sweptList_.back();
212         sweptList_.pop_back();
213     }
214     return region;
215 }
216 
// Detaches |region| from this space so another space can take it over:
// clears its swept flag, merges the concurrent-sweep remembered set back,
// unlinks it from the region list, and removes its bytes from the live count.
void SparseSpace::FreeRegionFromSpace(Region *region)
{
    region->ResetSwept();
    region->MergeRSetForConcurrentSweeping();
    RemoveRegion(region);
    DecreaseLiveObjectSize(region->AliveObject());
}
224 
TryToGetSuitableSweptRegion(size_t size)225 Region *SparseSpace::TryToGetSuitableSweptRegion(size_t size)
226 {
227     if (sweepState_ != SweepState::SWEEPING) {
228         return nullptr;
229     }
230     if (sweptList_.empty()) {
231         return nullptr;
232     }
233     os::memory::LockHolder holder(lock_);
234     for (auto iter = sweptList_.begin(); iter != sweptList_.end(); iter++) {
235         if (allocator_->MatchFreeObjectSet(*iter, size)) {
236             Region *region = *iter;
237             FreeRegionFromSpace(region);
238             sweptList_.erase(iter);
239             return region;
240         }
241     }
242     return nullptr;
243 }
244 
// Sweeps one region: walks the mark bitmap and frees every gap between
// consecutive live objects (and the tail after the last live object) back
// to the free list. |isMain| is forwarded to the allocator's Free call.
void SparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &current, &freeStart, isMain](void *mem) {
        ASSERT(current->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        // The range [freeStart, this object) holds only dead data.
        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(current, freeStart, freeEnd, isMain);
        }
        // Next potential gap begins right after this live object.
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(current, freeStart, freeEnd, isMain);
    }
}
265 
// Releases one dead range [freeStart, freeEnd): clears any remembered-set
// bits covering it, then hands the bytes back to the free-list allocator.
void SparseSpace::FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    heap_->GetSweeper()->ClearRSetInRange(current, freeStart, freeEnd);
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}
271 
// Linearly walks every region (skipping the collect set) and invokes
// |visitor| on each live object, stepping over free-list filler objects.
// The bump-pointer tail is materialized as a free object first so the
// whole region parses as a contiguous sequence of objects.
void SparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points at a free object, it must be unpoisoned before
            // it can be inspected under ASAN.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                // Temporarily unpoison to read the filler's size, then
                // re-poison so ASAN keeps guarding the free range.
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}
301 
// Visits every old-to-new remembered-set slot in every region, including the
// shadow set used during concurrent sweeping. For each recorded slot the
// visitor receives the slot address (as a TaggedObject*) and the tagged
// value currently stored there.
void SparseSpace::IterateOldToNewOverObjects(
    const std::function<void(TaggedObject *object, JSTaggedValue value)> &visitor) const
{
    auto cb = [visitor](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        visitor(reinterpret_cast<TaggedObject *>(mem), JSTaggedValue(slot.GetTaggedType()));
        return true;
    };
    EnumerateRegions([cb] (Region *region) {
        region->IterateAllSweepingRSetBits(cb);
        region->IterateAllOldToNewBits(cb);
    });
}
315 
// Returns the tracked number of live object bytes in this space.
size_t SparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}
320 
// Forwards allocated-byte accounting to the free-list allocator.
void SparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}
325 
// Returns the allocator's cumulative allocated-byte count for this space.
size_t SparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}
330 
// Removes |region|'s free-object set from the allocator's free list so its
// free ranges can no longer be handed out by this space.
void SparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}
335 
// Allocation-sampling hook: when the allocation counter is active, fires the
// inspector once the sampled byte budget is crossed and always advances the
// counter by the aligned allocation size.
void SparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
    ASSERT(size <= alignedSize);
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (alignedSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
    }
    allocationCounter_.AdvanceAllocationInspector(alignedSize);
}
347 
// Old generation: a sparse space typed OLD_SPACE with no extra state.
OldSpace::OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, OLD_SPACE, initialCapacity, maximumCapacity) {}
350 
// Sweeps (at most) one pending region on the caller's thread in the hope of
// freeing enough space for an allocation of |size|. Because the sweeping
// list is sorted, one attempt is representative of the best candidate.
// Returns the region detached for exclusive use, or nullptr.
Region *OldSpace::TrySweepToGetSuitableRegion(size_t size)
{
    // Try Sweeping region to get space for allocation
    // since sweepingList_ is ordered, we just need to check once
    Region *availableRegion = GetSweepingRegionSafe();
    if (availableRegion != nullptr) {
        FreeRegion(availableRegion, false);
        // if region has free enough space for the size,
        // free the region from current space
        // and return for local space to use
        // otherwise, we add region to sweptList_.
        if (allocator_->MatchFreeObjectSet(availableRegion, size)) {
            FreeRegionFromSpace(availableRegion);
            return availableRegion;
        } else {
            AddSweptRegionSafe(availableRegion);
            availableRegion->SetSwept();
        }
    }
    return nullptr;
}
372 
// Detaches a whole region from the old space for exclusive use (e.g. by a
// local space). First looks for an existing free range of |size|; if the
// space is mid-sweep, falls back to already-swept regions and then to
// sweeping one region inline.
// NOTE(review): lock_ is held for the free-list lookup; the SWEEPING
// fallbacks re-lock internally — verify lock_ is re-entrant-safe here
// (TryToGetSuitableSweptRegion also acquires lock_).
Region *OldSpace::TryToGetExclusiveRegion(size_t size)
{
    os::memory::LockHolder lock(lock_);
    uintptr_t result = allocator_->LookupSuitableFreeObject(size);
    if (result != 0) {
        // Remove region from global old space
        Region *region = Region::ObjectAddressToRange(result);
        RemoveRegion(region);
        allocator_->DetachFreeObjectSet(region);
        DecreaseLiveObjectSize(region->AliveObject());
        return region;
    }
    if (sweepState_ == SweepState::SWEEPING) {
        Region *availableRegion = nullptr;
        availableRegion = TryToGetSuitableSweptRegion(size);
        if (availableRegion != nullptr) {
            return availableRegion;
        }
        return TrySweepToGetSuitableRegion(size);
    }
    return nullptr;
}
395 
// Absorbs every region of |localSpace| into the old space: regions are moved
// one by one with their free sets and live-size accounting. If the merge
// pushes committed size (plus huge-object space) past the overshoot maximum,
// an OOM is flagged and the overshoot allowance is temporarily raised so the
// VM does not crash before the error can be thrown.
void OldSpace::Merge(LocalSpace *localSpace)
{
    // Turn the local space's bump-pointer tail into a free object so its
    // regions are fully parseable before transfer.
    localSpace->FreeBumpPoint();
    os::memory::LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    size_t hugeSpaceCommitSize = heap_->GetHugeObjectSpace()->GetCommittedSize();
    if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        heap_->ShouldThrowOOMError(true);
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        // if throw OOM, temporarily increase space size to avoid vm crash
        IncreaseOutOfMemoryOvershootSize(committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity());
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}
421 
// Chooses the collect set (CSet) for partial GC: picks candidate regions by
// liveness, sorts them so the emptiest are evacuated first, caps the set by
// an evacuation byte budget (foreground vs background), then detaches the
// chosen regions from the space and flags them IN_COLLECT_SET.
void OldSpace::SelectCSet()
{
    if (heap_->GetJSThread()->IsMarking()) {
        heap_->GetEcmaVM()->GetEcmaGCStats()->RecordStatisticBeforeGC(TriggerGCType::OLD_GC, GCReason::OTHER);
    }
    CheckRegionSize();
    // 1. Select regions whose alive object size is below the threshold.
    int64_t evacuateSizeLimit = 0;
    if (!heap_->IsInBackground()) {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_FOREGROUND;
        EnumerateRegions([this](Region *region) {
            if (!region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    } else {
        // In background, additionally take regions below the compress
        // threshold to reclaim more memory.
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_BACKGROUND;
        EnumerateRegions([this](Region *region) {
            if (region->BelowCompressThreasholdAlive() || !region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    }
    if (collectRegionSet_.size() < PARTIAL_GC_MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Select CSet failure: number is too few";
        collectRegionSet_.clear();
        return;
    }
    // Sort ascending by live bytes: evacuating emptier regions is cheaper.
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });

    // Limit cset size by the evacuation budget (capped by the free space the
    // heap is expected to have after GC).
    unsigned long selectedRegionNumber = 0;
    int64_t expectFreeSize = static_cast<int64_t>(heap_->GetCommittedSize() - heap_->GetHeapAliveSizeAfterGC());
    int64_t evacuateSize = std::min(evacuateSizeLimit, expectFreeSize);
    EnumerateCollectRegionSet([&](Region *current) {
        if (evacuateSize > 0) {
            selectedRegionNumber++;
            evacuateSize -= current->AliveObject();
        } else {
            return;
        }
    });
    OPTIONAL_LOG(heap_->GetEcmaVM(), INFO) << "Max evacuation size is 6_MB. The CSet region number: "
        << selectedRegionNumber;
    selectedRegionNumber = std::max(selectedRegionNumber, GetSelectedRegionNumber());
    if (collectRegionSet_.size() > selectedRegionNumber) {
        collectRegionSet_.resize(selectedRegionNumber);
    }

    heap_->GetEcmaVM()->GetEcmaGCStats()->SetRecordData(
        RecordData::COLLECT_REGION_SET_SIZE, collectRegionSet_.size() * Region::AVERAGE_REGION_EVACUATE_SIZE);
    // Detach every selected region from the space; they will be evacuated.
    EnumerateCollectRegionSet([&](Region *current) {
        RemoveRegion(current);
        DecreaseLiveObjectSize(current->AliveObject());
        allocator_->DetachFreeObjectSet(current);
        current->SetGCFlag(RegionGCFlags::IN_COLLECT_SET);
    });
    sweepState_ = SweepState::NO_SWEEP;
    OPTIONAL_LOG(heap_->GetEcmaVM(), INFO) << "Select CSet success: number is " << collectRegionSet_.size();
}
485 
// Debug-only consistency check: after ensuring any in-flight sweep has
// finished, verifies that live + free + wasted bytes add up to the space's
// object size, logging the discrepancy otherwise. No-op in release builds.
void OldSpace::CheckRegionSize()
{
#ifndef NDEBUG
    if (sweepState_ == SweepState::SWEEPING) {
        heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    }
    size_t available = allocator_->GetAvailableSize();
    size_t wasted = allocator_->GetWastedSize();
    if (GetHeapObjectSize() + wasted + available != objectSize_) {
        LOG_GC(DEBUG) << "Actual live object size:" << GetHeapObjectSize()
                            << ", free object size:" << available
                            << ", wasted size:" << wasted
                            << ", but exception total size:" << objectSize_;
    }
#endif
}
502 
// Undoes SelectCSet: re-attaches every collect-set region to the space
// (flag cleared, free sets re-collected, live size restored) and empties
// the collect set. Used when evacuation is abandoned.
void OldSpace::RevertCSet()
{
    EnumerateCollectRegionSet([&](Region *region) {
        region->ClearGCFlag(RegionGCFlags::IN_COLLECT_SET);
        AddRegion(region);
        allocator_->CollectFreeObjectSet(region);
        IncreaseLiveObjectSize(region->AliveObject());
    });
    collectRegionSet_.clear();
}
513 
// Destroys every evacuated collect-set region: tears down its remembered
// sets and free-object sets, returns the memory to the region allocator,
// then clears the collect set.
void OldSpace::ReclaimCSet()
{
    EnumerateCollectRegionSet([this](Region *region) {
        region->DeleteCrossRegionRSet();
        region->DeleteOldToNewRSet();
        region->DeleteSweepingRSet();
        region->DestroyFreeObjectSets();
        heapRegionAllocator_->FreeRegion(region);
    });
    collectRegionSet_.clear();
}
525 
// Thread-local evacuation buffer space, typed LOCAL_SPACE.
LocalSpace::LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, LOCAL_SPACE, initialCapacity, maximumCapacity) {}
528 
AddRegionToList(Region * region)529 bool LocalSpace::AddRegionToList(Region *region)
530 {
531     if (committedSize_ >= maximumCapacity_) {
532         LOG_ECMA_MEM(FATAL) << "AddRegionTotList::Committed size " << committedSize_ << " of local space is too big.";
533         return false;
534     }
535     AddRegion(region);
536     allocator_->CollectFreeObjectSet(region);
537     IncreaseLiveObjectSize(region->AliveObject());
538     return true;
539 }
540 
// Converts the allocator's remaining bump-pointer range into a free object
// so the space's regions are fully parseable (e.g. before merging).
void LocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}
545 
Stop()546 void LocalSpace::Stop()
547 {
548     if (GetCurrentRegion() != nullptr) {
549         GetCurrentRegion()->SetHighWaterMark(allocator_->GetTop());
550     }
551 }
552 
// Space for objects that must never move (typed NON_MOVABLE).
NonMovableSpace::NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::NON_MOVABLE, initialCapacity, maximumCapacity)
{
}
557 
// Read-mostly space shared across app-spawned processes; its capacity is
// fixed (initial == maximum).
AppSpawnSpace::AppSpawnSpace(Heap *heap, size_t initialCapacity)
    : SparseSpace(heap, MemSpaceType::APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}
562 
// Invokes |visitor| on every object whose mark bit is set, across all
// regions of the appspawn space.
void AppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}
572 
// Allocates |size| bytes in the local space, expanding by one region when
// permitted (|isExpand|) and the free list is exhausted. On success the
// owning region's alive-object counter is credited with |size|.
// Returns 0 on failure.
uintptr_t LocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0) {
        if (isExpand && Expand()) {
            object = allocator_->Allocate(size);
        }
    }
    if (object != 0) {
        Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
    }
    return object;
}
586 
// Space holding generated machine code (typed MACHINE_CODE_SPACE).
MachineCodeSpace::MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::MACHINE_CODE_SPACE, initialCapacity, maximumCapacity)
{
}
591 }  // namespace panda::ecmascript
592