• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "ecmascript/mem/sparse_space.h"
17 
18 #include "ecmascript/js_hclass-inl.h"
19 #include "ecmascript/mem/heap-inl.h"
20 
21 namespace panda::ecmascript {
// Constructs a sparse (free-list managed) space owned by the given heap.
// Sweep state starts at NO_SWEEP; live-object accounting starts at zero.
SparseSpace::SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      localHeap_(heap),
      liveObjectSize_(0)
{
    // Allocation within this space goes through a free-list allocator over FreeObject.
    allocator_ = new FreeListAllocator<FreeObject>(heap);
}
30 
// Allocates the first region of this space and hands it to the free-list
// allocator as the initial allocation target.
void SparseSpace::Initialize()
{
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    AddRegion(region);

    allocator_->Initialize(region);
}
39 
// Drops all free-list state, reclaims regions back to the region allocator,
// and clears the live-object size counter.
void SparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}
46 
// Forwards a new bump-pointer top to the underlying allocator.
void SparseSpace::ResetTopPointer(uintptr_t top)
{
    allocator_->ResetTopPointer(top);
}
51 
// Allocates `size` bytes from this space, escalating through several fallbacks:
// free-list hit -> wait/steal from concurrent sweeping -> optional old GC ->
// expand the space -> full old GC and one non-GC retry.
// NOTE: CHECK_OBJECT_AND_INC_OBJ_SIZE appears to return from this function on a
// successful (non-zero) allocation, so each subsequent step only runs after the
// previous attempt failed — confirm against the macro definition in the header.
uintptr_t SparseSpace::Allocate(size_t size, bool allowGC)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    // Allocation must happen while the JS thread is in the running state.
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    // A concurrent sweep may still be producing free space; try to use it.
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
    if (allowGC && localHeap_->CheckAndTriggerOldGC()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    // Grow the space by one region and retry.
    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    // Last resort: full old GC, then retry once with allowGC=false to avoid recursion.
    if (allowGC) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = Allocate(size, false);
        // Size is already increment
    }
    return object;
}
86 
// Grows the space by one aligned region and adds it to the free list.
// Returns false (without allocating) when the committed-size budget is exceeded.
bool SparseSpace::Expand()
{
    if (CommittedSizeExceed()) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}
99 
// Allocation path used while a concurrent sweep is in progress: first try to
// absorb already-swept regions into the free list; if that does not yield an
// object, block until the sweeper task finishes and retry once.
uintptr_t SparseSpace::AllocateAfterSweepingCompleted(size_t size)
{
    ASSERT(sweepState_ == SweepState::SWEEPING);
    if (TryFillSweptRegion()) {
        auto object = allocator_->Allocate(size);
        if (object != 0) {
            return object;
        }
    }
    // Parallel sweep and fill
    localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    return allocator_->Allocate(size);
}
113 
// Queues every region outside the collect set for sweeping, resets per-region
// accounting, swaps remembered sets for concurrent sweeping, and rebuilds the
// free list so stale free entries cannot be allocated from during the sweep.
void SparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            // Heap-verify mode: a region must not already be marked as swept here.
            if (UNLIKELY(localHeap_->ShouldVerifyHeap() &&
                current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT))) {
                LOG_ECMA(FATAL) << "Region should not be swept before PrepareSweeping: " << current;
            }
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            // Move RSets aside so mutator updates don't race with the sweeper.
            current->SwapOldToNewRSetForCS();
            current->SwapLocalToShareRSetForCS();
            AddSweepingRegion(current);
        }
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}
134 
// Drains the sweeping queue, freeing dead ranges in each region.
// Worker threads park finished regions on the swept list (to be collected into
// the free list later); the main thread merges the RSets back immediately.
void SparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Main thread sweeping region is added;
        if (!isMain) {
            AddSweptRegionSafe(current);
        } else {
            current->MergeOldToNewRSetForCS();
            current->MergeLocalToShareRSetForCS();
        }
        current = GetSweepingRegionSafe();
    }
}
150 
// Synchronous (non-concurrent) sweep: rebuilds the free list and frees dead
// ranges of every region outside the collect set on the calling thread.
void SparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            FreeRegion(current);
        }
    });
}
163 
TryFillSweptRegion()164 bool SparseSpace::TryFillSweptRegion()
165 {
166     if (sweptList_.empty()) {
167         return false;
168     }
169     Region *region = nullptr;
170     while ((region = GetSweptRegionSafe()) != nullptr) {
171         allocator_->CollectFreeObjectSet(region);
172         region->ResetSwept();
173         region->MergeOldToNewRSetForCS();
174         region->MergeLocalToShareRSetForCS();
175     }
176     return true;
177 }
178 
FinishFillSweptRegion()179 bool SparseSpace::FinishFillSweptRegion()
180 {
181     bool ret = TryFillSweptRegion();
182     sweepState_ = SweepState::SWEPT;
183     return ret;
184 }
185 
// Appends a region to the pending-sweep queue (no locking: called before the
// concurrent sweeper starts).
void SparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}
190 
SortSweepingRegion()191 void SparseSpace::SortSweepingRegion()
192 {
193     // Sweep low alive object size at first
194     std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
195         return first->AliveObject() < second->AliveObject();
196     });
197 }
198 
GetSweepingRegionSafe()199 Region *SparseSpace::GetSweepingRegionSafe()
200 {
201     LockHolder holder(lock_);
202     Region *region = nullptr;
203     if (!sweepingList_.empty()) {
204         region = sweepingList_.back();
205         sweepingList_.pop_back();
206     }
207     return region;
208 }
209 
// Records a region as fully swept (under the space lock) and flags it so the
// mutator knows its free space can be collected into the free list.
void SparseSpace::AddSweptRegionSafe(Region *region)
{
    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
    region->SetSwept();
}
216 
GetSweptRegionSafe()217 Region *SparseSpace::GetSweptRegionSafe()
218 {
219     LockHolder holder(lock_);
220     Region *region = nullptr;
221     if (!sweptList_.empty()) {
222         region = sweptList_.back();
223         sweptList_.pop_back();
224     }
225     return region;
226 }
227 
// Detaches a swept region from this space (clears swept flag, merges RSets
// back, unlinks it, and removes its live bytes from the space accounting).
// The caller takes over ownership of the region.
void SparseSpace::FreeRegionFromSpace(Region *region)
{
    region->ResetSwept();
    region->MergeOldToNewRSetForCS();
    region->MergeLocalToShareRSetForCS();
    RemoveRegion(region);
    DecreaseLiveObjectSize(region->AliveObject());
}
236 
TryToGetSuitableSweptRegion(size_t size)237 Region *SparseSpace::TryToGetSuitableSweptRegion(size_t size)
238 {
239     if (sweepState_ != SweepState::SWEEPING) {
240         return nullptr;
241     }
242     if (sweptList_.empty()) {
243         return nullptr;
244     }
245     LockHolder holder(lock_);
246     for (auto iter = sweptList_.begin(); iter != sweptList_.end(); iter++) {
247         if (allocator_->MatchFreeObjectSet(*iter, size)) {
248             Region *region = *iter;
249             FreeRegionFromSpace(region);
250             sweptList_.erase(iter);
251             return region;
252         }
253     }
254     return nullptr;
255 }
256 
// Sweeps one region: walks the mark bits and frees every gap between
// consecutive live objects (plus the tail gap up to the region end).
void SparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &current, &freeStart, isMain](void *mem) {
        ASSERT(current->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        // Free the dead range preceding this live object, if any.
        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(current, freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    // Free the trailing dead range after the last live object.
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(current, freeStart, freeEnd, isMain);
    }
}
277 
// Frees one dead byte range [freeStart, freeEnd): clears remembered-set bits
// covering it, then returns it to the free-list allocator.
void SparseSpace::FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    localHeap_->GetSweeper()->ClearRSetInRange(current, freeStart, freeEnd);
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}
283 
// Visits every live object in the space by linearly scanning each region,
// skipping free-list fillers. Regions in the collect set are skipped.
void SparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    // Seal the current bump area so the scan sees a parsable heap.
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr is freeObject, It must to mark unpoison first.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                // Free filler: read its size under temporary ASAN unpoisoning.
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}
313 
// Invokes `visitor` for every slot recorded in the old-to-new remembered sets
// (both the live and the sweeping-side copies) of every region.
void SparseSpace::IterateOldToNewOverObjects(
    const std::function<void(TaggedObject *object, JSTaggedValue value)> &visitor) const
{
    auto cb = [visitor](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        visitor(reinterpret_cast<TaggedObject *>(mem), JSTaggedValue(slot.GetTaggedType()));
        return true;
    };
    EnumerateRegions([cb] (Region *region) {
        region->IterateAllSweepingRSetBits(cb);
        region->IterateAllOldToNewBits(cb);
    });
}
327 
// Returns the tracked total size of live objects in this space.
size_t SparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}
332 
// Adds `size` bytes to the allocator's allocated-size accounting.
void SparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}
337 
// Returns the allocator's cumulative allocated-size accounting.
size_t SparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}
342 
// Removes the region's free-object set from the allocator's free list.
void SparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}
347 
InvokeAllocationInspector(Address object,size_t size,size_t alignedSize)348 void SparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
349 {
350     ASSERT(size <= alignedSize);
351     if (LIKELY(!allocationCounter_.IsActive())) {
352         return;
353     }
354     if (alignedSize >= allocationCounter_.NextBytes()) {
355         allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
356     }
357     allocationCounter_.AdvanceAllocationInspector(alignedSize);
358 }
359 
// Old (tenured) space: a sparse space with type OLD_SPACE.
OldSpace::OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, OLD_SPACE, initialCapacity, maximumCapacity) {}
362 
Region *OldSpace::TrySweepToGetSuitableRegion(size_t size)
{
    // Try Sweeping region to get space for allocation
    // since sweepingList_ is ordered, we just need to check once
    Region *availableRegion = GetSweepingRegionSafe();
    if (availableRegion != nullptr) {
        // Sweep it synchronously on this (non-main) path.
        FreeRegion(availableRegion, false);
        // if region has free enough space for the size,
        // free the region from current space
        // and return for local space to use
        // otherwise, we add region to sweptList_.
        if (allocator_->MatchFreeObjectSet(availableRegion, size)) {
            FreeRegionFromSpace(availableRegion);
            return availableRegion;
        } else {
            AddSweptRegionSafe(availableRegion);
        }
    }
    return nullptr;
}
383 
// Tries to carve a whole region out of the old space for exclusive use (e.g.
// by a local space): first via a suitable free object already on the free
// list, then — if sweeping is in progress — via the swept/sweeping queues.
// Returns nullptr when no suitable region exists.
Region *OldSpace::TryToGetExclusiveRegion(size_t size)
{
    LockHolder lock(lock_);
    uintptr_t result = allocator_->LookupSuitableFreeObject(size);
    if (result != 0) {
        // Remove region from global old space
        Region *region = Region::ObjectAddressToRange(result);
        RemoveRegion(region);
        allocator_->DetachFreeObjectSet(region);
        DecreaseLiveObjectSize(region->AliveObject());
        return region;
    }
    if (sweepState_ == SweepState::SWEEPING) {
        Region *availableRegion = nullptr;
        availableRegion = TryToGetSuitableSweptRegion(size);
        if (availableRegion != nullptr) {
            return availableRegion;
        }
        return TrySweepToGetSuitableRegion(size);
    }
    return nullptr;
}
406 
// Moves every region of a local (evacuation) space into the old space,
// transferring live-size accounting and free-object sets. If the resulting
// committed size (plus huge-object space) exceeds the overshoot capacity,
// records the overage and may arm an OOM throw on the heap.
void OldSpace::Merge(LocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    size_t hugeSpaceCommitSize = localHeap_->GetHugeObjectSpace()->GetCommittedSize();
    if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        if (localHeap_->CanThrowOOMError()) {
            localHeap_->ShouldThrowOOMError(true);
        }
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        size_t committedOverSizeLimit = committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity();
        IncreaseCommittedOverSizeLimit(committedOverSizeLimit);
        // if throw OOM, temporarily increase space size to avoid vm crash
        IncreaseOutOfMemoryOvershootSize(committedOverSizeLimit);
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}
436 
// Selects the collect set (CSet) for partial GC: regions with little live data
// are candidates; the set is sorted by live size, capped by an evacuation
// budget (foreground vs background limits), then detached from the space.
// Skipped entirely while the app is in a sensitive state.
void OldSpace::SelectCSet()
{
    if (localHeap_->IsMarking()) {
        localHeap_->GetEcmaGCStats()->RecordStatisticBeforeGC(TriggerGCType::OLD_GC, GCReason::OTHER);
    }
    if (localHeap_->InSensitiveStatus()) {
        return;
    }
    CheckRegionSize();
    // 1、Select region which alive object larger than limit
    int64_t evacuateSizeLimit = 0;
    if (!localHeap_->IsInBackground()) {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_FOREGROUND;
        EnumerateRegions([this](Region *region) {
            if (!region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    } else {
        // Background: also take regions below the compress threshold.
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_BACKGROUND;
        EnumerateRegions([this](Region *region) {
            if (region->BelowCompressThreasholdAlive() || !region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    }
    if (collectRegionSet_.size() < PARTIAL_GC_MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Select CSet failure: number is too few";
        collectRegionSet_.clear();
        return;
    }
    // sort
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });

    // Limit cset size
    unsigned long selectedRegionNumber = 0;
    int64_t expectFreeSize =
        static_cast<int64_t>(localHeap_->GetCommittedSize() - localHeap_->GetHeapAliveSizeAfterGC());
    int64_t evacuateSize = std::min(evacuateSizeLimit, expectFreeSize);
    // Count how many of the smallest regions fit within the evacuation budget.
    EnumerateCollectRegionSet([&](Region *current) {
        if (evacuateSize > 0) {
            selectedRegionNumber++;
            evacuateSize -= current->AliveObject();
        } else {
            return;
        }
    });
    LOG_ECMA_MEM(DEBUG) << "Max evacuation size is 6_MB. The CSet region number: "
        << selectedRegionNumber;
    selectedRegionNumber = std::max(selectedRegionNumber, GetSelectedRegionNumber());
    if (collectRegionSet_.size() > selectedRegionNumber) {
        collectRegionSet_.resize(selectedRegionNumber);
    }

    localHeap_->GetEcmaGCStats()->SetRecordData(
        RecordData::COLLECT_REGION_SET_SIZE, collectRegionSet_.size() * Region::AVERAGE_REGION_EVACUATE_SIZE);
    // Detach the chosen regions from the space and flag them as collect-set.
    EnumerateCollectRegionSet([&](Region *current) {
        RemoveRegion(current);
        DecreaseLiveObjectSize(current->AliveObject());
        allocator_->DetachFreeObjectSet(current);
        current->SetGCFlag(RegionGCFlags::IN_COLLECT_SET);
    });
    sweepState_ = SweepState::NO_SWEEP;
    LOG_ECMA_MEM(DEBUG) << "Select CSet success: number is " << collectRegionSet_.size();
}
504 
// Debug-only consistency check: live + wasted + available should equal the
// space's object size; logs a diagnostic when the accounting disagrees.
void OldSpace::CheckRegionSize()
{
#ifndef NDEBUG
    if (sweepState_ == SweepState::SWEEPING) {
        // Accounting is only stable once concurrent sweeping has finished.
        localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    }
    size_t available = allocator_->GetAvailableSize();
    size_t wasted = allocator_->GetWastedSize();
    if (GetHeapObjectSize() + wasted + available != objectSize_) {
        LOG_GC(DEBUG) << "Actual live object size:" << GetHeapObjectSize()
                            << ", free object size:" << available
                            << ", wasted size:" << wasted
                            << ", but exception total size:" << objectSize_;
    }
#endif
}
521 
// Undoes SelectCSet: re-attaches every collect-set region to the space,
// restores its free-object set and live-size accounting, then clears the set.
void OldSpace::RevertCSet()
{
    EnumerateCollectRegionSet([&](Region *region) {
        region->ClearGCFlag(RegionGCFlags::IN_COLLECT_SET);
        AddRegion(region);
        allocator_->CollectFreeObjectSet(region);
        IncreaseLiveObjectSize(region->AliveObject());
    });
    collectRegionSet_.clear();
}
532 
// Destroys every collect-set region after evacuation: tears down all its
// remembered sets and free-object sets and returns its memory to the region
// allocator (subject to the heap's region cache size).
void OldSpace::ReclaimCSet()
{
    size_t cachedSize = localHeap_->GetRegionCachedSize();
    EnumerateCollectRegionSet([this, &cachedSize](Region *region) {
        region->DeleteCrossRegionRSet();
        region->DeleteOldToNewRSet();
        region->DeleteLocalToShareRSet();
        region->DeleteSweepingOldToNewRSet();
        region->DeleteSweepingLocalToShareRSet();
        region->DestroyFreeObjectSets();
        heapRegionAllocator_->FreeRegion(region, cachedSize);
    });
    collectRegionSet_.clear();
}
547 
// Local space: a per-GC evacuation target, later merged back into old space.
LocalSpace::LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, LOCAL_SPACE, initialCapacity, maximumCapacity) {}
550 
// Adopts a region (taken from old space) into this local space, attaching its
// free-object set and live-size accounting.
// Note: when committed size reaches capacity this logs FATAL, so the
// `return false` path is effectively unreachable in release behavior.
bool LocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) {
        LOG_ECMA_MEM(FATAL) << "AddRegionTotList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}
562 
// Releases the allocator's current bump-pointer area back to the free list.
void LocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}
567 
Stop()568 void LocalSpace::Stop()
569 {
570     Region *currentRegion = GetCurrentRegion();
571     if (GetCurrentRegion() != nullptr) {
572         // Do not use allocator_->GetTop(), because it may point to freeObj from other regions.
573         currentRegion->SetHighWaterMark(currentRegion->GetBegin() + currentRegion->AliveObject());
574     }
575 }
576 
// Allocation wrapper for non-movable space: when the space is fully committed
// and live data exceeds the limit, triggers an old GC first (unless one is
// already requested or collection must be suppressed), then allocates.
uintptr_t NonMovableSpace::CheckAndAllocate(size_t size)
{
    if (maximumCapacity_ == committedSize_ && GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE &&
        !localHeap_->GetOldGCRequested() && !localHeap_->NeedStopCollection()) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
    }
    return Allocate(size);
}
585 
// Non-movable space: objects here are never relocated by GC.
NonMovableSpace::NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::NON_MOVABLE, initialCapacity, maximumCapacity)
{
}
590 
// AppSpawn space: capacity is fixed (initial == maximum).
AppSpawnSpace::AppSpawnSpace(Heap *heap, size_t initialCapacity)
    : SparseSpace(heap, MemSpaceType::APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}
595 
// Visits every marked object in the space by walking each region's mark bits.
void AppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}
605 
Allocate(size_t size,bool isExpand)606 uintptr_t LocalSpace::Allocate(size_t size, bool isExpand)
607 {
608     auto object = allocator_->Allocate(size);
609     if (object == 0) {
610         if (isExpand && Expand()) {
611             object = allocator_->Allocate(size);
612         }
613     }
614     if (object != 0) {
615         Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
616     }
617     return object;
618 }
619 
// Machine-code space: holds JIT-compiled MachineCode objects.
MachineCodeSpace::MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::MACHINE_CODE_SPACE, initialCapacity, maximumCapacity)
{
}
624 
~MachineCodeSpace()625 MachineCodeSpace::~MachineCodeSpace()
626 {
627     if (localHeap_->GetEcmaVM()->GetJSOptions().GetEnableJitFort()) {
628         if (jitFort_) {
629             delete jitFort_;
630             jitFort_ = nullptr;
631         }
632     }
633 }
634 
// Records a live MachineCode object's instruction range with the JitFort so
// its fort-resident instructions are kept alive. No-op without a JitFort.
inline void MachineCodeSpace::RecordLiveJitCode(MachineCode *obj)
{
    if (jitFort_) {
        jitFort_->RecordLiveJitCode(obj->GetInstructionsAddr(), obj->GetInstructionsSize());
    }
}
641 
// Allocates instruction memory from the JitFort (created lazily on first use).
// Waits for any in-flight sweep of this space, then spins until the fort's
// machine-code GC flag clears before allocating.
// NOTE(review): the busy-wait loop assumes IsMachineCodeGC() clears promptly
// on another thread — confirm against JitFort's synchronization.
uintptr_t MachineCodeSpace::JitFortAllocate(MachineCodeDesc *desc)
{
    if (!jitFort_) {
        jitFort_ = new JitFort();
    }
    localHeap_->GetSweeper()->EnsureTaskFinishedNoCheck(spaceType_);
    while (jitFort_->IsMachineCodeGC()) {};
    return jitFort_->Allocate(desc);
}
651 
// Record info on JitFort mem allocated to live MachineCode objects
// Sweeps one machine-code region like SparseSpace::FreeRegion, additionally
// recording each surviving MachineCode object's JitFort instruction range,
// then flags the fort so it can collect unreferenced instruction memory.
void MachineCodeSpace::FreeRegion(Region *current, bool isMain)
{
    LOG_JIT(DEBUG) << "MachineCodeSpace FreeRegion: " << current << " isMain " << isMain;
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &current, &freeStart, isMain](void *mem) {
        ASSERT(current->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        // Free the dead range preceding this live object, if any.
        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(current, freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
        // Keep the live object's fort-resident instructions alive.
        RecordLiveJitCode(reinterpret_cast<MachineCode *>(mem));
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(current, freeStart, freeEnd, isMain);
    }
    if (jitFort_) {
        jitFort_->SetMachineCodeGC(true);
    }
}
678 
// Same draining loop as SparseSpace::AsyncSweep, but serialized with
// GetMachineCodeObject() via asyncSweepMutex_ so PC lookups never observe a
// half-swept region.
void MachineCodeSpace::AsyncSweep(bool isMain)
{
    LockHolder holder(asyncSweepMutex_);
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Main thread sweeping region is added;
        if (!isMain) {
            AddSweptRegionSafe(current);
        } else {
            current->MergeOldToNewRSetForCS();
            current->MergeLocalToShareRSetForCS();
        }
        current = GetSweepingRegionSafe();
    }
}
695 
// Plain (non-JitFort) allocation: delegates to the generic sparse-space path.
uintptr_t MachineCodeSpace::Allocate(size_t size, bool allowGC)
{
    return SparseSpace::Allocate(size, allowGC);
}
700 
// Allocation variant that also accounts the JitFort-resident instruction size
// (desc->instructionsSize) alongside the object itself. Mirrors the fallback
// ladder of SparseSpace::Allocate; CHECK_OBJECT_AND_INC_OBJ_SIZE appears to
// return on success, so each step runs only after the previous attempt failed.
uintptr_t MachineCodeSpace::Allocate(size_t size, MachineCodeDesc *desc, bool allowGC)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Include JitFort allocation size in Space LiveObjectSize and Region AliveObject size
    // in CHECK_AND_INC_OBJ_SIZE. Could be a problem with InvokeAllocationInspectr with
    // instruction separated from Machine Code object into Jit FortSpace.

    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
    if (allowGC && localHeap_->CheckAndTriggerOldGC()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    // Last resort: full old GC, then a single non-GC retry.
    if (allowGC) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = Allocate(size, desc, false);
        // Size is already increment
    }
    return object;
}
739 
CheckMachineCodeObject(uintptr_t curPtr,uintptr_t & machineCode,uintptr_t pc)740 size_t MachineCodeSpace::CheckMachineCodeObject(uintptr_t curPtr, uintptr_t &machineCode, uintptr_t pc)
741 {
742     auto freeObject = FreeObject::Cast(curPtr);
743     size_t objSize = 0;
744     if (!freeObject->IsFreeObject()) {
745         auto obj = MachineCode::Cast(reinterpret_cast<TaggedObject*>(curPtr));
746         if (obj->IsInText(pc)) {
747             machineCode = curPtr;
748         }
749         objSize = obj->GetClass()->SizeFromJSHClass(obj);
750     } else {
751         objSize = freeObject->Available();
752     }
753     return objSize;
754 }
755 
// Finds the MachineCode object whose code contains `pc` by linearly scanning
// candidate regions; returns 0 when not found. Holds asyncSweepMutex_ so the
// scan cannot race with a concurrent sweep of this space.
uintptr_t MachineCodeSpace::GetMachineCodeObject(uintptr_t pc)
{
    uintptr_t machineCode = 0;
    LockHolder holder(asyncSweepMutex_);
    // Seal the bump area so regions are linearly parsable.
    allocator_->FillBumpPointer();

    EnumerateRegions([&](Region *region) {
        if (machineCode != 0) {
            return;
        }
        // Only scan regions that could contain pc (or any region when pc may
        // live in the JitFort, outside region bounds).
        if (region->InCollectSet() || (!region->InRange(pc) && !InJitFortRange(pc))) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            size_t objSize = CheckMachineCodeObject(curPtr, machineCode, pc);
            if (machineCode != 0) {
                return;
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
    return machineCode;
}
783 }  // namespace panda::ecmascript
784