/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/sparse_space.h"

#include "ecmascript/base/config.h"
#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/heap-inl.h"

namespace panda::ecmascript {
SparseSpace::SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      localHeap_(heap),
      liveObjectSize_(0)
{
    allocator_ = new FreeListAllocator<FreeObject>(heap);
}

void SparseSpace::Initialize()
{
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    region->SetLocalHeap(reinterpret_cast<uintptr_t>(localHeap_));
    AddRegion(region);

    allocator_->Initialize(region);
}

void SparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}

void SparseSpace::ResetTopPointer(uintptr_t top)
{
    allocator_->ResetTopPointer(top);
}

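// Allocation falls back through several stages: the free list, regions freed by a
// concurrent sweep, an optional pre-expansion old GC, region expansion, and finally
// a synchronous OLD_GC followed by one non-GC retry.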
uintptr_t SparseSpace::Allocate(size_t size, bool allowGC)
{
    ASSERT(spaceType_ != MemSpaceType::OLD_SPACE);
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
    if (allowGC && localHeap_->CheckAndTriggerOldGC()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (allowGC) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = Allocate(size, false);
        // Size has already been incremented by the recursive call.
    }
    return object;
}

bool SparseSpace::Expand()
{
    if (CommittedSizeExceed()) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    region->SetLocalHeap(reinterpret_cast<uintptr_t>(localHeap_));
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}

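// If regions have already been swept concurrently, reuse their free sets first;
// otherwise block until the sweeper finishes this space and retry the allocation.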
uintptr_t SparseSpace::AllocateAfterSweepingCompleted(size_t size)
{
    ASSERT(sweepState_ == SweepState::SWEEPING);
    if (TryFillSweptRegion()) {
        auto object = allocator_->Allocate(size);
        if (object != 0) {
            return object;
        }
    }
    // Parallel sweep and fill
    localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    return allocator_->Allocate(size);
}

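// Snapshots each surviving region's live size, swaps its remembered sets for
// concurrent sweeping, and queues it for the sweeper before rebuilding the free list.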
void SparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    ASSERT(GetSweepingRegionSafe() == nullptr);
    ASSERT(GetSweptRegionSafe() == nullptr);
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            ASSERT(!current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT));
            if (UNLIKELY(localHeap_->ShouldVerifyHeap() &&
                current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT))) { // LOCV_EXCL_BR_LINE
                LOG_ECMA(FATAL) << "Region should not be swept before PrepareSweeping: " << current;
            }
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            current->SwapOldToNewRSetForCS();
            current->SwapLocalToShareRSetForCS();
            AddSweepingRegion(current);
        }
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}

void SparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Regions swept off the main thread are queued so their remembered sets
        // can be merged later; the main thread merges them immediately.
        if (!isMain) {
            AddSweptRegionSafe(current);
        } else {
            current->MergeOldToNewRSetForCS();
            current->MergeLocalToShareRSetForCS();
        }
        current = GetSweepingRegionSafe();
    }
}

void SparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            FreeRegion(current);
        }
    });
}

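// Drains the swept-region list, handing each region's free sets back to the
// allocator and merging the remembered sets swapped out in PrepareSweeping.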
bool SparseSpace::TryFillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
        region->ResetSwept();
        region->MergeOldToNewRSetForCS();
        region->MergeLocalToShareRSetForCS();
    }
    return true;
}

bool SparseSpace::FinishFillSweptRegion()
{
    bool ret = TryFillSweptRegion();
    sweepState_ = SweepState::SWEPT;
    return ret;
}

void SparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}

void SparseSpace::SortSweepingRegion()
{
    // Sort in descending order of live object size: regions are popped from the
    // back of the list, so the regions with the fewest live objects are swept first.
    std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
        return first->AliveObject() > second->AliveObject();
    });
}

Region *SparseSpace::GetSweepingRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();
        sweepingList_.pop_back();
    }
    return region;
}

void SparseSpace::AddSweptRegionSafe(Region *region)
{
    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
    region->SetSwept();
}

Region *SparseSpace::GetSweptRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();
        sweptList_.pop_back();
    }
    return region;
}

Region *SparseSpace::TryToGetSuitableSweptRegion(size_t size)
{
    if (sweepState_ != SweepState::SWEEPING) {
        return nullptr;
    }
    if (sweptList_.empty()) {
        return nullptr;
    }
    LockHolder holder(lock_);
    for (auto iter = sweptList_.begin(); iter != sweptList_.end(); iter++) {
        if (allocator_->MatchFreeObjectSet(*iter, size)) {
            Region *region = *iter;
            sweptList_.erase(iter);
            return region;
        }
    }
    return nullptr;
}

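// Walks the region's mark bitmap; every gap between two consecutive marked
// objects (and the tail after the last one) is returned to the free list.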
void SparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &current, &freeStart, isMain](void *mem) {
        ASSERT(current->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto size = header->GetSize();

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(current, freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(current, freeStart, freeEnd, isMain);
    }
}

void SparseSpace::FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    localHeap_->GetSweeper()->ClearRSetInRange(current, freeStart, freeEnd);
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}

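// Linearly walks every non-collect-set region, visiting live objects and
// stepping over free-list fillers by their recorded size.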
void SparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned first.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetSize();
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

void SparseSpace::IterateOldToNewOverObjects(
    const std::function<void(TaggedObject *object, JSTaggedValue value)> &visitor) const
{
    auto cb = [visitor](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        visitor(reinterpret_cast<TaggedObject *>(mem), JSTaggedValue(slot.GetTaggedType()));
        return true;
    };
    EnumerateRegions([cb] (Region *region) {
        region->IterateAllSweepingRSetBits(cb);
        region->IterateAllOldToNewBits(cb);
    });
}

size_t SparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}

void SparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}

size_t SparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}

void SparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}

void SparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
    ASSERT(size <= alignedSize);
    if (LIKELY(!allocationCounter_.IsActive())) { // LCOV_EXCL_BR_LINE
        return;
    }
    if (alignedSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
    }
    allocationCounter_.AdvanceAllocationInspector(alignedSize);
}

OldSpace::OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, OLD_SPACE, initialCapacity, maximumCapacity) {}

Region *OldSpace::TrySweepToGetSuitableRegion(size_t size)
{
    // Try sweeping a region to get space for the allocation.
    // Since sweepingList_ is ordered, we only need to check once.
    Region *availableRegion = GetSweepingRegionSafe();
    if (availableRegion != nullptr) {
        FreeRegion(availableRegion, false);
        // If the region has enough free space for the requested size, detach it
        // from the current space and return it for the local space to use;
        // otherwise, add it to sweptList_.
        if (allocator_->MatchFreeObjectSet(availableRegion, size)) {
            return availableRegion;
        } else {
            AddSweptRegionSafe(availableRegion);
        }
    }
    return nullptr;
}

Region *OldSpace::TryToGetExclusiveRegion(size_t size)
{
    if (sweepState_ == SweepState::SWEEPING) {
        Region *availableRegion = nullptr;
        availableRegion = TryToGetSuitableSweptRegion(size);
        if (availableRegion == nullptr) {
            availableRegion = TrySweepToGetSuitableRegion(size);
        }
        if (availableRegion) {
            FreeRegionFromSpace(availableRegion);
        }
        return availableRegion;
    } else {
        LockHolder lock(lock_);
        uintptr_t result = allocator_->LookupSuitableFreeObject(size);
        if (result != 0) {
            // Remove region from global old space
            Region *region = Region::ObjectAddressToRange(result);
            RemoveRegion(region);
            allocator_->DetachFreeObjectSet(region);
            DecreaseLiveObjectSize(region->AliveObject());
            return region;
        }
    }
    return nullptr;
}

void OldSpace::FreeRegionFromSpace(Region *region)
{
    region->ResetSwept();
    region->MergeOldToNewRSetForCS();
    region->MergeLocalToShareRSetForCS();
    LockHolder holder(lock_);
    RemoveRegion(region);
    DecreaseLiveObjectSize(region->AliveObject());
}

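// AllocateFast returns the object via CHECK_OBJECT_AND_INC_OBJ_SIZE on success;
// falling through to return 0 means both allocation attempts failed.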
uintptr_t OldSpace::AllocateFast(size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }
    return 0;
}

uintptr_t OldSpace::AllocateSlow(size_t size, bool tryFast)
{
    if (tryFast) {
        uintptr_t object = AllocateFast(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }
    if (Expand()) {
        uintptr_t object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }
    return 0;
}

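// Merges a GC-local allocation space back into the shared old space: every
// region is re-parented under the lock and its free sets handed to this
// space's allocator, then the committed-size budget is re-checked.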
void OldSpace::Merge(LocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    size_t hugeSpaceCommitSize = localHeap_->GetHugeObjectSpace()->GetCommittedSize();
    if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        if (localHeap_->CanThrowOOMError()) {
            localHeap_->ShouldThrowOOMError(true);
        }
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        size_t committedOverSizeLimit = committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity();
        IncreaseCommittedOverSizeLimit(committedOverSizeLimit);
        // If an OOM error is to be thrown, temporarily increase the space size to avoid a VM crash.
        IncreaseOutOfMemoryOvershootSize(committedOverSizeLimit);
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}

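// Selects the collect set (CSet) for partial GC: regions that are mostly
// garbage are queued for evacuation, sorted by live size, and capped by the
// foreground/background evacuation budget before being detached from the space.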
void OldSpace::SelectCSet()
{
    if (localHeap_->IsMarking()) {
        localHeap_->GetEcmaGCStats()->RecordStatisticBeforeGC(TriggerGCType::OLD_GC, GCReason::OTHER);
    }
    if (localHeap_->InSensitiveStatus()) {
        return;
    }
    CheckRegionSize();
    // 1. Select regions whose live object size is below the threshold.
    int64_t evacuateSizeLimit = 0;
    if (!localHeap_->IsInBackground()) {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_FOREGROUND;
        EnumerateRegions([this](Region *region) {
            if (!region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    } else {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_BACKGROUND;
        EnumerateRegions([this](Region *region) {
            if (region->BelowCompressThreasholdAlive() || !region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    }
    if (collectRegionSet_.size() < PARTIAL_GC_MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Select CSet failure: number is too few";
        collectRegionSet_.clear();
        return;
    }
    // 2. Sort in ascending order of live object size so the emptiest regions are evacuated first.
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });

    // 3. Limit the CSet size by the evacuation budget.
    unsigned long selectedRegionNumber = 0;
    int64_t expectFreeSize =
        static_cast<int64_t>(localHeap_->GetCommittedSize() - localHeap_->GetHeapAliveSizeAfterGC());
    int64_t evacuateSize = std::min(evacuateSizeLimit, expectFreeSize);
    EnumerateCollectRegionSet([&](Region *current) {
        if (evacuateSize > 0) {
            selectedRegionNumber++;
            evacuateSize -= current->AliveObject();
        } else {
            return;
        }
    });
    LOG_ECMA_MEM(DEBUG) << "Max evacuation size is 6_MB. The CSet region number: "
        << selectedRegionNumber;
    selectedRegionNumber = std::max(selectedRegionNumber, GetSelectedRegionNumber());
    if (collectRegionSet_.size() > selectedRegionNumber) {
        collectRegionSet_.resize(selectedRegionNumber);
    }

    localHeap_->GetEcmaGCStats()->SetRecordData(
        RecordData::COLLECT_REGION_SET_SIZE, collectRegionSet_.size() * Region::AVERAGE_REGION_EVACUATE_SIZE);
    EnumerateCollectRegionSet([&](Region *current) {
        RemoveRegion(current);
        DecreaseLiveObjectSize(current->AliveObject());
        allocator_->DetachFreeObjectSet(current);
        current->SetGCFlag(RegionGCFlags::IN_COLLECT_SET);
    });
    sweepState_ = SweepState::NO_SWEEP;
    LOG_ECMA_MEM(DEBUG) << "Select CSet success: number is " << collectRegionSet_.size();
}

void OldSpace::CheckRegionSize()
{
#ifndef NDEBUG
    if (sweepState_ == SweepState::SWEEPING) {
        localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    }
    size_t available = allocator_->GetAvailableSize();
    size_t wasted = allocator_->GetWastedSize();
    if (GetHeapObjectSize() + wasted + available != objectSize_) {
        LOG_GC(DEBUG) << "Actual live object size:" << GetHeapObjectSize()
                            << ", free object size:" << available
                            << ", wasted size:" << wasted
                            << ", but expected total size:" << objectSize_;
    }
#endif
}

void OldSpace::RevertCSet()
{
    EnumerateCollectRegionSet([&](Region *region) {
        region->ClearGCFlag(RegionGCFlags::IN_COLLECT_SET);
        AddRegion(region);
        allocator_->CollectFreeObjectSet(region);
        IncreaseLiveObjectSize(region->AliveObject());
    });
    collectRegionSet_.clear();
}

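// After evacuation, tears down each CSet region's remembered sets and free
// object sets and returns its memory to the region allocator.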
void OldSpace::ReclaimCSet()
{
    size_t cachedSize = localHeap_->GetRegionCachedSize();
    EnumerateCollectRegionSet([this, &cachedSize](Region *region) {
        region->DeleteCrossRegionRSet();
        region->DeleteOldToNewRSet();
        region->DeleteLocalToShareRSet();
        region->DeleteSweepingOldToNewRSet();
        region->DeleteSweepingLocalToShareRSet();
        region->DestroyFreeObjectSets();
        heapRegionAllocator_->FreeRegion(region, cachedSize);
    });
    collectRegionSet_.clear();
}

bool OldSpace::SwapRegion(Region *region, SemiSpace *fromSpace)
{
    if (committedSize_ + region->GetCapacity() > maximumCapacity_) {
        return false;
    }
    fromSpace->RemoveRegion(region);
    region->InitializeFreeObjectSets();
    region->ResetRegionFlag(RegionSpaceFlag::IN_OLD_SPACE, RegionGCFlags::IN_NEW_TO_OLD_SET);

    regionList_.AddNodeToFront(region);
    IncreaseCommitted(region->GetCapacity());
    IncreaseObjectSize(region->GetSize());
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

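// Regions promoted wholesale from new space (the NEW_TO_OLD set) are swept in
// place rather than evacuated; queue them for the concurrent sweeper.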
void OldSpace::PrepareSweepNewToOldRegions()
{
    EnumerateRegions([this](Region *current) {
        if (current->InNewToOldSet()) {
            ASSERT(!current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT));
            if (UNLIKELY(localHeap_->ShouldVerifyHeap() &&
                current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT))) { // LOCV_EXCL_BR_LINE
                LOG_ECMA(FATAL) << "Region should not be swept before PrepareSweeping: " << current;
            }
            current->ResetWasted();
            current->SwapOldToNewRSetForCS();
            current->SwapLocalToShareRSetForCS();
            current->ClearGCFlag(RegionGCFlags::IN_NEW_TO_OLD_SET);
            AddSweepingRegion(current);
        }
    });
    sweepState_ = SweepState::SWEEPING;
}

void OldSpace::SweepNewToOldRegions()
{
    EnumerateRegions([this](Region *current) {
        if (current->InNewToOldSet()) {
            current->ResetWasted();
            current->ClearGCFlag(RegionGCFlags::IN_NEW_TO_OLD_SET);
            FreeRegion(current);
        }
    });
}

LocalSpace::LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, LOCAL_SPACE, initialCapacity, maximumCapacity) {}

bool LocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) { // LOCV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "AddRegionToList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

void LocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}

void LocalSpace::Stop()
{
    Region *currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        // Do not use allocator_->GetTop(), because it may point to freeObj from other regions.
        currentRegion->SetHighWaterMark(currentRegion->GetBegin() + currentRegion->AliveObject());
    }
}

uintptr_t NonMovableSpace::CheckAndAllocate(size_t size)
{
    if (maximumCapacity_ == committedSize_ && GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE &&
        !localHeap_->GetOldGCRequested() && !localHeap_->NeedStopCollection()) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
    }
    return Allocate(size);
}

NonMovableSpace::NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::NON_MOVABLE, initialCapacity, maximumCapacity)
{
}

AppSpawnSpace::AppSpawnSpace(Heap *heap, size_t initialCapacity)
    : SparseSpace(heap, MemSpaceType::APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}

uintptr_t AppSpawnSpace::AllocateSync(size_t size)
{
    LockHolder holder(mutex_);
    return Allocate(size);
}

void AppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}

void LocalSpace::ForceExpandInGC()
{
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    region->SetLocalHeap(reinterpret_cast<uintptr_t>(localHeap_));
    AddRegion(region);
    allocator_->AddFree(region);
}

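// Used during evacuation: if the normal capacity check refuses to expand,
// force-expand anyway and flag an OOM error to be thrown once GC completes.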
uintptr_t LocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0 && isExpand) {
        if (!Expand()) {
            ForceExpandInGC();
            localHeap_->ShouldThrowOOMError(true);
        }
        object = allocator_->Allocate(size);
        ASSERT(object != 0);
    }
    if (object != 0) {
        Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
    }
    return object;
}

MachineCodeSpace::MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::MACHINE_CODE_SPACE, initialCapacity, maximumCapacity)
{
}

MachineCodeSpace::~MachineCodeSpace()
{
    if (localHeap_->GetEcmaVM()->GetJSOptions().GetEnableJitFort()) {
        if (jitFort_) {
            delete jitFort_;
            jitFort_ = nullptr;
        }
    }
}

void MachineCodeSpace::PrepareSweeping()
{
    ASSERT(!g_isEnableCMCGC);
    // Fill the bump-pointer gap with free objects before SparseSpace::PrepareSweeping
    // rebuilds the free list; otherwise setting free objects may fail while
    // GetMachineCodeObject is iterating the machine code space.
    allocator_->FillBumpPointer();
    SparseSpace::PrepareSweeping();
    if (jitFort_) {
        jitFort_->PrepareSweeping();
    }
}

void MachineCodeSpace::ClearMarkBits()
{
    ASSERT(g_isEnableCMCGC);
    if (jitFort_) {
        jitFort_->ClearMarkBits();
    }
}

void MachineCodeSpace::Sweep()
{
    if (!g_isEnableCMCGC) {
        SparseSpace::Sweep();
    }
    if (jitFort_) {
        jitFort_->Sweep();
    }
}

void MachineCodeSpace::AsyncSweep(bool isMain)
{
    ASSERT(!g_isEnableCMCGC);
    LockHolder holder(asyncSweepMutex_);
    SparseSpace::AsyncSweep(isMain);
    if (jitFort_) {
        jitFort_->AsyncSweep();
    }
}

uintptr_t MachineCodeSpace::JitFortAllocate(MachineCodeDesc *desc)
{
    if (!jitFort_) {
        jitFort_ = new JitFort();
    }
    if (!g_isEnableCMCGC) {
        localHeap_->GetSweeper()->EnsureTaskFinishedNoCheck(spaceType_);
    }
    return jitFort_->Allocate(desc);
}

uintptr_t MachineCodeSpace::Allocate(size_t size, bool allowGC)
{
    ASSERT(!g_isEnableCMCGC);
    return SparseSpace::Allocate(size, allowGC);
}

uintptr_t MachineCodeSpace::Allocate(size_t size, MachineCodeDesc *desc, bool allowGC)
{
    ASSERT(!g_isEnableCMCGC);
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // The JitFort allocation size is included in the space's live object size and
    // the region's alive object size via CHECK_OBJECT_AND_INC_OBJ_SIZE. This could
    // be a problem for InvokeAllocationInspector once instructions are separated
    // from the MachineCode object into the JitFort space.

    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
    if (allowGC && localHeap_->CheckAndTriggerOldGC()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    if (allowGC) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = Allocate(size, desc, false);
        // Size has already been incremented by the recursive call.
    }
    return object;
}

size_t MachineCodeSpace::CheckMachineCodeObject(uintptr_t curPtr, uintptr_t &machineCode, uintptr_t pc)
{
    ASSERT(!g_isEnableCMCGC);
    auto freeObject = FreeObject::Cast(curPtr);
    size_t objSize = 0;
    if (!freeObject->IsFreeObject()) {
        auto obj = MachineCode::Cast(reinterpret_cast<TaggedObject*>(curPtr));
        if (obj->IsInText(pc)) {
            machineCode = curPtr;
        }
        objSize = obj->GetSize();
    } else {
        objSize = freeObject->Available();
    }
    return objSize;
}

void MachineCodeSpace::StoreMachineCodeObjectLocation(uintptr_t start, uintptr_t end, uintptr_t address)
{
    machineCodeObjectLocations.InsertMachineCodeObject(start, end, address);
}

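// Resolves the MachineCode object containing pc: with CMC GC a location table is
// consulted; otherwise each candidate region is scanned linearly under the sweep lock.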
uintptr_t MachineCodeSpace::GetMachineCodeObject(uintptr_t pc)
{
    if (g_isEnableCMCGC) {
        return machineCodeObjectLocations.GetMachineCodeObject(pc);
    }
    uintptr_t machineCode = 0;
    LockHolder holder(asyncSweepMutex_);
    allocator_->FillBumpPointer();

    EnumerateRegions([&](Region *region) {
        if (machineCode != 0) {
            return;
        }
        if (region->InCollectSet() || (!region->InRange(pc) && !InJitFortRange(pc))) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            size_t objSize = CheckMachineCodeObject(curPtr, machineCode, pc);
            if (machineCode != 0) {
                return;
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
    return machineCode;
}
}  // namespace panda::ecmascript