/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/shared_heap/shared_space.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"

namespace panda::ecmascript {
SharedSparseSpace::SharedSparseSpace(SharedHeap *heap,
                                     MemSpaceType type,
                                     size_t initialCapacity,
                                     size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      sHeap_(heap),
      liveObjectSize_(0)
{
    triggerLocalFullMarkLimit_ = maximumCapacity * LIVE_OBJECT_SIZE_RATIO;
    allocator_ = new FreeListAllocator<FreeObject>(heap);
}

void SharedSparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}

void SharedSparseSpace::ResetTopPointer(uintptr_t top)
{
    allocator_->ResetTopPointer(top);
}

// Only used during shared heap initialization, before the first vmThread is created.
uintptr_t SharedSparseSpace::AllocateWithoutGC(JSThread *thread, size_t size)
{
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    object = AllocateWithExpand(thread, size);
    return object;
}

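// Allocation fall-back chain: free list -> regions swept so far -> shared GC -> region expansion ->
// full shared GC with a final non-GC retry. Each step is followed by CHECK_SOBJECT_NOT_NULL(),
// which cuts the chain short once an object has been obtained.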
uintptr_t SharedSparseSpace::Allocate(JSThread *thread, size_t size, bool allowGC)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Shared old space cannot use this allocate function: a shared full GC may happen during the
    // trigger and thread state update, and the shared old space pointer might be changed by it.
    // The JIT thread has no local heap, so it never triggers GC here.
    allowGC = allowGC && (!thread->IsJitThread());
    if (allowGC) {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        localHeap->TryTriggerFullMarkBySharedSize(size);
    }
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    // Check whether it is necessary to trigger a shared GC before expanding, to avoid OOM risk.
    if (allowGC && sHeap_->CheckAndTriggerSharedGC(thread)) {
        object = TryAllocate(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    object = AllocateWithExpand(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (allowGC) {
        sHeap_->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
        object = Allocate(thread, size, false);
    }
    return object;
}

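// GC-free variant of the allocation path: retry after sweeping and optionally expand,
// but never trigger a collection.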
uintptr_t SharedSparseSpace::TryAllocateAndExpand(JSThread *thread, size_t size, bool expand)
{
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    if (expand) {
        object = AllocateWithExpand(thread, size);
    }
    return object;
}

uintptr_t SharedSparseSpace::AllocateNoGCAndExpand(JSThread *thread, size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
    }
    return object;
}

uintptr_t SharedSparseSpace::TryAllocate([[maybe_unused]] JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    uintptr_t object = allocator_->Allocate(size);
    IncAllocSObjectSize(object, size);
    return object;
}

uintptr_t SharedSparseSpace::AllocateWithExpand(JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    // To avoid two threads expanding at the same time, try to allocate under the lock first.
    CheckAndTriggerLocalFullMark();
    auto object = allocator_->Allocate(size);
    if (object == 0 && Expand(thread)) {
        object = allocator_->Allocate(size);
    }
    IncAllocSObjectSize(object, size);
    return object;
}

bool SharedSparseSpace::Expand(JSThread *thread)
{
    if (CommittedSizeExceed()) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedSparseSpace::Expand:region is nullptr";
    }
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}

Region *SharedSparseSpace::AllocateDeserializeRegion(JSThread *thread)
{
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedSparseSpace::AllocateDeserializeRegion:region is nullptr";
    }
    return region;
}

void SharedSparseSpace::MergeDeserializeAllocateRegions(const std::vector<Region *> &allocateRegions)
{
    LockHolder lock(allocateLock_);
    for (auto region : allocateRegions) {
        AddRegion(region);
        allocator_->AddFree(region);
        allocator_->ResetTopPointer(region->GetHighWaterMark());
        region->SetHighWaterMark(region->GetEnd());
    }
}

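// Allocation path used while concurrent sweeping is in progress: retry the free list first,
// then collect regions the sweeper has already finished, and finally block until the sweeper
// completes this space.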
uintptr_t SharedSparseSpace::AllocateAfterSweepingCompleted([[maybe_unused]] JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    uintptr_t object = 0U;
    if (sweepState_ != SweepState::SWEEPING) {
        object = allocator_->Allocate(size);
        IncAllocSObjectSize(object, size);
        return object;
    }
    if (TryFillSweptRegion()) {
        object = allocator_->Allocate(size);
        IncAllocSObjectSize(object, size);
        if (object != 0) {
            return object;
        }
    }
    // Wait for the concurrent sweeper to finish this space, then retry the allocation.
    sHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    object = allocator_->Allocate(size);
    IncAllocSObjectSize(object, size);
    return object;
}

void SharedSparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    ASSERT(GetSweepingRegionSafe() == nullptr);
    ASSERT(GetSweptRegionSafe() == nullptr);
    EnumerateRegions([this](Region *current) {
        ASSERT(!current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT));
        ASSERT(!current->InSCollectSet());
        IncreaseLiveObjectSize(current->AliveObject());
        current->ResetWasted();
        AddSweepingRegion(current);
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}

void SharedSparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Regions swept on the main thread are freed directly; regions swept by background
        // threads are queued so the allocator can collect their free sets later.
        if (!isMain) {
            AddSweptRegionSafe(current);
        }
        current = GetSweepingRegionSafe();
    }
}

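// Sweep the whole space inline on the calling thread, without handing regions to the
// concurrent sweeper.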
void SharedSparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        IncreaseLiveObjectSize(current->AliveObject());
        current->ResetWasted();
        FreeRegion(current);
    });
}

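// Hand the free sets of already-swept regions back to the allocator.
// Returns false if the swept list is empty.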
bool SharedSparseSpace::TryFillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
        region->ResetSwept();
    }
    return true;
}

bool SharedSparseSpace::FinishFillSweptRegion()
{
    bool ret = TryFillSweptRegion();
    sweepState_ = SweepState::SWEPT;
    return ret;
}

void SharedSparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}

void SharedSparseSpace::SortSweepingRegion()
{
    // Regions are popped from the back of the list, so sort in descending order of live object
    // size to sweep the regions with the least live data first.
    std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
        return first->AliveObject() > second->AliveObject();
    });
}

Region *SharedSparseSpace::GetSweepingRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();
        sweepingList_.pop_back();
    }
    return region;
}

void SharedSparseSpace::AddSweptRegionSafe(Region *region)
{
    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
}

Region *SharedSparseSpace::GetSweptRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();
        sweptList_.pop_back();
    }
    return region;
}

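// Walk the region's mark bitmap and return every gap between live objects to the allocator.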
void SharedSparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &freeStart, isMain](void *mem) {
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(freeStart, freeEnd, isMain);
    }
}

void SharedSparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}

void SharedSparseSpace::FreeLiveRange(uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    // No need to clear a remembered set here, because shared regions have no remembered set for now.
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}

void SharedSparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned before its header is read.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

size_t SharedSparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}

void SharedSparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}

size_t SharedSparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}

void SharedSparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
    ASSERT(size <= alignedSize);
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (alignedSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
    }
    allocationCounter_.AdvanceAllocationInspector(alignedSize);
}

void SharedSparseSpace::CheckAndTriggerLocalFullMark()
{
    if (liveObjectSize_ >= triggerLocalFullMarkLimit_) {
        sHeap_->TryTriggerLocalConcurrentMarking();
    }
}

void SharedSparseSpace::IncAllocSObjectSize(uintptr_t object, size_t size)
{
    if (object != 0) {
        IncreaseLiveObjectSize(size);
        if (sHeap_->IsReadyToConcurrentMark()) {
            Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
        }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
        InvokeAllocationInspector(object, size, size);
#endif
    }
}

SharedAppSpawnSpace::SharedAppSpawnSpace(SharedHeap *heap, size_t initialCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}

void SharedAppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}

SharedNonMovableSpace::SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_NON_MOVABLE, initialCapacity, maximumCapacity)
{
}

SharedOldSpace::SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_OLD_SPACE, initialCapacity, maximumCapacity)
{
}

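// Pick regions whose live ratio is low enough as the shared collect set, capped so the total
// live data to evacuate stays below MAX_EVACUATION_SIZE.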
void SharedOldSpace::SelectCSets()
{
    EnumerateRegions([this](Region *region) {
        if (!region->MostObjectAlive()) {
            collectRegionSet_.emplace_back(region);
        }
    });
#ifdef NDEBUG
    if (collectRegionSet_.size() < MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Selected SCSet number: " << collectRegionSet_.size() << " is too few";
        collectRegionSet_.clear();
        return;
    }
#endif
    // Sort candidates by ascending live object size so the emptiest regions are evacuated first.
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });

    // Limit the collect set so the total live data to evacuate stays below MAX_EVACUATION_SIZE.
    int64_t leftEvacuateSize = MAX_EVACUATION_SIZE;
    size_t selectedNumber = 0;
    for (; selectedNumber < collectRegionSet_.size(); selectedNumber++) {
        Region *region = collectRegionSet_[selectedNumber];
        leftEvacuateSize -= static_cast<int64_t>(region->AliveObject());
        if (leftEvacuateSize > 0) {
            RemoveCSetRegion(region);
            allocator_->DetachFreeObjectSet(region);
            region->SetGCFlag(RegionGCFlags::IN_SHARED_COLLECT_SET);
            region->ResetAliveObject();
        } else {
            break;
        }
    }
    if (collectRegionSet_.size() > selectedNumber) {
        collectRegionSet_.resize(selectedNumber);
    }
}

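// Put collect-set candidates back into the region list and restore their free sets,
// undoing the selection made in SelectCSets().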
void SharedOldSpace::RevertCSets()
{
    EnumerateCollectRegionSet([this](Region *region) {
        region->ClearGCFlag(RegionGCFlags::IN_SHARED_COLLECT_SET);
        AddCSetRegion(region);
        allocator_->CollectFreeObjectSet(region);
    });
    collectRegionSet_.clear();
}

void SharedOldSpace::ReclaimCSets()
{
    EnumerateCollectRegionSet([this](Region *region) {
        region->DeleteCrossRegionRSet();
        region->DestroyFreeObjectSets();
        heapRegionAllocator_->FreeRegion(region, 0, true);
    });
    collectRegionSet_.clear();
}

void SharedOldSpace::AddCSetRegion(Region *region)
{
    ASSERT(region != nullptr);
    regionList_.AddNode(region);
}

void SharedOldSpace::RemoveCSetRegion(Region *region)
{
    ASSERT(region != nullptr);
    regionList_.RemoveNode(region);
}

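// Move every region of a thread-local SharedLocalSpace into this shared old space,
// transferring live sizes, free sets, and the allocated-size accounting.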
void SharedOldSpace::Merge(SharedLocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    if (committedSize_ > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        if (sHeap_->CanThrowOOMError()) {
            sHeap_->ShouldThrowOOMError(true);
        }
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        // If an OOM error will be thrown, temporarily increase the space size to avoid crashing the VM.
        IncreaseOutOfMemoryOvershootSize(committedSize_ - GetOverShootMaximumCapacity());
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}

SharedLocalSpace::SharedLocalSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_LOCAL_SPACE, initialCapacity, maximumCapacity) {}

bool SharedLocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) { // LOCV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "AddRegionToList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

void SharedLocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}

void SharedLocalSpace::Stop()
{
    Region *currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        currentRegion->SetHighWaterMark(currentRegion->GetBegin() + currentRegion->AliveObject());
    }
}

void SharedLocalSpace::ForceExpandInSharedGC(JSThread *thread)
{
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    AddRegion(region);
    allocator_->AddFree(region);
}

uintptr_t SharedLocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0 && isExpand) {
        // Shared full GC compresses all regions and cannot recognize regions owned by
        // individual threads, so expansion is done on behalf of the main thread.
        if (!Expand(Runtime::GetInstance()->GetMainThread())) {
            ForceExpandInSharedGC(Runtime::GetInstance()->GetMainThread());
            sHeap_->ShouldThrowOOMError(true);
        }
        object = allocator_->Allocate(size);
        ASSERT(object != 0);
    }
    if (object != 0) {
        Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
    }
    return object;
}

SharedReadOnlySpace::SharedReadOnlySpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : Space(
          heap, heap->GetHeapRegionAllocator(), MemSpaceType::SHARED_READ_ONLY_SPACE, initialCapacity, maximumCapacity)
{
}

bool SharedReadOnlySpace::Expand(JSThread *thread)
{
    if (committedSize_ >= initialCapacity_ + outOfMemoryOvershootSize_ &&
        !heap_->NeedStopCollection()) {
        return false;
    }
    uintptr_t top = allocator_.GetTop();
    auto currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        currentRegion->SetHighWaterMark(top);
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, heap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedReadOnlySpace::Expand:region is nullptr";
    }
    allocator_.Reset(region->GetBegin(), region->GetEnd());
    AddRegion(region);
    return true;
}

uintptr_t SharedReadOnlySpace::Allocate(JSThread *thread, size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    thread->CheckSafepointIfSuspended();
    LockHolder holder(allocateLock_);
    auto object = allocator_.Allocate(size);
    if (object != 0) {
        return object;
    }
    if (Expand(thread)) {
        object = allocator_.Allocate(size);
    }
    return object;
}

void SharedReadOnlySpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    size_t size = allocator_.Available();
    if (size != 0) {
        FreeObject::FillFreeObject(heap_, allocator_.GetTop(), size);
    }
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned before its header is read.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

SharedHugeObjectSpace::SharedHugeObjectSpace(BaseHeap *heap, HeapRegionAllocator *heapRegionAllocator,
                                             size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heapRegionAllocator, MemSpaceType::SHARED_HUGE_OBJECT_SPACE, initialCapacity, maximumCapacity)
{
    triggerLocalFullMarkLimit_ = maximumCapacity * HUGE_OBJECT_SIZE_RATIO;
}

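// Each huge object gets a dedicated region sized to the object, so allocation here is one
// region allocation per object.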
uintptr_t SharedHugeObjectSpace::Allocate(JSThread *thread, size_t objectSize, AllocateEventType allocType)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Huge object allocation reserves 8 bytes for the markBitSet in objectSize.
    // In case the region is not 16-byte aligned, HUGE_OBJECT_BITSET_SIZE reserves 8 bytes more.
    size_t alignedSize = AlignUp(objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES);
    if (allocType == AllocateEventType::NORMAL) {
        thread->CheckSafepointIfSuspended();
        CheckAndTriggerLocalFullMark(thread, alignedSize);
    }
    LockHolder lock(allocateLock_);
    if (CommittedSizeExceed(alignedSize)) {
        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
        return 0;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, alignedSize, thread, heap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedHugeObjectSpace::Allocate:region is nullptr";
    }
    AddRegion(region);
    // The newly allocated huge object memory must be unpoisoned for ASan before use.
    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    InvokeAllocationInspector(region->GetBegin(), objectSize);
#endif
    return region->GetBegin();
}

void SharedHugeObjectSpace::Sweep()
{
    Region *currentRegion = GetRegionList().GetFirst();
    while (currentRegion != nullptr) {
        Region *next = currentRegion->GetNext();
        bool isMarked = false;
        currentRegion->IterateAllMarkedBits([&isMarked]([[maybe_unused]] void *mem) { isMarked = true; });
        if (!isMarked) {
            GetRegionList().RemoveNode(currentRegion);
            hugeNeedFreeList_.AddNode(currentRegion);
        }
        currentRegion = next;
    }
}

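// Every region in this space holds exactly one object and dead regions are reclaimed during
// sweeping, so the committed size doubles as the live object size.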
size_t SharedHugeObjectSpace::GetHeapObjectSize() const
{
    return committedSize_;
}

void SharedHugeObjectSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const
{
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        objectVisitor(reinterpret_cast<TaggedObject *>(curPtr));
    });
}

void SharedHugeObjectSpace::ReclaimHugeRegion()
{
    if (hugeNeedFreeList_.IsEmpty()) {
        return;
    }
    do {
        Region *last = hugeNeedFreeList_.PopBack();
        ClearAndFreeRegion(last);
    } while (!hugeNeedFreeList_.IsEmpty());
}

void SharedHugeObjectSpace::InvokeAllocationInspector(Address object, size_t objectSize)
{
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (objectSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, objectSize, objectSize);
    }
    allocationCounter_.AdvanceAllocationInspector(objectSize);
}

void SharedHugeObjectSpace::CheckAndTriggerLocalFullMark(JSThread *thread, size_t size)
{
    if (committedSize_ >= triggerLocalFullMarkLimit_) {
        reinterpret_cast<SharedHeap*>(heap_)->TryTriggerLocalConcurrentMarking();
    } else {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        if (!thread->IsJitThread()) {
            localHeap->TryTriggerFullMarkBySharedSize(size);
        }
    }
}
}  // namespace panda::ecmascript