/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/shared_heap/shared_space.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/checkpoint/thread_state_transition.h"
#include "ecmascript/runtime_lock.h"

namespace panda::ecmascript {
SharedSparseSpace::SharedSparseSpace(SharedHeap *heap,
                                     MemSpaceType type,
                                     size_t initialCapacity,
                                     size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      sHeap_(heap),
      liveObjectSize_(0)
{
    triggerLocalFullMarkLimit_ = maximumCapacity * LIVE_OBJECT_SIZE_RATIO;
    allocator_ = new FreeListAllocator<FreeObject>(heap);
}

void SharedSparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}

void SharedSparseSpace::ResetTopPointer(uintptr_t top)
{
    allocator_->ResetTopPointer(top);
}

// Only used during shared heap initialization, before the first vmThread is created.
uintptr_t SharedSparseSpace::AllocateWithoutGC(JSThread *thread, size_t size)
{
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    object = AllocateWithExpand(thread, size);
    return object;
}

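// Allocation falls back through several stages, in order: a plain free-list allocation,
// allocating after an in-progress concurrent sweep, triggering a shared GC, expanding the
// space, and finally a blocking shared GC followed by a non-GC retry. Each stage returns
// early (via CHECK_SOBJECT_NOT_NULL) once an object has been obtained.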
uintptr_t SharedSparseSpace::Allocate(JSThread *thread, size_t size, bool allowGC)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // The shared old space cannot use this allocate function: a shared full GC may happen during the trigger
    // and thread state update, and the shared old space pointer might be changed by the shared full GC.
    // The JIT thread has no heap.
    allowGC = allowGC && (!thread->IsJitThread());
    if (allowGC) {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        localHeap->TryTriggerFullMarkBySharedSize(size);
    }
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    // Check whether it is necessary to trigger a shared GC before expanding, to avoid the risk of OOM.
    if (allowGC && sHeap_->CheckAndTriggerSharedGC(thread)) {
        object = TryAllocate(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    object = AllocateWithExpand(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (allowGC) {
        sHeap_->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
        object = Allocate(thread, size, false);
    }
    return object;
}

uintptr_t SharedSparseSpace::TryAllocateAndExpand(JSThread *thread, size_t size, bool expand)
{
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    if (expand) {
        object = AllocateWithExpand(thread, size);
    }
    return object;
}

uintptr_t SharedSparseSpace::AllocateNoGCAndExpand(JSThread *thread, size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
    }
    return object;
}

uintptr_t SharedSparseSpace::TryAllocate([[maybe_unused]] JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    uintptr_t object = allocator_->Allocate(size);
    IncAllocSObjectSize(object, size);
    return object;
}

uintptr_t SharedSparseSpace::AllocateWithExpand(JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    // To avoid two different threads expanding at the same time, try to allocate first.
    CheckAndTriggerLocalFullMark();
    auto object = allocator_->Allocate(size);
    if (object == 0 && Expand(thread)) {
        object = allocator_->Allocate(size);
    }
    IncAllocSObjectSize(object, size);
    return object;
}

bool SharedSparseSpace::Expand(JSThread *thread)
{
    if (CommittedSizeExceed()) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    if (region == nullptr) {
        LOG_ECMA(FATAL) << "SharedSparseSpace::Expand:region is nullptr";
    }
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}

Region *SharedSparseSpace::AllocateDeserializeRegion(JSThread *thread)
{
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    if (region == nullptr) {
        LOG_ECMA(FATAL) << "SharedSparseSpace::AllocateDeserializeRegion:region is nullptr";
    }
    return region;
}

void SharedSparseSpace::MergeDeserializeAllocateRegions(const std::vector<Region *> &allocateRegions)
{
    LockHolder lock(allocateLock_);
    for (auto region : allocateRegions) {
        AddRegion(region);
        allocator_->AddFree(region);
        allocator_->ResetTopPointer(region->GetHighWaterMark());
        region->SetHighWaterMark(region->GetEnd());
    }
}

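// Called while a concurrent sweep may still be in progress: if the sweep has already
// finished, allocate directly; otherwise first reuse regions the sweeper has completed,
// and as a last resort block until the sweeping task for this space is finished.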
uintptr_t SharedSparseSpace::AllocateAfterSweepingCompleted([[maybe_unused]] JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    uintptr_t object = 0U;
    if (sweepState_ != SweepState::SWEEPING) {
        object = allocator_->Allocate(size);
        IncAllocSObjectSize(object, size);
        return object;
    }
    if (TryFillSweptRegion()) {
        object = allocator_->Allocate(size);
        IncAllocSObjectSize(object, size);
        if (object != 0) {
            return object;
        }
    }
    // Parallel sweep and fill
    sHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    object = allocator_->Allocate(size);
    IncAllocSObjectSize(object, size);
    return object;
}

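// Resets the live object size, records each region's alive bytes, queues every region for
// sweeping (sorted by alive size), marks the space as SWEEPING, and rebuilds the free list.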
void SharedSparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    EnumerateRegions([this](Region *current) {
        IncreaseLiveObjectSize(current->AliveObject());
        current->ResetWasted();
        AddSweepingRegion(current);
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}

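// Drains the sweeping queue: each call repeatedly takes a region, frees its dead ranges,
// and, when not running on the main thread, queues the swept region so its free object
// sets can be collected later by TryFillSweptRegion.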
void SharedSparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Regions swept by a non-main thread are queued in the swept list.
        if (!isMain) {
            AddSweptRegionSafe(current);
        }
        current = GetSweepingRegionSafe();
    }
}

void SharedSparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        IncreaseLiveObjectSize(current->AliveObject());
        current->ResetWasted();
        FreeRegion(current);
    });
}

bool SharedSparseSpace::TryFillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
        region->ResetSwept();
    }
    return true;
}

bool SharedSparseSpace::FinishFillSweptRegion()
{
    bool ret = TryFillSweptRegion();
    sweepState_ = SweepState::SWEPT;
    return ret;
}

void SharedSparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}

void SharedSparseSpace::SortSweepingRegion()
{
    // Sweep regions with a low alive object size first.
    std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });
}

Region *SharedSparseSpace::GetSweepingRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();
        sweepingList_.pop_back();
    }
    return region;
}

void SharedSparseSpace::AddSweptRegionSafe(Region *region)
{
    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
}

Region *SharedSparseSpace::GetSweptRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();
        sweptList_.pop_back();
    }
    return region;
}

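// Walks the mark bitmap of a region and returns every unmarked gap between live objects,
// plus the tail of the region, to the free-list allocator.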
void SharedSparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &freeStart, isMain](void *mem) {
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(freeStart, freeEnd, isMain);
    }
}

void SharedSparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}

void SharedSparseSpace::FreeLiveRange(uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    // No need to clear the remembered set here, because shared regions have no remembered set for now.
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}

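// Linearly scans every region object by object: free objects are temporarily unpoisoned
// so their size can be read and are skipped; live objects are passed to the visitor.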
void SharedSparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned first.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

size_t SharedSparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}

void SharedSparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}

size_t SharedSparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}

void SharedSparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
    ASSERT(size <= alignedSize);
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (alignedSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
    }
    allocationCounter_.AdvanceAllocationInspector(alignedSize);
}

void SharedSparseSpace::CheckAndTriggerLocalFullMark()
{
    if (liveObjectSize_ >= triggerLocalFullMarkLimit_) {
        sHeap_->TryTriggerLocalConcurrentMarking();
    }
}

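// Bookkeeping for a successful allocation: bumps the space's live object size, updates the
// owning region's alive counter when the shared heap is ready for concurrent marking, and
// notifies the heap-sampling inspector when that feature is compiled in.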
void SharedSparseSpace::IncAllocSObjectSize(uintptr_t object, size_t size)
{
    if (object != 0) {
        IncreaseLiveObjectSize(size);
        if (sHeap_->IsReadyToConcurrentMark()) {
            Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
        }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
        InvokeAllocationInspector(object, size, size);
#endif
    }
}

SharedAppSpawnSpace::SharedAppSpawnSpace(SharedHeap *heap, size_t initialCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}

void SharedAppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}

SharedNonMovableSpace::SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_NON_MOVABLE, initialCapacity, maximumCapacity)
{
}

SharedOldSpace::SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_OLD_SPACE, initialCapacity, maximumCapacity)
{
}

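// Moves every region of a thread-local shared space into the shared old space under the
// space lock, transferring live-size accounting and free object sets. If the merged
// committed size plus the huge object space exceeds the overshoot limit, an OOM error is
// flagged and the overshoot size is temporarily increased to avoid a VM crash.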
void SharedOldSpace::Merge(SharedLocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    size_t hugeSpaceCommitSize = sHeap_->GetHugeObjectSpace()->GetCommittedSize();
    if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        if (sHeap_->CanThrowOOMError()) {
            sHeap_->ShouldThrowOOMError(true);
        }
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        // If an OOM is to be thrown, temporarily increase the space size to avoid a VM crash.
        IncreaseOutOfMemoryOvershootSize(committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity());
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}

SharedLocalSpace::SharedLocalSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_LOCAL_SPACE, initialCapacity, maximumCapacity) {}

bool SharedLocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) {
        LOG_ECMA_MEM(FATAL) << "AddRegionToList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

void SharedLocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}

void SharedLocalSpace::Stop()
{
    Region *currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        currentRegion->SetHighWaterMark(currentRegion->GetBegin() + currentRegion->AliveObject());
    }
}

uintptr_t SharedLocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0) {
        // Shared full GC will compress all regions and cannot recognize every thread's region.
        if (isExpand && Expand(Runtime::GetInstance()->GetMainThread())) {
            object = allocator_->Allocate(size);
        }
    }
    if (object != 0) {
        Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
    }
    return object;
}

SharedReadOnlySpace::SharedReadOnlySpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : Space(
          heap, heap->GetHeapRegionAllocator(), MemSpaceType::SHARED_READ_ONLY_SPACE, initialCapacity, maximumCapacity)
{
}

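// The read-only space uses bump-pointer allocation: expansion records the current top as the
// high-water mark of the old region, then resets the allocator over a freshly committed region.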
bool SharedReadOnlySpace::Expand(JSThread *thread)
{
    if (committedSize_ >= initialCapacity_ + outOfMemoryOvershootSize_ &&
        !heap_->NeedStopCollection()) {
        return false;
    }
    uintptr_t top = allocator_.GetTop();
    auto currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        currentRegion->SetHighWaterMark(top);
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, heap_);
    if (region == nullptr) {
        LOG_ECMA(FATAL) << "SharedReadOnlySpace::Expand:region is nullptr";
    }
    allocator_.Reset(region->GetBegin(), region->GetEnd());
    AddRegion(region);
    return true;
}

uintptr_t SharedReadOnlySpace::Allocate(JSThread *thread, size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    thread->CheckSafepointIfSuspended();
    LockHolder holder(allocateLock_);
    auto object = allocator_.Allocate(size);
    if (object != 0) {
        return object;
    }
    if (Expand(thread)) {
        object = allocator_.Allocate(size);
    }
    return object;
}

void SharedReadOnlySpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    size_t size = allocator_.Available();
    if (size != 0) {
        FreeObject::FillFreeObject(heap_, allocator_.GetTop(), size);
    }
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned first.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

SharedHugeObjectSpace::SharedHugeObjectSpace(BaseHeap *heap, HeapRegionAllocator *heapRegionAllocator,
                                             size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heapRegionAllocator, MemSpaceType::SHARED_HUGE_OBJECT_SPACE, initialCapacity, maximumCapacity)
{
    triggerLocalFullMarkLimit_ = maximumCapacity * HUGE_OBJECT_SIZE_RATIO;
}

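// Each huge object gets its own aligned region. The aligned size must cover the Region
// header and the mark bitset in addition to the object itself; allocation fails (returns 0)
// once the committed size limit would be exceeded.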
uintptr_t SharedHugeObjectSpace::Allocate(JSThread *thread, size_t objectSize, AllocateEventType allocType)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Huge object allocation reserves 8 bytes for the markBitSet in objectSize.
    // In case the Region is not 16-byte aligned, HUGE_OBJECT_BITSET_SIZE adds 8 more bytes.
    size_t alignedSize = AlignUp(objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES);
    if (allocType == AllocateEventType::NORMAL) {
        thread->CheckSafepointIfSuspended();
        CheckAndTriggerLocalFullMark(thread, alignedSize);
    }
    LockHolder lock(allocateLock_);
    if (CommittedSizeExceed(alignedSize)) {
        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
        return 0;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, alignedSize, thread, heap_);
    if (region == nullptr) {
        LOG_ECMA(FATAL) << "SharedHugeObjectSpace::Allocate:region is nullptr";
    }
    AddRegion(region);
    // The memory must be unpoisoned when the huge object is allocated.
    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    InvokeAllocationInspector(region->GetBegin(), objectSize);
#endif
    return region->GetBegin();
}

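// A huge region with no marked bits holds a dead object; such regions are moved to
// hugeNeedFreeList_ and released later by ReclaimHugeRegion.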
void SharedHugeObjectSpace::Sweep()
{
    Region *currentRegion = GetRegionList().GetFirst();
    while (currentRegion != nullptr) {
        Region *next = currentRegion->GetNext();
        bool isMarked = false;
        currentRegion->IterateAllMarkedBits([&isMarked]([[maybe_unused]] void *mem) { isMarked = true; });
        if (!isMarked) {
            GetRegionList().RemoveNode(currentRegion);
            hugeNeedFreeList_.AddNode(currentRegion);
        }
        currentRegion = next;
    }
}

size_t SharedHugeObjectSpace::GetHeapObjectSize() const
{
    return committedSize_;
}

void SharedHugeObjectSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const
{
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        objectVisitor(reinterpret_cast<TaggedObject *>(curPtr));
    });
}

void SharedHugeObjectSpace::ReclaimHugeRegion()
{
    if (hugeNeedFreeList_.IsEmpty()) {
        return;
    }
    do {
        Region *last = hugeNeedFreeList_.PopBack();
        ClearAndFreeRegion(last);
    } while (!hugeNeedFreeList_.IsEmpty());
}

void SharedHugeObjectSpace::InvokeAllocationInspector(Address object, size_t objectSize)
{
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (objectSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, objectSize, objectSize);
    }
    allocationCounter_.AdvanceAllocationInspector(objectSize);
}

void SharedHugeObjectSpace::CheckAndTriggerLocalFullMark(JSThread *thread, size_t size)
{
    if (committedSize_ >= triggerLocalFullMarkLimit_) {
        reinterpret_cast<SharedHeap*>(heap_)->TryTriggerLocalConcurrentMarking();
    } else {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        if (!thread->IsJitThread()) {
            localHeap->TryTriggerFullMarkBySharedSize(size);
        }
    }
}
}  // namespace panda::ecmascript