/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/shared_heap/shared_space.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"

namespace panda::ecmascript {
SharedSparseSpace::SharedSparseSpace(SharedHeap *heap,
                                     MemSpaceType type,
                                     size_t initialCapacity,
                                     size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      sHeap_(heap),
      liveObjectSize_(0)
{
    triggerLocalFullMarkLimit_ = maximumCapacity * LIVE_OBJECT_SIZE_RATIO;
    allocator_ = new FreeListAllocator<FreeObject>(heap);
}

void SharedSparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}

void SharedSparseSpace::ResetTopPointer(uintptr_t top)
{
    allocator_->ResetTopPointer(top);
}

// Only used during shared heap initialization, before the first vmThread is created.
uintptr_t SharedSparseSpace::AllocateWithoutGC(JSThread *thread, size_t size)
{
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    object = AllocateWithExpand(thread, size);
    return object;
}

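// Slow-path allocation: try the free list, retry after concurrent sweeping, trigger a shared GC
// before expanding if needed, expand the space, and finally fall back to a blocking shared GC
// (GCReason::ALLOCATION_FAILED) followed by one retry without GC.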
uintptr_t SharedSparseSpace::Allocate(JSThread *thread, size_t size, bool allowGC)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Shared old space cannot use this allocate function: a shared full GC may happen during the trigger
    // and thread state update, and the shared old space pointer might be changed by that shared full GC.
    // The JIT thread has no local heap.
    allowGC = allowGC && (!thread->IsJitThread());
    if (allowGC) {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        localHeap->TryTriggerFullMarkBySharedSize(size);
    }
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    // Check whether it is necessary to trigger a Shared GC before expanding to avoid OOM risk.
    if (allowGC && sHeap_->CheckAndTriggerSharedGC(thread)) {
        object = TryAllocate(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    object = AllocateWithExpand(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (allowGC) {
        sHeap_->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
        object = Allocate(thread, size, false);
    }
    return object;
}

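// Like Allocate, but never triggers a GC: retry after concurrent sweeping and, if requested,
// expand the space.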
uintptr_t SharedSparseSpace::TryAllocateAndExpand(JSThread *thread, size_t size, bool expand)
{
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    if (expand) {
        object = AllocateWithExpand(thread, size);
    }
    return object;
}

uintptr_t SharedSparseSpace::AllocateNoGCAndExpand(JSThread *thread, size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
    }
    return object;
}

uintptr_t SharedSparseSpace::TryAllocate([[maybe_unused]] JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    uintptr_t object = allocator_->Allocate(size);
    IncAllocSObjectSize(object, size);
    return object;
}

uintptr_t SharedSparseSpace::AllocateWithExpand(JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    // To avoid two different threads both expanding the space, try to allocate first.
    CheckAndTriggerLocalFullMark();
    auto object = allocator_->Allocate(size);
    if (object == 0 && Expand(thread)) {
        object = allocator_->Allocate(size);
    }
    IncAllocSObjectSize(object, size);
    return object;
}

bool SharedSparseSpace::Expand(JSThread *thread)
{
    if (CommittedSizeExceed()) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedSparseSpace::Expand:region is nullptr";
    }
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}

Region *SharedSparseSpace::AllocateDeserializeRegion(JSThread *thread)
{
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedSparseSpace::AllocateDeserializeRegion:region is nullptr";
    }
    return region;
}

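// Merge regions that were allocated for deserialization into this space and hand their
// remaining free space back to the free-list allocator.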
void SharedSparseSpace::MergeDeserializeAllocateRegions(const std::vector<Region *> &allocateRegions)
{
    LockHolder lock(allocateLock_);
    for (auto region : allocateRegions) {
        AddRegion(region);
        allocator_->AddFree(region);
        allocator_->ResetTopPointer(region->GetHighWaterMark());
        region->SetHighWaterMark(region->GetEnd());
    }
}

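// Allocation path used while concurrent sweeping is in progress: first retry with regions the
// sweeper has already finished, and only wait for the sweeper task if that still fails.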
uintptr_t SharedSparseSpace::AllocateAfterSweepingCompleted([[maybe_unused]] JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    uintptr_t object = 0U;
    if (sweepState_ != SweepState::SWEEPING) {
        object = allocator_->Allocate(size);
        IncAllocSObjectSize(object, size);
        return object;
    }
    if (TryFillSweptRegion()) {
        object = allocator_->Allocate(size);
        IncAllocSObjectSize(object, size);
        if (object != 0) {
            return object;
        }
    }
    // Wait for the parallel sweeping task to finish, then allocate.
    sHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    object = allocator_->Allocate(size);
    IncAllocSObjectSize(object, size);
    return object;
}

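// Prepare concurrent sweeping: record each region's alive object size, queue every region for
// sweeping, sort the queue, and rebuild the free list.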
void SharedSparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    ASSERT(GetSweepingRegionSafe() == nullptr);
    ASSERT(GetSweptRegionSafe() == nullptr);
    EnumerateRegions([this](Region *current) {
        ASSERT(!current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT));
        ASSERT(!current->InSCollectSet());
        IncreaseLiveObjectSize(current->AliveObject());
        current->ResetWasted();
        AddSweepingRegion(current);
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}

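// Drain the sweeping queue; runs either on the main thread or on a sweeper task thread,
// distinguished by isMain.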
void SharedSparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Regions swept on the main thread are not added to the swept list.
        if (!isMain) {
            AddSweptRegionSafe(current);
        }
        current = GetSweepingRegionSafe();
    }
}

void SharedSparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        IncreaseLiveObjectSize(current->AliveObject());
        current->ResetWasted();
        FreeRegion(current);
    });
}

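// Collect the free object sets of regions the concurrent sweeper has already finished, making
// their free space available for allocation. Returns false if the swept list is empty.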
bool SharedSparseSpace::TryFillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
        region->ResetSwept();
    }
    return true;
}

bool SharedSparseSpace::FinishFillSweptRegion()
{
    bool ret = TryFillSweptRegion();
    sweepState_ = SweepState::SWEPT;
    return ret;
}

void SharedSparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}

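// Sweeping regions are popped from the back of sweepingList_, so sorting in descending order of
// alive object size makes the regions with the fewest live objects get swept first.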
void SharedSparseSpace::SortSweepingRegion()
{
    // Sweep regions with the smallest alive object size first.
    std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
        return first->AliveObject() > second->AliveObject();
    });
}

Region *SharedSparseSpace::GetSweepingRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();
        sweepingList_.pop_back();
    }
    return region;
}

void SharedSparseSpace::AddSweptRegionSafe(Region *region)
{
    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
}

Region *SharedSparseSpace::GetSweptRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();
        sweptList_.pop_back();
    }
    return region;
}

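// Walk the region's mark bitmap and return every gap between live objects (including the tail
// of the region) to the free list.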
void SharedSparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &freeStart, isMain](void *mem) {
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto size = header->GetSize();

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(freeStart, freeEnd, isMain);
    }
}

void SharedSparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}

void SharedSparseSpace::FreeLiveRange(uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    // No need to clear the remembered set here, because shared regions have no remembered set for now.
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}

void SharedSparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a FreeObject, it must be unpoisoned before it is accessed.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetSize();
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

size_t SharedSparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}

void SharedSparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}

size_t SharedSparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}

void SharedSparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
    ASSERT(size <= alignedSize);
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (alignedSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
    }
    allocationCounter_.AdvanceAllocationInspector(alignedSize);
}

void SharedSparseSpace::CheckAndTriggerLocalFullMark()
{
    if (liveObjectSize_ >= triggerLocalFullMarkLimit_) {
        sHeap_->TryTriggerLocalConcurrentMarking();
    }
}

void SharedSparseSpace::IncAllocSObjectSize(uintptr_t object, size_t size)
{
    if (object != 0) {
        IncreaseLiveObjectSize(size);
        if (sHeap_->IsReadyToConcurrentMark()) {
            Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
        }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
        InvokeAllocationInspector(object, size, size);
#endif
    }
}

SharedAppSpawnSpace::SharedAppSpawnSpace(SharedHeap *heap, size_t initialCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}

void SharedAppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}

SharedNonMovableSpace::SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_NON_MOVABLE, initialCapacity, maximumCapacity)
{
}

SharedOldSpace::SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_OLD_SPACE, initialCapacity, maximumCapacity)
{
}

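// Select the shared collect set: regions that are not mostly alive are candidates, and the
// least-alive candidates are chosen until MAX_EVACUATION_SIZE of live data has been selected.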
void SharedOldSpace::SelectCSets()
{
    EnumerateRegions([this](Region *region) {
        if (!region->MostObjectAlive()) {
            collectRegionSet_.emplace_back(region);
        }
    });
#ifdef NDEBUG
    if (collectRegionSet_.size() < MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Selected SCSet number: " << collectRegionSet_.size() << " are too few";
        collectRegionSet_.clear();
        return;
    }
#endif
    // Sort candidates in ascending order of alive object size.
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });

    // Limit the collect set size by the maximum evacuation size.
    int64_t leftEvacuateSize = MAX_EVACUATION_SIZE;
    size_t selectedNumber = 0;
    for (; selectedNumber < collectRegionSet_.size(); selectedNumber++) {
        Region *region = collectRegionSet_[selectedNumber];
        leftEvacuateSize -= static_cast<int64_t>(region->AliveObject());
        if (leftEvacuateSize > 0) {
            RemoveCSetRegion(region);
            allocator_->DetachFreeObjectSet(region);
            region->SetGCFlag(RegionGCFlags::IN_SHARED_COLLECT_SET);
            region->ResetAliveObject();
        } else {
            break;
        }
    }
    if (collectRegionSet_.size() > selectedNumber) {
        collectRegionSet_.resize(selectedNumber);
    }
}

void SharedOldSpace::RevertCSets()
{
    EnumerateCollectRegionSet([this](Region *region) {
        region->ClearGCFlag(RegionGCFlags::IN_SHARED_COLLECT_SET);
        AddCSetRegion(region);
        allocator_->CollectFreeObjectSet(region);
    });
    collectRegionSet_.clear();
}

void SharedOldSpace::ReclaimCSets()
{
    EnumerateCollectRegionSet([this](Region *region) {
        region->DeleteCrossRegionRSet();
        region->DestroyFreeObjectSets();
        heapRegionAllocator_->FreeRegion(region, 0, true);
    });
    collectRegionSet_.clear();
}

void SharedOldSpace::AddCSetRegion(Region *region)
{
    ASSERT(region != nullptr);
    regionList_.AddNode(region);
}

void SharedOldSpace::RemoveCSetRegion(Region *region)
{
    ASSERT(region != nullptr);
    regionList_.RemoveNode(region);
}

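// Move every region of a SharedLocalSpace into this space, taking over its live object
// accounting and free object sets; if the merged committed size exceeds the overshoot maximum
// capacity, an OOM error may be flagged and the overshoot size is increased to avoid a crash.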
void SharedOldSpace::Merge(SharedLocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    if (committedSize_ > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        if (sHeap_->CanThrowOOMError()) {
            sHeap_->ShouldThrowOOMError(true);
        }
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        // If an OOM error is going to be thrown, temporarily increase the space size to avoid a VM crash.
        IncreaseOutOfMemoryOvershootSize(committedSize_ - GetOverShootMaximumCapacity());
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}

SharedLocalSpace::SharedLocalSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_LOCAL_SPACE, initialCapacity, maximumCapacity) {}

bool SharedLocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) { // LOCV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "AddRegionToList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

void SharedLocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}

void SharedLocalSpace::Stop()
{
    Region *currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        currentRegion->SetHighWaterMark(currentRegion->GetBegin() + currentRegion->AliveObject());
    }
}

void SharedLocalSpace::ForceExpandInSharedGC(JSThread *thread)
{
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    AddRegion(region);
    allocator_->AddFree(region);
}

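// Allocate from the local space's free list; if that fails and expansion is allowed, expand the
// space (forcing the expansion and flagging an OOM error when the normal expansion fails) and
// record the allocated size in the region's alive object counter.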
uintptr_t SharedLocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0 && isExpand) {
        // Shared full GC will compress all regions and cannot recognize every thread's regions.
        if (!Expand(Runtime::GetInstance()->GetMainThread())) {
            ForceExpandInSharedGC(Runtime::GetInstance()->GetMainThread());
            sHeap_->ShouldThrowOOMError(true);
        }
        object = allocator_->Allocate(size);
        ASSERT(object != 0);
    }
    if (object != 0) {
        Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
    }
    return object;
}

SharedReadOnlySpace::SharedReadOnlySpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : Space(
        heap, heap->GetHeapRegionAllocator(), MemSpaceType::SHARED_READ_ONLY_SPACE, initialCapacity, maximumCapacity)
{
}

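// Expand the read-only space with a new region and point the bump-pointer allocator at it.
// Expansion is refused once the committed size reaches the initial capacity plus the OOM
// overshoot size, unless the heap currently needs to stop collection.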
bool SharedReadOnlySpace::Expand(JSThread *thread)
{
    if (committedSize_ >= initialCapacity_ + outOfMemoryOvershootSize_ &&
        !heap_->NeedStopCollection()) {
        return false;
    }
    uintptr_t top = allocator_.GetTop();
    auto currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        currentRegion->SetHighWaterMark(top);
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, heap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedReadOnlySpace::Expand:region is nullptr";
    }
    allocator_.Reset(region->GetBegin(), region->GetEnd());
    AddRegion(region);
    return true;
}

uintptr_t SharedReadOnlySpace::Allocate(JSThread *thread, size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    thread->CheckSafepointIfSuspended();
    LockHolder holder(allocateLock_);
    auto object = allocator_.Allocate(size);
    if (object != 0) {
        return object;
    }
    if (Expand(thread)) {
        object = allocator_.Allocate(size);
    }
    return object;
}

void SharedReadOnlySpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    size_t size = allocator_.Available();
    if (size != 0) {
        FreeObject::FillFreeObject(heap_, allocator_.GetTop(), size);
    }
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a FreeObject, it must be unpoisoned before it is accessed.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetSize();
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

SharedHugeObjectSpace::SharedHugeObjectSpace(BaseHeap *heap, HeapRegionAllocator *heapRegionAllocator,
                                             size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heapRegionAllocator, MemSpaceType::SHARED_HUGE_OBJECT_SPACE, initialCapacity, maximumCapacity)
{
    triggerLocalFullMarkLimit_ = maximumCapacity * HUGE_OBJECT_SIZE_RATIO;
}

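// Each huge object gets its own aligned region, sized to hold the object plus the Region header
// and mark bit set. Returns 0 if the committed size limit would be exceeded.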
uintptr_t SharedHugeObjectSpace::Allocate(JSThread *thread, size_t objectSize, AllocateEventType allocType)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // In huge object allocation, objectSize already reserves 8 bytes for the mark bit set.
    // In case the Region is not aligned to 16 bytes, HUGE_OBJECT_BITSET_SIZE adds 8 more bytes.
    size_t alignedSize = AlignUp(objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES);
    if (allocType == AllocateEventType::NORMAL) {
        thread->CheckSafepointIfSuspended();
        CheckAndTriggerLocalFullMark(thread, alignedSize);
    }
    LockHolder lock(allocateLock_);
    if (CommittedSizeExceed(alignedSize)) {
        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
        return 0;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, alignedSize, thread, heap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedHugeObjectSpace::Allocate:region is nullptr";
    }
    AddRegion(region);
    // The memory needs to be unpoisoned when the huge object is allocated.
    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    InvokeAllocationInspector(region->GetBegin(), objectSize);
#endif
    return region->GetBegin();
}

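// A huge object region with no mark bit set is garbage: move it from the region list to
// hugeNeedFreeList_ so ReclaimHugeRegion() can release it later.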
void SharedHugeObjectSpace::Sweep()
{
    Region *currentRegion = GetRegionList().GetFirst();
    while (currentRegion != nullptr) {
        Region *next = currentRegion->GetNext();
        bool isMarked = false;
        currentRegion->IterateAllMarkedBits([&isMarked]([[maybe_unused]] void *mem) { isMarked = true; });
        if (!isMarked) {
            GetRegionList().RemoveNode(currentRegion);
            hugeNeedFreeList_.AddNode(currentRegion);
        }
        currentRegion = next;
    }
}

size_t SharedHugeObjectSpace::GetHeapObjectSize() const
{
    return committedSize_;
}

void SharedHugeObjectSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const
{
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        objectVisitor(reinterpret_cast<TaggedObject *>(curPtr));
    });
}

void SharedHugeObjectSpace::ReclaimHugeRegion()
{
    if (hugeNeedFreeList_.IsEmpty()) {
        return;
    }
    do {
        Region *last = hugeNeedFreeList_.PopBack();
        ClearAndFreeRegion(last);
    } while (!hugeNeedFreeList_.IsEmpty());
}

void SharedHugeObjectSpace::InvokeAllocationInspector(Address object, size_t objectSize)
{
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (objectSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, objectSize, objectSize);
    }
    allocationCounter_.AdvanceAllocationInspector(objectSize);
}

void SharedHugeObjectSpace::CheckAndTriggerLocalFullMark(JSThread *thread, size_t size)
{
    if (committedSize_ >= triggerLocalFullMarkLimit_) {
        reinterpret_cast<SharedHeap*>(heap_)->TryTriggerLocalConcurrentMarking();
    } else {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        if (!thread->IsJitThread()) {
            localHeap->TryTriggerFullMarkBySharedSize(size);
        }
    }
}
} // namespace panda::ecmascript