/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/sparse_space.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/heap-inl.h"

namespace panda::ecmascript {
SparseSpace::SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      localHeap_(heap),
      liveObjectSize_(0)
{
    allocator_ = new FreeListAllocator<FreeObject>(heap);
}

void SparseSpace::Initialize()
{
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    region->SetLocalHeap(reinterpret_cast<uintptr_t>(localHeap_));
    AddRegion(region);

    allocator_->Initialize(region);
}

void SparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}

void SparseSpace::ResetTopPointer(uintptr_t top)
{
    allocator_->ResetTopPointer(top);
}

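// Allocation slow path for sparse spaces other than OldSpace: try the free list first, then retry after the
// concurrent sweeper finishes, then (optionally) trigger an old GC, expand the space, and finally fall back to a
// full OLD_GC before one last attempt. CHECK_OBJECT_AND_INC_OBJ_SIZE returns early once an allocation succeeds.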
uintptr_t SparseSpace::Allocate(size_t size, bool allowGC)
{
    ASSERT(spaceType_ != MemSpaceType::OLD_SPACE);
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    // Check whether it is necessary to trigger an old GC before expanding, to avoid OOM risk.
    if (allowGC && localHeap_->CheckAndTriggerOldGC()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (allowGC) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = Allocate(size, false);
        // Size has already been incremented
    }
    return object;
}

bool SparseSpace::Expand()
{
    if (CommittedSizeExceed()) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    region->SetLocalHeap(reinterpret_cast<uintptr_t>(localHeap_));
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}

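// Called while the concurrent sweeper is still running: first try to reuse regions that have already been swept;
// if that is not enough, wait for the sweeping task of this space to finish and retry the free-list allocation.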
uintptr_t SparseSpace::AllocateAfterSweepingCompleted(size_t size)
{
    ASSERT(sweepState_ == SweepState::SWEEPING);
    if (TryFillSweptRegion()) {
        auto object = allocator_->Allocate(size);
        if (object != 0) {
            return object;
        }
    }
    // Parallel sweep and fill
    localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    return allocator_->Allocate(size);
}

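// Collects every region outside the collect set into the sweeping list, swaps their remembered sets for
// concurrent sweeping, and rebuilds the free list so that sweeping repopulates it from scratch.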
void SparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    ASSERT(GetSweepingRegionSafe() == nullptr);
    ASSERT(GetSweptRegionSafe() == nullptr);
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            ASSERT(!current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT));
            if (UNLIKELY(localHeap_->ShouldVerifyHeap() &&
                         current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT))) { // LOCV_EXCL_BR_LINE
                LOG_ECMA(FATAL) << "Region should not be swept before PrepareSweeping: " << current;
            }
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            current->SwapOldToNewRSetForCS();
            current->SwapLocalToShareRSetForCS();
            AddSweepingRegion(current);
        }
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}

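// Drains the sweeping list. When called from a worker thread (!isMain), swept regions are parked in the swept
// list for later merging; on the main thread the remembered sets are merged back immediately.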
void SparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Regions swept off the main thread are added to the swept list; the main thread merges RSets directly.
        if (!isMain) {
            AddSweptRegionSafe(current);
        } else {
            current->MergeOldToNewRSetForCS();
            current->MergeLocalToShareRSetForCS();
        }
        current = GetSweepingRegionSafe();
    }
}

void SparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            FreeRegion(current);
        }
    });
}

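// Moves all regions that have already been swept by the concurrent sweeper back into the allocator's free
// object sets and merges their remembered sets. Returns false if there was nothing to collect.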
bool SparseSpace::TryFillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
        region->ResetSwept();
        region->MergeOldToNewRSetForCS();
        region->MergeLocalToShareRSetForCS();
    }
    return true;
}

bool SparseSpace::FinishFillSweptRegion()
{
    bool ret = TryFillSweptRegion();
    sweepState_ = SweepState::SWEPT;
    return ret;
}

void SparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}

void SparseSpace::SortSweepingRegion()
{
    // Sort in descending order of alive object size, so regions with the fewest live objects are popped
    // from the back and swept first.
    std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
        return first->AliveObject() > second->AliveObject();
    });
}

Region *SparseSpace::GetSweepingRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();
        sweepingList_.pop_back();
    }
    return region;
}

void SparseSpace::AddSweptRegionSafe(Region *region)
{
    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
    region->SetSwept();
}

Region *SparseSpace::GetSweptRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();
        sweptList_.pop_back();
    }
    return region;
}

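// Scans the swept list for a region whose free object set can satisfy an allocation of |size| bytes and, if
// found, removes it from the list so the caller can take exclusive ownership of it.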
Region *SparseSpace::TryToGetSuitableSweptRegion(size_t size)
{
    if (sweepState_ != SweepState::SWEEPING) {
        return nullptr;
    }
    if (sweptList_.empty()) {
        return nullptr;
    }
    LockHolder holder(lock_);
    for (auto iter = sweptList_.begin(); iter != sweptList_.end(); iter++) {
        if (allocator_->MatchFreeObjectSet(*iter, size)) {
            Region *region = *iter;
            sweptList_.erase(iter);
            return region;
        }
    }
    return nullptr;
}

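// Walks the mark bitmap of |current| and releases every unmarked gap between live objects (and the tail of the
// region) back to the free-list allocator via FreeLiveRange.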
void SparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &current, &freeStart, isMain](void *mem) {
        ASSERT(current->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(current, freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(current, freeStart, freeEnd, isMain);
    }
}

void SparseSpace::FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    localHeap_->GetSweeper()->ClearRSetInRange(current, freeStart, freeEnd);
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}

void SparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned before it can be inspected.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

void SparseSpace::IterateOldToNewOverObjects(
    const std::function<void(TaggedObject *object, JSTaggedValue value)> &visitor) const
{
    auto cb = [visitor](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        visitor(reinterpret_cast<TaggedObject *>(mem), JSTaggedValue(slot.GetTaggedType()));
        return true;
    };
    EnumerateRegions([cb] (Region *region) {
        region->IterateAllSweepingRSetBits(cb);
        region->IterateAllOldToNewBits(cb);
    });
}

size_t SparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}

void SparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}

size_t SparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}

void SparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}

void SparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
    ASSERT(size <= alignedSize);
    if (LIKELY(!allocationCounter_.IsActive())) { // LCOV_EXCL_BR_LINE
        return;
    }
    if (alignedSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
    }
    allocationCounter_.AdvanceAllocationInspector(alignedSize);
}

OldSpace::OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, OLD_SPACE, initialCapacity, maximumCapacity) {}

Region *OldSpace::TrySweepToGetSuitableRegion(size_t size)
{
    // Try sweeping one region to get space for the allocation.
    // Since sweepingList_ is sorted, we only need to check once.
    Region *availableRegion = GetSweepingRegionSafe();
    if (availableRegion != nullptr) {
        FreeRegion(availableRegion, false);
        // If the region now has enough free space for the requested size, return it so it can be detached
        // from this space for local-space use; otherwise add it to sweptList_.
        if (allocator_->MatchFreeObjectSet(availableRegion, size)) {
            return availableRegion;
        } else {
            AddSweptRegionSafe(availableRegion);
        }
    }
    return nullptr;
}

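// Tries to detach an entire region from the old space so the caller can use it exclusively: while sweeping,
// look for (or sweep) a region with a matching free set; otherwise search the free list under the space lock.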
Region *OldSpace::TryToGetExclusiveRegion(size_t size)
{
    if (sweepState_ == SweepState::SWEEPING) {
        Region *availableRegion = nullptr;
        availableRegion = TryToGetSuitableSweptRegion(size);
        if (availableRegion == nullptr) {
            availableRegion = TrySweepToGetSuitableRegion(size);
        }
        if (availableRegion) {
            FreeRegionFromSpace(availableRegion);
        }
        return availableRegion;
    } else {
        LockHolder lock(lock_);
        uintptr_t result = allocator_->LookupSuitableFreeObject(size);
        if (result != 0) {
            // Remove region from global old space
            Region *region = Region::ObjectAddressToRange(result);
            RemoveRegion(region);
            allocator_->DetachFreeObjectSet(region);
            DecreaseLiveObjectSize(region->AliveObject());
            return region;
        }
    }
    return nullptr;
}

void OldSpace::FreeRegionFromSpace(Region *region)
{
    region->ResetSwept();
    region->MergeOldToNewRSetForCS();
    region->MergeLocalToShareRSetForCS();
    LockHolder holder(lock_);
    RemoveRegion(region);
    DecreaseLiveObjectSize(region->AliveObject());
}

uintptr_t OldSpace::AllocateFast(size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Note: CHECK_OBJECT_AND_INC_OBJ_SIZE returns |object| on success, so falling through to the end means
    // the fast-path allocation failed.
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }
    return 0;
}

uintptr_t OldSpace::AllocateSlow(size_t size, bool tryFast)
{
    if (tryFast) {
        uintptr_t object = AllocateFast(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }
    if (Expand()) {
        uintptr_t object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }
    return 0;
}

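// Moves every region of |localSpace| into this old space: regions are re-linked, their free object sets are
// collected into the old-space allocator, and live/allocated sizes are transferred. If the merged committed size
// (plus the huge object space) exceeds the overshoot limit, an OOM may be recorded and the limit temporarily raised.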
void OldSpace::Merge(LocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    size_t hugeSpaceCommitSize = localHeap_->GetHugeObjectSpace()->GetCommittedSize();
    if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        if (localHeap_->CanThrowOOMError()) {
            localHeap_->ShouldThrowOOMError(true);
        }
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        size_t committedOverSizeLimit = committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity();
        IncreaseCommittedOverSizeLimit(committedOverSizeLimit);
        // If an OOM is thrown, temporarily increase the space size to avoid a VM crash.
        IncreaseOutOfMemoryOvershootSize(committedOverSizeLimit);
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}

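// Builds the collect set (CSet) for partial GC: pick candidate regions by alive object size, sort them so the
// emptiest regions come first, cap the set by the evacuation size budget, then detach the chosen regions from
// the space so their live objects can be evacuated.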
void OldSpace::SelectCSet()
{
    if (localHeap_->IsMarking()) {
        localHeap_->GetEcmaGCStats()->RecordStatisticBeforeGC(TriggerGCType::OLD_GC, GCReason::OTHER);
    }
    if (localHeap_->InSensitiveStatus()) {
        return;
    }
    CheckRegionSize();
    // 1. Select candidate regions by alive object size.
    int64_t evacuateSizeLimit = 0;
    if (!localHeap_->IsInBackground()) {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_FOREGROUND;
        EnumerateRegions([this](Region *region) {
            if (!region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    } else {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_BACKGROUND;
        EnumerateRegions([this](Region *region) {
            if (region->BelowCompressThreasholdAlive() || !region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    }
    if (collectRegionSet_.size() < PARTIAL_GC_MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Select CSet failure: number is too few";
        collectRegionSet_.clear();
        return;
    }
    // 2. Sort in ascending order of alive object size.
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });

    // 3. Limit the CSet size by the evacuation budget.
    unsigned long selectedRegionNumber = 0;
    int64_t expectFreeSize =
        static_cast<int64_t>(localHeap_->GetCommittedSize() - localHeap_->GetHeapAliveSizeAfterGC());
    int64_t evacuateSize = std::min(evacuateSizeLimit, expectFreeSize);
    EnumerateCollectRegionSet([&](Region *current) {
        if (evacuateSize > 0) {
            selectedRegionNumber++;
            evacuateSize -= current->AliveObject();
        } else {
            return;
        }
    });
    LOG_ECMA_MEM(DEBUG) << "Max evacuation size is 6_MB. The CSet region number: "
                        << selectedRegionNumber;
    selectedRegionNumber = std::max(selectedRegionNumber, GetSelectedRegionNumber());
    if (collectRegionSet_.size() > selectedRegionNumber) {
        collectRegionSet_.resize(selectedRegionNumber);
    }

    localHeap_->GetEcmaGCStats()->SetRecordData(
        RecordData::COLLECT_REGION_SET_SIZE, collectRegionSet_.size() * Region::AVERAGE_REGION_EVACUATE_SIZE);
    EnumerateCollectRegionSet([&](Region *current) {
        RemoveRegion(current);
        DecreaseLiveObjectSize(current->AliveObject());
        allocator_->DetachFreeObjectSet(current);
        current->SetGCFlag(RegionGCFlags::IN_COLLECT_SET);
    });
    sweepState_ = SweepState::NO_SWEEP;
    LOG_ECMA_MEM(DEBUG) << "Select CSet success: number is " << collectRegionSet_.size();
}

void OldSpace::CheckRegionSize()
{
#ifndef NDEBUG
    if (sweepState_ == SweepState::SWEEPING) {
        localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    }
    size_t available = allocator_->GetAvailableSize();
    size_t wasted = allocator_->GetWastedSize();
    if (GetHeapObjectSize() + wasted + available != objectSize_) {
        LOG_GC(DEBUG) << "Actual live object size:" << GetHeapObjectSize()
                      << ", free object size:" << available
                      << ", wasted size:" << wasted
                      << ", but expected total size:" << objectSize_;
    }
#endif
}

void OldSpace::RevertCSet()
{
    EnumerateCollectRegionSet([&](Region *region) {
        region->ClearGCFlag(RegionGCFlags::IN_COLLECT_SET);
        AddRegion(region);
        allocator_->CollectFreeObjectSet(region);
        IncreaseLiveObjectSize(region->AliveObject());
    });
    collectRegionSet_.clear();
}

void OldSpace::ReclaimCSet()
{
    size_t cachedSize = localHeap_->GetRegionCachedSize();
    EnumerateCollectRegionSet([this, &cachedSize](Region *region) {
        region->DeleteCrossRegionRSet();
        region->DeleteOldToNewRSet();
        region->DeleteLocalToShareRSet();
        region->DeleteSweepingOldToNewRSet();
        region->DeleteSweepingLocalToShareRSet();
        region->DestroyFreeObjectSets();
        heapRegionAllocator_->FreeRegion(region, cachedSize);
    });
    collectRegionSet_.clear();
}

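// Promotes a whole region from the semi space into the old space (the "new to old" path), provided the extra
// capacity still fits under maximumCapacity_. The region keeps its objects; only flags and accounting change.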
bool OldSpace::SwapRegion(Region *region, SemiSpace *fromSpace)
{
    if (committedSize_ + region->GetCapacity() > maximumCapacity_) {
        return false;
    }
    fromSpace->RemoveRegion(region);
    region->InitializeFreeObjectSets();
    region->ResetRegionFlag(RegionSpaceFlag::IN_OLD_SPACE, RegionGCFlags::IN_NEW_TO_OLD_SET);

    regionList_.AddNodeToFront(region);
    IncreaseCommitted(region->GetCapacity());
    IncreaseObjectSize(region->GetSize());
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

void OldSpace::PrepareSweepNewToOldRegions()
{
    EnumerateRegions([this](Region *current) {
        if (current->InNewToOldSet()) {
            ASSERT(!current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT));
            if (UNLIKELY(localHeap_->ShouldVerifyHeap() &&
                         current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT))) { // LOCV_EXCL_BR_LINE
                LOG_ECMA(FATAL) << "Region should not be swept before PrepareSweeping: " << current;
            }
            current->ResetWasted();
            current->SwapOldToNewRSetForCS();
            current->SwapLocalToShareRSetForCS();
            current->ClearGCFlag(RegionGCFlags::IN_NEW_TO_OLD_SET);
            AddSweepingRegion(current);
        }
    });
    sweepState_ = SweepState::SWEEPING;
}

void OldSpace::SweepNewToOldRegions()
{
    EnumerateRegions([this](Region *current) {
        if (current->InNewToOldSet()) {
            current->ResetWasted();
            current->ClearGCFlag(RegionGCFlags::IN_NEW_TO_OLD_SET);
            FreeRegion(current);
        }
    });
}

LocalSpace::LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, LOCAL_SPACE, initialCapacity, maximumCapacity) {}

bool LocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) { // LOCV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "AddRegionToList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

void LocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}

void LocalSpace::Stop()
{
    Region *currentRegion = GetCurrentRegion();
    if (GetCurrentRegion() != nullptr) {
        // Do not use allocator_->GetTop(), because it may point to a free object from another region.
        currentRegion->SetHighWaterMark(currentRegion->GetBegin() + currentRegion->AliveObject());
    }
}

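// If the non-movable space is already fully committed and its live size exceeds MAX_NONMOVABLE_LIVE_OBJ_SIZE,
// trigger an old GC (unless one is already requested or collection is currently suppressed) before allocating.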
uintptr_t NonMovableSpace::CheckAndAllocate(size_t size)
{
    if (maximumCapacity_ == committedSize_ && GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE &&
        !localHeap_->GetOldGCRequested() && !localHeap_->NeedStopCollection()) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
    }
    return Allocate(size);
}

NonMovableSpace::NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::NON_MOVABLE, initialCapacity, maximumCapacity)
{
}

AppSpawnSpace::AppSpawnSpace(Heap *heap, size_t initialCapacity)
    : SparseSpace(heap, MemSpaceType::APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}

uintptr_t AppSpawnSpace::AllocateSync(size_t size)
{
    LockHolder holder(mutex_);
    return Allocate(size);
}

void AppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}

void LocalSpace::ForceExpandInGC()
{
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    region->SetLocalHeap(reinterpret_cast<uintptr_t>(localHeap_));
    AddRegion(region);
    allocator_->AddFree(region);
}

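// LocalSpace allocation used while a GC is in progress: if the free list fails and expansion is allowed, expand
// normally, or force-expand and flag an OOM error so the collection can finish instead of crashing mid-way.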
uintptr_t LocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0 && isExpand) {
        if (!Expand()) {
            ForceExpandInGC();
            localHeap_->ShouldThrowOOMError(true);
        }
        object = allocator_->Allocate(size);
        ASSERT(object != 0);
    }
    if (object != 0) {
        Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
    }
    return object;
}

MachineCodeSpace::MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::MACHINE_CODE_SPACE, initialCapacity, maximumCapacity)
{
}

MachineCodeSpace::~MachineCodeSpace()
{
    if (localHeap_->GetEcmaVM()->GetJSOptions().GetEnableJitFort()) {
        if (jitFort_) {
            delete jitFort_;
            jitFort_ = nullptr;
        }
    }
}

void MachineCodeSpace::PrepareSweeping()
{
    // Fill the bump-pointer gap with a free object before SparseSpace::PrepareSweeping rebuilds the free list;
    // otherwise setting the free object may fail while GetMachineCodeObject is iterating this space.
    allocator_->FillBumpPointer();
    SparseSpace::PrepareSweeping();
    if (jitFort_) {
        jitFort_->PrepareSweeping();
    }
}

void MachineCodeSpace::Sweep()
{
    SparseSpace::Sweep();
    if (jitFort_) {
        jitFort_->Sweep();
    }
}

void MachineCodeSpace::AsyncSweep(bool isMain)
{
    LockHolder holder(asyncSweepMutex_);
    SparseSpace::AsyncSweep(isMain);
    if (jitFort_) {
        jitFort_->AsyncSweep();
    }
}

uintptr_t MachineCodeSpace::JitFortAllocate(MachineCodeDesc *desc)
{
    if (!jitFort_) {
        jitFort_ = new JitFort();
    }
    localHeap_->GetSweeper()->EnsureTaskFinishedNoCheck(spaceType_);
    return jitFort_->Allocate(desc);
}

uintptr_t MachineCodeSpace::Allocate(size_t size, bool allowGC)
{
    return SparseSpace::Allocate(size, allowGC);
}

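// Machine-code allocation variant that also accounts for the JIT Fort instructions: the live size is booked as
// size + desc->instructionsSize, while the fallback chain mirrors SparseSpace::Allocate.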
uintptr_t MachineCodeSpace::Allocate(size_t size, MachineCodeDesc *desc, bool allowGC)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Include the JitFort allocation size in the space's LiveObjectSize and the region's AliveObject size
    // via CHECK_OBJECT_AND_INC_OBJ_SIZE. This could be a problem for InvokeAllocationInspector when the
    // instructions are separated from the MachineCode object into the JitFort space.

    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    // Check whether it is necessary to trigger an old GC before expanding, to avoid OOM risk.
    if (allowGC && localHeap_->CheckAndTriggerOldGC()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    if (allowGC) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = Allocate(size, desc, false);
        // Size has already been incremented
    }
    return object;
}

size_t MachineCodeSpace::CheckMachineCodeObject(uintptr_t curPtr, uintptr_t &machineCode, uintptr_t pc)
{
    auto freeObject = FreeObject::Cast(curPtr);
    size_t objSize = 0;
    if (!freeObject->IsFreeObject()) {
        auto obj = MachineCode::Cast(reinterpret_cast<TaggedObject*>(curPtr));
        if (obj->IsInText(pc)) {
            machineCode = curPtr;
        }
        objSize = obj->GetClass()->SizeFromJSHClass(obj);
    } else {
        objSize = freeObject->Available();
    }
    return objSize;
}

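// Looks up the MachineCode object whose text section contains |pc| by walking the regions of this space (and the
// JIT Fort range) object by object. Returns 0 if no matching machine code object is found.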
uintptr_t MachineCodeSpace::GetMachineCodeObject(uintptr_t pc)
{
    uintptr_t machineCode = 0;
    LockHolder holder(asyncSweepMutex_);
    allocator_->FillBumpPointer();

    EnumerateRegions([&](Region *region) {
        if (machineCode != 0) {
            return;
        }
        if (region->InCollectSet() || (!region->InRange(pc) && !InJitFortRange(pc))) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            size_t objSize = CheckMachineCodeObject(curPtr, machineCode, pc);
            if (machineCode != 0) {
                return;
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
    return machineCode;
}
} // namespace panda::ecmascript