1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ecmascript/mem/sparse_space.h"
17
18 #include "ecmascript/js_hclass-inl.h"
19 #include "ecmascript/mem/concurrent_sweeper.h"
20 #include "ecmascript/mem/free_object_set.h"
21 #include "ecmascript/mem/heap.h"
22 #include "ecmascript/mem/mem_controller.h"
23 #include "ecmascript/runtime_call_id.h"
24
25 namespace panda::ecmascript {
// Sparse (free-list managed) space: memory lives in regions whose free gaps
// are tracked in per-region free-object sets and served by a FreeListAllocator.
// The allocator is heap-allocated here; ownership/teardown is handled outside
// this translation unit (presumably in the destructor — confirm in header).
SparseSpace::SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      heap_(heap),
      liveObjectSize_(0)
{
    allocator_ = new FreeListAllocator(heap);
}
34
Initialize()35 void SparseSpace::Initialize()
36 {
37 Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE,
38 heap_->GetJSThread());
39 region->InitializeFreeObjectSets();
40 AddRegion(region);
41
42 allocator_->Initialize(region);
43 }
44
// Drop all allocation bookkeeping: rebuild an empty free list, give
// reclaimable regions back to the region allocator and zero the live counter.
void SparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}
51
// Allocate `size` bytes from this space. Fallback chain:
//   free list -> swept-region refill (while concurrently sweeping)
//   -> region expansion -> old GC (when allowGC) -> one retry without GC.
// Returns 0 when every step fails.
// CHECK_OBJECT_AND_INC_OBJ_SIZE presumably returns `object` from this
// function (after bumping the live size) on success — see macro definition.
uintptr_t SparseSpace::Allocate(size_t size, bool allowGC)
{
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    // While the concurrent sweeper still owns regions of this space, try to
    // merge what it has finished (or wait for it) and allocate again.
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (allowGC) {
        // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
        heap_->CheckAndTriggerOldGC();
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
        return object;
    }

    if (allowGC) {
        // Last resort: collect, then retry once with GC disabled to avoid
        // unbounded recursion.
        heap_->CollectGarbage(TriggerGCType::OLD_GC);
        object = Allocate(size, false);
        // Live-object size was already incremented inside the recursive call.
    }
    return object;
}
80
Expand()81 bool SparseSpace::Expand()
82 {
83 if (committedSize_ >= maximumCapacity_ + outOfMemoryOvershootSize_) {
84 LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
85 return false;
86 }
87
88 Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, heap_->GetJSThread());
89 region->InitializeFreeObjectSets();
90 AddRegion(region);
91 allocator_->AddFree(region);
92 return true;
93 }
94
// Allocation slow path used while the concurrent sweeper is running.
// First merges any regions the sweeper has already finished; if that does not
// satisfy the request, blocks until sweeping of this space completes and then
// allocates from the fully rebuilt free list.
uintptr_t SparseSpace::AllocateAfterSweepingCompleted(size_t size)
{
    ASSERT(sweepState_ == SweepState::SWEEPING);
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ConcurrentSweepingWait);
    if (TryFillSweptRegion()) {
        auto object = allocator_->Allocate(size);
        if (object != 0) {
            return object;
        }
    }
    // Parallel sweep and fill
    heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    return allocator_->Allocate(size);
}
109
// Set the space up for (possibly concurrent) sweeping: every region outside
// the collect set is queued for the sweeper, and the free list is rebuilt
// empty — it is refilled as swept regions come back.
void SparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            // Presumably snapshots the old-to-new set so mutators keep
            // recording while the sweeper consumes the snapshot — see
            // Region::SwapRSetForConcurrentSweeping.
            current->SwapRSetForConcurrentSweeping();
            AddSweepingRegion(current);
        }
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}
125
// Drain the sweeping queue, freeing the dead ranges of each region.
// isMain == true : running on the main thread — the region's remembered sets
//                  are merged back immediately.
// isMain == false: sweeper worker thread — the region is parked in sweptList_
//                  (and flagged swept) for the main thread to merge later.
void SparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Only non-main threads hand the region over via the swept list.
        if (!isMain) {
            AddSweptRegionSafe(current);
            current->SetSwept();
        } else {
            current->MergeRSetForConcurrentSweeping();
        }
        current = GetSweepingRegionSafe();
    }
}
141
// Synchronous sweep: rebuild the free list and immediately free the dead
// ranges of every region outside the collect set (no sweeper task involved).
void SparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            FreeRegion(current);
        }
    });
}
154
TryFillSweptRegion()155 bool SparseSpace::TryFillSweptRegion()
156 {
157 if (sweptList_.empty()) {
158 return false;
159 }
160 Region *region = nullptr;
161 while ((region = GetSweptRegionSafe()) != nullptr) {
162 allocator_->CollectFreeObjectSet(region);
163 region->ResetSwept();
164 region->MergeRSetForConcurrentSweeping();
165 }
166 return true;
167 }
168
FinishFillSweptRegion()169 bool SparseSpace::FinishFillSweptRegion()
170 {
171 bool ret = TryFillSweptRegion();
172 sweepState_ = SweepState::SWEPT;
173 return ret;
174 }
175
AddSweepingRegion(Region * region)176 void SparseSpace::AddSweepingRegion(Region *region)
177 {
178 sweepingList_.emplace_back(region);
179 }
180
SortSweepingRegion()181 void SparseSpace::SortSweepingRegion()
182 {
183 // Sweep low alive object size at first
184 std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
185 return first->AliveObject() < second->AliveObject();
186 });
187 }
188
GetSweepingRegionSafe()189 Region *SparseSpace::GetSweepingRegionSafe()
190 {
191 os::memory::LockHolder holder(lock_);
192 Region *region = nullptr;
193 if (!sweepingList_.empty()) {
194 region = sweepingList_.back();
195 sweepingList_.pop_back();
196 }
197 return region;
198 }
199
AddSweptRegionSafe(Region * region)200 void SparseSpace::AddSweptRegionSafe(Region *region)
201 {
202 os::memory::LockHolder holder(lock_);
203 sweptList_.emplace_back(region);
204 }
205
GetSweptRegionSafe()206 Region *SparseSpace::GetSweptRegionSafe()
207 {
208 os::memory::LockHolder holder(lock_);
209 Region *region = nullptr;
210 if (!sweptList_.empty()) {
211 region = sweptList_.back();
212 sweptList_.pop_back();
213 }
214 return region;
215 }
216
// Pull a region out of the swept list whose free set can satisfy `size`
// bytes; the region is detached from this space entirely (caller takes it).
// NOTE(review): sweptList_.empty() is read BEFORE lock_ is acquired while
// sweeper threads may append under the lock — formally a data race, though it
// can only cause a missed region. Also, OldSpace::TryToGetExclusiveRegion
// calls this while already holding lock_ — verify the mutex is recursive
// before restructuring the locking here.
Region *SparseSpace::TryToGetSuitableSweptRegion(size_t size)
{
    if (sweepState_ != SweepState::SWEEPING) {
        return nullptr;
    }
    if (sweptList_.empty()) {
        return nullptr;
    }
    os::memory::LockHolder holder(lock_);
    for (auto iter = sweptList_.begin(); iter != sweptList_.end(); iter++) {
        if (allocator_->MatchFreeObjectSet(*iter, size)) {
            Region *region = *iter;
            // Reclaim the region's remembered-set snapshot and unhook it from
            // this space's accounting before handing it out.
            region->ResetSwept();
            region->MergeRSetForConcurrentSweeping();
            RemoveRegion(region);
            DecreaseLiveObjectSize(region->AliveObject());
            sweptList_.erase(iter);
            return region;
        }
    }
    return nullptr;
}
239
FreeRegion(Region * current,bool isMain)240 void SparseSpace::FreeRegion(Region *current, bool isMain)
241 {
242 uintptr_t freeStart = current->GetBegin();
243 current->IterateAllMarkedBits([this, ¤t, &freeStart, isMain](void *mem) {
244 ASSERT(current->InRange(ToUintPtr(mem)));
245 auto header = reinterpret_cast<TaggedObject *>(mem);
246 auto klass = header->GetClass();
247 auto size = klass->SizeFromJSHClass(header);
248
249 uintptr_t freeEnd = ToUintPtr(mem);
250 if (freeStart != freeEnd) {
251 FreeLiveRange(current, freeStart, freeEnd, isMain);
252 }
253 freeStart = freeEnd + size;
254 });
255 uintptr_t freeEnd = current->GetEnd();
256 if (freeStart != freeEnd) {
257 FreeLiveRange(current, freeStart, freeEnd, isMain);
258 }
259 }
260
FreeLiveRange(Region * current,uintptr_t freeStart,uintptr_t freeEnd,bool isMain)261 void SparseSpace::FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
262 {
263 heap_->GetSweeper()->ClearRSetInRange(current, freeStart, freeEnd);
264 allocator_->Free(freeStart, freeEnd - freeStart, isMain);
265 }
266
// Visit every live object in the space by walking each region linearly:
// live objects are visited, FreeObject gaps are skipped by their recorded
// size. Collect-set regions (about to be evacuated) are skipped entirely.
void SparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    // Convert the allocator's current bump area into a FreeObject so the
    // linear walk below sees a well-formed heap.
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // Unpoison the header first: curPtr may point at a (poisoned)
            // FreeObject, and we must read it to classify the slot.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                // Gap: read its size with ASan poisoning temporarily lifted.
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}
296
IterateOldToNewOverObjects(const std::function<void (TaggedObject * object,JSTaggedValue value)> & visitor) const297 void SparseSpace::IterateOldToNewOverObjects(
298 const std::function<void(TaggedObject *object, JSTaggedValue value)> &visitor) const
299 {
300 auto cb = [visitor](void *mem) -> bool {
301 ObjectSlot slot(ToUintPtr(mem));
302 visitor(reinterpret_cast<TaggedObject *>(mem), JSTaggedValue(slot.GetTaggedType()));
303 return true;
304 };
305 EnumerateRegions([cb] (Region *region) {
306 region->IterateAllSweepingRSetBits(cb);
307 region->IterateAllOldToNewBits(cb);
308 });
309 }
310
// Bytes occupied by live objects, as accumulated during sweeping/merging.
size_t SparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}
315
// Forward allocation accounting to the free-list allocator.
void SparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}
320
// Total bytes ever handed out by the allocator (allocator-side counter).
size_t SparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}
325
// Unhook the region's free-object sets from the allocator's free list.
void SparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}
330
// Old generation: a sparse space tagged OLD_SPACE.
OldSpace::OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, OLD_SPACE, initialCapacity, maximumCapacity) {}
333
// Detach a region able to hold `size` bytes so the caller can use it
// exclusively (e.g. as an evacuation target). Falls back to the swept list
// while concurrent sweeping is in progress.
// NOTE(review): this holds lock_ and may call TryToGetSuitableSweptRegion(),
// which acquires lock_ again — verify the mutex is recursive, or that this
// path cannot deadlock in practice.
Region *OldSpace::TryToGetExclusiveRegion(size_t size)
{
    os::memory::LockHolder lock(lock_);
    uintptr_t result = allocator_->LookupSuitableFreeObject(size);
    if (result != 0) {
        // Remove region from global old space
        Region *region = Region::ObjectAddressToRange(result);
        RemoveRegion(region);
        allocator_->DetachFreeObjectSet(region);
        DecreaseLiveObjectSize(region->AliveObject());
        return region;
    }
    if (sweepState_ == SweepState::SWEEPING) {
        return TryToGetSuitableSweptRegion(size);
    }
    return nullptr;
}
351
// Move every region of an evacuation-local space into this old space,
// transferring region-list membership, live-size accounting and free sets.
// If the merge pushes committed size (plus huge-object space) past the
// overshoot maximum, an OOM is flagged and the overshoot slack is grown
// temporarily so the VM can throw instead of crashing.
void OldSpace::Merge(LocalSpace *localSpace)
{
    // Seal the local space's bump area so its free list is complete.
    localSpace->FreeBumpPoint();
    os::memory::LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    size_t hugeSpaceCommitSize = heap_->GetHugeObjectSpace()->GetCommittedSize();
    if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        heap_->ShouldThrowOOMError(true);
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        // if throw OOM, temporarily increase space size to avoid vm crash
        IncreaseOutOfMemoryOvershootSize(committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity());
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}
377
// Choose the collect set (CSet) for partial GC. Regions with little live data
// are the cheapest to evacuate. Only runs once sweeping has fully finished.
void OldSpace::SelectCSet()
{
    if (sweepState_ != SweepState::SWEPT) {
        return;
    }
    CheckRegionSize();
    // Step 1: candidates are regions that are NOT mostly alive — i.e. their
    // live-object ratio is below the MostObjectAlive() threshold. (The old
    // comment claiming "alive object larger than 80%" described the excluded
    // regions, not the selected ones.)
    EnumerateRegions([this](Region *region) {
        if (!region->MostObjectAlive()) {
            collectRegionSet_.emplace_back(region);
        }
    });
    if (collectRegionSet_.size() < PARTIAL_GC_MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Select CSet failure: number is too few";
        collectRegionSet_.clear();
        return;
    }
    // Step 2: emptiest regions first, so the evacuation budget stretches
    // across as many regions as possible.
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });
    // Step 3: count how many regions fit in the evacuation byte budget.
    unsigned long selectedRegionNumber = 0;
    int64_t evacuateSize = PARTIAL_GC_MAX_EVACUATION_SIZE;
    EnumerateCollectRegionSet([&](Region *current) {
        if (evacuateSize > 0) {
            selectedRegionNumber++;
            evacuateSize -= current->AliveObject();
        } else {
            // NB: returns from the lambda only; remaining regions are still
            // enumerated (and skipped) one by one.
            return;
        }
    });
    OPTIONAL_LOG(heap_->GetEcmaVM(), INFO) << "Max evacuation size is 4_MB. The CSet region number: "
        << selectedRegionNumber;
    selectedRegionNumber = std::max(selectedRegionNumber, GetSelectedRegionNumber());
    if (collectRegionSet_.size() > selectedRegionNumber) {
        collectRegionSet_.resize(selectedRegionNumber);
    }

    // Step 4: detach the chosen regions from this space and tag them as part
    // of the collect set.
    EnumerateCollectRegionSet([&](Region *current) {
        RemoveRegion(current);
        DecreaseLiveObjectSize(current->AliveObject());
        allocator_->DetachFreeObjectSet(current);
        current->SetGCFlag(RegionGCFlags::IN_COLLECT_SET);
    });
    sweepState_ = SweepState::NO_SWEEP;
    OPTIONAL_LOG(heap_->GetEcmaVM(), INFO) << "Select CSet success: number is " << collectRegionSet_.size();
}
425
// Debug-only accounting check: live + wasted + free should add up to the
// space's total object size. Logs on mismatch; never aborts.
void OldSpace::CheckRegionSize()
{
#ifndef NDEBUG
    // The counters are only stable once concurrent sweeping has finished.
    if (sweepState_ == SweepState::SWEEPING) {
        heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    }
    size_t available = allocator_->GetAvailableSize();
    size_t wasted = allocator_->GetWastedSize();
    if (GetHeapObjectSize() + wasted + available != objectSize_) {
        LOG_GC(DEBUG) << "Actual live object size:" << GetHeapObjectSize()
                      << ", free object size:" << available
                      << ", wasted size:" << wasted
                      << ", but exception total size:" << objectSize_;
    }
#endif
}
442
RevertCSet()443 void OldSpace::RevertCSet()
444 {
445 EnumerateCollectRegionSet([&](Region *region) {
446 region->ClearGCFlag(RegionGCFlags::IN_COLLECT_SET);
447 AddRegion(region);
448 allocator_->CollectFreeObjectSet(region);
449 IncreaseLiveObjectSize(region->AliveObject());
450 });
451 collectRegionSet_.clear();
452 }
453
ReclaimCSet()454 void OldSpace::ReclaimCSet()
455 {
456 EnumerateCollectRegionSet([this](Region *region) {
457 region->DeleteCrossRegionRSet();
458 region->DeleteOldToNewRSet();
459 region->DeleteSweepingRSet();
460 region->DestroyFreeObjectSets();
461 heapRegionAllocator_->FreeRegion(region);
462 });
463 collectRegionSet_.clear();
464 }
465
// Per-GC-task evacuation space; its regions are merged into OldSpace later.
LocalSpace::LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, LOCAL_SPACE, initialCapacity, maximumCapacity) {}
468
AddRegionToList(Region * region)469 bool LocalSpace::AddRegionToList(Region *region)
470 {
471 if (committedSize_ >= maximumCapacity_) {
472 LOG_ECMA_MEM(FATAL) << "AddRegionTotList::Committed size " << committedSize_ << " of local space is too big.";
473 return false;
474 }
475 AddRegion(region);
476 allocator_->CollectFreeObjectSet(region);
477 IncreaseLiveObjectSize(region->AliveObject());
478 return true;
479 }
480
// Seal the allocator's current bump area so the space's free list is complete.
void LocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}
485
Stop()486 void LocalSpace::Stop()
487 {
488 if (GetCurrentRegion() != nullptr) {
489 GetCurrentRegion()->SetHighWaterMark(allocator_->GetTop());
490 }
491 }
492
// Space for objects that must never be moved by the GC.
NonMovableSpace::NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::NON_MOVABLE, initialCapacity, maximumCapacity)
{
}
497
// App-spawn snapshot space; fixed capacity (initial == maximum).
AppSpawnSpace::AppSpawnSpace(Heap *heap, size_t initialCapacity)
    : SparseSpace(heap, MemSpaceType::APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}
502
IterateOverMarkedObjects(const std::function<void (TaggedObject * object)> & visitor) const503 void AppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
504 {
505 EnumerateRegions([&](Region *current) {
506 current->IterateAllMarkedBits([&](void *mem) {
507 ASSERT(current->InRange(ToUintPtr(mem)));
508 visitor(reinterpret_cast<TaggedObject *>(mem));
509 });
510 });
511 }
512
Allocate(size_t size,bool isExpand)513 uintptr_t LocalSpace::Allocate(size_t size, bool isExpand)
514 {
515 auto object = allocator_->Allocate(size);
516 if (object == 0) {
517 if (isExpand && Expand()) {
518 object = allocator_->Allocate(size);
519 }
520 }
521 if (object != 0) {
522 Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
523 }
524 return object;
525 }
526
// Space holding generated machine code objects.
MachineCodeSpace::MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::MACHINE_CODE_SPACE, initialCapacity, maximumCapacity)
{
}
531 } // namespace panda::ecmascript
532