/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/sparse_space.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/concurrent_sweeper.h"
#include "ecmascript/mem/free_object_set.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/mem_controller.h"
#include "ecmascript/mem/remembered_set.h"
#include "ecmascript/runtime_call_id.h"

namespace panda::ecmascript {
SparseSpace::SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, type, initialCapacity, maximumCapacity), sweepState_(SweepState::NO_SWEEP), liveObjectSize_(0)
{
    allocator_ = new FreeListAllocator(heap);
}

void SparseSpace::Initialize()
{
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE);
    region->InitializeSet();
    region->SetFlag(GetRegionFlag());
    if (spaceType_ == MemSpaceType::MACHINE_CODE_SPACE) {
        int res = region->SetCodeExecutableAndReadable();
        LOG_ECMA_MEM(DEBUG) << "SparseSpace::Initialize() SetCodeExecutableAndReadable" << res;
    }
    AddRegion(region);

    allocator_->Initialize(region);
}

void SparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
}

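// Try the free list first. If concurrent sweeping is in progress, fall back to regions that have
// already been swept; otherwise optionally trigger an old GC, expand the space, and as a last
// resort collect garbage and retry.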
uintptr_t SparseSpace::Allocate(size_t size, bool isAllowGC)
{
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (isAllowGC) {
        // Check whether an old GC needs to be triggered before expanding, to reduce the risk of OOM.
        heap_->CheckAndTriggerOldGC();
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
        return object;
    }

    if (isAllowGC) {
        heap_->CollectGarbage(TriggerGCType::OLD_GC);
        object = Allocate(size, false);
        // The live object size has already been incremented inside the recursive call.
    }
    return object;
}

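// Expand the space by one region, unless the committed size has already reached the initial capacity.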
bool SparseSpace::Expand()
{
    if (committedSize_ >= initialCapacity_) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of old space is too big. ";
        return false;
    }

    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE);
    region->SetFlag(GetRegionFlag());
    if (spaceType_ == MemSpaceType::MACHINE_CODE_SPACE) {
        int res = region->SetCodeExecutableAndReadable();
        LOG_ECMA_MEM(DEBUG) << "MachineCodeSpace::Expand() SetCodeExecutableAndReadable" << res;
    }
    region->InitializeSet();
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}

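// Called while concurrent sweeping is running: first consume regions that have already been swept;
// if allocation still fails, wait for this space's sweeping task to finish and try once more.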
uintptr_t SparseSpace::AllocateAfterSweepingCompleted(size_t size)
{
    ASSERT(sweepState_ == SweepState::SWEEPING);
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ConcurrentSweepingWait);
    if (FillSweptRegion()) {
        auto object = allocator_->Allocate(size);
        if (object != 0) {
            liveObjectSize_ += size;
            return object;
        }
    }
    // Wait for the parallel sweeping task of this space to finish, then allocate again.
    heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    return allocator_->Allocate(size);
}

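// Register all regions outside the collect set as sweeping candidates and rebuild the free list
// before the concurrent sweeper starts.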
void SparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncrementLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            AddSweepingRegion(current);
        }
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}

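// Drain the sweeping list. Regions swept on the main thread are handled in place; regions swept by
// worker threads are queued so FillSweptRegion can collect their free sets later.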
void SparseSpace::AsyncSweeping(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Regions swept on the main thread do not need to be queued.
        if (!isMain) {
            AddSweptRegionSafe(current);
        }
        current = GetSweepingRegionSafe();
    }
}

void SparseSpace::Sweeping()
{
    liveObjectSize_ = 0;
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncrementLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            FreeRegion(current);
        }
    });
}

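// Hand the free object sets of already-swept regions back to the allocator. Returns false when no
// swept region is available yet.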
bool SparseSpace::FillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
    }
    sweepState_ = SweepState::SWEPT;
    return true;
}

void SparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}

void SparseSpace::SortSweepingRegion()
{
    // Sweep regions with low alive object size first.
    std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });
}

Region *SparseSpace::GetSweepingRegionSafe()
{
    os::memory::LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();
        sweepingList_.pop_back();
    }
    return region;
}

void SparseSpace::AddSweptRegionSafe(Region *region)
{
    os::memory::LockHolder holder(lock_);
    sweptList_.emplace_back(region);
}

Region *SparseSpace::GetSweptRegionSafe()
{
    os::memory::LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();
        sweptList_.pop_back();
    }
    return region;
}

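// Walk the mark bitmap of a region and return the gaps between live objects to the free list.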
void SparseSpace::FreeRegion(Region *current, bool isMain)
{
    auto markBitmap = current->GetMarkBitmap();
    ASSERT(markBitmap != nullptr);
    uintptr_t freeStart = current->GetBegin();
    markBitmap->IterateOverMarkedChunks([this, &current, &freeStart, isMain](void *mem) {
        ASSERT(current->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(current, freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(current, freeStart, freeEnd, isMain);
    }
}

void SparseSpace::FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    heap_->ClearSlotsRange(current, freeStart, freeEnd);
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}

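// Visit every live object in the space, skipping free-list fillers and regions in the collect set.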
void SparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPoint();
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                objSize = freeObject->Available();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

size_t SparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}

size_t SparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}

void SparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}

OldSpace::OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, OLD_SPACE, initialCapacity, maximumCapacity) {}

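// Find a region that can satisfy the requested size and detach it from the global old space so the
// caller can use it exclusively.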
Region *OldSpace::TryToGetExclusiveRegion(size_t size)
{
    os::memory::LockHolder lock(lock_);
    uintptr_t result = allocator_->LookupSuitableFreeObject(size);
    if (result != 0) {
        // Remove region from global old space
        Region *region = Region::ObjectAddressToRange(result);
        RemoveRegion(region);
        allocator_->DetachFreeObjectSet(region);
        DecrementLiveObjectSize(region->AliveObject());
        return region;
    }
    return nullptr;
}

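// Move every region of a local space into the old space and merge its free object sets into this
// space's allocator.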
void OldSpace::Merge(LocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    os::memory::LockHolder lock(lock_);
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecrementLiveObjectSize(region->AliveObject());
        region->SetSpace(this);
        AddRegion(region);
        IncrementLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    if (committedSize_ >= maximumCapacity_) {
        LOG_ECMA_MEM(FATAL) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncrementAllocatedSize(localSpace->GetTotalAllocatedSize());
}

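// Build the collect set for partial GC: pick regions that are not mostly alive, sort them by live
// object size, keep at most GetSelectedRegionNumber() of them, and detach the chosen regions from
// the space.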
void OldSpace::SelectCSet()
{
    if (sweepState_ != SweepState::SWEPT) {
        return;
    }
    CheckRegionSize();
    // 1. Select regions in which live objects occupy no more than 80% of the region.
    EnumerateRegions([this](Region *region) {
        if (!region->MostObjectAlive()) {
            collectRegionSet_.emplace_back(region);
        }
    });
    if (collectRegionSet_.size() < PARTIAL_GC_MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Select CSet failure: number is too few";
        isCSetEmpty_ = true;
        collectRegionSet_.clear();
        return;
    }
    // 2. Sort the selected regions by live object size in ascending order.
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });
    unsigned long selectedRegionNumber = GetSelectedRegionNumber();
    if (collectRegionSet_.size() > selectedRegionNumber) {
        collectRegionSet_.resize(selectedRegionNumber);
    }

    EnumerateCollectRegionSet([&](Region *current) {
        RemoveRegion(current);
        DecrementLiveObjectSize(current->AliveObject());
        allocator_->DetachFreeObjectSet(current);
        current->SetFlag(RegionFlags::IS_IN_COLLECT_SET);
    });
    isCSetEmpty_ = false;
    sweepState_ = SweepState::NO_SWEEP;
    LOG_ECMA_MEM(DEBUG) << "Select CSet success: number is " << collectRegionSet_.size();
}

void OldSpace::CheckRegionSize()
{
#ifndef NDEBUG
    if (sweepState_ == SweepState::SWEEPING) {
        heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    }
    size_t available = allocator_->GetAvailableSize();
    size_t wasted = allocator_->GetWastedSize();
    if (GetHeapObjectSize() + wasted + available != objectSize_) {
        LOG(DEBUG, RUNTIME) << "Actual live object size:" << GetHeapObjectSize()
                            << ", free object size:" << available
                            << ", wasted size:" << wasted
                            << ", but expected total size:" << objectSize_;
    }
#endif
}

void OldSpace::RevertCSet()
{
    EnumerateCollectRegionSet([&](Region *region) {
        region->ClearFlag(RegionFlags::IS_IN_COLLECT_SET);
        region->SetSpace(this);
        AddRegion(region);
        allocator_->CollectFreeObjectSet(region);
        IncrementLiveObjectSize(region->AliveObject());
    });
    collectRegionSet_.clear();
    isCSetEmpty_ = true;
}

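// Release every region in the collect set back to the region allocator after evacuation.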
void OldSpace::ReclaimCSet()
{
    EnumerateCollectRegionSet([this](Region *region) {
        region->SetSpace(nullptr);
        region->DeleteMarkBitmap();
        region->DeleteCrossRegionRememberedSet();
        region->DeleteOldToNewRememberedSet();
        region->DestroySet();
        heapRegionAllocator_->FreeRegion(region);
    });
    collectRegionSet_.clear();
    isCSetEmpty_ = true;
}

LocalSpace::LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, LOCAL_SPACE, initialCapacity, maximumCapacity) {}

bool LocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) {
        LOG_ECMA_MEM(FATAL) << "AddRegionToList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    region->SetSpace(this);
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncrementLiveObjectSize(region->AliveObject());
    return true;
}

void LocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}

NonMovableSpace::NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::NON_MOVABLE, initialCapacity, maximumCapacity)
{
}

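// Allocate from the local space's free list, expanding by one region only if isExpand is set.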
uintptr_t LocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0) {
        if (isExpand && Expand()) {
            object = allocator_->Allocate(size);
        }
    }
    return object;
}

MachineCodeSpace::MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::MACHINE_CODE_SPACE, initialCapacity, maximumCapacity)
{
}
} // namespace panda::ecmascript