1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ecmascript/mem/linear_space.h"
17
18 #include "ecmascript/js_hclass-inl.h"
19 #include "ecmascript/mem/allocator-inl.h"
20 #include "ecmascript/mem/mem_controller.h"
21
22 namespace panda::ecmascript {
// Linear (bump-pointer) space; base for semi/eden/snapshot/read-only spaces.
LinearSpace::LinearSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      localHeap_(heap),
      thread_(heap->GetJSThread()),
      waterLine_(0)  // no age mark until the first SetWaterLine()
{
}
30
// Bump-pointer allocation of `size` bytes. `isPromoted` is true when the caller
// is the GC promoting (evacuating) objects; in that case GC triggering and heap
// sampling are skipped. Returns 0 when the allocation cannot be satisfied.
uintptr_t LinearSpace::Allocate(size_t size, bool isPromoted)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    auto object = allocator_.Allocate(size);
    if (object != 0) {
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
        // can not heap sampling in gc.
        if (!isPromoted) {
            InvokeAllocationInspector(object, size, size);
        }
#endif
        return object;
    }
    // Fast path failed: try to grow the space by one region.
    if (Expand(isPromoted)) {
        if (!isPromoted) {
            // A mutator-driven expansion is a natural point to consider starting GC work.
            if (!localHeap_->NeedStopCollection() ||
                (localHeap_->IsJustFinishStartup() && localHeap_->ObjectExceedJustFinishStartupThresholdForCM())) {
                localHeap_->TryTriggerIncrementalMarking();
                localHeap_->TryTriggerIdleCollection();
                localHeap_->TryTriggerConcurrentMarking();
            }
        }
        object = allocator_.Allocate(size);
    } else if (localHeap_->IsMarking() || !localHeap_->IsEmptyIdleTask()) {
        // Temporary adjust semi space capacity
        if (localHeap_->IsConcurrentFullMark()) {
            overShootSize_ = localHeap_->CalculateLinearSpaceOverShoot();
        } else {
            // Grow the overshoot budget step by step, capped at max(initial/2, step).
            size_t stepOverShootSize = localHeap_->GetEcmaParamConfiguration().GetSemiSpaceStepOvershootSize();
            size_t maxOverShootSize = std::max(initialCapacity_ / 2, stepOverShootSize); // 2: half
            if (overShootSize_ < maxOverShootSize) {
                overShootSize_ += stepOverShootSize;
            }
        }

        // Retry the expansion with the enlarged overshoot budget.
        if (Expand(isPromoted)) {
            object = allocator_.Allocate(size);
        }
    }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    if (object != 0 && !isPromoted) {
        InvokeAllocationInspector(object, size, size);
    }
#endif
    return object;
}
82
// Appends one new region to the space and points the bump allocator at it.
// Refuses (returns false) when the committed size already exceeds the budget
// (initial capacity + overshoot + OOM overshoot) — unless the heap currently
// asks to defer collection and this is not a GC promotion.
bool LinearSpace::Expand(bool isPromoted)
{
    if (committedSize_ >= initialCapacity_ + overShootSize_ + outOfMemoryOvershootSize_ &&
        (isPromoted || !localHeap_->NeedStopCollection())) {
        return false;
    }

    uintptr_t top = allocator_.GetTop();
    auto currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        // Before retiring the current region, fold its allocated bytes into the counters.
        if (!isPromoted) {
            if (currentRegion->HasAgeMark()) {
                // Only bytes above the age mark (water line) count as post-GC allocation.
                allocateAfterLastGC_ +=
                    currentRegion->GetAllocatedBytes(top) - currentRegion->GetAllocatedBytes(waterLine_);
            } else {
                allocateAfterLastGC_ += currentRegion->GetAllocatedBytes(top);
            }
        } else {
            // For GC
            survivalObjectSize_ += currentRegion->GetAllocatedBytes(top);
        }
        currentRegion->SetHighWaterMark(top);
    }
    JSThread *thread = localHeap_->GetJSThread();
    // NOTE(review): result is not null-checked — presumably AllocateAlignedRegion
    // handles allocation failure fatally itself; confirm against its implementation.
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_,
                                                                 thread_->IsConcurrentMarkingOrFinished());
    allocator_.Reset(region->GetBegin(), region->GetEnd());
    AddRegion(region);
    return true;
}
113
Stop()114 void LinearSpace::Stop()
115 {
116 if (GetCurrentRegion() != nullptr) {
117 GetCurrentRegion()->SetHighWaterMark(allocator_.GetTop());
118 }
119 }
120
ResetAllocator()121 void LinearSpace::ResetAllocator()
122 {
123 auto currentRegion = GetCurrentRegion();
124 if (currentRegion != nullptr) {
125 allocator_.Reset(currentRegion->GetBegin(), currentRegion->GetEnd(), currentRegion->GetHighWaterMark());
126 }
127 }
128
// Walks every object in the space and applies `visitor` to it. The current
// region is scanned up to the allocator's top; other regions up to their
// recorded allocated size. Free-object fillers are skipped by their size.
void LinearSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    auto current = GetCurrentRegion();
    EnumerateRegions([&](Region *region) {
        auto curPtr = region->GetBegin();
        uintptr_t endPtr = 0;
        if (region == current) {
            auto top = allocator_.GetTop();
            endPtr = curPtr + region->GetAllocatedBytes(top);
        } else {
            endPtr = curPtr + region->GetAllocatedBytes();
        }

        size_t objSize;
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            // If curPtr is freeObject, It must to mark unpoison first.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                // Advance by the object's actual size as described by its hidden class.
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                // Skip the free filler; re-poison it once its size has been read.
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}
162
InvokeAllocationInspector(Address object,size_t size,size_t alignedSize)163 void LinearSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
164 {
165 ASSERT(size <= alignedSize);
166 if (LIKELY(!allocationCounter_.IsActive())) {
167 return;
168 }
169 if (alignedSize >= allocationCounter_.NextBytes()) {
170 allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
171 }
172 allocationCounter_.AdvanceAllocationInspector(alignedSize);
173 }
174
// Eden space maps its whole maximum capacity up front as one page mapping
// and carves it into region-sized chunks kept in `freeRegions_`.
EdenSpace::EdenSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : LinearSpace(heap, MemSpaceType::EDEN_SPACE, initialCapacity, maximumCapacity)
{
    size_t memSize = AlignUp(maximumCapacity_, DEFAULT_REGION_SIZE);
    memMap_ = PageMap(memSize, PAGE_PROT_READWRITE, DEFAULT_REGION_SIZE);
    JSThread::ThreadId threadId = 0;
    if (heap->EnablePageTagThreadId()) {
        threadId = heap->GetJSThread()->GetThreadId();
    }
    // Tag the mapping so memory profiling attributes it to this space (and thread).
    PageTag(memMap_.GetMem(), memMap_.GetSize(), PageTagType::HEAP, ToSpaceTypeName(MemSpaceType::EDEN_SPACE),
            threadId);
    // Split the mapping into DEFAULT_REGION_SIZE chunks for later AllocRegion() use.
    auto mem = ToUintPtr(memMap_.GetMem());
    auto count = memMap_.GetSize() / DEFAULT_REGION_SIZE;
    while (count-- > 0) {
        freeRegions_.emplace_back(ToVoidPtr(mem), DEFAULT_REGION_SIZE);
        mem = mem + DEFAULT_REGION_SIZE;
    }
}
193
// Releases the backing page mapping reserved in the constructor.
EdenSpace::~EdenSpace()
{
    PageUnmap(memMap_);
}
198
Initialize()199 void EdenSpace::Initialize()
200 {
201 auto region = AllocRegion();
202 if (UNLIKELY(region == nullptr)) {
203 LOG_GC(ERROR) << "region is nullptr";
204 return;
205 }
206 AddRegion(region);
207 allocator_.Reset(region->GetBegin(), region->GetEnd());
208 localHeap_->InstallEdenAllocator();
209 }
210
Restart()211 void EdenSpace::Restart()
212 {
213 overShootSize_ = 0;
214 survivalObjectSize_ = 0;
215 allocateAfterLastGC_ = 0;
216 isFull_ = false;
217 Initialize();
218 }
219
// Thread-safe wrapper around Allocate(); serializes concurrent allocators.
uintptr_t EdenSpace::AllocateSync(size_t size)
{
    LockHolder lock(lock_);
    return Allocate(size);
}
225
// Bump-pointer allocation in eden. Returns 0 once the space is full; on the
// first failed expansion the space is latched as full and the eden allocator
// is released so subsequent allocations go elsewhere.
uintptr_t EdenSpace::Allocate(size_t size)
{
    if (isFull_) {
        return 0;
    }
    auto object = allocator_.Allocate(size);
    if (object != 0) {
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
        // can not heap sampling in gc.
        InvokeAllocationInspector(object, size, size);
#endif
        return object;
    }
    if (Expand()) {
        // Expansion succeeded: consider kicking off GC work, then retry.
        if (!localHeap_->NeedStopCollection()) {
            localHeap_->TryTriggerIncrementalMarking();
            localHeap_->TryTriggerIdleCollection();
            localHeap_->TryTriggerConcurrentMarking();
        }
        object = allocator_.Allocate(size);
    } else {
        // No pre-mapped region left: mark eden exhausted for this cycle.
        isFull_ = true;
        localHeap_->ReleaseEdenAllocator();
    }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    if (object != 0) {
        InvokeAllocationInspector(object, size, size);
    }
#endif
    return object;
}
257
// Takes one pre-mapped chunk from the free list and constructs a Region header
// in place. Returns nullptr when eden's reserved memory is exhausted.
Region *EdenSpace::AllocRegion()
{
    if (freeRegions_.empty()) {
        return nullptr;
    }
    auto memmap = freeRegions_.back();
    freeRegions_.pop_back();
    heapRegionAllocator_->IncreaseAnnoMemoryUsage(memmap.GetSize());
    auto mem = reinterpret_cast<uintptr_t>(memmap.GetMem());
    // Check that the address is 256K byte aligned
    LOG_ECMA_IF(AlignUp(mem, PANDA_POOL_ALIGNMENT_IN_BYTES) != mem, FATAL) << "region not align by 256KB";

    // Usable object storage starts after the Region header, suitably aligned.
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    uintptr_t begin = AlignUp(mem + sizeof(Region), static_cast<size_t>(MemAlignment::MEM_ALIGN_REGION));
    uintptr_t end = mem + memmap.GetSize();
    // Placement-new the Region header at the start of the chunk.
    auto region = new (ToVoidPtr(mem)) Region(localHeap_->GetNativeAreaAllocator(), mem, begin, end,
                                              GetRegionFlag(), RegionTypeFlag::DEFAULT);
    region->Initialize();
    return region;
}
278
// Retires the current region (recording its allocated bytes) and switches the
// bump allocator to a freshly carved region. Returns false when none is left.
bool EdenSpace::Expand()
{
    Region *region = AllocRegion();
    if (region == nullptr) {
        return false;
    }

    uintptr_t top = allocator_.GetTop();
    auto currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        if (currentRegion->HasAgeMark()) {
            // Only bytes above the age mark count as allocation since the last GC.
            allocateAfterLastGC_ +=
                currentRegion->GetAllocatedBytes(top) - currentRegion->GetAllocatedBytes(waterLine_);
        } else {
            allocateAfterLastGC_ += currentRegion->GetAllocatedBytes(top);
        }
        currentRegion->SetHighWaterMark(top);
    }
    allocator_.Reset(region->GetBegin(), region->GetEnd());
    AddRegion(region);
    return true;
}
301
// Returns every region of eden to the internal free list (the backing pages
// stay mapped) and resets the space's accounting. `cachedSize` is unused
// because eden recycles its pre-mapped chunks instead of unmapping them.
void EdenSpace::ReclaimRegions([[maybe_unused]] size_t cachedSize)
{
    const auto spaceName = ToSpaceTypeName(MemSpaceType::EDEN_SPACE);
    EnumerateRegions([this, &spaceName](Region *current) {
        LOG_GC(DEBUG) << "Clear region from: " << current << " to " << spaceName;
        current->DeleteLocalToShareRSet();
        DecreaseCommitted(current->GetCapacity());
        DecreaseObjectSize(current->GetSize());
        current->Invalidate();
        current->ClearMembers();
        // Recycle the raw chunk for a future AllocRegion() call.
        void *mem = ToVoidPtr(current->GetAllocateBase());
        size_t memSize = current->GetCapacity();
        freeRegions_.emplace_back(mem, memSize);
        heapRegionAllocator_->DecreaseAnnoMemoryUsage(memSize);
    });
    regionList_.Clear();
    committedSize_ = 0;
}
320
// Total live-object estimate: survivors of the last GC plus bytes allocated since.
size_t EdenSpace::GetHeapObjectSize() const
{
    return survivalObjectSize_ + allocateAfterLastGC_;
}
325
// Bytes of objects that survived the last collection.
size_t EdenSpace::GetSurvivalObjectSize() const
{
    return survivalObjectSize_;
}
330
// Sets the temporary capacity overshoot budget used by Expand().
void EdenSpace::SetOverShootSize(size_t size)
{
    overShootSize_ = size;
}
335
GetAllocatedSizeSinceGC(uintptr_t top) const336 size_t EdenSpace::GetAllocatedSizeSinceGC(uintptr_t top) const
337 {
338 size_t currentRegionSize = 0;
339 auto currentRegion = GetCurrentRegion();
340 if (currentRegion != nullptr) {
341 currentRegionSize = currentRegion->GetAllocatedBytes(top);
342 if (currentRegion->HasAgeMark()) {
343 currentRegionSize -= currentRegion->GetAllocatedBytes(waterLine_);
344 }
345 }
346 return allocateAfterLastGC_ + currentRegionSize;
347 }
348
// Semi space of the copying young-generation collector; the initial capacity
// also serves as the lower bound when AdjustCapacity() shrinks the space.
SemiSpace::SemiSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : LinearSpace(heap, MemSpaceType::SEMI_SPACE, initialCapacity, maximumCapacity),
      minimumCapacity_(initialCapacity) {}
352
Initialize()353 void SemiSpace::Initialize()
354 {
355 JSThread *thread = localHeap_->GetJSThread();
356 Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
357 AddRegion(region);
358 allocator_.Reset(region->GetBegin(), region->GetEnd());
359 }
360
Restart(size_t overShootSize)361 void SemiSpace::Restart(size_t overShootSize)
362 {
363 overShootSize_ = overShootSize;
364 survivalObjectSize_ = 0;
365 allocateAfterLastGC_ = 0;
366 Initialize();
367 }
368
CalculateNewOverShootSize()369 size_t SemiSpace::CalculateNewOverShootSize()
370 {
371 return committedSize_ <= maximumCapacity_ ?
372 0 : AlignUp((committedSize_ - maximumCapacity_) / 2, DEFAULT_REGION_SIZE); // 2 is the half.
373 }
374
// True when the committed size has grown to at least twice the maximum capacity.
bool SemiSpace::CommittedSizeIsLarge()
{
    return committedSize_ >= maximumCapacity_ * 2; // 2: twice the maximum capacity
}
379
// Thread-safe allocation used during GC promotion (isPromoted = true).
uintptr_t SemiSpace::AllocateSync(size_t size)
{
    LockHolder lock(lock_);
    return Allocate(size, true);
}
385
// Moves `region` from `fromSpace` into this space without copying its objects
// (new-to-new set). Fails when taking the region would exceed this space's
// capacity budget (maximum capacity + overshoot).
bool SemiSpace::SwapRegion(Region *region, SemiSpace *fromSpace)
{
    LockHolder lock(lock_);
    if (committedSize_ + region->GetCapacity() > maximumCapacity_ + overShootSize_) {
        return false;
    }
    fromSpace->RemoveRegion(region);

    region->SetGCFlag(RegionGCFlags::IN_NEW_TO_NEW_SET);

    if (UNLIKELY(heap_->ShouldVerifyHeap())) {
        region->ResetInactiveSemiSpace();
    }

    // The swapped region keeps its live objects, so its bytes count as survivors.
    regionList_.AddNodeToFront(region);
    IncreaseCommitted(region->GetCapacity());
    IncreaseObjectSize(region->GetSize());
    survivalObjectSize_ += region->GetAllocatedBytes();
    return true;
}
406
// Records the current allocation top as the age mark (water line) after a GC:
// objects below it are candidates for promotion in the next collection.
void SemiSpace::SetWaterLine()
{
    waterLine_ = allocator_.GetTop();
    allocateAfterLastGC_ = 0;
    Region *last = GetCurrentRegion();
    if (last != nullptr) {
        last->SetGCFlag(RegionGCFlags::HAS_AGE_MARK);

        // Every region other than the current one lies entirely below the age mark.
        EnumerateRegions([&last](Region *current) {
            if (current != last) {
                current->SetGCFlag(RegionGCFlags::BELOW_AGE_MARK);
            }
        });
        survivalObjectSize_ += last->GetAllocatedBytes(waterLine_);
    } else {
        LOG_GC(INFO) << "SetWaterLine: No region survival in current gc, current region available size: "
                     << allocator_.Available();
    }
}
426
// Total live-object estimate: survivors of the last GC plus bytes allocated since.
size_t SemiSpace::GetHeapObjectSize() const
{
    return survivalObjectSize_ + allocateAfterLastGC_;
}
431
// Bytes of objects that survived the last collection.
size_t SemiSpace::GetSurvivalObjectSize() const
{
    return survivalObjectSize_;
}
436
// Sets the temporary capacity overshoot budget used by Expand().
void SemiSpace::SetOverShootSize(size_t size)
{
    overShootSize_ = size;
}
441
AdjustCapacity(size_t allocatedSizeSinceGC,JSThread * thread)442 bool SemiSpace::AdjustCapacity(size_t allocatedSizeSinceGC, JSThread *thread)
443 {
444 if (allocatedSizeSinceGC <= initialCapacity_ * GROW_OBJECT_SURVIVAL_RATE / GROWING_FACTOR) {
445 return false;
446 }
447 double curObjectSurvivalRate = static_cast<double>(survivalObjectSize_) / allocatedSizeSinceGC;
448 double initialObjectRate = static_cast<double>(survivalObjectSize_) / initialCapacity_;
449 if (curObjectSurvivalRate > GROW_OBJECT_SURVIVAL_RATE || initialObjectRate > GROW_OBJECT_SURVIVAL_RATE) {
450 if (initialCapacity_ >= maximumCapacity_) {
451 return false;
452 }
453 size_t newCapacity = initialCapacity_ * GROWING_FACTOR;
454 SetInitialCapacity(std::min(newCapacity, maximumCapacity_));
455 if (newCapacity == maximumCapacity_) {
456 localHeap_->GetJSObjectResizingStrategy()->UpdateGrowStep(
457 thread,
458 JSObjectResizingStrategy::PROPERTIES_GROW_SIZE * 2); // 2: double
459 }
460 return true;
461 } else if (curObjectSurvivalRate < SHRINK_OBJECT_SURVIVAL_RATE) {
462 if (initialCapacity_ <= minimumCapacity_) {
463 return false;
464 }
465 double speed = localHeap_->GetMemController()->GetNewSpaceAllocationThroughputPerMS();
466 if (speed > LOW_ALLOCATION_SPEED_PER_MS) {
467 return false;
468 }
469 size_t newCapacity = initialCapacity_ / GROWING_FACTOR;
470 SetInitialCapacity(std::max(newCapacity, minimumCapacity_));
471 localHeap_->GetJSObjectResizingStrategy()->UpdateGrowStep(thread);
472 return true;
473 }
474 return false;
475 }
476
GetAllocatedSizeSinceGC(uintptr_t top) const477 size_t SemiSpace::GetAllocatedSizeSinceGC(uintptr_t top) const
478 {
479 size_t currentRegionSize = 0;
480 auto currentRegion = GetCurrentRegion();
481 if (currentRegion != nullptr) {
482 currentRegionSize = currentRegion->GetAllocatedBytes(top);
483 if (currentRegion->HasAgeMark()) {
484 currentRegionSize -= currentRegion->GetAllocatedBytes(waterLine_);
485 }
486 }
487 return allocateAfterLastGC_ + currentRegionSize;
488 }
489
// Linear space holding heap-snapshot data.
SnapshotSpace::SnapshotSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : LinearSpace(heap, MemSpaceType::SNAPSHOT_SPACE, initialCapacity, maximumCapacity) {}
492
// Linear space for immutable objects; `type` distinguishes read-only variants.
ReadOnlySpace::ReadOnlySpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity, MemSpaceType type)
    : LinearSpace(heap, type, initialCapacity, maximumCapacity) {}
495 } // namespace panda::ecmascript
496