1 /*
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ecmascript/js_tagged_value-inl.h"
17 #include "ecmascript/mem/mem_controller.h"
18 #include "ecmascript/mem/region-inl.h"
19 #include "ecmascript/mem/space.h"
20 #include "ecmascript/platform/os.h"
21
22 namespace panda::ecmascript {
// Base constructor for a heap space: records the owning heap, the region
// allocator used to obtain/release regions, the space kind, and its capacity
// bounds. Committed size starts at zero; it is adjusted as regions are
// added/removed.
Space::Space(BaseHeap* heap, HeapRegionAllocator *heapRegionAllocator,
             MemSpaceType spaceType, size_t initialCapacity,
             size_t maximumCapacity)
    : heap_(heap),
      heapRegionAllocator_(heapRegionAllocator),
      spaceType_(spaceType),
      initialCapacity_(initialCapacity),
      maximumCapacity_(maximumCapacity),
      committedSize_(0)
{
}
34
// Registers an allocation inspector (e.g. for heap sampling) on this space's
// allocation counter. Thin forwarder; ownership semantics are those of
// AllocationCounter::AddAllocationInspector.
void Space::AddAllocationInspector(AllocationInspector* inspector)
{
    allocationCounter_.AddAllocationInspector(inspector);
}
39
// Removes any registered allocation inspector from this space's counter.
void Space::ClearAllocationInspector()
{
    allocationCounter_.ClearAllocationInspector();
}
44
// Exchanges allocation counters with another space, e.g. when two spaces
// swap roles during collection, so inspector state follows the live space.
void Space::SwapAllocationCounter(Space *space)
{
    std::swap(allocationCounter_, space->allocationCounter_);
}
49
// Tears down the space by reclaiming all of its regions (with the default
// cached-size argument of ReclaimRegions).
void Space::Destroy()
{
    ReclaimRegions();
}
54
ReclaimRegions(size_t cachedSize)55 void Space::ReclaimRegions(size_t cachedSize)
56 {
57 EnumerateRegions([this, &cachedSize](Region *current) { ClearAndFreeRegion(current, cachedSize); });
58 regionList_.Clear();
59 committedSize_ = 0;
60 }
61
ClearAndFreeRegion(Region * region,size_t cachedSize)62 void Space::ClearAndFreeRegion(Region *region, size_t cachedSize)
63 {
64 LOG_ECMA_MEM(DEBUG) << "Clear region from:" << region << " to " << ToSpaceTypeName(spaceType_);
65 region->DeleteCrossRegionRSet();
66 region->DeleteNewToEdenRSet();
67 region->DeleteOldToNewRSet();
68 region->DeleteLocalToShareRSet();
69 region->DeleteSweepingOldToNewRSet();
70 region->DeleteSweepingLocalToShareRSet();
71 DecreaseCommitted(region->GetCapacity());
72 DecreaseObjectSize(region->GetSize());
73 if (spaceType_ == MemSpaceType::OLD_SPACE || spaceType_ == MemSpaceType::NON_MOVABLE ||
74 spaceType_ == MemSpaceType::MACHINE_CODE_SPACE || spaceType_ == MemSpaceType::LOCAL_SPACE ||
75 spaceType_ == MemSpaceType::APPSPAWN_SPACE || spaceType_ == MemSpaceType::SHARED_NON_MOVABLE ||
76 spaceType_ == MemSpaceType::SHARED_OLD_SPACE || spaceType_ == MemSpaceType::SHARED_LOCAL_SPACE) {
77 region->DestroyFreeObjectSets();
78 }
79 // regions of EdenSpace are allocated in EdenSpace constructor and fixed, not allocate by heapRegionAllocator_
80 if (spaceType_ != MemSpaceType::EDEN_SPACE) {
81 heapRegionAllocator_->FreeRegion(region, cachedSize);
82 }
83 }
84
// Constructs a huge-object space with the fixed HUGE_OBJECT_SPACE type.
HugeObjectSpace::HugeObjectSpace(Heap *heap, HeapRegionAllocator *heapRegionAllocator,
                                 size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heapRegionAllocator, MemSpaceType::HUGE_OBJECT_SPACE, initialCapacity, maximumCapacity)
{
}
90
// Constructs a huge-object space with a caller-supplied space type; used by
// subclasses such as HugeMachineCodeSpace.
HugeObjectSpace::HugeObjectSpace(Heap *heap, HeapRegionAllocator *heapRegionAllocator,
                                 size_t initialCapacity, size_t maximumCapacity, MemSpaceType spaceType)
    : Space(heap, heapRegionAllocator, spaceType, initialCapacity, maximumCapacity)
{
}
96
// Huge-object space dedicated to large JIT machine-code objects.
HugeMachineCodeSpace::HugeMachineCodeSpace(Heap *heap, HeapRegionAllocator *heapRegionAllocator,
                                           size_t initialCapacity, size_t maximumCapacity)
    : HugeObjectSpace(heap, heapRegionAllocator, initialCapacity,
                      maximumCapacity, MemSpaceType::HUGE_MACHINE_CODE_SPACE)
{
}
103
GetMachineCodeObject(uintptr_t pc) const104 uintptr_t HugeMachineCodeSpace::GetMachineCodeObject(uintptr_t pc) const
105 {
106 uintptr_t machineCode = 0;
107 EnumerateRegions([&](Region *region) {
108 if (machineCode != 0) {
109 return;
110 }
111 if (!region->InRange(pc)) {
112 return;
113 }
114 uintptr_t curPtr = region->GetBegin();
115 auto obj = MachineCode::Cast(reinterpret_cast<TaggedObject*>(curPtr));
116 if (obj->IsInText(pc)) {
117 machineCode = curPtr;
118 }
119 });
120 return machineCode;
121 }
122
AllocateFort(size_t objectSize,JSThread * thread,void * pDesc)123 Region *HugeMachineCodeSpace::AllocateFort(size_t objectSize, JSThread *thread, void *pDesc)
124 {
125 // A Huge machine code object is consisted of contiguous 256Kb aligned blocks.
126 // For JitFort, a huge machine code object starts with a page aligned mutable area
127 // (which holds Region and MachineCode object header, FuncEntryDesc and StackMap), followed
128 // by a page aligned immutable (JitFort space) area for JIT generated native instructions code.
129 //
130 // allocation sizes for Huge Machine Code:
131 // a: mutable area size (aligned up to PageSize()) =
132 // sizeof(Region) + HUGE_OBJECT_BITSET_SIZE + MachineCode::SIZE + payLoadSize - instructionsSize
133 // (note: payLoadSize = funcDesc size + stackMap size + instructionsSize)
134 // b: immutable area (starts on native page boundary) size = instructionsSize
135 // c: size to mmap for huge machine code object = Alignup(a + b, 256 Kbyte)
136 //
137 // mmap to enable JIT_FORT rights control:
138 // 1. first mmap (without JIT_FORT option flag) region of size c above
139 // 2. then mmap immutable area with MAP_FIXED and JIT_FORT option flag (to be used by codesigner verify/copy)
140 MachineCodeDesc *desc = reinterpret_cast<MachineCodeDesc *>(pDesc);
141 size_t mutableSize = AlignUp(
142 objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE - desc->instructionsSize, PageSize());
143 size_t allocSize = AlignUp(mutableSize + desc->instructionsSize, PANDA_POOL_ALIGNMENT_IN_BYTES);
144 if (heap_->OldSpaceExceedCapacity(allocSize)) {
145 LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
146 return 0;
147 }
148 Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, allocSize, thread, heap_);
149 desc->instructionsAddr = region->GetAllocateBase() + mutableSize;
150
151 // Enabe JitFort rights control
152 [[maybe_unused]] void *addr = PageMapExecFortSpace((void *)desc->instructionsAddr, allocSize - mutableSize,
153 PageProtectProt(reinterpret_cast<Heap *>(heap_)->GetEcmaVM()->GetJSOptions().GetDisableCodeSign() ||
154 !JitFort::IsResourceAvailable()));
155
156 ASSERT(addr == (void *)desc->instructionsAddr);
157 return region;
158 }
159
160
// JitFort-aware allocation entry for huge machine-code objects.
// In async-compile mode the region was pre-allocated by the compiler thread
// and is taken from the MachineCodeDesc; otherwise a fresh fort region is
// allocated here. Returns the object's begin address, or 0 on failure.
uintptr_t HugeMachineCodeSpace::Allocate(size_t objectSize, JSThread *thread, void *pDesc,
                                         AllocateEventType allocType)
{
    // JitFort path
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    // Allocation must happen while the JS thread is in running (or profiling) state.
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    if (allocType == AllocateEventType::NORMAL) {
        // Give a pending GC a chance to run before we take memory.
        thread->CheckSafepointIfSuspended();
    }
    Region *region;
    if (reinterpret_cast<Heap *>(heap_)->GetEcmaVM()->GetJSOptions().GetEnableAsyncCopyToFort() &&
        reinterpret_cast<MachineCodeDesc *>(pDesc)->isAsyncCompileMode) {
        // Async compile: region was already allocated and stashed in the descriptor.
        region = reinterpret_cast<Region *>(reinterpret_cast<MachineCodeDesc *>(pDesc)->hugeObjRegion);
    } else {
        region = AllocateFort(objectSize, thread, pDesc);
    }
    if (UNLIKELY(region == nullptr)) {
        LOG_GC(ERROR) << "HugeMachineCodeSpace::Allocate: region is nullptr";
        return 0;
    }
    AddRegion(region);
    // It need to mark unpoison when huge object being allocated.
    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    InvokeAllocationInspector(region->GetBegin(), objectSize);
#endif
    return region->GetBegin();
}
193
// Non-JitFort allocation: defers to the generic huge-object allocator.
uintptr_t HugeMachineCodeSpace::Allocate(size_t objectSize, JSThread *thread)
{
    // non JitFort path
    return HugeObjectSpace::Allocate(objectSize, thread);
}
199
// Allocates one huge object in its own aligned region. The region header,
// mark bitset and the object itself share the region; returns the object's
// begin address, or 0 when the allocation would exceed old-space capacity.
uintptr_t HugeObjectSpace::Allocate(size_t objectSize, JSThread *thread, AllocateEventType allocType)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    // Allocation must happen while the JS thread is in running (or profiling) state.
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    if (allocType == AllocateEventType::NORMAL) {
        // Give a pending GC a chance to run before we take memory.
        thread->CheckSafepointIfSuspended();
    }
    // In HugeObject allocation, we have a revervation of 8 bytes for markBitSet in objectSize.
    // In case Region is not aligned by 16 bytes, HUGE_OBJECT_BITSET_SIZE is 8 bytes more.
    size_t alignedSize = AlignUp(objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES);
    if (heap_->OldSpaceExceedCapacity(alignedSize)) {
        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
        return 0;
    }
    // NOTE(review): unlike HugeMachineCodeSpace::Allocate, the result is not
    // null-checked here — presumably AllocateAlignedRegion cannot return
    // nullptr (e.g. it fatals on OOM); confirm against its implementation.
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, alignedSize, thread, heap_);
    AddRegion(region);
    // It need to mark unpoison when huge object being allocated.
    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    InvokeAllocationInspector(region->GetBegin(), objectSize);
#endif
    return region->GetBegin();
}
227
Sweep()228 void HugeObjectSpace::Sweep()
229 {
230 Region *currentRegion = GetRegionList().GetFirst();
231 while (currentRegion != nullptr) {
232 Region *next = currentRegion->GetNext();
233 bool isMarked = false;
234 currentRegion->IterateAllMarkedBits([&isMarked]([[maybe_unused]] void *mem) { isMarked = true; });
235 if (!isMarked) {
236 GetRegionList().RemoveNode(currentRegion);
237 hugeNeedFreeList_.AddNode(currentRegion);
238 }
239 currentRegion = next;
240 }
241 }
242
// For huge-object spaces the committed size doubles as the live object size,
// since each region is fully occupied by a single object.
size_t HugeObjectSpace::GetHeapObjectSize() const
{
    return committedSize_;
}
247
IterateOverObjects(const std::function<void (TaggedObject * object)> & objectVisitor) const248 void HugeObjectSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const
249 {
250 EnumerateRegions([&](Region *region) {
251 uintptr_t curPtr = region->GetBegin();
252 objectVisitor(reinterpret_cast<TaggedObject *>(curPtr));
253 });
254 }
255
ReclaimHugeRegion()256 void HugeObjectSpace::ReclaimHugeRegion()
257 {
258 if (hugeNeedFreeList_.IsEmpty()) {
259 return;
260 }
261 do {
262 Region *last = hugeNeedFreeList_.PopBack();
263 ClearAndFreeRegion(last);
264 } while (!hugeNeedFreeList_.IsEmpty());
265 }
266
InvokeAllocationInspector(Address object,size_t objectSize)267 void HugeObjectSpace::InvokeAllocationInspector(Address object, size_t objectSize)
268 {
269 if (LIKELY(!allocationCounter_.IsActive())) {
270 return;
271 }
272 if (objectSize >= allocationCounter_.NextBytes()) {
273 allocationCounter_.InvokeAllocationInspector(object, objectSize, objectSize);
274 }
275 allocationCounter_.AdvanceAllocationInspector(objectSize);
276 }
277 } // namespace panda::ecmascript
278