/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/space.h"

#include "common_components/heap/heap.h"
#include "common_interfaces/heap/heap_allocator.h"
#include "ecmascript/js_tagged_value-inl.h"
#include "ecmascript/mem/mem_controller.h"
#include "ecmascript/mem/region-inl.h"
#include "ecmascript/platform/os.h"

namespace panda::ecmascript {
Space::Space(BaseHeap *heap, HeapRegionAllocator *heapRegionAllocator,
             MemSpaceType spaceType, size_t initialCapacity,
             size_t maximumCapacity)
    : heap_(heap),
      heapRegionAllocator_(heapRegionAllocator),
      spaceType_(spaceType),
      initialCapacity_(initialCapacity),
      maximumCapacity_(maximumCapacity),
      committedSize_(0)
{
    ASSERT(heap != nullptr);
    ASSERT(heapRegionAllocator != nullptr);
}

void Space::AddAllocationInspector(AllocationInspector *inspector)
{
    ASSERT(inspector != nullptr);
    allocationCounter_.AddAllocationInspector(inspector);
}

void Space::ClearAllocationInspector()
{
    allocationCounter_.ClearAllocationInspector();
}

void Space::SwapAllocationCounter(Space *space)
{
    ASSERT(space != nullptr);
    std::swap(allocationCounter_, space->allocationCounter_);
}

void Space::Destroy()
{
    ReclaimRegions();
}

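// Frees every region in this space and resets the committed size. cachedSize is
// forwarded to the region allocator, which may use it to bound how much freed
// memory it keeps cached rather than returning it to the OS.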
void Space::ReclaimRegions(size_t cachedSize)
{
    EnumerateRegions([this, &cachedSize](Region *current) { ClearAndFreeRegion(current, cachedSize); });
    regionList_.Clear();
    committedSize_ = 0;
}

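// Detaches a region from this space: deletes its remembered sets, updates the
// committed-size and object-size accounting, destroys the free object sets for
// the space types that maintain them, and returns the region to the allocator.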
void Space::ClearAndFreeRegion(Region *region, size_t cachedSize)
{
    ASSERT(region != nullptr);
    LOG_ECMA_MEM(DEBUG) << "Clear region from:" << region << " to " << ToSpaceTypeName(spaceType_);
    region->DeleteCrossRegionRSet();
    region->DeleteOldToNewRSet();
    region->DeleteLocalToShareRSet();
    region->DeleteSweepingOldToNewRSet();
    region->DeleteSweepingLocalToShareRSet();
    DecreaseCommitted(region->GetCapacity());
    DecreaseObjectSize(region->GetSize());
    if (spaceType_ == MemSpaceType::OLD_SPACE || spaceType_ == MemSpaceType::NON_MOVABLE ||
        spaceType_ == MemSpaceType::MACHINE_CODE_SPACE || spaceType_ == MemSpaceType::LOCAL_SPACE ||
        spaceType_ == MemSpaceType::APPSPAWN_SPACE || spaceType_ == MemSpaceType::SHARED_NON_MOVABLE ||
        spaceType_ == MemSpaceType::SHARED_OLD_SPACE || spaceType_ == MemSpaceType::SHARED_LOCAL_SPACE) {
        region->DestroyFreeObjectSets();
    }
    heapRegionAllocator_->FreeRegion(region, cachedSize);
}

HugeObjectSpace::HugeObjectSpace(Heap *heap, HeapRegionAllocator *heapRegionAllocator,
                                 size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heapRegionAllocator, MemSpaceType::HUGE_OBJECT_SPACE, initialCapacity, maximumCapacity)
{
}

HugeObjectSpace::HugeObjectSpace(Heap *heap, HeapRegionAllocator *heapRegionAllocator,
                                 size_t initialCapacity, size_t maximumCapacity, MemSpaceType spaceType)
    : Space(heap, heapRegionAllocator, spaceType, initialCapacity, maximumCapacity)
{
}

HugeMachineCodeSpace::HugeMachineCodeSpace(Heap *heap, HeapRegionAllocator *heapRegionAllocator,
                                           size_t initialCapacity, size_t maximumCapacity)
    : HugeObjectSpace(heap, heapRegionAllocator, initialCapacity,
        maximumCapacity, MemSpaceType::HUGE_MACHINE_CODE_SPACE)
{
}

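// Finds the machine code object containing the given pc. Each huge region holds
// a single object at its begin address, so a linear scan of the regions whose
// range covers pc is sufficient.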
uintptr_t HugeMachineCodeSpace::GetMachineCodeObject(uintptr_t pc) const
{
    ASSERT(!g_isEnableCMCGC);
    uintptr_t machineCode = 0;
    EnumerateRegions([&](Region *region) {
        if (machineCode != 0) {
            return;
        }
        if (!region->InRange(pc)) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        auto obj = MachineCode::Cast(reinterpret_cast<TaggedObject*>(curPtr));
        if (obj->IsInText(pc)) {
            machineCode = curPtr;
        }
    });
    return machineCode;
}

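// CMC GC variant of the fort allocation: the object is laid out as a
// page-aligned mutable part (region header plus object metadata) followed by a
// page-aligned executable fort area holding the JIT-generated instructions. The
// whole range comes from the common heap allocator, and the fort part is then
// mapped with execute rights via PageMapExecFortSpace.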
void* HugeMachineCodeSpace::AllocateFortForCMC(size_t objectSize, JSThread *thread, void *pDesc)
{
    ASSERT(thread != nullptr);
    ASSERT(pDesc != nullptr);
    MachineCodeDesc *desc = reinterpret_cast<MachineCodeDesc *>(pDesc);

    constexpr size_t REGION_HEADER_SIZE = common::Heap::GetNormalRegionHeaderSize();
    size_t mutableSize = AlignUp(objectSize + REGION_HEADER_SIZE - desc->instructionsSize, PageSize());
    size_t fortSize = AlignUp(desc->instructionsSize, PageSize());
    size_t allocSize = mutableSize + fortSize;
    // Check the capacity limit before mapping the region so that a refused
    // allocation does not leak the freshly allocated memory.
    if (heap_->OldSpaceExceedCapacity(fortSize)) {
        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
        return nullptr;
    }
    uintptr_t machineCodeObj = static_cast<uintptr_t>(
        common::HeapAllocator::AllocateLargeJitFortRegion(allocSize, common::LanguageType::DYNAMIC));
    ASSERT(machineCodeObj != 0);

    desc->instructionsAddr = machineCodeObj - REGION_HEADER_SIZE + mutableSize;

    // Enable JitFort rights control.
    [[maybe_unused]] void *addr = PageMapExecFortSpace((void *)desc->instructionsAddr, fortSize,
        PageProtectProt(reinterpret_cast<Heap *>(heap_)->GetEcmaVM()->GetJSOptions().GetDisableCodeSign() ||
            !JitFort::IsResourceAvailable()));

    ASSERT(addr == (void *)desc->instructionsAddr);
    return (void*)machineCodeObj;
}

Region *HugeMachineCodeSpace::AllocateFort(size_t objectSize, JSThread *thread, void *pDesc)
{
    // A huge machine code object consists of contiguous 256 KiB-aligned blocks.
    // For JitFort, a huge machine code object starts with a page-aligned mutable area
    // (which holds the Region and MachineCode object headers, FuncEntryDesc and StackMap), followed
    // by a page-aligned immutable (JitFort space) area for the JIT-generated native instruction code.
    //
    // Allocation sizes for huge machine code:
    //     a: mutable area size (aligned up to PageSize()) =
    //         sizeof(Region) + HUGE_OBJECT_BITSET_SIZE + MachineCode::SIZE + payLoadSize - instructionsSize
    //         (note: payLoadSize = funcDesc size + stackMap size + instructionsSize)
    //     b: immutable area (starts on a native page boundary) size = instructionsSize
    //     c: size to mmap for the huge machine code object = AlignUp(a + b, 256 KiB)
    //
    // mmap to enable JIT_FORT rights control:
    //     1. first mmap (without the JIT_FORT option flag) a region of size c above
    //     2. then mmap the immutable area with MAP_FIXED and the JIT_FORT option flag (to be used by codesigner verify/copy)
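    //
    // Illustrative example (assumed figures, not taken from the runtime): with a
    // 4 KiB page size, if the mutable part (headers + funcDesc + stackMap) needs
    // 10 KiB and instructionsSize is 100 KiB, then a = AlignUp(10 KiB, 4 KiB) =
    // 12 KiB, a + b = 112 KiB, and c = AlignUp(112 KiB, 256 KiB) = 256 KiB.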
    ASSERT(thread != nullptr);
    ASSERT(pDesc != nullptr);
    MachineCodeDesc *desc = reinterpret_cast<MachineCodeDesc *>(pDesc);
    size_t mutableSize = AlignUp(
        objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE - desc->instructionsSize, PageSize());
    size_t allocSize = AlignUp(mutableSize + desc->instructionsSize, PANDA_POOL_ALIGNMENT_IN_BYTES);
    if (heap_->OldSpaceExceedCapacity(allocSize)) {
        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
        return nullptr;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, allocSize, thread, heap_);
    desc->instructionsAddr = region->GetAllocateBase() + mutableSize;

    // Enable JitFort rights control.
    [[maybe_unused]] void *addr = PageMapExecFortSpace((void *)desc->instructionsAddr, allocSize - mutableSize,
        PageProtectProt(reinterpret_cast<Heap *>(heap_)->GetEcmaVM()->GetJSOptions().GetDisableCodeSign() ||
            !JitFort::IsResourceAvailable()));

    ASSERT(addr == (void *)desc->instructionsAddr);
    return region;
}

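// Allocates a huge machine code object. In async-compile mode the region was
// pre-allocated and is taken from the descriptor; otherwise the fort is
// allocated here, through the CMC path when CMC GC is enabled and through a
// heap region otherwise.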
uintptr_t HugeMachineCodeSpace::Allocate(size_t objectSize, JSThread *thread, void *pDesc,
    AllocateEventType allocType)
{
    ASSERT(thread != nullptr);
    ASSERT(pDesc != nullptr);
    // JitFort path
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Under CMC GC, huge machine code is allocated asynchronously and should not
    // take much time, so the safepoint check is skipped in that configuration.
    if (!g_isEnableCMCGC && allocType == AllocateEventType::NORMAL) {
        thread->CheckSafepointIfSuspended();
    }
    void *machineCodeObj = nullptr;
    if (reinterpret_cast<Heap*>(heap_)->GetEcmaVM()->GetJSOptions().GetEnableAsyncCopyToFort() &&
        reinterpret_cast<MachineCodeDesc*>(pDesc)->isAsyncCompileMode) {
        machineCodeObj = reinterpret_cast<void*>(reinterpret_cast<MachineCodeDesc*>(pDesc)->hugeObjRegion);
    } else {
        if (g_isEnableCMCGC) {
            machineCodeObj = AllocateFortForCMC(objectSize, thread, pDesc);
        } else {
            machineCodeObj = AllocateFort(objectSize, thread, pDesc);
        }
    }
    if (UNLIKELY(machineCodeObj == nullptr)) { // LCOV_EXCL_BR_LINE
        LOG_GC(ERROR) << "HugeMachineCodeSpace::Allocate: region is nullptr";
        return 0;
    }
    if (!g_isEnableCMCGC) {
        Region *region = reinterpret_cast<Region *>(machineCodeObj);
        AddRegion(region);
        // Memory must be unpoisoned when a huge object is allocated.
        ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
        InvokeAllocationInspector(region->GetBegin(), objectSize);
#endif
        return region->GetBegin();
    } else {
        return reinterpret_cast<uintptr_t>(machineCodeObj);
    }
}

uintptr_t HugeMachineCodeSpace::Allocate(size_t objectSize, JSThread *thread)
{
    ASSERT(!g_isEnableCMCGC);
    // non-JitFort path
    return HugeObjectSpace::Allocate(objectSize, thread);
}

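// Allocates a single huge object in its own aligned region, sized to cover the
// object plus the Region header and mark bitset reservation. Returns 0 when the
// old-space capacity limit would be exceeded.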
uintptr_t HugeObjectSpace::Allocate(size_t objectSize, JSThread *thread, AllocateEventType allocType)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    if (allocType == AllocateEventType::NORMAL) {
        thread->CheckSafepointIfSuspended();
    }
    // Huge object allocation reserves 8 bytes for the markBitSet in objectSize.
    // In case the Region is not aligned to 16 bytes, HUGE_OBJECT_BITSET_SIZE is 8 bytes larger.
    size_t alignedSize = AlignUp(objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES);
    if (heap_->OldSpaceExceedCapacity(alignedSize)) {
        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
        return 0;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, alignedSize, thread, heap_);
    AddRegion(region);
    // Memory must be unpoisoned when a huge object is allocated.
    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    InvokeAllocationInspector(region->GetBegin(), objectSize);
#endif
    return region->GetBegin();
}

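// Sweeps the huge object space: a region whose single object has no mark bit
// set is unlinked from the region list and queued on hugeNeedFreeList_ for
// ReclaimHugeRegion to release.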
void HugeObjectSpace::Sweep()
{
    Region *currentRegion = GetRegionList().GetFirst();
    while (currentRegion != nullptr) {
        Region *next = currentRegion->GetNext();
        bool isMarked = false;
        currentRegion->IterateAllMarkedBits([&isMarked]([[maybe_unused]] void *mem) { isMarked = true; });
        if (!isMarked) {
            GetRegionList().RemoveNode(currentRegion);
            hugeNeedFreeList_.AddNode(currentRegion);
        }
        currentRegion = next;
    }
}

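// Each region in this space holds exactly one object, so the committed size
// serves as the heap object size.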
size_t HugeObjectSpace::GetHeapObjectSize() const
{
    return committedSize_;
}

void HugeObjectSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const
{
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        objectVisitor(reinterpret_cast<TaggedObject *>(curPtr));
    });
}

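// Releases every region that Sweep queued on hugeNeedFreeList_.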
void HugeObjectSpace::ReclaimHugeRegion()
{
    if (hugeNeedFreeList_.IsEmpty()) {
        return;
    }
    do {
        Region *last = hugeNeedFreeList_.PopBack();
        ClearAndFreeRegion(last);
    } while (!hugeNeedFreeList_.IsEmpty());
}

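// Heap-sampling hook: if the allocation counter is active and this allocation
// reaches the next sampling threshold, the inspector is invoked; the counter is
// then advanced by the allocated size.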
void HugeObjectSpace::InvokeAllocationInspector(Address object, size_t objectSize)
{
    if (LIKELY(!allocationCounter_.IsActive())) { // LCOV_EXCL_BR_LINE
        return;
    }
    if (objectSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, objectSize, objectSize);
    }
    allocationCounter_.AdvanceAllocationInspector(objectSize);
}
}  // namespace panda::ecmascript