/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/mem_map_allocator.h"

#include "common_components/platform/cpu.h"
#include "ecmascript/mem/tagged_state_word.h"
#include "ecmascript/platform/os.h"
#include "ecmascript/platform/parameters.h"

namespace panda::ecmascript {
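// Process-wide singleton accessor. The instance is created lazily on first use and
// intentionally never destroyed, so it stays valid for the whole lifetime of the process.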
MemMapAllocator *MemMapAllocator::GetInstance()
{
    static MemMapAllocator *vmAllocator_ = new MemMapAllocator();
    return vmAllocator_;
}

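// Pre-reserve a large PROT_NONE range for regular-sized regions on 64-bit targets
// (excluding Windows/macOS/iOS). A randomized high base address is tried up to
// MEM_MAP_RETRY_NUM times, stepping the address on each failure; on success the
// reservation is handed to memMapPool_ and split into the per-region cache.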
void MemMapAllocator::InitializeRegularRegionMap([[maybe_unused]] size_t alignment)
{
#if defined(PANDA_TARGET_64) && !WIN_OR_MAC_OR_IOS_PLATFORM
    size_t initialRegularObjectCapacity = std::min(capacity_ / 3, INITIAL_REGULAR_OBJECT_CAPACITY);
    size_t i = 0;
    while (i < MEM_MAP_RETRY_NUM) {
        void *addr = reinterpret_cast<void *>(ToUintPtr(RandomGenerateBigAddr(REGULAR_OBJECT_MEM_MAP_BEGIN_ADDR)) +
            i * STEP_INCREASE_MEM_MAP_ADDR);
        MemMap memMap = PageMap(initialRegularObjectCapacity, PAGE_PROT_NONE, alignment, addr);
        if (ToUintPtr(memMap.GetMem()) >= ToUintPtr(addr)) {
            PageTag(memMap.GetMem(), memMap.GetSize(), PageTagType::HEAP);
            PageRelease(memMap.GetMem(), memMap.GetSize());
            memMapPool_.InsertMemMap(memMap);
            memMapPool_.SplitMemMapToCache(memMap);
            break;
        } else {
            PageUnmap(memMap);
            LOG_ECMA(ERROR) << "Regular object mem map big addr fail: " << errno;
        }
        i++;
    }
#endif
}

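// Pre-reserve the backing range for huge-object regions. On 64-bit non-Windows/macOS/iOS
// targets the base address is randomized and retried like the regular map, except that the
// final attempt accepts whatever address the kernel returned; the reservation then seeds
// memMapFreeList_. Other targets simply map the range at an arbitrary address.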
void MemMapAllocator::InitializeHugeRegionMap(size_t alignment)
{
    size_t initialHugeObjectCapacity = std::min(capacity_ / 3, INITIAL_HUGE_OBJECT_CAPACITY);
#if defined(PANDA_TARGET_64) && !WIN_OR_MAC_OR_IOS_PLATFORM
    size_t i = 0;
    while (i <= MEM_MAP_RETRY_NUM) {
        void *addr = reinterpret_cast<void *>(ToUintPtr(RandomGenerateBigAddr(HUGE_OBJECT_MEM_MAP_BEGIN_ADDR)) +
            i * STEP_INCREASE_MEM_MAP_ADDR);
        MemMap memMap = PageMap(initialHugeObjectCapacity, PAGE_PROT_NONE, alignment, addr);
        if (ToUintPtr(memMap.GetMem()) >= ToUintPtr(addr) || i == MEM_MAP_RETRY_NUM) {
            PageTag(memMap.GetMem(), memMap.GetSize(), PageTagType::HEAP);
            PageRelease(memMap.GetMem(), memMap.GetSize());
            memMapFreeList_.Initialize(memMap, capacity_);
            break;
        } else {
            PageUnmap(memMap);
            LOG_ECMA(ERROR) << "Huge object mem map big addr fail: " << errno;
        }
        i++;
    }
#else
    MemMap hugeMemMap = PageMap(initialHugeObjectCapacity, PAGE_PROT_NONE, alignment);
    PageTag(hugeMemMap.GetMem(), hugeMemMap.GetSize(), PageTagType::HEAP);
    PageRelease(hugeMemMap.GetMem(), hugeMemMap.GetSize());
    memMapFreeList_.Initialize(hugeMemMap, capacity_);
#endif
}

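// Pre-reserve the pool used for pointer-compressed (nonmovable) regions. On 64-bit targets
// twice the target capacity is mapped so that AlignMemMapTo4G can trim it to a window that
// does not cross a 4 GB boundary; the trimmed range is then split into compressMemMapPool_'s cache.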
void MemMapAllocator::InitializeCompressRegionMap(size_t alignment)
{
    size_t initialNonmovableObjectCapacity =
        AlignUp(std::min(capacity_ / 2, INITIAL_NONMOVALBE_OBJECT_CAPACITY), DEFAULT_REGION_SIZE);

#if defined(PANDA_TARGET_64)
    size_t alignNonmovableObjectCapacity = initialNonmovableObjectCapacity * 2;
#else
    size_t alignNonmovableObjectCapacity = initialNonmovableObjectCapacity;
#endif
#if defined(PANDA_TARGET_64) && !WIN_OR_MAC_OR_IOS_PLATFORM
    size_t i = 0;
    while (i <= MEM_MAP_RETRY_NUM) {
        void *addr = reinterpret_cast<void *>(ToUintPtr(RandomGenerateBigAddr(HUGE_OBJECT_MEM_MAP_BEGIN_ADDR)) +
            i * STEP_INCREASE_MEM_MAP_ADDR);
        MemMap memMap = PageMap(alignNonmovableObjectCapacity, PAGE_PROT_NONE, alignment, addr);
        if (ToUintPtr(memMap.GetMem()) >= ToUintPtr(addr) || i == MEM_MAP_RETRY_NUM) {
            memMap = AlignMemMapTo4G(memMap, initialNonmovableObjectCapacity);
            compressMemMapPool_.InsertMemMap(memMap);
            compressMemMapPool_.SplitMemMapToCache(memMap);
            break;
        } else {
            PageUnmap(memMap);
            LOG_ECMA(ERROR) << "Nonmovable object mem map big addr fail: " << errno;
        }
        i++;
    }
#else
    MemMap memMap = PageMap(alignNonmovableObjectCapacity, PAGE_PROT_NONE, alignment);
    memMap = AlignMemMapTo4G(memMap, initialNonmovableObjectCapacity);
    compressMemMapPool_.InsertMemMap(memMap);
    compressMemMapPool_.SplitMemMapToCache(memMap);
#endif
}

// Truncate the mapping so the kept range of targetSize bytes stays within a single 4 GB-aligned
// window, unmapping the surplus on either side and recording the window base in
// TaggedStateWord::BASE_ADDRESS.
MemMap MemMapAllocator::AlignMemMapTo4G(const MemMap &memMap, size_t targetSize)
{
#if defined(PANDA_TARGET_64)
    uintptr_t startAddr = ToUintPtr(memMap.GetMem());
    uintptr_t alignAddr = AlignUp(startAddr, 4_GB);

    size_t leftSize = alignAddr - startAddr;
    uintptr_t remainderAddr = alignAddr;
    if (leftSize > memMap.GetSize()) {
        remainderAddr = startAddr;
    } else if (leftSize > targetSize) {
        remainderAddr = alignAddr - targetSize;
    }

#ifndef PANDA_TARGET_WINDOWS
    uintptr_t leftUnmapAddr = startAddr;
    size_t leftUnmapSize = remainderAddr - leftUnmapAddr;
    PageUnmap(MemMap(ToVoidPtr(leftUnmapAddr), leftUnmapSize));

    uintptr_t rightUnmapAddr = remainderAddr + targetSize;
    size_t rightUnmapSize = (startAddr + memMap.GetSize()) - rightUnmapAddr;
    PageUnmap(MemMap(ToVoidPtr(rightUnmapAddr), rightUnmapSize));
#endif

    static constexpr uint64_t mask = 0xFFFFFFFF00000000;
    TaggedStateWord::BASE_ADDRESS = remainderAddr & mask;

    MemMap reminderMemMap(ToVoidPtr(remainderAddr), targetSize);
    PageTag(reminderMemMap.GetOriginAddr(), reminderMemMap.GetSize(), PageTagType::HEAP);
    PageRelease(reminderMemMap.GetMem(), reminderMemMap.GetSize());

    return reminderMemMap;
#else
    TaggedStateWord::BASE_ADDRESS = 0;
    PageTag(memMap.GetMem(), memMap.GetSize(), PageTagType::HEAP);
    PageRelease(memMap.GetMem(), memMap.GetSize());
    return memMap;
#endif
}

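// Apply the final protection to a block handed out by the allocator. Ordinary heap blocks become
// READWRITE. Machine-code blocks need EXEC_READWRITE, except on OHOS ARM64: with JitFort enabled
// the JIT code lives in the separate JitFort space (READWRITE is enough), otherwise the pages are
// remapped as executable via PageMapExecFortSpace.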
static bool PageProtectMem(bool machineCodeSpace, void *mem, size_t size, [[maybe_unused]] bool isEnableJitFort)
{
    int prot = machineCodeSpace ? PAGE_PROT_EXEC_READWRITE : PAGE_PROT_READWRITE;

    if (!machineCodeSpace) {
        return PageProtect(mem, size, prot);
    }

    // MachineCode and HugeMachineCode space pages:
#if defined(PANDA_TARGET_ARM64) && defined(PANDA_TARGET_OHOS)
    if (isEnableJitFort) {
        // if JitFort enabled, Jit code will be in JitFort space, so only need READWRITE here
        return PageProtect(mem, size, PAGE_PROT_READWRITE);
    } else {
        // else Jit code will be in MachineCode space, need EXEC_READWRITE and MAP_EXECUTABLE (0x1000)
        void *addr = PageMapExecFortSpace(mem, size, PAGE_PROT_EXEC_READWRITE);
        if (addr != mem) {
            return false;
        }
        return true;
    }
#else
    // not running phone kernel. Jit code will be MachineCode space
    return PageProtect(mem, size, PAGE_PROT_EXEC_READWRITE);
#endif
}

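// Main allocation entry point. Regular-sized requests are served from the compress pool or the
// regular mem map pool depending on isCompress; oversized requests come from memMapFreeList_.
// Returns an empty MemMap when the committed total would exceed capacity_ or no memory is available.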
MemMap MemMapAllocator::Allocate(const uint32_t threadId, size_t size, size_t alignment,
                                 const std::string &spaceName, bool regular, [[maybe_unused]] bool isCompress,
                                 bool isMachineCode, bool isEnableJitFort, bool shouldPageTag)
{
    PageTagType type = isMachineCode ? PageTagType::MACHINE_CODE : PageTagType::HEAP;

    if (regular) {
        if (isCompress) {
            return AllocateFromCompressPool(threadId, size, alignment, spaceName, isMachineCode,
                isEnableJitFort, shouldPageTag, type);
        } else {
            return AllocateFromMemPool(threadId, size, alignment, spaceName, isMachineCode,
                isEnableJitFort, shouldPageTag, type);
        }
    } else {
        if (UNLIKELY(memMapTotalSize_ + size > capacity_)) { // LCOV_EXCL_BR_LINE
            LOG_GC(ERROR) << "memory map overflow";
            return MemMap();
        }
        MemMap mem = memMapFreeList_.GetMemFromList(size);
        if (mem.GetMem() != nullptr) {
            InitialMemPool(mem, threadId, size, spaceName, isMachineCode, isEnableJitFort, shouldPageTag, type);
            IncreaseMemMapTotalSize(size);
        }
        return mem;
    }
}

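// Serve a regular-sized request from the pointer-compression pool: first reuse a committed
// region, then take one from the reserved cache (counted against capacity_), and on 32-bit
// targets map a fresh REGULAR_REGION_MMAP_SIZE chunk as a last resort.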
MemMap MemMapAllocator::AllocateFromCompressPool(const uint32_t threadId, size_t size, size_t alignment,
                                                 const std::string &spaceName, bool isMachineCode,
                                                 bool isEnableJitFort, bool shouldPageTag, PageTagType type)
{
    MemMap mem = compressMemMapPool_.GetRegularMemFromCommitted(size);
    if (mem.GetMem() != nullptr) {
        InitialMemPool(mem, threadId, size, spaceName, isMachineCode, isEnableJitFort, shouldPageTag, type);
        return mem;
    }
    if (UNLIKELY(memMapTotalSize_ + size > capacity_)) {
        LOG_GC(ERROR) << "memory map overflow";
        return MemMap();
    }

    mem = compressMemMapPool_.GetMemFromCache(size);
    if (mem.GetMem() != nullptr) {
        memMapTotalSize_ += size;
        InitialMemPool(mem, threadId, size, spaceName, isMachineCode, isEnableJitFort, shouldPageTag, type);
        return mem;
    }

#if !defined(PANDA_TARGET_64)
    mem = PageMap(REGULAR_REGION_MMAP_SIZE, PAGE_PROT_NONE, alignment);
    compressMemMapPool_.InsertMemMap(mem);
    mem = compressMemMapPool_.SplitMemFromCache(mem);
    if (mem.GetMem() != nullptr) {
        InitialMemPool(mem, threadId, size, spaceName, isMachineCode, isEnableJitFort, shouldPageTag, type);
        memMapTotalSize_ += mem.GetSize();
        return mem;
    }
#endif
    LOG_GC(ERROR) << "compress pool overflow";
    return MemMap();
}

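// Serve a regular-sized request from the regular mem map pool, in the same order: committed
// regions first, then the reserved cache, and finally a newly mapped REGULAR_REGION_MMAP_SIZE
// chunk that is split into region-sized pieces.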
MemMap MemMapAllocator::AllocateFromMemPool(const uint32_t threadId, size_t size, size_t alignment,
                                            const std::string &spaceName, bool isMachineCode, bool isEnableJitFort,
                                            bool shouldPageTag, PageTagType type)
{
    MemMap mem = memMapPool_.GetRegularMemFromCommitted(size);
    if (mem.GetMem() != nullptr) {
        InitialMemPool(mem, threadId, size, spaceName, isMachineCode, isEnableJitFort, shouldPageTag, type);
        return mem;
    }
    if (UNLIKELY(memMapTotalSize_ + size > capacity_)) {
        LOG_GC(ERROR) << "memory map overflow";
        return MemMap();
    }

    mem = memMapPool_.GetMemFromCache(size);
    if (mem.GetMem() != nullptr) {
        InitialMemPool(mem, threadId, size, spaceName, isMachineCode, isEnableJitFort, shouldPageTag, type);
        IncreaseMemMapTotalSize(size);
        return mem;
    }

    mem = PageMap(REGULAR_REGION_MMAP_SIZE, PAGE_PROT_NONE, alignment);
    memMapPool_.InsertMemMap(mem);
    mem = memMapPool_.SplitMemFromCache(mem);
    if (mem.GetMem() != nullptr) {
        InitialMemPool(mem, threadId, size, spaceName, isMachineCode, isEnableJitFort, shouldPageTag, type);
        IncreaseMemMapTotalSize(size);
    }
    return mem;
}

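// Make a freshly handed-out block usable: apply the final page protection (returning an empty
// MemMap on failure) and, when requested, tag the pages with the owning space name and thread id.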
MemMap MemMapAllocator::InitialMemPool(MemMap &mem, const uint32_t threadId, size_t size, const std::string &spaceName,
                                       bool isMachineCode, bool isEnableJitFort,
                                       bool shouldPageTag, PageTagType type)
{
    bool res = PageProtectMem(isMachineCode, mem.GetMem(), mem.GetSize(), isEnableJitFort);
    if (!res) {
        return MemMap();
    }
    if (shouldPageTag) {
        PageTag(mem.GetMem(), size, type, spaceName, threadId);
    }
    return mem;
}

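// Return a block to the allocator. Regular, non-compress blocks are kept in the committed cache
// while it has room (to speed up the next allocation); otherwise the block is freed, and if the
// cache has grown past cachedSize, extra cached regions are freed as well.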
void MemMapAllocator::CacheOrFree(void *mem, size_t size, bool isRegular, bool isCompress, size_t cachedSize,
                                  bool shouldPageTag, bool skipCache)
{
    // Clear ThreadId tag and tag the mem with ARKTS HEAP.
    if (shouldPageTag) {
        PageTag(mem, size, PageTagType::HEAP);
    }
    if (!skipCache && isRegular && !isCompress && !memMapPool_.IsRegularCommittedFull(cachedSize)) {
        // Cache regions to accelerate allocation.
        memMapPool_.AddMemToCommittedCache(mem, size);
        return;
    }
    Free(mem, size, isRegular, isCompress);
    if (!skipCache && isRegular && !isCompress && memMapPool_.ShouldFreeMore(cachedSize) > 0) {
        int freeNum = memMapPool_.ShouldFreeMore(cachedSize);
        for (int i = 0; i < freeNum; i++) {
            void *freeMem = memMapPool_.GetRegularMemFromCommitted(size).GetMem();
            if (freeMem != nullptr) {
                Free(freeMem, size, isRegular, isCompress);
            } else {
                return;
            }
        }
    }
}

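// Decommit a block: drop its size from the running total, remove all access rights, release the
// physical pages, and return the address range to the matching pool or free list so the
// reservation can be reused later.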
void MemMapAllocator::Free(void *mem, size_t size, bool isRegular, bool isCompress)
{
    DecreaseMemMapTotalSize(size);
    if (!PageProtect(mem, size, PAGE_PROT_NONE)) { // LCOV_EXCL_BR_LINE
        return;
    }
    PageRelease(mem, size);
    if (isRegular) {
        if (isCompress) {
            compressMemMapPool_.AddMemToCache(mem, size);
        } else {
            memMapPool_.AddMemToCache(mem, size);
        }
    } else {
        memMapFreeList_.AddMemToList(MemMap(mem, size));
    }
}

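// Derive capacity_ from the device's physical memory and the configured pool size, capping the
// result at poolSize. Under CMC GC the capacity is doubled on non-32-bit targets to leave
// headroom for copying.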
void MemMapAllocator::AdapterSuitablePoolCapacity(bool isLargeHeap)
{
    size_t physicalSize = common::PhysicalSize();
    uint64_t poolSize;
    if (isLargeHeap) {
        poolSize = LARGE_HEAP_POOL_SIZE;
    } else {
        poolSize = GetPoolSize(MAX_MEM_POOL_CAPACITY);
    }
    if (g_isEnableCMCGC) {
        constexpr double capacityRate = DEFAULT_CAPACITY_RATE;
        capacity_ = std::min<size_t>(physicalSize * capacityRate, poolSize);
#ifndef PANDA_TARGET_32
        // 2: double size, for cmc copy
        capacity_ *= 2;
#endif
    } else {
        capacity_ = std::min<size_t>(physicalSize * DEFAULT_CAPACITY_RATE, poolSize);
    }

    LOG_GC(INFO) << "Ark Auto adapter memory pool capacity:" << capacity_;
}

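// Lift the capacity limit so subsequent allocations are never rejected by the capacity_ check;
// as the name suggests, intended for use during GC.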
void MemMapAllocator::TransferToInfiniteModeForGC()
{
    capacity_ = std::numeric_limits<size_t>::max();
    LOG_GC(INFO) << "MemMapAllocator transfer to infinite mode:" << capacity_;
}
}  // namespace panda::ecmascript