/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H
#define ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H

#include <chrono>
#include <deque>
#include <map>
#include <random>
#include <set>

#include "ecmascript/base/config.h"
#include "ecmascript/ecma_macros.h"
#include "ecmascript/log_wrapper.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/mem_common.h"
#include "ecmascript/platform/map.h"
#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
// Regular region with length of DEFAULT_REGION_SIZE (256KB)
class MemMapPool {
public:
    MemMapPool() = default;
    ~MemMapPool() = default;

    void Finalize()
    {
        LockHolder lock(lock_);
        for (auto &it : memMapVector_) {
            PageUnmap(it);
        }
        for (auto &it : regularMapCommitted_) {
            PageUnmap(it);
        }
        regularMapCommitted_.clear();
        memMapVector_.clear();
        memMapCache_.clear();
    }

    NO_COPY_SEMANTIC(MemMapPool);
    NO_MOVE_SEMANTIC(MemMapPool);

    MemMap GetMemFromCache([[maybe_unused]] size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        LockHolder lock(lock_);
        if (!memMapCache_.empty()) {
            MemMap mem = memMapCache_.back();
            memMapCache_.pop_back();
            return mem;
        }
        return MemMap();
    }

    MemMap GetRegularMemFromCommitted([[maybe_unused]] size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        LockHolder lock(lock_);
        if (!regularMapCommitted_.empty()) {
            MemMap mem = regularMapCommitted_.back();
            regularMapCommitted_.pop_back();
            return mem;
        }
        return MemMap();
    }

    bool IsRegularCommittedFull(size_t cachedSize)
    {
        LockHolder lock(lock_);
        return regularMapCommitted_.size() >= (cachedSize / REGULAR_MMAP_SIZE);
    }

    int ShouldFreeMore(size_t cachedSize)
    {
        LockHolder lock(lock_);
        int result = static_cast<int>(regularMapCommitted_.size());
        return result - static_cast<int>(cachedSize / REGULAR_MMAP_SIZE);
    }

    void AddMemToCommittedCache(void *mem, size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        LockHolder lock(lock_);
        regularMapCommitted_.emplace_back(mem, size);
    }

    void AddMemToCache(void *mem, size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        LockHolder lock(lock_);
        memMapCache_.emplace_back(mem, size);
    }

    MemMap SplitMemFromCache(MemMap memMap)
    {
        LockHolder lock(lock_);
        auto remainderMem = reinterpret_cast<uintptr_t>(memMap.GetMem()) + REGULAR_MMAP_SIZE;
        size_t remainderSize = AlignDown(memMap.GetSize() - REGULAR_MMAP_SIZE, REGULAR_MMAP_SIZE);
        size_t count = remainderSize / REGULAR_MMAP_SIZE;
        while (count-- > 0) {
            memMapCache_.emplace_back(reinterpret_cast<void *>(remainderMem), REGULAR_MMAP_SIZE);
            remainderMem = remainderMem + REGULAR_MMAP_SIZE;
        }
        return MemMap(memMap.GetMem(), REGULAR_MMAP_SIZE);
    }

    void SplitMemMapToCache(MemMap memMap)
    {
        auto memAddr = reinterpret_cast<uintptr_t>(memMap.GetMem());
        size_t memTotalSize = AlignDown(memMap.GetSize(), REGULAR_MMAP_SIZE);
        size_t count = memTotalSize / REGULAR_MMAP_SIZE;
        while (count-- > 0) {
            memMapCache_.emplace_back(reinterpret_cast<void *>(memAddr), REGULAR_MMAP_SIZE);
            memAddr += REGULAR_MMAP_SIZE;
        }
    }

    void InsertMemMap(MemMap memMap)
    {
        LockHolder lock(lock_);
        memMapVector_.emplace_back(memMap);
    }

private:
    static constexpr size_t REGULAR_MMAP_SIZE = 256_KB;
    Mutex lock_;
    std::deque<MemMap> memMapCache_;
    std::vector<MemMap> regularMapCommitted_;
    std::vector<MemMap> memMapVector_;
};
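
// A minimal usage sketch (hypothetical, not part of this header; assumes a default-constructed
// MemMap carries a null pointer). The pool hands out fixed-size 256KB chunks, preferring the
// committed cache, then the reserved cache; the caller maps fresh pages only when both miss.
//
//     MemMapPool pool;
//     MemMap mem = pool.GetRegularMemFromCommitted(256_KB);  // reuse a committed chunk if any
//     if (mem.GetMem() == nullptr) {
//         mem = pool.GetMemFromCache(256_KB);                // else take a reserved chunk
//     }
//     // ... commit and use the chunk, then return it for reuse:
//     pool.AddMemToCommittedCache(mem.GetMem(), mem.GetSize());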

// Non-regular region whose length is a multiple of DEFAULT_REGION_SIZE (256KB)
class MemMapFreeList {
public:
    MemMapFreeList() = default;
    ~MemMapFreeList() = default;

    void Initialize(MemMap memMap, size_t capacity)
    {
        memMaps_.emplace_back(memMap);
        freeList_.emplace(memMap.GetSize(), memMap);
        capacity_ = capacity;
    }

    void Finalize()
    {
        for (auto &memMap : memMaps_) {
            PageUnmap(memMap);
        }
        memMaps_.clear();
        freeList_.clear();
    }

    void ResetCapacity(size_t capacity)
    {
        capacity_ = capacity;
    }

    NO_COPY_SEMANTIC(MemMapFreeList);
    NO_MOVE_SEMANTIC(MemMapFreeList);

    void MergeList()
    {
        auto it = freeList_.begin();
        while (it != freeList_.end()) {
            bool isEqual = false;
            void *startMem = (*it).second.GetMem();
            size_t newSize = (*it).second.GetSize();
            auto startIt = it++;
            if (it == freeList_.end()) {
                break;
            }
            auto next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(startMem) + newSize);
            while (it != freeList_.end() && next == (*it).second.GetMem()) {
                newSize += (*it).second.GetSize();
                it = freeList_.erase(it);
                next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(startMem) + newSize);
                isEqual = true;
            }
            if (isEqual) {
                freeList_.erase(startIt);
                freeList_.emplace(newSize, MemMap(startMem, newSize));
            }
        }
    }
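
    // Worked example (hypothetical addresses): if iteration visits {256KB -> 0x1000} and the next
    // size-ordered entry {512KB -> 0x1000 + 256KB} starts exactly where the first block ends, both
    // entries are erased and re-inserted as a single {768KB -> 0x1000} entry. Note that the scan
    // walks the size-ordered multimap, so only blocks that are contiguous in memory and adjacent
    // in that iteration order get coalesced.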

    MemMap GetMemFromList(size_t size)
    {
        if (freeListPoolSize_ + size > capacity_) {
            LOG_GC(ERROR) << "Freelist pool oom: overflow(" << freeListPoolSize_ << ")";
            return MemMap();
        }
        LockHolder lock(lock_);
        auto iterate = freeList_.lower_bound(size);
        if (iterate == freeList_.end()) {
            MergeList();
            iterate = freeList_.lower_bound(size);
            // Unable to get memory from freeList, use PageMap
            if (iterate == freeList_.end()) {
                size_t incrementCapacity = std::max(size, INCREMENT_HUGE_OBJECT_CAPACITY);
                MemMap smemMap = PageMap(incrementCapacity, PAGE_PROT_NONE, DEFAULT_REGION_SIZE);
                LOG_GC(INFO) << "Huge object mem pool increase PageMap size: " << smemMap.GetSize();
                memMaps_.emplace_back(smemMap);
                freeList_.emplace(smemMap.GetSize(), smemMap);
                iterate = freeList_.lower_bound(size);
                ASSERT(iterate != freeList_.end());
            }
        }
        MemMap memMap = iterate->second;
        size_t remainderSize = memMap.GetSize() - size;
        freeList_.erase(iterate);
        if (remainderSize >= DEFAULT_REGION_SIZE) {
            auto next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memMap.GetMem()) + size);
            freeList_.emplace(remainderSize, MemMap(next, remainderSize));
        }
        freeListPoolSize_ += size;
        return MemMap(memMap.GetMem(), size);
    }

    void AddMemToList(MemMap memMap)
    {
        LockHolder lock(lock_);
        freeListPoolSize_ -= memMap.GetSize();
        freeList_.emplace(memMap.GetSize(), memMap);
    }

private:
    Mutex lock_;
    std::vector<MemMap> memMaps_;
    std::multimap<size_t, MemMap> freeList_;
    std::atomic_size_t freeListPoolSize_ {0};
    size_t capacity_ {0};
};
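
// A minimal usage sketch (hypothetical; reuses constants referenced elsewhere in this header):
// carve a block for a huge object out of the free list and return it later. GetMemFromList()
// re-inserts any remainder of at least DEFAULT_REGION_SIZE and grows the pool via PageMap when
// no fitting block exists.
//
//     MemMapFreeList freeList;
//     MemMap initial = PageMap(INCREMENT_HUGE_OBJECT_CAPACITY, PAGE_PROT_NONE, DEFAULT_REGION_SIZE);
//     freeList.Initialize(initial, LARGE_HEAP_POOL_SIZE);
//     MemMap block = freeList.GetMemFromList(512_KB);  // carve 512KB for a huge object
//     // ... use the block, then hand it back for reuse:
//     freeList.AddMemToList(block);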

class MemMapAllocator {
public:
    MemMapAllocator() = default;
    ~MemMapAllocator() = default;

    NO_COPY_SEMANTIC(MemMapAllocator);
    NO_MOVE_SEMANTIC(MemMapAllocator);

    void Initialize(size_t alignment, bool isLargeHeap)
    {
        AdapterSuitablePoolCapacity(isLargeHeap);
        memMapTotalSize_ = 0;
        if (!g_isEnableCMCGC) {
            InitializeHugeRegionMap(alignment);
            InitializeRegularRegionMap(alignment);
            InitializeCompressRegionMap(alignment);
        }
    }

    void Finalize()
    {
        memMapTotalSize_ = 0;
        capacity_ = 0;
        memMapFreeList_.Finalize();
        memMapPool_.Finalize();
        compressMemMapPool_.Finalize();
    }

    size_t GetCapacity()
    {
        return capacity_;
    }

    void ResetLargePoolSize()
    {
        capacity_ = LARGE_HEAP_POOL_SIZE;
        memMapFreeList_.ResetCapacity(capacity_);
    }

    void IncreaseMemMapTotalSize(size_t bytes)
    {
        memMapTotalSize_.fetch_add(bytes);
        ECMA_BYTRACE_COUNT_TRACE(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK, "Heap size (KB)", memMapTotalSize_ / 1_KB);
    }

    void DecreaseMemMapTotalSize(size_t bytes)
    {
        memMapTotalSize_.fetch_sub(bytes);
        ECMA_BYTRACE_COUNT_TRACE(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK, "Heap size (KB)", memMapTotalSize_ / 1_KB);
    }

    static MemMapAllocator *GetInstance();

    MemMap Allocate(const uint32_t threadId, size_t size, size_t alignment,
                    const std::string &spaceName, bool regular, bool isCompress, bool isMachineCode,
                    bool isEnableJitFort, bool shouldPageTag);

    void CacheOrFree(void *mem, size_t size, bool isRegular, bool isCompress, size_t cachedSize,
                     bool shouldPageTag, bool skipCache);

    // Only used when region allocation fails during GC. Since it is unsafe to do a HeapDump or throw
    // OOM at that point, make MemMapAllocator infinite so the current GC can complete. This temporarily
    // lets every JSThread always succeed at allocating regions, breaking the global region limit, but
    // the thread calling this will soon finish the GC and then hit a fatal error.
    void TransferToInfiniteModeForGC();

private:
    void InitializeRegularRegionMap(size_t alignment);
    void InitializeHugeRegionMap(size_t alignment);
    void InitializeCompressRegionMap(size_t alignment);

    MemMap AllocateFromMemPool(const uint32_t threadId, size_t size, size_t alignment,
                               const std::string &spaceName, bool isMachineCode, bool isEnableJitFort,
                               bool shouldPageTag, PageTagType type);
    MemMap AllocateFromCompressPool(const uint32_t threadId, size_t size, size_t alignment,
                                    const std::string &spaceName, bool isMachineCode, bool isEnableJitFort,
                                    bool shouldPageTag, PageTagType type);
    MemMap InitialMemPool(MemMap &mem, const uint32_t threadId, size_t size, const std::string &spaceName,
                          bool isMachineCode, bool isEnableJitFort, bool shouldPageTag, PageTagType type);
    MemMap AlignMemMapTo4G(const MemMap &memMap, size_t targetSize);
    // Randomly offset the big mem map base address so the JS heap is less likely to be overwritten by others
    void *RandomGenerateBigAddr(uint64_t addr)
    {
        // Use the current time as the seed
        unsigned seed = static_cast<unsigned>(std::chrono::system_clock::now().time_since_epoch().count());
        std::mt19937_64 generator(seed);

        // Generate a random number between 0 and RANDOM_NUM_MAX
        std::uniform_int_distribution<uint64_t> distribution(0, RANDOM_NUM_MAX);
        uint64_t randomNum = distribution(generator);

        // The big addr varies randomly within 0x2000000000 ~ 0x2FF0000000
        return reinterpret_cast<void *>(addr + (randomNum << RANDOM_SHIFT_BIT));
    }
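
    // Worked example (arithmetic only): with RANDOM_NUM_MAX = 0xFF and RANDOM_SHIFT_BIT = 28, the
    // offset randomNum << 28 is a multiple of 0x10000000 in [0, 0xFF0000000]. Starting from
    // REGULAR_OBJECT_MEM_MAP_BEGIN_ADDR (0x2000000000), this yields bases in
    // 0x2000000000 ~ 0x2FF0000000, matching the range noted above.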

    static constexpr size_t REGULAR_REGION_MMAP_SIZE = 4_MB;
    static constexpr uint64_t HUGE_OBJECT_MEM_MAP_BEGIN_ADDR = 0x1000000000;
    static constexpr uint64_t REGULAR_OBJECT_MEM_MAP_BEGIN_ADDR = 0x2000000000;
    static constexpr uint64_t STEP_INCREASE_MEM_MAP_ADDR = 0x1000000000;
    static constexpr size_t RANDOM_NUM_MAX = 0xFF;
    static constexpr size_t RANDOM_SHIFT_BIT = 28;
    static constexpr size_t MEM_MAP_RETRY_NUM = 10;

    void AdapterSuitablePoolCapacity(bool isLargeHeap);
    void Free(void *mem, size_t size, bool isRegular, bool isCompress);
    MemMapPool memMapPool_;
    MemMapPool compressMemMapPool_;
    MemMapFreeList memMapFreeList_;
    std::atomic_size_t memMapTotalSize_ {0};
    size_t capacity_ {0};
};
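
// A minimal usage sketch (hypothetical call site; threadId, cachedSize, and the space name are
// placeholders): allocate a regular 256KB region through the singleton, then hand it back to the
// allocator's cache.
//
//     MemMapAllocator *allocator = MemMapAllocator::GetInstance();
//     MemMap region = allocator->Allocate(threadId, 256_KB, DEFAULT_REGION_SIZE, "ark-js-heap",
//                                         true /* regular */, false /* isCompress */,
//                                         false /* isMachineCode */, false /* isEnableJitFort */,
//                                         true /* shouldPageTag */);
//     // ... use the region, then release it:
//     allocator->CacheOrFree(region.GetMem(), region.GetSize(), true /* isRegular */,
//                            false /* isCompress */, cachedSize, true /* shouldPageTag */,
//                            false /* skipCache */);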
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H