• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H
17 #define ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H
18 
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <map>
#include <random>
#include <set>
#include <string>
#include <vector>

#include "ecmascript/log_wrapper.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/mem_common.h"
#include "ecmascript/platform/map.h"
#include "ecmascript/platform/mutex.h"
30 
31 namespace panda::ecmascript {
32 // Regular region with length of DEFAULT_REGION_SIZE(256kb)
33 class MemMapPool {
34 public:
35     MemMapPool() = default;
36     ~MemMapPool() = default;
37 
Finalize()38     void Finalize()
39     {
40         LockHolder lock(lock_);
41         for (auto &it : memMapVector_) {
42             PageUnmap(it);
43         }
44         for (auto &it : regularMapCommitted_) {
45             PageUnmap(it);
46         }
47         regularMapCommitted_.clear();
48         memMapVector_.clear();
49         memMapCache_.clear();
50     }
51 
52     NO_COPY_SEMANTIC(MemMapPool);
53     NO_MOVE_SEMANTIC(MemMapPool);
54 
GetMemFromCache(size_t size)55     MemMap GetMemFromCache([[maybe_unused]] size_t size)
56     {
57         ASSERT(size == REGULAR_MMAP_SIZE);
58         LockHolder lock(lock_);
59         if (!memMapCache_.empty()) {
60             MemMap mem = memMapCache_.front();
61             memMapCache_.pop_front();
62             return mem;
63         }
64         return MemMap();
65     }
66 
GetRegularMemFromCommitted(size_t size)67     MemMap GetRegularMemFromCommitted([[maybe_unused]] size_t size)
68     {
69         ASSERT(size == REGULAR_MMAP_SIZE);
70         LockHolder lock(lock_);
71         if (!regularMapCommitted_.empty()) {
72             MemMap mem = regularMapCommitted_.back();
73             regularMapCommitted_.pop_back();
74             return mem;
75         }
76         return MemMap();
77     }
78 
IsRegularCommittedFull(size_t cachedSize)79     bool IsRegularCommittedFull(size_t cachedSize)
80     {
81         LockHolder lock(lock_);
82         size_t size = regularMapCommitted_.size();
83         return size > (cachedSize / REGULAR_MMAP_SIZE) ? true : false;
84     }
85 
ShouldFreeMore(size_t cachedSize)86     int ShouldFreeMore(size_t cachedSize)
87     {
88         LockHolder lock(lock_);
89         int result = static_cast<int>(regularMapCommitted_.size());
90         return result - static_cast<int>(cachedSize / REGULAR_MMAP_SIZE);
91     }
92 
AddMemToCommittedCache(void * mem,size_t size)93     void AddMemToCommittedCache(void *mem, size_t size)
94     {
95         ASSERT(size == REGULAR_MMAP_SIZE);
96         LockHolder lock(lock_);
97         regularMapCommitted_.emplace_back(mem, size);
98     }
99 
AddMemToCache(void * mem,size_t size)100     void AddMemToCache(void *mem, size_t size)
101     {
102         ASSERT(size == REGULAR_MMAP_SIZE);
103         LockHolder lock(lock_);
104         memMapCache_.emplace_back(mem, size);
105     }
106 
SplitMemFromCache(MemMap memMap)107     MemMap SplitMemFromCache(MemMap memMap)
108     {
109         LockHolder lock(lock_);
110         auto remainderMem = reinterpret_cast<uintptr_t>(memMap.GetMem()) + REGULAR_MMAP_SIZE;
111         size_t remainderSize = AlignDown(memMap.GetSize() - REGULAR_MMAP_SIZE, REGULAR_MMAP_SIZE);
112         size_t count = remainderSize / REGULAR_MMAP_SIZE;
113         while (count-- > 0) {
114             memMapCache_.emplace_back(reinterpret_cast<void *>(remainderMem), REGULAR_MMAP_SIZE);
115             remainderMem = remainderMem + REGULAR_MMAP_SIZE;
116         }
117         return MemMap(memMap.GetMem(), REGULAR_MMAP_SIZE);
118     }
119 
SplitMemMapToCache(MemMap memMap)120     void SplitMemMapToCache(MemMap memMap)
121     {
122         auto memAddr = reinterpret_cast<uintptr_t>(memMap.GetMem());
123         size_t memTotalSize = AlignDown(memMap.GetSize(), REGULAR_MMAP_SIZE);
124         size_t count = memTotalSize / REGULAR_MMAP_SIZE;
125         while (count-- > 0) {
126             memMapCache_.emplace_back(reinterpret_cast<void *>(memAddr), REGULAR_MMAP_SIZE);
127             memAddr += REGULAR_MMAP_SIZE;
128         }
129     }
130 
InsertMemMap(MemMap memMap)131     void InsertMemMap(MemMap memMap)
132     {
133         LockHolder lock(lock_);
134         memMapVector_.emplace_back(memMap);
135     }
136 
137 private:
138     static constexpr size_t REGULAR_MMAP_SIZE = 256_KB;
139     Mutex lock_;
140     std::deque<MemMap> memMapCache_;
141     std::vector<MemMap> regularMapCommitted_;
142     std::vector<MemMap> memMapVector_;
143 };
144 
145 // Non regular region with length of DEFAULT_REGION_SIZE(256kb) multiple
// Free list for non-regular regions whose lengths are multiples of
// DEFAULT_REGION_SIZE (256kb). Backed by a size-keyed multimap so
// GetMemFromList can best-fit via lower_bound.
class MemMapFreeList {
public:
    MemMapFreeList() = default;
    ~MemMapFreeList() = default;

    // Seeds the free list with one initial mapping; capacity is the byte
    // budget enforced by GetMemFromList. Not thread-safe; call before use.
    void Initialize(MemMap memMap, size_t capacity)
    {
        memMaps_.emplace_back(memMap);
        freeList_.emplace(memMap.GetSize(), memMap);
        capacity_ = capacity;
    }

    // Unmaps every mapping ever handed to this list and clears bookkeeping.
    // NOTE(review): does not take lock_ — presumably only called when no
    // other thread can touch the list; confirm against callers.
    void Finalize()
    {
        for (auto &memMap : memMaps_) {
            PageUnmap(memMap);
        }
        memMaps_.clear();
        freeList_.clear();
    }

    NO_COPY_SEMANTIC(MemMapFreeList);
    NO_MOVE_SEMANTIC(MemMapFreeList);

    // Coalesces address-adjacent free entries. The multimap is keyed by
    // SIZE, so iteration is in size order, not address order: merging is
    // opportunistic and only fires when consecutive size-ordered entries
    // happen to be contiguous in memory. Caller must hold lock_
    // (invoked from GetMemFromList under the lock).
    void MergeList()
    {
        auto it = freeList_.begin();
        while (it != freeList_.end()) {
            bool isEqual = false;
            void *startMem = (*it).second.GetMem();
            size_t newSize = (*it).second.GetSize();
            auto startIt = it++;  // remember the run's first entry
            if (it == freeList_.end()) {
                break;
            }
            // Address immediately after the current run.
            auto next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(startMem) + newSize);
            // Absorb every following entry that starts exactly at `next`.
            while (it != freeList_.end() && next == (*it).second.GetMem()) {
                newSize += (*it).second.GetSize();
                it = freeList_.erase(it);
                next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(startMem) + newSize);
                isEqual = true;
            }
            if (isEqual) {
                // Replace the run's head with one merged entry.
                freeList_.erase(startIt);
                freeList_.emplace(newSize, MemMap(startMem, newSize));
            }
        }
    }

    // Best-fit allocation of `size` bytes. On miss it first merges adjacent
    // chunks, then grows the pool via PageMap. Returns an empty MemMap only
    // when the byte budget (capacity_) would be exceeded.
    MemMap GetMemFromList(size_t size)
    {
        // freeListPoolSize_ is atomic, so this pre-lock check is safe.
        if (freeListPoolSize_ + size > capacity_) {
            LOG_GC(ERROR) << "Freelist pool oom: overflow(" << freeListPoolSize_ << ")";
            return MemMap();
        }
        LockHolder lock(lock_);
        auto iterate = freeList_.lower_bound(size);
        if (iterate == freeList_.end()) {
            MergeList();
            iterate = freeList_.lower_bound(size);
            // Unable to get memory from freeList, use PageMap
            if (iterate == freeList_.end()) {
                size_t incrementCapacity = std::max(size, INCREMENT_HUGE_OBJECT_CAPACITY);
                MemMap smemMap = PageMap(incrementCapacity, PAGE_PROT_NONE, DEFAULT_REGION_SIZE);
                LOG_GC(INFO) << "Huge object mem pool increase PageMap size: " << smemMap.GetSize();
                memMaps_.emplace_back(smemMap);
                freeList_.emplace(smemMap.GetSize(), smemMap);
                iterate = freeList_.lower_bound(size);
                ASSERT(iterate != freeList_.end());
            }
        }
        MemMap memMap = iterate->second;
        size_t remainderSize = memMap.GetSize() - size;
        freeList_.erase(iterate);
        // Re-list the tail only if it is still at least one region long.
        if (remainderSize >= DEFAULT_REGION_SIZE) {
            auto next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memMap.GetMem()) + size);
            freeList_.emplace(remainderSize, MemMap(next, remainderSize));
        }
        freeListPoolSize_ += size;
        return MemMap(memMap.GetMem(), size);
    }

    // Returns a previously allocated chunk to the free list and reduces the
    // outstanding-allocation counter.
    void AddMemToList(MemMap memMap)
    {
        LockHolder lock(lock_);
        freeListPoolSize_ -= memMap.GetSize();
        freeList_.emplace(memMap.GetSize(), memMap);
    }

private:
    Mutex lock_;
    std::vector<MemMap> memMaps_;               // all mappings owned (for Finalize)
    std::multimap<size_t, MemMap> freeList_;    // free chunks keyed by size
    std::atomic_size_t freeListPoolSize_ {0};   // bytes currently handed out
    size_t capacity_ {0};                       // byte budget for allocations
};
242 
243 class MemMapAllocator {
244 public:
245     MemMapAllocator() = default;
246     ~MemMapAllocator() = default;
247 
248     NO_COPY_SEMANTIC(MemMapAllocator);
249     NO_MOVE_SEMANTIC(MemMapAllocator);
250 
Initialize(size_t alignment)251     void Initialize(size_t alignment)
252     {
253         AdapterSuitablePoolCapacity();
254         memMapTotalSize_ = 0;
255         size_t initialHugeObjectCapacity = std::min(capacity_ / 2, INITIAL_HUGE_OBJECT_CAPACITY);
256         MemMap hugeMemMap = PageMap(initialHugeObjectCapacity, PAGE_PROT_NONE, alignment);
257         PageTag(hugeMemMap.GetMem(), hugeMemMap.GetSize(), PageTagType::MEMPOOL_CACHE);
258         PageRelease(hugeMemMap.GetMem(), hugeMemMap.GetSize());
259         memMapFreeList_.Initialize(hugeMemMap, capacity_);
260 #if defined(PANDA_TARGET_64) && !WIN_OR_MAC_OR_IOS_PLATFORM
261         size_t initialRegularObjectCapacity = std::min(capacity_ / 2, INITIAL_REGULAR_OBJECT_CAPACITY);
262         MemMap memMap = PageMap(initialRegularObjectCapacity, PAGE_PROT_NONE, alignment, RandomGenerateBigAddr());
263         PageTag(memMap.GetMem(), memMap.GetSize(), PageTagType::MEMPOOL_CACHE);
264         PageRelease(memMap.GetMem(), memMap.GetSize());
265         memMapPool_.InsertMemMap(memMap);
266         memMapPool_.SplitMemMapToCache(memMap);
267 #endif
268     }
269 
Finalize()270     void Finalize()
271     {
272         memMapTotalSize_ = 0;
273         capacity_ = 0;
274         memMapFreeList_.Finalize();
275         memMapPool_.Finalize();
276     }
277 
GetCapacity()278     size_t GetCapacity()
279     {
280         return capacity_;
281     }
282 
IncreaseAndCheckReserved(size_t size)283     void IncreaseAndCheckReserved(size_t size)
284     {
285         if (reserved_ + size > capacity_) {
286             LOG_GC(ERROR) << "pool is empty, reserved = " << reserved_ << ", capacity_ = " <<
287                 capacity_ << ", size = " << size;
288         }
289         reserved_ += size;
290         LOG_GC(DEBUG) << "Ark IncreaseAndCheckReserved reserved = " << reserved_ << ", capacity_ = " << capacity_;
291     }
292 
DecreaseReserved(size_t size)293     void DecreaseReserved(size_t size)
294     {
295         reserved_ -= size;
296         LOG_GC(DEBUG) << "Ark DecreaseReserved reserved = " << reserved_ << ", capacity_ = " << capacity_;
297     }
298 
299     static MemMapAllocator *GetInstance();
300 
301     MemMap Allocate(const uint32_t threadId, size_t size, size_t alignment,
302                     const std::string &spaceName, bool regular, bool isMachineCode);
303 
304     void CacheOrFree(void *mem, size_t size, bool isRegular, size_t cachedSize);
305 
306 private:
307     // Random generate big mem map addr to avoid js heap is written by others
RandomGenerateBigAddr()308     void *RandomGenerateBigAddr()
309     {
310         // Use the current time as the seed
311         unsigned seed = static_cast<unsigned>(std::chrono::system_clock::now().time_since_epoch().count());
312         std::mt19937_64 generator(seed);
313 
314         // Generate a random number between 0 and RANDOM_NUM_MAX
315         std::uniform_int_distribution<uint64_t> distribution(0, RANDOM_NUM_MAX);
316         uint64_t randomNum = distribution(generator);
317 
318         // Big addr random change in 0x10000000000 ~ 0x1FF00000000
319         return reinterpret_cast<void *>(BIG_MEM_MAP_BEGIN_ADDR + (randomNum << UINT32_BIT));
320     }
321 
322     static constexpr size_t REGULAR_REGION_MMAP_SIZE = 4_MB;
323     static constexpr uint64_t BIG_MEM_MAP_BEGIN_ADDR = 0x10000000000;
324     static constexpr size_t RANDOM_NUM_MAX = 0xFF;
325     static constexpr size_t UINT32_BIT = 32;
326 
327     void AdapterSuitablePoolCapacity();
328     void Free(void *mem, size_t size, bool isRegular);
329     MemMapPool memMapPool_;
330     MemMapFreeList memMapFreeList_;
331     std::atomic_size_t memMapTotalSize_ {0};
332     size_t capacity_ {0};
333     size_t reserved_ {0};
334 };
335 }  // namespace panda::ecmascript
336 #endif  // ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H
337