/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H
#define ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H

#include <deque>
#include <map>
#include <set>
#include <vector>

#include "ecmascript/platform/map.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/mem_common.h"
#include "ecmascript/log_wrapper.h"

#include "libpandabase/os/mutex.h"

namespace panda::ecmascript {
// Pool of regular regions, each with length DEFAULT_REGION_SIZE (256 KB)
class MemMapPool {
public:
    MemMapPool() = default;
    ~MemMapPool() = default;

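    // Unmaps every mapping registered via InsertMemMap and drops all cached entries.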
    void Finalize()
    {
        os::memory::LockHolder lock(lock_);
        for (auto &it : memMapVector_) {
            PageUnmap(it);
        }
        memMapVector_.clear();
        memMapCache_.clear();
    }

    NO_COPY_SEMANTIC(MemMapPool);
    NO_MOVE_SEMANTIC(MemMapPool);

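    // Pops a cached regular-sized mapping if one is available; otherwise returns an empty MemMap.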
    MemMap GetMemFromCache([[maybe_unused]] size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        os::memory::LockHolder lock(lock_);
        if (!memMapCache_.empty()) {
            MemMap mem = memMapCache_.front();
            memMapCache_.pop_front();
            return mem;
        }
        return MemMap();
    }

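    // Returns a regular-sized mapping to the cache for later reuse.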
    void AddMemToCache(void *mem, size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        os::memory::LockHolder lock(lock_);
        memMapCache_.emplace_back(mem, size);
    }

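    // Splits a larger mapping into REGULAR_MMAP_SIZE chunks, caching the tail chunks and returning the head chunk.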
    MemMap SplitMemFromCache(MemMap memMap)
    {
        os::memory::LockHolder lock(lock_);
        auto remainderMem = reinterpret_cast<uintptr_t>(memMap.GetMem()) + REGULAR_MMAP_SIZE;
        size_t remainderSize = AlignDown(memMap.GetSize() - REGULAR_MMAP_SIZE, REGULAR_MMAP_SIZE);
        size_t count = remainderSize / REGULAR_MMAP_SIZE;
        while (count-- > 0) {
            memMapCache_.emplace_back(reinterpret_cast<void *>(remainderMem), REGULAR_MMAP_SIZE);
            remainderMem = remainderMem + REGULAR_MMAP_SIZE;
        }
        return MemMap(memMap.GetMem(), REGULAR_MMAP_SIZE);
    }

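    // Registers a freshly mapped range so Finalize() can unmap it later.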
    void InsertMemMap(MemMap memMap)
    {
        os::memory::LockHolder lock(lock_);
        memMapVector_.emplace_back(memMap);
    }

private:
    static constexpr size_t REGULAR_MMAP_SIZE = 256_KB;
    os::memory::Mutex lock_;
    std::deque<MemMap> memMapCache_;
    std::vector<MemMap> memMapVector_;
};

// Free list for non-regular regions whose length is a multiple of DEFAULT_REGION_SIZE (256 KB)
class MemMapFreeList {
public:
    MemMapFreeList() = default;
    ~MemMapFreeList() = default;

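    // Takes over a pre-mapped range and seeds the free list with it as a single block.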
    void Initialize(MemMap memMap)
    {
        memMap_ = memMap;
        freeList_.emplace(memMap.GetSize(), memMap);
        capacity_ = memMap.GetSize();
    }

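    // Unmaps the backing range and clears all free-list bookkeeping.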
    void Finalize()
    {
        PageUnmap(memMap_);
        freeList_.clear();
        freeSet_.clear();
    }

    NO_COPY_SEMANTIC(MemMapFreeList);
    NO_MOVE_SEMANTIC(MemMapFreeList);

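    // Attempts to coalesce contiguous free blocks into larger entries; called when a sized lookup fails.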
    void MergeList()
    {
        auto it = freeList_.begin();
        while (it != freeList_.end()) {
            bool isEqual = false;
            void *startMem = (*it).second.GetMem();
            size_t newSize = (*it).second.GetSize();
            auto startIt = it++;
            if (it == freeList_.end()) {
                break;
            }
            auto next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(startMem) + newSize);
            while (it != freeList_.end() && next == (*it).second.GetMem()) {
                newSize += (*it).second.GetSize();
                freeList_.erase(it++);
                next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(startMem) + newSize);
                isEqual = true;
            }
            if (isEqual) {
                freeList_.erase(startIt);
                freeList_.emplace(newSize, MemMap(startMem, newSize));
            }
        }
    }

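    // Carves a block of the requested size out of the free list; when the list cannot satisfy the
    // request, maps fresh pages on demand and tracks them in freeSet_.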
    MemMap GetMemFromList(size_t size)
    {
        if (freeListPoolSize_ + size > capacity_) {
            LOG_GC(ERROR) << "Freelist pool oom: overflow(" << freeListPoolSize_ << ")";
            return MemMap();
        }
        os::memory::LockHolder lock(lock_);
        auto iterate = freeList_.lower_bound(size);
        if (iterate == freeList_.end()) {
            MergeList();
            iterate = freeList_.lower_bound(size);
            // Unable to get memory from freeList, use PageMap
            if (iterate == freeList_.end()) {
                if (freeListPoolSize_ + freeSetPoolSize_ + size > capacity_) {
                    LOG_GC(ERROR) << "Freeset pool oom: overflow(" << freeSetPoolSize_ << ")";
                    return MemMap();
                }
                MemMap smemMap = PageMap(size, PAGE_PROT_NONE, DEFAULT_REGION_SIZE);
                freeSet_.emplace(reinterpret_cast<uintptr_t>(smemMap.GetMem()));
                freeSetPoolSize_ += size;
                return smemMap;
            }
        }
        MemMap memMap = iterate->second;
        size_t remainderSize = memMap.GetSize() - size;
        freeList_.erase(iterate);
        if (remainderSize >= DEFAULT_REGION_SIZE) {
            auto next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memMap.GetMem()) + size);
            freeList_.emplace(remainderSize, MemMap(next, remainderSize));
        }
        freeListPoolSize_ += size;
        return MemMap(memMap.GetMem(), size);
    }

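    // Returns a block: ranges that were page-mapped on demand (tracked in freeSet_) are unmapped,
    // everything else goes back onto the free list.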
    void AddMemToList(MemMap memMap)
    {
        os::memory::LockHolder lock(lock_);
        auto search = freeSet_.find(reinterpret_cast<uintptr_t>(memMap.GetMem()));
        if (UNLIKELY(search != freeSet_.end())) {
            freeSetPoolSize_ -= memMap.GetSize();
            freeSet_.erase(search);
            PageUnmap(memMap);
        } else {
            freeListPoolSize_ -= memMap.GetSize();
            freeList_.emplace(memMap.GetSize(), memMap);
        }
    }

private:
    os::memory::Mutex lock_;
    MemMap memMap_;
    std::multimap<size_t, MemMap> freeList_;
    std::set<uintptr_t> freeSet_;
    std::atomic_size_t freeListPoolSize_ {0};
    std::atomic_size_t freeSetPoolSize_ {0};
    size_t capacity_ {0};
};

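// Process-wide allocator that owns a MemMapPool for regular regions and a MemMapFreeList for
// huge regions; Allocate()/Free() are defined out of line and presumably dispatch on the
// regular/isRegular flag. A minimal usage sketch (call sites and argument values assumed):
//   auto *allocator = MemMapAllocator::GetInstance();
//   allocator->Initialize(DEFAULT_REGION_SIZE);  // once, at startup
//   MemMap region = allocator->Allocate(DEFAULT_REGION_SIZE, DEFAULT_REGION_SIZE,
//                                       true /* regular */, false /* isMachineCode */);
//   allocator->Free(region.GetMem(), region.GetSize(), true /* isRegular */);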
class MemMapAllocator {
public:
    MemMapAllocator() = default;
    ~MemMapAllocator() = default;

    NO_COPY_SEMANTIC(MemMapAllocator);
    NO_MOVE_SEMANTIC(MemMapAllocator);

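    // Picks a suitable pool capacity, then maps the huge-object range (tagged and released up
    // front) that backs memMapFreeList_.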
    void Initialize(size_t alignment)
    {
        AdapterSuitablePoolCapacity();
        memMapTotalSize_ = 0;
        size_t hugeObjectCapacity = std::min(capacity_ / 2, MAX_HUGE_OBJECT_CAPACITY);
        MemMap memMap = PageMap(hugeObjectCapacity, PAGE_PROT_NONE, alignment);
        PageTag(memMap.GetMem(), memMap.GetSize(), PageTagType::MEMPOOL_CACHE);
        PageRelease(memMap.GetMem(), memMap.GetSize());
        memMapFreeList_.Initialize(memMap);
    }

    void Finalize()
    {
        memMapTotalSize_ = 0;
        capacity_ = 0;
        memMapFreeList_.Finalize();
        memMapPool_.Finalize();
    }

    size_t GetCapacity()
    {
        return capacity_;
    }

    void IncreaseAndCheckReserved(size_t size)
    {
        if (reserved_ + size > capacity_) {
            LOG_GC(ERROR) << "pool is empty, reserved = " << reserved_ << ", capacity_ = " <<
                capacity_ << ", size = " << size;
        }
        reserved_ += size;
        LOG_GC(DEBUG) << "Ark IncreaseAndCheckReserved reserved = " << reserved_ << ", capacity_ = " << capacity_;
    }

    void DecreaseReserved(size_t size)
    {
        reserved_ -= size;
        LOG_GC(DEBUG) << "Ark DecreaseReserved reserved = " << reserved_ << ", capacity_ = " << capacity_;
    }

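    // Singleton accessor plus the raw Allocate/Free entry points; all three are implemented out of line.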
    static MemMapAllocator *GetInstance();

    MemMap Allocate(size_t size, size_t alignment, bool regular, bool isMachineCode);

    void Free(void *mem, size_t size, bool isRegular);

private:
    static constexpr size_t REGULAR_REGION_MMAP_SIZE = 4_MB;

    void AdapterSuitablePoolCapacity();

    MemMapPool memMapPool_;
    MemMapFreeList memMapFreeList_;
    std::atomic_size_t memMapTotalSize_ {0};
    size_t capacity_ {0};
    size_t reserved_ {0};
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H