/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBPANDABASE_MEM_MMAP_MEM_POOL_INLINE_H
#define LIBPANDABASE_MEM_MMAP_MEM_POOL_INLINE_H

#include <utility>
#ifdef PANDA_QEMU_BUILD
// Unfortunately, madvise on QEMU works differently, so we have to zero pages by hand.
#include <securec.h>
#endif
#include "mmap_mem_pool.h"
#include "mem.h"
#include "os/mem.h"
#include "utils/logger.h"
#include "mem/arena-inl.h"
#include "mem/mem_config.h"
#include "utils/asan_interface.h"

namespace ark {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_MMAP_MEM_POOL(level) LOG(level, MEMORYPOOL) << "MmapMemPool: "

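// Pops the best-fitting free pool (the smallest one that is at least `size` bytes) from the free-pool map.
// If the chosen pool is larger than requested, the remainder is split off and kept as a new free pool.
// With the ZEROED_MEMORY policy, pages of a pool that was not returned to the OS are released so that the
// caller observes zero-filled memory.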
template <OSPagesAllocPolicy OS_ALLOC_POLICY>
inline Pool MmapPoolMap::PopFreePool(size_t size)
{
    auto element = freePools_.lower_bound(size);
    if (element == freePools_.end()) {
        return NULLPOOL;
    }
    auto mmapPool = element->second;
    ASSERT(!mmapPool->IsUsed(freePools_.end()));
    auto elementSize = element->first;
    ASSERT(elementSize == mmapPool->GetSize());
    auto elementMem = mmapPool->GetMem();

    if (unreturnedPool_.GetMmapPool() == mmapPool) {
        unreturnedPool_ = UnreturnedToOSPool();
    }

    mmapPool->SetFreePoolsIter(freePools_.end());
    Pool pool(size, elementMem);
    freePools_.erase(element);
    if (size < elementSize) {
        Pool newPool(elementSize - size, ToVoidPtr(ToUintPtr(elementMem) + size));
        mmapPool->SetSize(size);
        auto newMmapPool = new MmapPool(newPool, freePools_.end());
        poolMap_.insert(std::pair<void *, MmapPool *>(newPool.GetMem(), newMmapPool));
        auto newFreePoolsIter = freePools_.insert(std::pair<size_t, MmapPool *>(newPool.GetSize(), newMmapPool));
        newMmapPool->SetFreePoolsIter(newFreePoolsIter);
        newMmapPool->SetReturnedToOS(mmapPool->IsReturnedToOS());
    }
    if (OS_ALLOC_POLICY == OSPagesAllocPolicy::ZEROED_MEMORY && !mmapPool->IsReturnedToOS()) {
        uintptr_t poolStart = ToUintPtr(pool.GetMem());
        size_t poolSize = pool.GetSize();
        LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from Free Pool to get zeroed memory: start = " << pool.GetMem()
                                 << " with size " << poolSize;
        os::mem::ReleasePages(poolStart, poolStart + poolSize);
    }
    return pool;
}

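// Returns a used pool to the free-pool map. The pool is coalesced with its free left and right neighbours
// when possible. If the merged pool turns out to be the last pool of the space, it is removed from the map
// entirely and its size is reported back so the common space can reclaim it as free space; otherwise
// {0, OS_PAGES_POLICY} is returned.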
template <OSPagesPolicy OS_PAGES_POLICY>
inline std::pair<size_t, OSPagesPolicy> MmapPoolMap::PushFreePool(Pool pool)
{
    bool returnedToOs = OS_PAGES_POLICY == OSPagesPolicy::IMMEDIATE_RETURN;
    auto mmapPoolElement = poolMap_.find(pool.GetMem());
    if (UNLIKELY(mmapPoolElement == poolMap_.end())) {
        LOG_MMAP_MEM_POOL(FATAL) << "Can't find mmap pool in the pool map in PushFreePool";
    }

    auto mmapPool = mmapPoolElement->second;
    ASSERT(mmapPool->IsUsed(freePools_.end()));

    auto prevPool = (mmapPoolElement != poolMap_.begin()) ? (prev(mmapPoolElement, 1)->second) : nullptr;
    if (prevPool != nullptr && !prevPool->IsUsed(freePools_.end())) {
        unreturnedPool_ = unreturnedPool_.GetMmapPool() == prevPool ? UnreturnedToOSPool() : unreturnedPool_;
        ASSERT(ToUintPtr(prevPool->GetMem()) + prevPool->GetSize() == ToUintPtr(mmapPool->GetMem()));
        returnedToOs = returnedToOs && prevPool->IsReturnedToOS();
        freePools_.erase(prevPool->GetFreePoolsIter());
        prevPool->SetSize(prevPool->GetSize() + mmapPool->GetSize());
        delete mmapPool;
        poolMap_.erase(mmapPoolElement--);
        mmapPool = prevPool;
    }

    auto nextPool = (mmapPoolElement != prev(poolMap_.end(), 1)) ? (next(mmapPoolElement, 1)->second) : nullptr;
    if (nextPool != nullptr && !nextPool->IsUsed(freePools_.end())) {
        unreturnedPool_ = unreturnedPool_.GetMmapPool() == nextPool ? UnreturnedToOSPool() : unreturnedPool_;
        ASSERT(ToUintPtr(mmapPool->GetMem()) + mmapPool->GetSize() == ToUintPtr(nextPool->GetMem()));
        returnedToOs = returnedToOs && nextPool->IsReturnedToOS();
        freePools_.erase(nextPool->GetFreePoolsIter());
        mmapPool->SetSize(nextPool->GetSize() + mmapPool->GetSize());
        delete nextPool;
        poolMap_.erase(++mmapPoolElement);
    } else if (nextPool == nullptr) {
        // It is the last pool. Transform it to free space.
        poolMap_.erase(mmapPoolElement);
        size_t size = mmapPool->GetSize();
        delete mmapPool;
        if (returnedToOs) {
            return {size, OSPagesPolicy::IMMEDIATE_RETURN};
        }
        return {size, OSPagesPolicy::NO_RETURN};
    }

    auto res = freePools_.insert(std::pair<size_t, MmapPool *>(mmapPool->GetSize(), mmapPool));
    mmapPool->SetFreePoolsIter(res);
    mmapPool->SetReturnedToOS(returnedToOs);
    return {0, OS_PAGES_POLICY};
}

inline void MmapPoolMap::IterateOverFreePools(const std::function<void(size_t, MmapPool *)> &visitor)
{
    for (auto &it : freePools_) {
        visitor(it.first, it.second);
    }
}

inline void MmapPoolMap::AddNewPool(Pool pool)
{
    auto newMmapPool = new MmapPool(pool, freePools_.end());
    poolMap_.insert(std::pair<void *, MmapPool *>(pool.GetMem(), newMmapPool));
}

inline size_t MmapPoolMap::GetAllSize() const
{
    size_t bytes = 0;
    for (const auto &pool : freePools_) {
        bytes += pool.first;
    }
    return bytes;
}

inline bool MmapPoolMap::HaveEnoughFreePools(size_t poolsNum, size_t poolSize) const
{
    ASSERT(poolSize != 0);
    size_t pools = 0;
    for (auto pool = freePools_.rbegin(); pool != freePools_.rend(); pool++) {
        if (pool->first < poolSize) {
            return false;
        }
        if ((pools += pool->first / poolSize) >= poolsNum) {
            return true;
        }
    }
    return false;
}

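// The constructor pre-reserves the whole object space with a single anonymous mapping (placed in the first
// 4 GB for 32-bit-pointer builds), aligned to PANDA_POOL_ALIGNMENT_IN_BYTES, and records the size limits of
// the non-object spaces (code, compiler, internal, frames, native stacks) from mem::MemConfig.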
inline MmapMemPool::MmapMemPool() : MemPool("MmapMemPool"), nonObjectSpacesCurrentSize_ {0}, nonObjectSpacesMaxSize_ {0}
{
    ASSERT(static_cast<uint64_t>(mem::MemConfig::GetHeapSizeLimit()) <= PANDA_MAX_HEAP_SIZE);
    uint64_t objectSpaceSize = mem::MemConfig::GetHeapSizeLimit();
    if (objectSpaceSize > PANDA_MAX_HEAP_SIZE) {
        LOG_MMAP_MEM_POOL(FATAL) << "The memory limit is too high. We can't allocate that much memory from the system";
    }
    ASSERT(objectSpaceSize <= PANDA_MAX_HEAP_SIZE);
#if defined(PANDA_USE_32_BIT_POINTER) && !defined(PANDA_TARGET_WINDOWS)
    void *mem = ark::os::mem::MapRWAnonymousInFirst4GB(ToVoidPtr(PANDA_32BITS_HEAP_START_ADDRESS), objectSpaceSize,
                                                       // Object space must be aligned to PANDA_POOL_ALIGNMENT_IN_BYTES
                                                       PANDA_POOL_ALIGNMENT_IN_BYTES);
    ASSERT((ToUintPtr(mem) < PANDA_32BITS_HEAP_END_OBJECTS_ADDRESS) || (objectSpaceSize == 0));
    ASSERT(ToUintPtr(mem) + objectSpaceSize <= PANDA_32BITS_HEAP_END_OBJECTS_ADDRESS);
#else
    // We should get memory aligned to PANDA_POOL_ALIGNMENT_IN_BYTES
    void *mem = ark::os::mem::MapRWAnonymousWithAlignmentRaw(objectSpaceSize, PANDA_POOL_ALIGNMENT_IN_BYTES);
#endif
    LOG_IF(((mem == nullptr) && (objectSpaceSize != 0)), FATAL, MEMORYPOOL)
        << "MmapMemPool: couldn't mmap " << objectSpaceSize << " bytes of memory for the system";
    ASSERT(AlignUp(ToUintPtr(mem), PANDA_POOL_ALIGNMENT_IN_BYTES) == ToUintPtr(mem));
    minObjectMemoryAddr_ = ToUintPtr(mem);
    mmapedObjectMemorySize_ = objectSpaceSize;
    commonSpace_.Initialize(minObjectMemoryAddr_, objectSpaceSize);
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_CODE)] = mem::MemConfig::GetCodeCacheSizeLimit();
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_COMPILER)] =
        mem::MemConfig::GetCompilerMemorySizeLimit();
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_INTERNAL)] =
        mem::MemConfig::GetInternalMemorySizeLimit();
    // Should be fixed in 9888
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_FRAMES)] = std::numeric_limits<size_t>::max();
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_NATIVE_STACKS)] =
        mem::MemConfig::GetNativeStacksMemorySizeLimit();
    LOG_MMAP_MEM_POOL(DEBUG) << "Successfully initialized MMapMemPool. Object memory starts from addr "
                             << ToVoidPtr(minObjectMemoryAddr_) << " Preallocated size is equal to " << objectSpaceSize;
}

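// The helpers below track a single "unreturned" free pool, i.e. a free pool whose pages have not yet been
// given back to the OS. The ReleaseFreePagesToOS* methods use them to return memory incrementally.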
inline bool MmapPoolMap::FindAndSetUnreturnedFreePool()
{
    ASSERT(unreturnedPool_.IsEmpty());
    for (auto &&[_, pool] : freePools_) {
        if (!pool->IsReturnedToOS()) {
            unreturnedPool_ = UnreturnedToOSPool(pool);
            return true;
        }
    }
    return false;
}

inline void MmapPoolMap::ReleasePagesInUnreturnedPool(size_t poolSize)
{
    ASSERT(poolSize != 0 && !unreturnedPool_.IsEmpty());

    auto pool = unreturnedPool_.GetAndClearUnreturnedPool(poolSize);
    auto unreturnedMem = ToUintPtr(pool.GetMem());
    os::mem::ReleasePages(unreturnedMem, unreturnedMem + pool.GetSize());
    if (unreturnedPool_.GetUnreturnedSize() == 0) {
        unreturnedPool_.SetReturnedToOS();
        unreturnedPool_ = UnreturnedToOSPool();
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from Free Pool: start = " << pool.GetMem() << " with size "
                             << pool.GetSize();
}

inline void MmapPoolMap::ReleasePagesInFreePools()
{
    IterateOverFreePools([](size_t poolSize, MmapPool *pool) {
        // Iterate over pools that have not been returned to the OS:
        if (!pool->IsReturnedToOS()) {
            pool->SetReturnedToOS(true);
            auto poolStart = ToUintPtr(pool->GetMem());
            LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from Free Pool: start = " << pool->GetMem() << " with size "
                                     << poolSize;
            os::mem::ReleasePages(poolStart, poolStart + poolSize);
        }
    });
}

inline void MmapMemPool::ClearNonObjectMmapedPools()
{
    for (auto i : nonObjectMmapedPools_) {
        Pool pool = std::get<0>(i.second);
        [[maybe_unused]] AllocatorInfo info = std::get<1>(i.second);
        [[maybe_unused]] SpaceType type = std::get<2>(i.second);

        ASSERT(info.GetType() != AllocatorType::UNDEFINED);
        ASSERT(type != SpaceType::SPACE_TYPE_UNDEFINED);
        // FreeRawMemImpl does not clear the nonObjectMmapedPools_ record because munmap(void *, size_t)
        // can fail (return -1)
        FreeRawMemImpl(pool.GetMem(), pool.GetSize());
    }
    nonObjectMmapedPools_.clear();
}

inline MmapMemPool::~MmapMemPool()
{
    ClearNonObjectMmapedPools();
    void *mmapedMemAddr = ToVoidPtr(minObjectMemoryAddr_);
    if (mmapedMemAddr == nullptr) {
        ASSERT(mmapedObjectMemorySize_ == 0);
        return;
    }

    ASSERT(poolMap_.IsEmpty());

    // NOTE(dtrubenkov): consider madvise(mem, total_size_, MADV_DONTNEED); when possible
    if (auto unmapRes = ark::os::mem::UnmapRaw(mmapedMemAddr, mmapedObjectMemorySize_)) {
        LOG_MMAP_MEM_POOL(FATAL) << "Destructor unmap error: " << unmapRes->ToString();
    }
}

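// Arena allocation: a pool of `size` bytes is taken from the pool map, the Arena header is constructed in
// place at the start of the pool, and the usable buffer begins right after the header, aligned to
// ARENA_DEFAULT_ALIGNMENT.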
template <class ArenaT, OSPagesAllocPolicy OS_ALLOC_POLICY>
inline ArenaT *MmapMemPool::AllocArenaImpl(size_t size, SpaceType spaceType, AllocatorType allocatorType,
                                           const void *allocatorAddr)
{
    os::memory::LockHolder lk(lock_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to get new arena with size " << std::dec << size << " for "
                             << SpaceTypeToString(spaceType);
    Pool poolForArena = AllocPoolUnsafe<OS_ALLOC_POLICY>(size, spaceType, allocatorType, allocatorAddr);
    void *mem = poolForArena.GetMem();
    if (UNLIKELY(mem == nullptr)) {
        LOG_MMAP_MEM_POOL(ERROR) << "Failed to allocate new arena"
                                 << " for " << SpaceTypeToString(spaceType);
        return nullptr;
    }
    ASSERT(poolForArena.GetSize() == size);
    auto arenaBuffOffs =
        AlignUp(ToUintPtr(mem) + sizeof(ArenaT), GetAlignmentInBytes(ARENA_DEFAULT_ALIGNMENT)) - ToUintPtr(mem);
    mem = new (mem) ArenaT(size - arenaBuffOffs, ToVoidPtr(ToUintPtr(mem) + arenaBuffOffs));
    LOG_MMAP_MEM_POOL(DEBUG) << "Allocated new arena with size " << std::dec << poolForArena.GetSize()
                             << " at addr = " << std::hex << poolForArena.GetMem() << " for "
                             << SpaceTypeToString(spaceType);
    return static_cast<ArenaT *>(mem);
}

template <class ArenaT, OSPagesPolicy OS_PAGES_POLICY>
inline void MmapMemPool::FreeArenaImpl(ArenaT *arena)
{
    os::memory::LockHolder lk(lock_);
    size_t size = arena->GetSize() + (ToUintPtr(arena->GetMem()) - ToUintPtr(arena));
    ASSERT(size == AlignUp(size, ark::os::mem::GetPageSize()));
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to free arena with size " << std::dec << size << " at addr = " << std::hex
                             << arena;
    FreePoolUnsafe<OS_PAGES_POLICY>(arena, size);
    LOG_MMAP_MEM_POOL(DEBUG) << "Free arena call finished";
}

inline void *MmapMemPool::AllocRawMemNonObjectImpl(size_t size, SpaceType spaceType)
{
    ASSERT(!IsHeapSpace(spaceType));
    void *mem = nullptr;
    if (LIKELY(nonObjectSpacesMaxSize_[SpaceTypeToIndex(spaceType)] >=
               nonObjectSpacesCurrentSize_[SpaceTypeToIndex(spaceType)] + size)) {
        mem = ark::os::mem::MapRWAnonymousWithAlignmentRaw(size, PANDA_POOL_ALIGNMENT_IN_BYTES);
        if (mem != nullptr) {
            nonObjectSpacesCurrentSize_[SpaceTypeToIndex(spaceType)] += size;
        }
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Occupied memory for " << SpaceTypeToString(spaceType) << " - " << std::dec
                             << nonObjectSpacesCurrentSize_[SpaceTypeToIndex(spaceType)];
    return mem;
}

template <OSPagesAllocPolicy OS_ALLOC_POLICY>
inline void *MmapMemPool::AllocRawMemObjectImpl(size_t size, SpaceType type)
{
    ASSERT(IsHeapSpace(type));
    void *mem = commonSpace_.template AllocRawMem<OS_ALLOC_POLICY>(size, &commonSpacePools_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Occupied memory for " << SpaceTypeToString(type) << " - " << std::dec
                             << commonSpace_.GetOccupiedMemorySize();
    return mem;
}

template <OSPagesAllocPolicy OS_ALLOC_POLICY>
inline void *MmapMemPool::AllocRawMemImpl(size_t size, SpaceType type)
{
    os::memory::LockHolder lk(lock_);
    ASSERT(size % ark::os::mem::GetPageSize() == 0);
    // NOTE: We need this check because we use this memory for Pools too,
    // which require PANDA_POOL_ALIGNMENT_IN_BYTES alignment
    ASSERT(size == AlignUp(size, PANDA_POOL_ALIGNMENT_IN_BYTES));
    void *mem = nullptr;
    switch (type) {
        // Internal spaces
        case SpaceType::SPACE_TYPE_COMPILER:
        case SpaceType::SPACE_TYPE_INTERNAL:
        case SpaceType::SPACE_TYPE_CODE:
        case SpaceType::SPACE_TYPE_FRAMES:
        case SpaceType::SPACE_TYPE_NATIVE_STACKS:
            ASSERT(!IsHeapSpace(type));
            mem = AllocRawMemNonObjectImpl(size, type);
            break;
        // Heap spaces:
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
        case SpaceType::SPACE_TYPE_OBJECT:
            mem = AllocRawMemObjectImpl<OS_ALLOC_POLICY>(size, type);
            break;
        default:
            LOG_MMAP_MEM_POOL(FATAL) << "Try to use incorrect " << SpaceTypeToString(type) << " for AllocRawMem.";
    }
    if (UNLIKELY(mem == nullptr)) {
        LOG_MMAP_MEM_POOL(DEBUG) << "OOM when trying to allocate " << size << " bytes for " << SpaceTypeToString(type);
        // We have OOM and must return nullptr
        mem = nullptr;
    } else {
        LOG_MMAP_MEM_POOL(DEBUG) << "Allocate raw memory with size " << size << " at addr = " << mem << " for "
                                 << SpaceTypeToString(type);
    }
    return mem;
}

/* static */
inline void MmapMemPool::FreeRawMemImpl(void *mem, size_t size)
{
    if (auto unmapRes = ark::os::mem::UnmapRaw(mem, size)) {
        LOG_MMAP_MEM_POOL(FATAL) << "Unmap error: " << unmapRes->ToString();
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Deallocated raw memory with size " << size << " at addr = " << mem;
}

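// Pool allocation without taking the lock (the callers already hold lock_). For heap spaces we first try to
// reuse a pool from commonSpacePools_; if none fits, new raw memory is carved out of the pre-reserved object
// space. Non-object spaces always go straight to mmap.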
template <OSPagesAllocPolicy OS_ALLOC_POLICY>
inline Pool MmapMemPool::AllocPoolUnsafe(size_t size, SpaceType spaceType, AllocatorType allocatorType,
                                         const void *allocatorAddr)
{
    ASSERT(size == AlignUp(size, ark::os::mem::GetPageSize()));
    ASSERT(size == AlignUp(size, PANDA_POOL_ALIGNMENT_IN_BYTES));
    Pool pool = NULLPOOL;
    bool addToPoolMap = false;
    // Try to find a free pool among those allocated earlier
    switch (spaceType) {
        case SpaceType::SPACE_TYPE_CODE:
        case SpaceType::SPACE_TYPE_COMPILER:
        case SpaceType::SPACE_TYPE_INTERNAL:
        case SpaceType::SPACE_TYPE_FRAMES:
        case SpaceType::SPACE_TYPE_NATIVE_STACKS:
            // We always use mmap for these space types
            break;
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
        case SpaceType::SPACE_TYPE_OBJECT:
            addToPoolMap = true;
            pool = commonSpacePools_.template PopFreePool<OS_ALLOC_POLICY>(size);
            break;
        default:
            LOG_MMAP_MEM_POOL(FATAL) << "Try to use incorrect " << SpaceTypeToString(spaceType)
                                     << " for AllocPoolUnsafe.";
    }
    if (pool.GetMem() != nullptr) {
        LOG_MMAP_MEM_POOL(DEBUG) << "Reuse pool with size " << pool.GetSize() << " at addr = " << pool.GetMem()
                                 << " for " << SpaceTypeToString(spaceType);
    }
    if (pool.GetMem() == nullptr) {
        void *mem = AllocRawMemImpl<OS_ALLOC_POLICY>(size, spaceType);
        if (mem != nullptr) {
            pool = Pool(size, mem);
        }
    }
    if (pool.GetMem() == nullptr) {
        return pool;
    }
    ASAN_UNPOISON_MEMORY_REGION(pool.GetMem(), pool.GetSize());
    if (UNLIKELY(allocatorAddr == nullptr)) {
        // Save a pointer to the first byte of the Pool
        allocatorAddr = pool.GetMem();
    }
    if (addToPoolMap) {
        poolMap_.AddPoolToMap(ToVoidPtr(ToUintPtr(pool.GetMem()) - GetMinObjectAddress()), pool.GetSize(), spaceType,
                              allocatorType, allocatorAddr);
#ifdef PANDA_QEMU_BUILD
        // Unfortunately, madvise on QEMU works differently, so we have to zero pages by hand.
        if (OS_ALLOC_POLICY == OSPagesAllocPolicy::ZEROED_MEMORY) {
            memset_s(pool.GetMem(), pool.GetSize(), 0, pool.GetSize());
        }
#endif
    } else {
        AddToNonObjectPoolsMap(std::make_tuple(pool, AllocatorInfo(allocatorType, allocatorAddr), spaceType));
    }
    os::mem::TagAnonymousMemory(pool.GetMem(), pool.GetSize(), SpaceTypeToString(spaceType));
    ASSERT(AlignUp(ToUintPtr(pool.GetMem()), PANDA_POOL_ALIGNMENT_IN_BYTES) == ToUintPtr(pool.GetMem()));
    return pool;
}

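// Pool deallocation without taking the lock. Heap-space pools are pushed back to commonSpacePools_ (and the
// tail of the common space is reclaimed if the freed pool turned out to be the last one); non-object pools
// are simply unmapped.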
template <OSPagesPolicy OS_PAGES_POLICY>
inline void MmapMemPool::FreePoolUnsafe(void *mem, size_t size)
{
    ASSERT(size == AlignUp(size, ark::os::mem::GetPageSize()));
    ASAN_POISON_MEMORY_REGION(mem, size);
    SpaceType poolSpaceType = GetSpaceTypeForAddrImpl(mem);
    switch (poolSpaceType) {
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
        case SpaceType::SPACE_TYPE_OBJECT: {
            auto freeSize = commonSpacePools_.PushFreePool<OS_PAGES_POLICY>(Pool(size, mem));
            commonSpace_.FreeMem(freeSize.first, freeSize.second);
            break;
        }
        case SpaceType::SPACE_TYPE_COMPILER:
        case SpaceType::SPACE_TYPE_INTERNAL:
        case SpaceType::SPACE_TYPE_CODE:
        case SpaceType::SPACE_TYPE_FRAMES:
        case SpaceType::SPACE_TYPE_NATIVE_STACKS:
            ASSERT(!IsHeapSpace(poolSpaceType));
            nonObjectSpacesCurrentSize_[SpaceTypeToIndex(poolSpaceType)] -= size;
            FreeRawMemImpl(mem, size);
            break;
        default:
            LOG_MMAP_MEM_POOL(FATAL) << "Try to use incorrect " << SpaceTypeToString(poolSpaceType)
                                     << " for FreePoolUnsafe.";
    }
    os::mem::TagAnonymousMemory(mem, size, nullptr);
    if (IsHeapSpace(poolSpaceType)) {
        poolMap_.RemovePoolFromMap(ToVoidPtr(ToUintPtr(mem) - GetMinObjectAddress()), size);
        if constexpr (OS_PAGES_POLICY == OSPagesPolicy::IMMEDIATE_RETURN) {
            LOG_MMAP_MEM_POOL(DEBUG) << "IMMEDIATE_RETURN and release pages for this pool";
            os::mem::ReleasePages(ToUintPtr(mem), ToUintPtr(mem) + size);
        }
    } else {
        RemoveFromNonObjectPoolsMap(mem);
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Freed " << std::dec << size << " memory for " << SpaceTypeToString(poolSpaceType);
}

template <OSPagesAllocPolicy OS_ALLOC_POLICY>
inline Pool MmapMemPool::AllocPoolImpl(size_t size, SpaceType spaceType, AllocatorType allocatorType,
                                       const void *allocatorAddr)
{
    os::memory::LockHolder lk(lock_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to get new pool with size " << std::dec << size << " for "
                             << SpaceTypeToString(spaceType);
    Pool pool = AllocPoolUnsafe<OS_ALLOC_POLICY>(size, spaceType, allocatorType, allocatorAddr);
    LOG_MMAP_MEM_POOL(DEBUG) << "Allocated new pool with size " << std::dec << pool.GetSize()
                             << " at addr = " << std::hex << pool.GetMem() << " for " << SpaceTypeToString(spaceType);
    return pool;
}

template <OSPagesPolicy OS_PAGES_POLICY>
inline void MmapMemPool::FreePoolImpl(void *mem, size_t size)
{
    os::memory::LockHolder lk(lock_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to free pool with size " << std::dec << size << " at addr = " << std::hex << mem;
    FreePoolUnsafe<OS_PAGES_POLICY>(mem, size);
    LOG_MMAP_MEM_POOL(DEBUG) << "Free pool call finished";
}

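// A minimal usage sketch (hypothetical; it assumes the public AllocPool/FreePool wrappers declared in
// mmap_mem_pool.h forward to the *Impl methods above, and POOL_SIZE is a page- and
// PANDA_POOL_ALIGNMENT_IN_BYTES-aligned constant chosen by the caller):
//
//   Pool p = PoolManager::GetMmapMemPool()->AllocPool(POOL_SIZE, SpaceType::SPACE_TYPE_OBJECT,
//                                                     AllocatorType::RUNSLOTS_ALLOCATOR, allocatorHeader);
//   if (p.GetMem() != nullptr) {
//       // ... use [p.GetMem(), p.GetMem() + p.GetSize()) ...
//       PoolManager::GetMmapMemPool()->FreePool(p.GetMem(), p.GetSize());
//   }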
inline void MmapMemPool::AddToNonObjectPoolsMap(std::tuple<Pool, AllocatorInfo, SpaceType> poolInfo)
{
    void *poolAddr = std::get<0>(poolInfo).GetMem();
    ASSERT(nonObjectMmapedPools_.find(poolAddr) == nonObjectMmapedPools_.end());
    nonObjectMmapedPools_.insert({poolAddr, poolInfo});
}

inline void MmapMemPool::RemoveFromNonObjectPoolsMap(void *poolAddr)
{
    auto element = nonObjectMmapedPools_.find(poolAddr);
    ASSERT(element != nonObjectMmapedPools_.end());
    nonObjectMmapedPools_.erase(element);
}

inline std::tuple<Pool, AllocatorInfo, SpaceType> MmapMemPool::FindAddrInNonObjectPoolsMap(const void *addr) const
{
    auto element = nonObjectMmapedPools_.lower_bound(addr);
    uintptr_t poolStart =
        (element != nonObjectMmapedPools_.end()) ? ToUintPtr(element->first) : (std::numeric_limits<uintptr_t>::max());
    if (ToUintPtr(addr) < poolStart) {
        ASSERT(element != nonObjectMmapedPools_.begin());
        element = std::prev(element);
        poolStart = ToUintPtr(element->first);
    }
    ASSERT(element != nonObjectMmapedPools_.end());
    [[maybe_unused]] uintptr_t poolEnd = poolStart + std::get<0>(element->second).GetSize();
    ASSERT((poolStart <= ToUintPtr(addr)) && (ToUintPtr(addr) < poolEnd));
    return element->second;
}

inline AllocatorInfo MmapMemPool::GetAllocatorInfoForAddrImpl(const void *addr) const
{
    if ((ToUintPtr(addr) < GetMinObjectAddress()) || (ToUintPtr(addr) >= GetMaxObjectAddress())) {
        os::memory::LockHolder lk(lock_);
        return std::get<1>(FindAddrInNonObjectPoolsMap(addr));
    }
    AllocatorInfo info = poolMap_.GetAllocatorInfo(ToVoidPtr(ToUintPtr(addr) - GetMinObjectAddress()));
    ASSERT(info.GetType() != AllocatorType::UNDEFINED);
    ASSERT(info.GetAllocatorHeaderAddr() != nullptr);
    return info;
}

inline SpaceType MmapMemPool::GetSpaceTypeForAddrImpl(const void *addr) const
{
    if ((ToUintPtr(addr) < GetMinObjectAddress()) || (ToUintPtr(addr) >= GetMaxObjectAddress())) {
        os::memory::LockHolder lk(lock_);
        // <2> is the SpaceType element of the tuple
        return std::get<2>(FindAddrInNonObjectPoolsMap(addr));
    }
    // Since this method is designed to work without locks for fast space reading, we add an
    // annotation here to prevent a false alarm with PoolMap::PoolInfo::Destroy.
    // This data race is not real because this method can be called only for valid memory,
    // i.e., memory that was initialized and not freed after that.
    TSAN_ANNOTATE_IGNORE_WRITES_BEGIN();
    SpaceType spaceType = poolMap_.GetSpaceType(ToVoidPtr(ToUintPtr(addr) - GetMinObjectAddress()));
    TSAN_ANNOTATE_IGNORE_WRITES_END();
    ASSERT(spaceType != SpaceType::SPACE_TYPE_UNDEFINED);
    return spaceType;
}

inline void *MmapMemPool::GetStartAddrPoolForAddrImpl(const void *addr) const
{
    // We optimized this call and expect that it will be used only for the object space
    ASSERT(!((ToUintPtr(addr) < GetMinObjectAddress()) || (ToUintPtr(addr) >= GetMaxObjectAddress())));
    void *poolStartAddr = poolMap_.GetFirstByteOfPoolForAddr(ToVoidPtr(ToUintPtr(addr) - GetMinObjectAddress()));
    return ToVoidPtr(ToUintPtr(poolStartAddr) + GetMinObjectAddress());
}

inline size_t MmapMemPool::GetObjectSpaceFreeBytes() const
{
    os::memory::LockHolder lk(lock_);

    size_t unusedBytes = commonSpace_.GetFreeSpace();
    size_t freedBytes = commonSpacePools_.GetAllSize();
    ASSERT(unusedBytes + freedBytes <= commonSpace_.GetMaxSize());
    return unusedBytes + freedBytes;
}

inline bool MmapMemPool::HaveEnoughPoolsInObjectSpace(size_t poolsNum, size_t poolSize) const
{
    os::memory::LockHolder lk(lock_);

    size_t unusedBytes = commonSpace_.GetFreeSpace();
    ASSERT(poolSize != 0);
    size_t pools = unusedBytes / poolSize;
    if (pools >= poolsNum) {
        return true;
    }
    return commonSpacePools_.HaveEnoughFreePools(poolsNum - pools, poolSize);
}

inline size_t MmapMemPool::GetObjectUsedBytes() const
{
    os::memory::LockHolder lk(lock_);
    ASSERT(commonSpace_.GetOccupiedMemorySize() >= commonSpacePools_.GetAllSize());
    return commonSpace_.GetOccupiedMemorySize() - commonSpacePools_.GetAllSize();
}

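// Returns all currently free memory to the OS in one go: first the partially released "unreturned" pool,
// then every free pool, and finally the unused tail of the main (common) space.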
inline void MmapMemPool::ReleaseFreePagesToOS()
{
    os::memory::LockHolder lk(lock_);
    auto unreturnedSize = commonSpacePools_.GetUnreturnedToOsSize();
    if (unreturnedSize != 0) {
        commonSpacePools_.ReleasePagesInUnreturnedPool(unreturnedSize);
    }
    commonSpacePools_.ReleasePagesInFreePools();
    unreturnedSize = commonSpace_.GetUnreturnedToOsSize();
    if (unreturnedSize != 0) {
        commonSpace_.ReleasePagesInMainPool(unreturnedSize);
    }
}

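// Interruptible variant of ReleaseFreePagesToOS: pages are returned in chunks of at most RELEASE_MEM_SIZE
// bytes, re-acquiring the lock between chunks, so a GC (e.g. a G1 mixed collection) can interrupt the
// release via interruptFlag. Returns true if the release was interrupted.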
inline bool MmapMemPool::ReleaseFreePagesToOSWithInterruption(const InterruptFlag &interruptFlag)
{
    bool wasInterrupted = ReleasePagesInUnreturnedPoolWithInterruption(interruptFlag);
    if (wasInterrupted) {
        return true;
    }
    wasInterrupted = ReleasePagesInFreePoolsWithInterruption(interruptFlag);
    if (wasInterrupted) {
        return true;
    }
    wasInterrupted = ReleasePagesInMainPoolWithInterruption(interruptFlag);
    return wasInterrupted;
}

inline bool MmapMemPool::ReleasePagesInUnreturnedPoolWithInterruption(const InterruptFlag &interruptFlag)
{
    while (true) {
        {
            os::memory::LockHolder lk(lock_);
            auto unreturnedSize = commonSpacePools_.GetUnreturnedToOsSize();
            if (unreturnedSize == 0) {
                return false;
            }
            if (interruptFlag == ReleasePagesStatus::NEED_INTERRUPT) {
                return true;
            }
            auto poolSize = std::min(RELEASE_MEM_SIZE, unreturnedSize);
            commonSpacePools_.ReleasePagesInUnreturnedPool(poolSize);
        }
        /* @sync 1
         * @description Wait for interruption from G1GC Mixed collection
         */
    }
}

inline bool MmapMemPool::ReleasePagesInFreePoolsWithInterruption(const InterruptFlag &interruptFlag)
{
    while (true) {
        {
            os::memory::LockHolder lk(lock_);
            auto poolFound = commonSpacePools_.FindAndSetUnreturnedFreePool();
            if (!poolFound) {
                return false;
            }
        }
        auto wasInterrupted = ReleasePagesInUnreturnedPoolWithInterruption(interruptFlag);
        if (wasInterrupted) {
            return true;
        }
    }
}

inline bool MmapMemPool::ReleasePagesInMainPoolWithInterruption(const InterruptFlag &interruptFlag)
{
    while (true) {
        os::memory::LockHolder lk(lock_);
        auto unreturnedSize = commonSpace_.GetUnreturnedToOsSize();
        if (unreturnedSize == 0) {
            return false;
        }
        if (interruptFlag == ReleasePagesStatus::NEED_INTERRUPT) {
            return true;
        }
        auto poolSize = std::min(RELEASE_MEM_SIZE, unreturnedSize);
        commonSpace_.ReleasePagesInMainPool(poolSize);
    }
}

inline void MmapMemPool::SpaceMemory::ReleasePagesInMainPool(size_t poolSize)
{
    ASSERT(poolSize != 0);
    Pool mainPool = GetAndClearUnreturnedToOSMemory(poolSize);
    auto poolStart = ToUintPtr(mainPool.GetMem());
    os::mem::ReleasePages(poolStart, poolStart + mainPool.GetSize());
    LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from common_space: start = " << mainPool.GetMem() << " with size "
                             << mainPool.GetSize();
}

#undef LOG_MMAP_MEM_POOL

}  // namespace ark

#endif  // LIBPANDABASE_MEM_MMAP_MEM_POOL_INLINE_H