/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBPANDABASE_MEM_MMAP_MEM_POOL_INLINE_H
#define LIBPANDABASE_MEM_MMAP_MEM_POOL_INLINE_H

#include <utility>
#ifdef PANDA_QEMU_BUILD
// Unfortunately, madvise on QEMU works differently, so we have to zero the pages by hand.
#include <securec.h>
#endif
#include "mmap_mem_pool.h"
#include "mem.h"
#include "os/mem.h"
#include "utils/logger.h"
#include "mem/arena-inl.h"
#include "mem/mem_config.h"
#include "utils/asan_interface.h"

namespace ark {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_MMAP_MEM_POOL(level) LOG(level, MEMORYPOOL) << "MmapMemPool: "

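// PopFreePool performs a best-fit lookup: freePools_ is ordered by pool size, so lower_bound(size)
// selects the smallest free pool that can satisfy the request. If the chosen pool is larger than
// requested, the unused tail is split off and re-inserted as a new free pool.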
template <OSPagesAllocPolicy OS_ALLOC_POLICY>
// CC-OFFNXT(G.FUD.06) perf critical, ODR
inline Pool MmapPoolMap::PopFreePool(size_t size)
{
    auto element = freePools_.lower_bound(size);
    if (element == freePools_.end()) {
        return NULLPOOL;
    }
    auto mmapPool = element->second;
    ASSERT(!mmapPool->IsUsed(freePools_.end()));
    auto elementSize = element->first;
    ASSERT(elementSize == mmapPool->GetSize());
    auto elementMem = mmapPool->GetMem();

    if (unreturnedPool_.GetMmapPool() == mmapPool) {
        unreturnedPool_ = UnreturnedToOSPool();
    }

    mmapPool->SetFreePoolsIter(freePools_.end());
    Pool pool(size, elementMem);
    freePools_.erase(element);
    if (size < elementSize) {
        Pool newPool(elementSize - size, ToVoidPtr(ToUintPtr(elementMem) + size));
        mmapPool->SetSize(size);
        auto newMmapPool = new MmapPool(newPool, freePools_.end());
        poolMap_.insert(std::pair<void *, MmapPool *>(newPool.GetMem(), newMmapPool));
        auto newFreePoolsIter = freePools_.insert(std::pair<size_t, MmapPool *>(newPool.GetSize(), newMmapPool));
        newMmapPool->SetFreePoolsIter(newFreePoolsIter);
        newMmapPool->SetReturnedToOS(mmapPool->IsReturnedToOS());
    }
    if (OS_ALLOC_POLICY == OSPagesAllocPolicy::ZEROED_MEMORY && !mmapPool->IsReturnedToOS()) {
        uintptr_t poolStart = ToUintPtr(pool.GetMem());
        size_t poolSize = pool.GetSize();
        LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from Free Pool to get zeroed memory: start = " << pool.GetMem()
                                 << " with size " << poolSize;
        os::mem::ReleasePages(poolStart, poolStart + poolSize);
    }
    return pool;
}

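// PushFreePool returns a used pool to the free list, coalescing it with free neighbors on both
// sides (poolMap_ is ordered by address, so the adjacent map entries are the adjacent pools in
// memory). If the merged pool ends up being the last one in poolMap_, its size is handed back to
// the caller so the main space can reclaim that memory instead of keeping it in freePools_.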
template <OSPagesPolicy OS_PAGES_POLICY>
// CC-OFFNXT(G.FUD.06) perf critical, ODR
inline std::pair<size_t, OSPagesPolicy> MmapPoolMap::PushFreePool(Pool pool)
{
    bool returnedToOs = OS_PAGES_POLICY == OSPagesPolicy::IMMEDIATE_RETURN;
    auto mmapPoolElement = poolMap_.find(pool.GetMem());
    if (UNLIKELY(mmapPoolElement == poolMap_.end())) {
        LOG_MMAP_MEM_POOL(FATAL) << "can't find mmap pool in the pool map when PushFreePool";
    }

    auto mmapPool = mmapPoolElement->second;
    ASSERT(mmapPool->IsUsed(freePools_.end()));

    auto prevPool = (mmapPoolElement != poolMap_.begin()) ? (prev(mmapPoolElement, 1)->second) : nullptr;
    if (prevPool != nullptr && !prevPool->IsUsed(freePools_.end())) {
        unreturnedPool_ = unreturnedPool_.GetMmapPool() == prevPool ? UnreturnedToOSPool() : unreturnedPool_;
        ASSERT(ToUintPtr(prevPool->GetMem()) + prevPool->GetSize() == ToUintPtr(mmapPool->GetMem()));
        returnedToOs = returnedToOs && prevPool->IsReturnedToOS();
        freePools_.erase(prevPool->GetFreePoolsIter());
        prevPool->SetSize(prevPool->GetSize() + mmapPool->GetSize());
        delete mmapPool;
        poolMap_.erase(mmapPoolElement--);
        mmapPool = prevPool;
    }

    auto nextPool = (mmapPoolElement != prev(poolMap_.end(), 1)) ? (next(mmapPoolElement, 1)->second) : nullptr;
    if (nextPool != nullptr && !nextPool->IsUsed(freePools_.end())) {
        unreturnedPool_ = unreturnedPool_.GetMmapPool() == nextPool ? UnreturnedToOSPool() : unreturnedPool_;
        ASSERT(ToUintPtr(mmapPool->GetMem()) + mmapPool->GetSize() == ToUintPtr(nextPool->GetMem()));
        returnedToOs = returnedToOs && nextPool->IsReturnedToOS();
        freePools_.erase(nextPool->GetFreePoolsIter());
        mmapPool->SetSize(nextPool->GetSize() + mmapPool->GetSize());
        delete nextPool;
        poolMap_.erase(++mmapPoolElement);
    } else if (nextPool == nullptr) {
        // It is the last pool. Transform it to free space.
        poolMap_.erase(mmapPoolElement);
        size_t size = mmapPool->GetSize();
        delete mmapPool;
        if (returnedToOs) {
            return {size, OSPagesPolicy::IMMEDIATE_RETURN};
        }
        return {size, OSPagesPolicy::NO_RETURN};
    }

    auto res = freePools_.insert(std::pair<size_t, MmapPool *>(mmapPool->GetSize(), mmapPool));
    mmapPool->SetFreePoolsIter(res);
    mmapPool->SetReturnedToOS(returnedToOs);
    return {0, OS_PAGES_POLICY};
}

inline void MmapPoolMap::IterateOverFreePools(const std::function<void(size_t, MmapPool *)> &visitor)
{
    for (auto &it : freePools_) {
        visitor(it.first, it.second);
    }
}

inline void MmapPoolMap::AddNewPool(Pool pool)
{
    auto newMmapPool = new MmapPool(pool, freePools_.end());
    poolMap_.insert(std::pair<void *, MmapPool *>(pool.GetMem(), newMmapPool));
}

inline size_t MmapPoolMap::GetAllSize() const
{
    size_t bytes = 0;
    for (const auto &pool : freePools_) {
        bytes += pool.first;
    }
    return bytes;
}

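// HaveEnoughFreePools walks the free pools from largest to smallest and counts how many pools of
// poolSize fit into each. For example, with free pools of 12 KB and 4 KB and poolSize = 4 KB the
// walk counts 3 + 1 = 4 pools. A pool smaller than poolSize ends the walk early, because every
// remaining pool is at least as small.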
// CC-OFFNXT(G.FUD.06) solid logic, ODR
inline bool MmapPoolMap::HaveEnoughFreePools(size_t poolsNum, size_t poolSize) const
{
    ASSERT(poolSize != 0);
    size_t pools = 0;
    for (auto pool = freePools_.rbegin(); pool != freePools_.rend(); pool++) {
        if (pool->first < poolSize) {
            return false;
        }
        if ((pools += pool->first / poolSize) >= poolsNum) {
            return true;
        }
    }
    return false;
}

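// The constructor reserves the whole object heap up front with a single anonymous mapping; object
// pools are later carved out of this reservation rather than mmap'ed individually. Non-object
// spaces (code, compiler, internal, frames, native stacks) only get their size limits here and
// are mapped on demand.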
// CC-OFFNXT(G.FUD.06) solid logic, ODR
inline MmapMemPool::MmapMemPool() : MemPool("MmapMemPool"), nonObjectSpacesCurrentSize_ {0}, nonObjectSpacesMaxSize_ {0}
{
    ASSERT(static_cast<uint64_t>(mem::MemConfig::GetHeapSizeLimit()) <= PANDA_MAX_HEAP_SIZE);
    uint64_t objectSpaceSize = mem::MemConfig::GetHeapSizeLimit();
    if (objectSpaceSize > PANDA_MAX_HEAP_SIZE) {
        LOG_MMAP_MEM_POOL(FATAL) << "The memory limit is too high. We can't allocate so much memory from the system";
    }
    ASSERT(objectSpaceSize <= PANDA_MAX_HEAP_SIZE);
#if defined(PANDA_USE_32_BIT_POINTER) && !defined(PANDA_TARGET_WINDOWS)
    void *mem = ark::os::mem::MapRWAnonymousInFirst4GB(ToVoidPtr(PANDA_32BITS_HEAP_START_ADDRESS), objectSpaceSize,
                                                       // Object space must be aligned to PANDA_POOL_ALIGNMENT_IN_BYTES
                                                       PANDA_POOL_ALIGNMENT_IN_BYTES);
    ASSERT((ToUintPtr(mem) < PANDA_32BITS_HEAP_END_OBJECTS_ADDRESS) || (objectSpaceSize == 0));
    ASSERT(ToUintPtr(mem) + objectSpaceSize <= PANDA_32BITS_HEAP_END_OBJECTS_ADDRESS);
#else
    // We should get memory aligned to PANDA_POOL_ALIGNMENT_IN_BYTES
    void *mem = ark::os::mem::MapRWAnonymousWithAlignmentRaw(objectSpaceSize, PANDA_POOL_ALIGNMENT_IN_BYTES);
#endif
    LOG_IF(((mem == nullptr) && (objectSpaceSize != 0)), FATAL, MEMORYPOOL)
        << "MmapMemPool: couldn't mmap " << objectSpaceSize << " bytes of memory for the system";
    ASSERT(AlignUp(ToUintPtr(mem), PANDA_POOL_ALIGNMENT_IN_BYTES) == ToUintPtr(mem));
    minObjectMemoryAddr_ = ToUintPtr(mem);
    mmapedObjectMemorySize_ = objectSpaceSize;
    commonSpace_.Initialize(minObjectMemoryAddr_, objectSpaceSize);
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_CODE)] = mem::MemConfig::GetCodeCacheSizeLimit();
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_COMPILER)] =
        mem::MemConfig::GetCompilerMemorySizeLimit();
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_INTERNAL)] =
        mem::MemConfig::GetInternalMemorySizeLimit();
    // Should be fixed in 9888
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_FRAMES)] = std::numeric_limits<size_t>::max();
    nonObjectSpacesMaxSize_[SpaceTypeToIndex(SpaceType::SPACE_TYPE_NATIVE_STACKS)] =
        mem::MemConfig::GetNativeStacksMemorySizeLimit();
    LOG_MMAP_MEM_POOL(DEBUG) << "Successfully initialized MMapMemPool. Object memory starts from addr "
                             << ToVoidPtr(minObjectMemoryAddr_) << "; preallocated size is equal to "
                             << objectSpaceSize;
}

inline bool MmapPoolMap::FindAndSetUnreturnedFreePool()
{
    ASSERT(unreturnedPool_.IsEmpty());
    for (auto &&[_, pool] : freePools_) {
        if (!pool->IsReturnedToOS()) {
            unreturnedPool_ = UnreturnedToOSPool(pool);
            return true;
        }
    }
    return false;
}

inline void MmapPoolMap::ReleasePagesInUnreturnedPool(size_t poolSize)
{
    ASSERT(poolSize != 0 && !unreturnedPool_.IsEmpty());

    auto pool = unreturnedPool_.GetAndClearUnreturnedPool(poolSize);
    auto unreturnedMem = ToUintPtr(pool.GetMem());
    os::mem::ReleasePages(unreturnedMem, unreturnedMem + pool.GetSize());
    if (unreturnedPool_.GetUnreturnedSize() == 0) {
        unreturnedPool_.SetReturnedToOS();
        unreturnedPool_ = UnreturnedToOSPool();
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from Free Pool: start = " << pool.GetMem() << " with size "
                             << pool.GetSize();
}

inline void MmapPoolMap::ReleasePagesInFreePools()
{
    IterateOverFreePools([](size_t poolSize, MmapPool *pool) {
        // Iterate over pools that have not been returned to the OS yet:
        if (!pool->IsReturnedToOS()) {
            pool->SetReturnedToOS(true);
            auto poolStart = ToUintPtr(pool->GetMem());
            LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from Free Pool: start = " << pool->GetMem() << " with size "
                                     << poolSize;
            os::mem::ReleasePages(poolStart, poolStart + poolSize);
        }
    });
}

inline void MmapMemPool::ClearNonObjectMmapedPools()
{
    for (auto i : nonObjectMmapedPools_) {
        Pool pool = std::get<0>(i.second);
        [[maybe_unused]] AllocatorInfo info = std::get<1>(i.second);
        [[maybe_unused]] SpaceType type = std::get<2>(i.second);

        ASSERT(info.GetType() != AllocatorType::UNDEFINED);
        ASSERT(type != SpaceType::SPACE_TYPE_UNDEFINED);
        // Does not clear the nonObjectMmapedPools_ record per entry, because unmapping can fail
        // (int munmap(void*, size_t) returned -1)
        FreeRawMemImpl(pool.GetMem(), pool.GetSize());
    }
    nonObjectMmapedPools_.clear();
}

inline MmapMemPool::~MmapMemPool()
{
    ClearNonObjectMmapedPools();
    void *mmapedMemAddr = ToVoidPtr(minObjectMemoryAddr_);
    if (mmapedMemAddr == nullptr) {
        ASSERT(mmapedObjectMemorySize_ == 0);
        return;
    }

    ASSERT(poolMap_.IsEmpty());

    // NOTE(dtrubenkov): consider madvise(mem, total_size_, MADV_DONTNEED); when possible
    if (auto unmapRes = ark::os::mem::UnmapRaw(mmapedMemAddr, mmapedObjectMemorySize_)) {
        LOG_MMAP_MEM_POOL(FATAL) << "Destructor unmap error: " << unmapRes->ToString();
    }
}

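// AllocArenaImpl places the ArenaT header at the start of the pool and aligns the buffer that
// follows it. For illustration (values assumed): if ToUintPtr(mem) == 0x1000, sizeof(ArenaT) == 40
// and the default alignment is 16 bytes, then arenaBuffOffs = AlignUp(0x1000 + 40, 16) - 0x1000
// = 48, so the arena manages (size - 48) bytes starting at mem + 48.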
template <class ArenaT, OSPagesAllocPolicy OS_ALLOC_POLICY>
// CC-OFFNXT(G.FUD.06) perf critical
inline ArenaT *MmapMemPool::AllocArenaImpl(size_t size, SpaceType spaceType, AllocatorType allocatorType,
                                           const void *allocatorAddr)
{
    os::memory::LockHolder lk(lock_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to get new arena with size " << std::dec << size << " for "
                             << SpaceTypeToString(spaceType);
    Pool poolForArena = AllocPoolUnsafe<OS_ALLOC_POLICY>(size, spaceType, allocatorType, allocatorAddr);
    void *mem = poolForArena.GetMem();
    if (UNLIKELY(mem == nullptr)) {
        LOG_MMAP_MEM_POOL(ERROR) << "Failed to allocate new arena"
                                 << " for " << SpaceTypeToString(spaceType);
        return nullptr;
    }
    ASSERT(poolForArena.GetSize() == size);
    auto arenaBuffOffs =
        AlignUp(ToUintPtr(mem) + sizeof(ArenaT), GetAlignmentInBytes(ARENA_DEFAULT_ALIGNMENT)) - ToUintPtr(mem);
    mem = new (mem) ArenaT(size - arenaBuffOffs, ToVoidPtr(ToUintPtr(mem) + arenaBuffOffs));
    LOG_MMAP_MEM_POOL(DEBUG) << "Allocated new arena with size " << std::dec << poolForArena.GetSize()
                             << " at addr = " << std::hex << poolForArena.GetMem() << " for "
                             << SpaceTypeToString(spaceType);
    return static_cast<ArenaT *>(mem);
}

template <class ArenaT, OSPagesPolicy OS_PAGES_POLICY>
// CC-OFFNXT(G.FUD.06) perf critical, ODR
inline void MmapMemPool::FreeArenaImpl(ArenaT *arena)
{
    os::memory::LockHolder lk(lock_);
    size_t size = arena->GetSize() + (ToUintPtr(arena->GetMem()) - ToUintPtr(arena));
    ASSERT(size == AlignUp(size, ark::os::mem::GetPageSize()));
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to free arena with size " << std::dec << size << " at addr = " << std::hex
                             << arena;
    FreePoolUnsafe<OS_PAGES_POLICY>(arena, size);
    LOG_MMAP_MEM_POOL(DEBUG) << "Free arena call finished";
}

// CC-OFFNXT(G.FUD.06) solid logic, ODR
inline void *MmapMemPool::AllocRawMemNonObjectImpl(size_t size, SpaceType spaceType)
{
    ASSERT(!IsHeapSpace(spaceType));
    void *mem = nullptr;
    if (LIKELY(nonObjectSpacesMaxSize_[SpaceTypeToIndex(spaceType)] >=
               nonObjectSpacesCurrentSize_[SpaceTypeToIndex(spaceType)] + size)) {
        mem = ark::os::mem::MapRWAnonymousWithAlignmentRaw(size, PANDA_POOL_ALIGNMENT_IN_BYTES);
        if (mem != nullptr) {
            nonObjectSpacesCurrentSize_[SpaceTypeToIndex(spaceType)] += size;
        }
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Occupied memory for " << SpaceTypeToString(spaceType) << " - " << std::dec
                             << nonObjectSpacesCurrentSize_[SpaceTypeToIndex(spaceType)];
    return mem;
}

template <OSPagesAllocPolicy OS_ALLOC_POLICY>
inline void *MmapMemPool::AllocRawMemObjectImpl(size_t size, SpaceType type)
{
    ASSERT(IsHeapSpace(type));
    void *mem = commonSpace_.template AllocRawMem<OS_ALLOC_POLICY>(size, &commonSpacePools_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Occupied memory for " << SpaceTypeToString(type) << " - " << std::dec
                             << commonSpace_.GetOccupiedMemorySize();
    return mem;
}

template <OSPagesAllocPolicy OS_ALLOC_POLICY>
// CC-OFFNXT(G.FUD.06) perf critical, solid logic, ODR
inline void *MmapMemPool::AllocRawMemImpl(size_t size, SpaceType type)
{
    os::memory::LockHolder lk(lock_);
    ASSERT(size % ark::os::mem::GetPageSize() == 0);
    // NOTE: We need this check because we use this memory for Pools too
    // which require PANDA_POOL_ALIGNMENT_IN_BYTES alignment
    ASSERT(size == AlignUp(size, PANDA_POOL_ALIGNMENT_IN_BYTES));
    void *mem = nullptr;
    switch (type) {
        // Internal spaces
        case SpaceType::SPACE_TYPE_COMPILER:
        case SpaceType::SPACE_TYPE_INTERNAL:
        case SpaceType::SPACE_TYPE_CODE:
        case SpaceType::SPACE_TYPE_FRAMES:
        case SpaceType::SPACE_TYPE_NATIVE_STACKS:
            ASSERT(!IsHeapSpace(type));
            mem = AllocRawMemNonObjectImpl(size, type);
            break;
        // Heap spaces:
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
        case SpaceType::SPACE_TYPE_OBJECT:
            mem = AllocRawMemObjectImpl<OS_ALLOC_POLICY>(size, type);
            break;
        default:
            LOG_MMAP_MEM_POOL(FATAL) << "Try to use incorrect " << SpaceTypeToString(type) << " for AllocRawMem.";
    }
    if (UNLIKELY(mem == nullptr)) {
        LOG_MMAP_MEM_POOL(DEBUG) << "OOM when trying to allocate " << size << " bytes for " << SpaceTypeToString(type);
        // We have OOM and must return nullptr
        mem = nullptr;
    } else {
        LOG_MMAP_MEM_POOL(DEBUG) << "Allocate raw memory with size " << size << " at addr = " << mem << " for "
                                 << SpaceTypeToString(type);
    }
    return mem;
}

/* static */
inline void MmapMemPool::FreeRawMemImpl(void *mem, size_t size)
{
    if (auto unmapRes = ark::os::mem::UnmapRaw(mem, size)) {
        LOG_MMAP_MEM_POOL(FATAL) << "Unmap error: " << unmapRes->ToString();
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Deallocated raw memory with size " << size << " at addr = " << mem;
}

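// AllocPoolUnsafe (the caller must hold lock_) first tries to reuse a free pool of the requested
// size for heap spaces and only falls back to AllocRawMemImpl if that fails; non-object spaces
// always go straight to mmap. On success the pool is registered either in poolMap_ (heap spaces)
// or in nonObjectMmapedPools_.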
template <OSPagesAllocPolicy OS_ALLOC_POLICY>
// CC-OFFNXT(G.FUD.06, G.FUN.01-CPP) perf critical, solid logic
inline Pool MmapMemPool::AllocPoolUnsafe(size_t size, SpaceType spaceType, AllocatorType allocatorType,
                                         const void *allocatorAddr)
{
    ASSERT(size == AlignUp(size, ark::os::mem::GetPageSize()));
    ASSERT(size == AlignUp(size, PANDA_POOL_ALIGNMENT_IN_BYTES));
    Pool pool = NULLPOOL;
    bool addToPoolMap = false;
    // Try to find a free pool among those allocated earlier
    switch (spaceType) {
        case SpaceType::SPACE_TYPE_CODE:
        case SpaceType::SPACE_TYPE_COMPILER:
        case SpaceType::SPACE_TYPE_INTERNAL:
        case SpaceType::SPACE_TYPE_FRAMES:
        case SpaceType::SPACE_TYPE_NATIVE_STACKS:
            // We always use mmap for these space types
            break;
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
        case SpaceType::SPACE_TYPE_OBJECT:
            addToPoolMap = true;
            pool = commonSpacePools_.template PopFreePool<OS_ALLOC_POLICY>(size);
            break;
        default:
            LOG_MMAP_MEM_POOL(FATAL) << "Try to use incorrect " << SpaceTypeToString(spaceType)
                                     << " for AllocPoolUnsafe.";
    }
    if (pool.GetMem() != nullptr) {
        LOG_MMAP_MEM_POOL(DEBUG) << "Reuse pool with size " << pool.GetSize() << " at addr = " << pool.GetMem()
                                 << " for " << SpaceTypeToString(spaceType);
    }
    if (pool.GetMem() == nullptr) {
        void *mem = AllocRawMemImpl<OS_ALLOC_POLICY>(size, spaceType);
        if (mem != nullptr) {
            pool = Pool(size, mem);
        }
    }
    if (pool.GetMem() == nullptr) {
        return pool;
    }
    ASAN_UNPOISON_MEMORY_REGION(pool.GetMem(), pool.GetSize());
    if (UNLIKELY(allocatorAddr == nullptr)) {
        // Save a pointer to the first byte of the Pool
        allocatorAddr = pool.GetMem();
    }
    if (addToPoolMap) {
        poolMap_.AddPoolToMap(ToVoidPtr(ToUintPtr(pool.GetMem()) - GetMinObjectAddress()), pool.GetSize(), spaceType,
                              allocatorType, allocatorAddr);
#ifdef PANDA_QEMU_BUILD
        // Unfortunately, madvise on QEMU works differently, so we have to zero the pages by hand.
        if (OS_ALLOC_POLICY == OSPagesAllocPolicy::ZEROED_MEMORY) {
            memset_s(pool.GetMem(), pool.GetSize(), 0, pool.GetSize());
        }
#endif
    } else {
        AddToNonObjectPoolsMap(std::make_tuple(pool, AllocatorInfo(allocatorType, allocatorAddr), spaceType));
    }
    os::mem::TagAnonymousMemory(pool.GetMem(), pool.GetSize(), SpaceTypeToString(spaceType));
    ASSERT(AlignUp(ToUintPtr(pool.GetMem()), PANDA_POOL_ALIGNMENT_IN_BYTES) == ToUintPtr(pool.GetMem()));
    return pool;
}

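// FreePoolUnsafe routes the pool by its space type: heap-space pools are pushed back into
// commonSpacePools_ (and any space coalesced off the end of the heap is handed back to
// commonSpace_), while non-object pools are simply unmapped.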
template <OSPagesPolicy OS_PAGES_POLICY>
// CC-OFFNXT(G.FUD.06) perf critical, solid logic, ODR
inline void MmapMemPool::FreePoolUnsafe(void *mem, size_t size)
{
    ASSERT(size == AlignUp(size, ark::os::mem::GetPageSize()));
    ASAN_POISON_MEMORY_REGION(mem, size);
    SpaceType poolSpaceType = GetSpaceTypeForAddrImpl(mem);
    switch (poolSpaceType) {
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
        case SpaceType::SPACE_TYPE_OBJECT: {
            auto freeSize = commonSpacePools_.PushFreePool<OS_PAGES_POLICY>(Pool(size, mem));
            commonSpace_.FreeMem(freeSize.first, freeSize.second);
            break;
        }
        case SpaceType::SPACE_TYPE_COMPILER:
        case SpaceType::SPACE_TYPE_INTERNAL:
        case SpaceType::SPACE_TYPE_CODE:
        case SpaceType::SPACE_TYPE_FRAMES:
        case SpaceType::SPACE_TYPE_NATIVE_STACKS:
            ASSERT(!IsHeapSpace(poolSpaceType));
            nonObjectSpacesCurrentSize_[SpaceTypeToIndex(poolSpaceType)] -= size;
            FreeRawMemImpl(mem, size);
            break;
        default:
            LOG_MMAP_MEM_POOL(FATAL) << "Try to use incorrect " << SpaceTypeToString(poolSpaceType)
                                     << " for FreePoolUnsafe.";
    }
    os::mem::TagAnonymousMemory(mem, size, nullptr);
    if (IsHeapSpace(poolSpaceType)) {
        poolMap_.RemovePoolFromMap(ToVoidPtr(ToUintPtr(mem) - GetMinObjectAddress()), size);
        if constexpr (OS_PAGES_POLICY == OSPagesPolicy::IMMEDIATE_RETURN) {
            LOG_MMAP_MEM_POOL(DEBUG) << "IMMEDIATE_RETURN and release pages for this pool";
            os::mem::ReleasePages(ToUintPtr(mem), ToUintPtr(mem) + size);
        }
    } else {
        RemoveFromNonObjectPoolsMap(mem);
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Freed " << std::dec << size << " memory for " << SpaceTypeToString(poolSpaceType);
}

template <OSPagesAllocPolicy OS_ALLOC_POLICY>
inline Pool MmapMemPool::AllocPoolImpl(size_t size, SpaceType spaceType, AllocatorType allocatorType,
                                       const void *allocatorAddr)
{
    os::memory::LockHolder lk(lock_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to get new pool with size " << std::dec << size << " for "
                             << SpaceTypeToString(spaceType);
    Pool pool = AllocPoolUnsafe<OS_ALLOC_POLICY>(size, spaceType, allocatorType, allocatorAddr);
    LOG_MMAP_MEM_POOL(DEBUG) << "Allocated new pool with size " << std::dec << pool.GetSize()
                             << " at addr = " << std::hex << pool.GetMem() << " for " << SpaceTypeToString(spaceType);
    return pool;
}

template <OSPagesPolicy OS_PAGES_POLICY>
inline void MmapMemPool::FreePoolImpl(void *mem, size_t size)
{
    os::memory::LockHolder lk(lock_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to free pool with size " << std::dec << size << " at addr = " << std::hex << mem;
    FreePoolUnsafe<OS_PAGES_POLICY>(mem, size);
    LOG_MMAP_MEM_POOL(DEBUG) << "Free pool call finished";
}

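// Illustrative usage sketch (not part of this header; the AllocPool/FreePool entry points and the
// 1_MB literal are assumed from the surrounding codebase, and callers normally go through
// PoolManager instead of using MmapMemPool directly):
//
//     Pool p = memPool->AllocPool(1_MB, SpaceType::SPACE_TYPE_OBJECT,
//                                 AllocatorType::RUNSLOTS_ALLOCATOR);
//     if (p.GetMem() != nullptr) {
//         // ... use p.GetMem() / p.GetSize() ...
//         memPool->FreePool(p.GetMem(), p.GetSize());
//     }
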
inline void MmapMemPool::AddToNonObjectPoolsMap(std::tuple<Pool, AllocatorInfo, SpaceType> poolInfo)
{
    void *poolAddr = std::get<0>(poolInfo).GetMem();
    ASSERT(nonObjectMmapedPools_.find(poolAddr) == nonObjectMmapedPools_.end());
    nonObjectMmapedPools_.insert({poolAddr, poolInfo});
}

inline void MmapMemPool::RemoveFromNonObjectPoolsMap(void *poolAddr)
{
    auto element = nonObjectMmapedPools_.find(poolAddr);
    ASSERT(element != nonObjectMmapedPools_.end());
    nonObjectMmapedPools_.erase(element);
}

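// nonObjectMmapedPools_ is keyed by pool start address, so lower_bound(addr) lands on the first
// pool starting at or after addr; if addr points inside a pool rather than exactly at its start,
// the owning pool is the previous entry.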
// CC-OFFNXT(G.FUD.06) Splitting this function will degrade readability, solid logic, ODR
inline std::tuple<Pool, AllocatorInfo, SpaceType> MmapMemPool::FindAddrInNonObjectPoolsMap(const void *addr) const
{
    auto element = nonObjectMmapedPools_.lower_bound(addr);
    uintptr_t poolStart =
        (element != nonObjectMmapedPools_.end()) ? ToUintPtr(element->first) : (std::numeric_limits<uintptr_t>::max());
    if (ToUintPtr(addr) < poolStart) {
        ASSERT(element != nonObjectMmapedPools_.begin());
        element = std::prev(element);
        poolStart = ToUintPtr(element->first);
    }
    ASSERT(element != nonObjectMmapedPools_.end());
    [[maybe_unused]] uintptr_t poolEnd = poolStart + std::get<0>(element->second).GetSize();
    ASSERT((poolStart <= ToUintPtr(addr)) && (ToUintPtr(addr) < poolEnd));
    return element->second;
}

inline AllocatorInfo MmapMemPool::GetAllocatorInfoForAddrImpl(const void *addr) const
{
    if ((ToUintPtr(addr) < GetMinObjectAddress()) || (ToUintPtr(addr) >= GetMaxObjectAddress())) {
        os::memory::LockHolder lk(lock_);
        return std::get<1>(FindAddrInNonObjectPoolsMap(addr));
    }
    AllocatorInfo info = poolMap_.GetAllocatorInfo(ToVoidPtr(ToUintPtr(addr) - GetMinObjectAddress()));
    ASSERT(info.GetType() != AllocatorType::UNDEFINED);
    ASSERT(info.GetAllocatorHeaderAddr() != nullptr);
    return info;
}

inline SpaceType MmapMemPool::GetSpaceTypeForAddrImpl(const void *addr) const
{
    if ((ToUintPtr(addr) < GetMinObjectAddress()) || (ToUintPtr(addr) >= GetMaxObjectAddress())) {
        os::memory::LockHolder lk(lock_);
        // <2> is the SpaceType element of the tuple
        return std::get<2>(FindAddrInNonObjectPoolsMap(addr));
    }
    // Since this method is designed to work without locks for fast space reading, we add an
    // annotation here to prevent a false alarm with PoolMap::PoolInfo::Destroy.
    // This data race is not real because this method can be called only for valid memory,
    // i.e., memory that was initialized and not freed after that.
    TSAN_ANNOTATE_IGNORE_WRITES_BEGIN();
    SpaceType spaceType = poolMap_.GetSpaceType(ToVoidPtr(ToUintPtr(addr) - GetMinObjectAddress()));
    TSAN_ANNOTATE_IGNORE_WRITES_END();
    ASSERT(spaceType != SpaceType::SPACE_TYPE_UNDEFINED);
    return spaceType;
}

inline void *MmapMemPool::GetStartAddrPoolForAddrImpl(const void *addr) const
{
    // We optimized this call and expect that it will be used only for the object space
    ASSERT(!((ToUintPtr(addr) < GetMinObjectAddress()) || (ToUintPtr(addr) >= GetMaxObjectAddress())));
    void *poolStartAddr = poolMap_.GetFirstByteOfPoolForAddr(ToVoidPtr(ToUintPtr(addr) - GetMinObjectAddress()));
    return ToVoidPtr(ToUintPtr(poolStartAddr) + GetMinObjectAddress());
}

inline size_t MmapMemPool::GetObjectSpaceFreeBytes() const
{
    os::memory::LockHolder lk(lock_);

    size_t unusedBytes = commonSpace_.GetFreeSpace();
    size_t freedBytes = commonSpacePools_.GetAllSize();
    ASSERT(unusedBytes + freedBytes <= commonSpace_.GetMaxSize());
    return unusedBytes + freedBytes;
}

inline bool MmapMemPool::HaveEnoughPoolsInObjectSpace(size_t poolsNum, size_t poolSize) const
{
    os::memory::LockHolder lk(lock_);

    size_t unusedBytes = commonSpace_.GetFreeSpace();
    ASSERT(poolSize != 0);
    size_t pools = unusedBytes / poolSize;
    if (pools >= poolsNum) {
        return true;
    }
    return commonSpacePools_.HaveEnoughFreePools(poolsNum - pools, poolSize);
}

inline size_t MmapMemPool::GetObjectUsedBytes() const
{
    os::memory::LockHolder lk(lock_);
    ASSERT(commonSpace_.GetOccupiedMemorySize() >= commonSpacePools_.GetAllSize());
    return commonSpace_.GetOccupiedMemorySize() - commonSpacePools_.GetAllSize();
}

inline void MmapMemPool::ReleaseFreePagesToOS()
{
    os::memory::LockHolder lk(lock_);
    auto unreturnedSize = commonSpacePools_.GetUnreturnedToOsSize();
    if (unreturnedSize != 0) {
        commonSpacePools_.ReleasePagesInUnreturnedPool(unreturnedSize);
    }
    commonSpacePools_.ReleasePagesInFreePools();
    unreturnedSize = commonSpace_.GetUnreturnedToOsSize();
    if (unreturnedSize != 0) {
        commonSpace_.ReleasePagesInMainPool(unreturnedSize);
    }
}

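// The interruptible variant below releases pages in three phases, mirroring ReleaseFreePagesToOS:
// the currently tracked unreturned pool, then the remaining free pools, then the main pool. Each
// phase works in RELEASE_MEM_SIZE chunks and re-checks interruptFlag under the lock between
// chunks, so a concurrent GC can interrupt the release promptly.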
inline bool MmapMemPool::ReleaseFreePagesToOSWithInterruption(const InterruptFlag &interruptFlag)
{
    bool wasInterrupted = ReleasePagesInUnreturnedPoolWithInterruption(interruptFlag);
    if (wasInterrupted) {
        return true;
    }
    wasInterrupted = ReleasePagesInFreePoolsWithInterruption(interruptFlag);
    if (wasInterrupted) {
        return true;
    }
    wasInterrupted = ReleasePagesInMainPoolWithInterruption(interruptFlag);
    return wasInterrupted;
}

// CC-OFFNXT(G.FUD.06) perf critical, ODR
inline bool MmapMemPool::ReleasePagesInUnreturnedPoolWithInterruption(const InterruptFlag &interruptFlag)
{
    while (true) {
        {
            os::memory::LockHolder lk(lock_);
            auto unreturnedSize = commonSpacePools_.GetUnreturnedToOsSize();
            if (unreturnedSize == 0) {
                return false;
            }
            if (interruptFlag == ReleasePagesStatus::NEED_INTERRUPT) {
                return true;
            }
            auto poolSize = std::min(RELEASE_MEM_SIZE, unreturnedSize);
            commonSpacePools_.ReleasePagesInUnreturnedPool(poolSize);
        }
        /* @sync 1
         * @description Wait for interruption from G1GC Mixed collection
         */
    }
}

// CC-OFFNXT(G.FUD.06) perf critical, ODR
inline bool MmapMemPool::ReleasePagesInFreePoolsWithInterruption(const InterruptFlag &interruptFlag)
{
    while (true) {
        {
            os::memory::LockHolder lk(lock_);
            auto poolFound = commonSpacePools_.FindAndSetUnreturnedFreePool();
            if (!poolFound) {
                return false;
            }
        }
        auto wasInterrupted = ReleasePagesInUnreturnedPoolWithInterruption(interruptFlag);
        if (wasInterrupted) {
            return true;
        }
    }
}

// CC-OFFNXT(G.FUD.06) solid logic, ODR
inline bool MmapMemPool::ReleasePagesInMainPoolWithInterruption(const InterruptFlag &interruptFlag)
{
    while (true) {
        os::memory::LockHolder lk(lock_);
        auto unreturnedSize = commonSpace_.GetUnreturnedToOsSize();
        if (unreturnedSize == 0) {
            return false;
        }
        if (interruptFlag == ReleasePagesStatus::NEED_INTERRUPT) {
            return true;
        }
        auto poolSize = std::min(RELEASE_MEM_SIZE, unreturnedSize);
        commonSpace_.ReleasePagesInMainPool(poolSize);
    }
}

inline void MmapMemPool::SpaceMemory::ReleasePagesInMainPool(size_t poolSize)
{
    ASSERT(poolSize != 0);
    Pool mainPool = GetAndClearUnreturnedToOSMemory(poolSize);
    auto poolStart = ToUintPtr(mainPool.GetMem());
    os::mem::ReleasePages(poolStart, poolStart + mainPool.GetSize());
    LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from common_space: start = " << mainPool.GetMem() << " with size "
                             << mainPool.GetSize();
}

#undef LOG_MMAP_MEM_POOL

}  // namespace ark

#endif  // LIBPANDABASE_MEM_MMAP_MEM_POOL_INLINE_H