/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBPANDABASE_MEM_MMAP_MEM_POOL_INL_H
#define LIBPANDABASE_MEM_MMAP_MEM_POOL_INL_H

#include "mmap_mem_pool.h"
#include "mem.h"
#include "os/mem.h"
#include "utils/logger.h"
#include "mem/arena-inl.h"
#include "mem/mem_config.h"
#include "utils/asan_interface.h"

namespace panda {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_MMAP_MEM_POOL(level) LOG(level, MEMORYPOOL) << "MmapMemPool: "

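// Pops a best-fit free pool of at least `size` bytes from the size-ordered
// free list. If the chosen pool is larger than requested, it is split and the
// tail is reinserted as a new free pool. Returns NULLPOOL when no free pool fits.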
inline Pool MmapPoolMap::PopFreePool(size_t size)
{
    auto element = free_pools_.lower_bound(size);
    if (element == free_pools_.end()) {
        return NULLPOOL;
    }
    auto mmap_pool = element->second;
    ASSERT(!mmap_pool->IsUsed(free_pools_.end()));
    auto element_size = element->first;
    ASSERT(element_size == mmap_pool->GetSize());
    auto element_mem = mmap_pool->GetMem();

    mmap_pool->SetFreePoolsIter(free_pools_.end());
    Pool pool(size, element_mem);
    free_pools_.erase(element);
    if (size < element_size) {
        Pool new_pool(element_size - size, ToVoidPtr(ToUintPtr(element_mem) + size));
        mmap_pool->SetSize(size);
        auto new_mmap_pool = new MmapPool(new_pool, free_pools_.end());
        pool_map_.insert(std::pair<void *, MmapPool *>(new_pool.GetMem(), new_mmap_pool));
        auto new_free_pools_iter = free_pools_.insert(std::pair<size_t, MmapPool *>(new_pool.GetSize(), new_mmap_pool));
        new_mmap_pool->SetFreePoolsIter(new_free_pools_iter);
    }
    return pool;
}

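// Returns a used pool to the free list, coalescing it with free neighbors
// that are adjacent to it in the address-ordered pool_map_.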
inline void MmapPoolMap::PushFreePool(Pool pool)
{
    auto mmap_pool_element = pool_map_.find(pool.GetMem());
    if (UNLIKELY(mmap_pool_element == pool_map_.end())) {
        LOG_MMAP_MEM_POOL(FATAL) << "Can't find the mmap pool in the pool map in PushFreePool";
    }

    auto mmap_pool = mmap_pool_element->second;
    ASSERT(mmap_pool->IsUsed(free_pools_.end()));

    auto prev_pool = (mmap_pool_element != pool_map_.begin()) ? (prev(mmap_pool_element, 1)->second) : nullptr;
    if (prev_pool != nullptr && !prev_pool->IsUsed(free_pools_.end())) {
        ASSERT(ToUintPtr(prev_pool->GetMem()) + prev_pool->GetSize() == ToUintPtr(mmap_pool->GetMem()));
        free_pools_.erase(prev_pool->GetFreePoolsIter());
        prev_pool->SetSize(prev_pool->GetSize() + mmap_pool->GetSize());
        delete mmap_pool;
        pool_map_.erase(mmap_pool_element--);
        mmap_pool = prev_pool;
    }

    auto next_pool = (mmap_pool_element != prev(pool_map_.end(), 1)) ? (next(mmap_pool_element, 1)->second) : nullptr;
    if (next_pool != nullptr && !next_pool->IsUsed(free_pools_.end())) {
        ASSERT(ToUintPtr(mmap_pool->GetMem()) + mmap_pool->GetSize() == ToUintPtr(next_pool->GetMem()));
        free_pools_.erase(next_pool->GetFreePoolsIter());
        mmap_pool->SetSize(next_pool->GetSize() + mmap_pool->GetSize());
        delete next_pool;
        pool_map_.erase(++mmap_pool_element);
    }

    auto res = free_pools_.insert(std::pair<size_t, MmapPool *>(mmap_pool->GetSize(), mmap_pool));
    mmap_pool->SetFreePoolsIter(res);
}

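// Registers a freshly allocated pool in the address-ordered pool map;
// the new pool starts out marked as used (its free-list iterator is end()).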
inline void MmapPoolMap::AddNewPool(Pool pool)
{
    auto new_mmap_pool = new MmapPool(pool, free_pools_.end());
    pool_map_.insert(std::pair<void *, MmapPool *>(pool.GetMem(), new_mmap_pool));
}

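// Returns the total number of bytes currently held in free pools.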
inline size_t MmapPoolMap::GetAllSize() const
{
    size_t bytes = 0;
    for (const auto &pool : free_pools_) {
        bytes += pool.first;
    }
    return bytes;
}

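// Checks whether the free pools can supply at least pools_num pools of
// pool_size bytes each, walking the free list from largest to smallest.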
inline bool MmapPoolMap::HaveEnoughFreePools(size_t pools_num, size_t pool_size) const
{
    size_t pools = 0;
    for (auto pool = free_pools_.rbegin(); pool != free_pools_.rend(); pool++) {
        if (pool->first < pool_size) {
            return false;
        }
        pools += pool->first / pool_size;
        if (pools >= pools_num) {
            return true;
        }
    }
    return false;
}

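// Preallocates the whole object space with a single anonymous mapping: at the
// fixed PANDA_32BITS_HEAP_START_ADDRESS when 32-bit pointers are enabled,
// otherwise at an arbitrary address aligned to PANDA_POOL_ALIGNMENT_IN_BYTES.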
inline MmapMemPool::MmapMemPool() : MemPool("MmapMemPool")
{
    ASSERT(static_cast<uint64_t>(mem::MemConfig::GetHeapSizeLimit()) <= PANDA_MAX_HEAP_SIZE);
    uint64_t object_space_size = mem::MemConfig::GetHeapSizeLimit();
    if (object_space_size > PANDA_MAX_HEAP_SIZE) {
        LOG_MMAP_MEM_POOL(FATAL) << "The memory limit is too high. We can't allocate that much memory from the system";
    }
    ASSERT(object_space_size <= PANDA_MAX_HEAP_SIZE);
#if defined(PANDA_USE_32_BIT_POINTER) && !defined(PANDA_TARGET_WINDOWS)
    void *mem = panda::os::mem::MapRWAnonymousFixedRaw(ToVoidPtr(PANDA_32BITS_HEAP_START_ADDRESS), object_space_size);
    ASSERT((ToUintPtr(mem) == PANDA_32BITS_HEAP_START_ADDRESS) || (object_space_size == 0));
    ASSERT(ToUintPtr(mem) + object_space_size <= PANDA_32BITS_HEAP_END_OBJECTS_ADDRESS);
#else
    // The mapping must be aligned to PANDA_POOL_ALIGNMENT_IN_BYTES
    void *mem = panda::os::mem::MapRWAnonymousWithAlignmentRaw(object_space_size, PANDA_POOL_ALIGNMENT_IN_BYTES);
#endif
    LOG_IF(((mem == nullptr) && (object_space_size != 0)), FATAL, MEMORYPOOL)
        << "MmapMemPool: couldn't mmap " << object_space_size << " bytes of memory from the system";
    ASSERT(AlignUp(ToUintPtr(mem), PANDA_POOL_ALIGNMENT_IN_BYTES) == ToUintPtr(mem));
    min_object_memory_addr_ = ToUintPtr(mem);
    mmaped_object_memory_size_ = object_space_size;
    common_space_.Initialize(min_object_memory_addr_, object_space_size);
    code_space_max_size_ = mem::MemConfig::GetCodeCacheSizeLimit();
    compiler_space_max_size_ = mem::MemConfig::GetCompilerMemorySizeLimit();
    internal_space_max_size_ = mem::MemConfig::GetInternalMemorySizeLimit();
    LOG_MMAP_MEM_POOL(DEBUG) << "Successfully initialized MMapMemPool. Object memory starts at addr "
                             << ToVoidPtr(min_object_memory_addr_) << "; preallocated size is "
                             << object_space_size;
}

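// Unmaps every still-registered non-object pool and then the preallocated
// object space itself; the object pool map must be empty by this point.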
inline MmapMemPool::~MmapMemPool()
{
    for (auto i : non_object_mmaped_pools_) {
        Pool pool = std::get<0>(i.second);
        [[maybe_unused]] AllocatorInfo info = std::get<1>(i.second);
        [[maybe_unused]] SpaceType type = std::get<2>(i.second);

        ASSERT(info.GetType() != AllocatorType::UNDEFINED);
        ASSERT(type != SpaceType::SPACE_TYPE_UNDEFINED);
        // Don't erase the non_object_mmaped_pools_ record here, because unmapping can fail
        // (munmap returning -1); the map is cleared after the loop instead
        FreeRawMemImpl(pool.GetMem(), pool.GetSize());
    }
    non_object_mmaped_pools_.clear();

    void *mmaped_mem_addr = ToVoidPtr(min_object_memory_addr_);
    if (mmaped_mem_addr == nullptr) {
        ASSERT(mmaped_object_memory_size_ == 0);
        return;
    }

    ASSERT(pool_map_.IsEmpty());

    // TODO(dtrubenkov): consider madvise(mem, total_size_, MADV_DONTNEED); when possible
    if (auto unmap_res = panda::os::mem::UnmapRaw(mmaped_mem_addr, mmaped_object_memory_size_)) {
        LOG_MMAP_MEM_POOL(FATAL) << "Destructor unmap error: " << unmap_res->ToString();
    }
}

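// Allocates a pool and placement-constructs an ArenaT header in its first
// bytes; the remaining (size - sizeof(ArenaT)) bytes become the arena buffer.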
template <class ArenaT>
inline ArenaT *MmapMemPool::AllocArenaImpl(size_t size, SpaceType space_type, AllocatorType allocator_type,
                                           const void *allocator_addr)
{
    os::memory::LockHolder lk(lock_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to get new arena with size " << std::dec << size << " for "
                             << SpaceTypeToString(space_type);
    Pool pool_for_arena = AllocPoolUnsafe(size, space_type, allocator_type, allocator_addr);
    void *mem = pool_for_arena.GetMem();
    if (UNLIKELY(mem == nullptr)) {
        LOG_MMAP_MEM_POOL(ERROR) << "Failed to allocate new arena"
                                 << " for " << SpaceTypeToString(space_type);
        return nullptr;
    }
    ASSERT(pool_for_arena.GetSize() == size);
    mem = new (mem) ArenaT(size - sizeof(ArenaT), ToVoidPtr(ToUintPtr(mem) + sizeof(ArenaT)));
    LOG_MMAP_MEM_POOL(DEBUG) << "Allocated new arena with size " << std::dec << pool_for_arena.GetSize()
                             << " at addr = " << std::hex << pool_for_arena.GetMem() << " for "
                             << SpaceTypeToString(space_type);
    return static_cast<ArenaT *>(mem);
}

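// Frees an arena allocated by AllocArenaImpl, restoring the original pool
// size by adding the ArenaT header back to the arena's buffer size.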
template <class ArenaT>
inline void MmapMemPool::FreeArenaImpl(ArenaT *arena)
{
    os::memory::LockHolder lk(lock_);
    size_t size = arena->GetSize();
    size = size + sizeof(ArenaT);
    ASSERT(size == AlignUp(size, panda::os::mem::GetPageSize()));
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to free arena with size " << std::dec << size << " at addr = " << std::hex
                             << arena;
    FreePoolUnsafe(arena, size);
    LOG_MMAP_MEM_POOL(DEBUG) << "Free arena call finished";
}

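// The raw-memory helpers below mmap fresh pages for the compiler, internal,
// code, and object spaces. Each non-object space tracks its current usage
// against a per-space limit and returns nullptr when the limit would be
// exceeded (for the compiler space the limit check can be disabled).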
inline void *MmapMemPool::AllocRawMemCompilerImpl(size_t size)
{
    void *mem = nullptr;
    if (LIKELY(compiler_space_max_size_ >= compiler_space_current_size_ + size || !use_compiler_space_size_limit_)) {
        mem = panda::os::mem::MapRWAnonymousWithAlignmentRaw(size, PANDA_POOL_ALIGNMENT_IN_BYTES);
        if (mem != nullptr) {
            compiler_space_current_size_ += size;
        }
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Occupied memory for " << SpaceTypeToString(SpaceType::SPACE_TYPE_COMPILER) << " - "
                             << std::dec << compiler_space_current_size_;
    return mem;
}

inline void *MmapMemPool::AllocRawMemInternalImpl(size_t size)
{
    void *mem = nullptr;
    if (LIKELY(internal_space_max_size_ >= internal_space_current_size_ + size)) {
        mem = panda::os::mem::MapRWAnonymousWithAlignmentRaw(size, PANDA_POOL_ALIGNMENT_IN_BYTES);
        if (mem != nullptr) {
            internal_space_current_size_ += size;
        }
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Occupied memory for " << SpaceTypeToString(SpaceType::SPACE_TYPE_INTERNAL) << " - "
                             << std::dec << internal_space_current_size_;
    return mem;
}

inline void *MmapMemPool::AllocRawMemCodeImpl(size_t size)
{
    void *mem = nullptr;
    if (LIKELY(code_space_max_size_ >= code_space_current_size_ + size)) {
        mem = panda::os::mem::MapRWAnonymousWithAlignmentRaw(size, PANDA_POOL_ALIGNMENT_IN_BYTES);
        if (mem != nullptr) {
            code_space_current_size_ += size;
        }
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Occupied memory for " << SpaceTypeToString(SpaceType::SPACE_TYPE_CODE) << " - "
                             << std::dec << code_space_current_size_;
    return mem;
}

inline void *MmapMemPool::AllocRawMemObjectImpl(size_t size, SpaceType type)
{
    void *mem = common_space_.AllocRawMem(size, &common_space_pools_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Occupied memory for " << SpaceTypeToString(type) << " - " << std::dec
                             << common_space_.GetOccupiedMemorySize();
    return mem;
}

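// Dispatches a page-aligned raw-memory request to the right space: fresh
// mmapped pages for the compiler/internal/code spaces, or a chunk carved
// out of the preallocated common space for the object spaces.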
inline void *MmapMemPool::AllocRawMemImpl(size_t size, SpaceType type)
{
    os::memory::LockHolder lk(lock_);
    ASSERT(size % panda::os::mem::GetPageSize() == 0);
    // NOTE: We need this check because we use this memory for Pools too,
    // which require PANDA_POOL_ALIGNMENT_IN_BYTES alignment
    ASSERT(size == AlignUp(size, PANDA_POOL_ALIGNMENT_IN_BYTES));
    void *mem = nullptr;
    switch (type) {
        // Internal spaces
        case SpaceType::SPACE_TYPE_COMPILER:
            mem = AllocRawMemCompilerImpl(size);
            break;
        case SpaceType::SPACE_TYPE_INTERNAL:
            mem = AllocRawMemInternalImpl(size);
            break;
        case SpaceType::SPACE_TYPE_CODE:
            mem = AllocRawMemCodeImpl(size);
            break;
        // Heap spaces:
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
        case SpaceType::SPACE_TYPE_OBJECT:
            mem = AllocRawMemObjectImpl(size, type);
            break;
        default:
            LOG_MMAP_MEM_POOL(FATAL) << "Tried to use an incorrect " << SpaceTypeToString(type) << " for AllocRawMem.";
    }
    if (UNLIKELY(mem == nullptr)) {
        LOG_MMAP_MEM_POOL(DEBUG) << "OOM when trying to allocate " << size << " bytes for " << SpaceTypeToString(type);
        // We have OOM and must return nullptr
        mem = nullptr;
    } else {
        LOG_MMAP_MEM_POOL(DEBUG) << "Allocated raw memory with size " << size << " at addr = " << mem << " for "
                                 << SpaceTypeToString(type);
    }
    return mem;
}

/* static */
inline void MmapMemPool::FreeRawMemImpl(void *mem, size_t size)
{
    if (auto unmap_res = panda::os::mem::UnmapRaw(mem, size)) {
        LOG_MMAP_MEM_POOL(FATAL) << "Unmap error: " << unmap_res->ToString();
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Deallocated raw memory with size " << size << " at addr = " << mem;
}

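// Allocates a pool without taking lock_ (the caller must hold it). Object
// spaces first try to reuse a previously freed pool and fall back to fresh
// raw memory; the result is recorded in the object pool map or in the
// non-object pools map.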
inline Pool MmapMemPool::AllocPoolUnsafe(size_t size, SpaceType space_type, AllocatorType allocator_type,
                                         const void *allocator_addr)
{
    ASSERT(size == AlignUp(size, panda::os::mem::GetPageSize()));
    ASSERT(size == AlignUp(size, PANDA_POOL_ALIGNMENT_IN_BYTES));
    Pool pool = NULLPOOL;
    bool add_to_pool_map = false;
    // Try to find a free pool among those allocated earlier
    switch (space_type) {
        case SpaceType::SPACE_TYPE_CODE:
        case SpaceType::SPACE_TYPE_COMPILER:
        case SpaceType::SPACE_TYPE_INTERNAL:
            // We always use mmap for these space types
            break;
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
        case SpaceType::SPACE_TYPE_OBJECT:
            add_to_pool_map = true;
            pool = common_space_pools_.PopFreePool(size);
            break;
        default:
            LOG_MMAP_MEM_POOL(FATAL) << "Tried to use an incorrect " << SpaceTypeToString(space_type)
                                     << " for AllocPoolUnsafe.";
    }
    if (pool.GetMem() != nullptr) {
        LOG_MMAP_MEM_POOL(DEBUG) << "Reuse pool with size " << pool.GetSize() << " at addr = " << pool.GetMem()
                                 << " for " << SpaceTypeToString(space_type);
    }
    if (pool.GetMem() == nullptr) {
        void *mem = AllocRawMemImpl(size, space_type);
        if (mem != nullptr) {
            pool = Pool(size, mem);
        }
    }
    if (pool.GetMem() == nullptr) {
        return pool;
    }
    ASAN_UNPOISON_MEMORY_REGION(pool.GetMem(), pool.GetSize());
    if (UNLIKELY(allocator_addr == nullptr)) {
        // Save a pointer to the first byte of the Pool
        allocator_addr = pool.GetMem();
    }
    if (add_to_pool_map) {
        pool_map_.AddPoolToMap(ToVoidPtr(ToUintPtr(pool.GetMem()) - GetMinObjectAddress()), pool.GetSize(), space_type,
                               allocator_type, allocator_addr);
    } else {
        AddToNonObjectPoolsMap(std::make_tuple(pool, AllocatorInfo(allocator_type, allocator_addr), space_type));
    }
    os::mem::TagAnonymousMemory(pool.GetMem(), pool.GetSize(), SpaceTypeToString(space_type));
    ASSERT(AlignUp(ToUintPtr(pool.GetMem()), PANDA_POOL_ALIGNMENT_IN_BYTES) == ToUintPtr(pool.GetMem()));
    return pool;
}

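// Frees a pool without taking lock_ (the caller must hold it). Object-space
// pools return to the free list and their pages are released to the OS;
// pools of other spaces are unmapped immediately.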
inline void MmapMemPool::FreePoolUnsafe(void *mem, size_t size)
{
    ASSERT(size == AlignUp(size, panda::os::mem::GetPageSize()));
    ASAN_POISON_MEMORY_REGION(mem, size);
    SpaceType pool_space_type = GetSpaceTypeForAddrImpl(mem);
    bool remove_from_pool_map = false;
    switch (pool_space_type) {
        case SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT:
        case SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT:
        case SpaceType::SPACE_TYPE_OBJECT:
            remove_from_pool_map = true;
            common_space_pools_.PushFreePool(Pool(size, mem));
            break;
        case SpaceType::SPACE_TYPE_COMPILER:
            compiler_space_current_size_ -= size;
            FreeRawMemImpl(mem, size);
            break;
        case SpaceType::SPACE_TYPE_INTERNAL:
            internal_space_current_size_ -= size;
            FreeRawMemImpl(mem, size);
            break;
        case SpaceType::SPACE_TYPE_CODE:
            code_space_current_size_ -= size;
            FreeRawMemImpl(mem, size);
            break;
        default:
            LOG_MMAP_MEM_POOL(FATAL) << "Tried to use an incorrect " << SpaceTypeToString(pool_space_type)
                                     << " for FreePoolUnsafe.";
    }
    os::mem::TagAnonymousMemory(mem, size, nullptr);
    if (remove_from_pool_map) {
        pool_map_.RemovePoolFromMap(ToVoidPtr(ToUintPtr(mem) - GetMinObjectAddress()), size);
        os::mem::ReleasePages(ToUintPtr(mem), ToUintPtr(mem) + size);
    } else {
        RemoveFromNonObjectPoolsMap(mem);
    }
    LOG_MMAP_MEM_POOL(DEBUG) << "Freed " << std::dec << size << " bytes of memory for "
                             << SpaceTypeToString(pool_space_type);
}

inline Pool MmapMemPool::AllocPoolImpl(size_t size, SpaceType space_type, AllocatorType allocator_type,
                                       const void *allocator_addr)
{
    os::memory::LockHolder lk(lock_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to get new pool with size " << std::dec << size << " for "
                             << SpaceTypeToString(space_type);
    Pool pool = AllocPoolUnsafe(size, space_type, allocator_type, allocator_addr);
    LOG_MMAP_MEM_POOL(DEBUG) << "Allocated new pool with size " << std::dec << pool.GetSize()
                             << " at addr = " << std::hex << pool.GetMem() << " for " << SpaceTypeToString(space_type);
    return pool;
}

inline void MmapMemPool::FreePoolImpl(void *mem, size_t size)
{
    os::memory::LockHolder lk(lock_);
    LOG_MMAP_MEM_POOL(DEBUG) << "Try to free pool with size " << std::dec << size << " at addr = " << std::hex << mem;
    FreePoolUnsafe(mem, size);
    LOG_MMAP_MEM_POOL(DEBUG) << "Free pool call finished";
}

inline void MmapMemPool::AddToNonObjectPoolsMap(std::tuple<Pool, AllocatorInfo, SpaceType> pool_info)
{
    void *pool_addr = std::get<0>(pool_info).GetMem();
    ASSERT(non_object_mmaped_pools_.find(pool_addr) == non_object_mmaped_pools_.end());
    non_object_mmaped_pools_.insert({pool_addr, pool_info});
}

inline void MmapMemPool::RemoveFromNonObjectPoolsMap(void *pool_addr)
{
    auto element = non_object_mmaped_pools_.find(pool_addr);
    ASSERT(element != non_object_mmaped_pools_.end());
    non_object_mmaped_pools_.erase(element);
}

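// Finds the non-object pool containing addr by locating the closest pool
// start address at or below it; addr must belong to some registered pool.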
inline std::tuple<Pool, AllocatorInfo, SpaceType> MmapMemPool::FindAddrInNonObjectPoolsMap(const void *addr) const
{
    auto element = non_object_mmaped_pools_.lower_bound(addr);
    uintptr_t pool_start = (element != non_object_mmaped_pools_.end()) ? ToUintPtr(element->first)
                                                                       : (std::numeric_limits<uintptr_t>::max());
    if (ToUintPtr(addr) < pool_start) {
        ASSERT(element != non_object_mmaped_pools_.begin());
        element = std::prev(element);
        pool_start = ToUintPtr(element->first);
    }
    ASSERT(element != non_object_mmaped_pools_.end());
    [[maybe_unused]] uintptr_t pool_end = pool_start + std::get<0>(element->second).GetSize();
    ASSERT((pool_start <= ToUintPtr(addr)) && (ToUintPtr(addr) < pool_end));
    return element->second;
}

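// The lookups below resolve an address either through the object pool map
// (addresses inside the preallocated object space) or through the
// non-object pools map (everything else, taken under lock_).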
inline AllocatorInfo MmapMemPool::GetAllocatorInfoForAddrImpl(const void *addr) const
{
    if ((ToUintPtr(addr) < GetMinObjectAddress()) || (ToUintPtr(addr) >= GetMaxObjectAddress())) {
        os::memory::LockHolder lk(lock_);
        return std::get<1>(FindAddrInNonObjectPoolsMap(addr));
    }
    AllocatorInfo info = pool_map_.GetAllocatorInfo(ToVoidPtr(ToUintPtr(addr) - GetMinObjectAddress()));
    ASSERT(info.GetType() != AllocatorType::UNDEFINED);
    ASSERT(info.GetAllocatorHeaderAddr() != nullptr);
    return info;
}

inline SpaceType MmapMemPool::GetSpaceTypeForAddrImpl(const void *addr) const
{
    if ((ToUintPtr(addr) < GetMinObjectAddress()) || (ToUintPtr(addr) >= GetMaxObjectAddress())) {
        os::memory::LockHolder lk(lock_);
        // Tuple element <2> is the SpaceType of the pool
        return std::get<2>(FindAddrInNonObjectPoolsMap(addr));
    }
    SpaceType space_type = pool_map_.GetSpaceType(ToVoidPtr(ToUintPtr(addr) - GetMinObjectAddress()));
    ASSERT(space_type != SpaceType::SPACE_TYPE_UNDEFINED);
    return space_type;
}

inline void *MmapMemPool::GetStartAddrPoolForAddrImpl(const void *addr) const
{
    if ((ToUintPtr(addr) < GetMinObjectAddress()) || (ToUintPtr(addr) >= GetMaxObjectAddress())) {
        os::memory::LockHolder lk(lock_);
        return std::get<0>(FindAddrInNonObjectPoolsMap(addr)).GetMem();
    }
    void *pool_start_addr = pool_map_.GetFirstByteOfPoolForAddr(ToVoidPtr(ToUintPtr(addr) - GetMinObjectAddress()));
    return ToVoidPtr(ToUintPtr(pool_start_addr) + GetMinObjectAddress());
}

inline size_t MmapMemPool::GetObjectSpaceFreeBytes() const
{
    os::memory::LockHolder lk(lock_);

    size_t unused_bytes = common_space_.GetFreeSpace();
    size_t freed_bytes = common_space_pools_.GetAllSize();
    ASSERT(unused_bytes + freed_bytes <= common_space_.GetMaxSize());
    return unused_bytes + freed_bytes;
}

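// Counts the pools that fit into the untouched tail of the common space
// first, then asks the free-pool list to cover the remainder.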
inline bool MmapMemPool::HaveEnoughPoolsInObjectSpace(size_t pools_num, size_t pool_size) const
{
    os::memory::LockHolder lk(lock_);

    size_t unused_bytes = common_space_.GetFreeSpace();
    ASSERT(pool_size != 0);
    size_t pools = unused_bytes / pool_size;
    if (pools >= pools_num) {
        return true;
    }
    return common_space_pools_.HaveEnoughFreePools(pools_num - pools, pool_size);
}

inline size_t MmapMemPool::GetObjectUsedBytes() const
{
    os::memory::LockHolder lk(lock_);
    ASSERT(common_space_.GetOccupiedMemorySize() >= common_space_pools_.GetAllSize());
    return common_space_.GetOccupiedMemorySize() - common_space_pools_.GetAllSize();
}

#undef LOG_MMAP_MEM_POOL

}  // namespace panda

#endif  // LIBPANDABASE_MEM_MMAP_MEM_POOL_INL_H