/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBPANDABASE_MEM_MMAP_MEM_POOL_H
#define LIBPANDABASE_MEM_MMAP_MEM_POOL_H

#include "libpandabase/mem/mem_pool.h"
#include "libpandabase/mem/mem.h"
#include "libpandabase/os/mem.h"
#include "libpandabase/os/mutex.h"
#include "libpandabase/mem/space.h"

#include <array>
#include <atomic>
#include <functional>
#include <map>
#include <tuple>
#include <utility>

namespace ark {

enum class ReleasePagesStatus { RELEASING_PAGES, NEED_INTERRUPT, WAS_INTERRUPTED, FINISHED };

class MMapMemPoolTest;
namespace mem::test {
class InternalAllocatorTest;
}  // namespace mem::test

class MmapPool {
public:
    using FreePoolsIter = std::multimap<size_t, MmapPool *>::iterator;

    explicit MmapPool(Pool pool, FreePoolsIter freePoolsIter, bool returnedToOs = true)
        : pool_(pool), returnedToOs_(returnedToOs), freePoolsIter_(freePoolsIter)
    {
    }

    ~MmapPool() = default;

    DEFAULT_COPY_SEMANTIC(MmapPool);
    DEFAULT_MOVE_SEMANTIC(MmapPool);

    size_t GetSize()
    {
        return pool_.GetSize();
    }

    bool IsReturnedToOS() const
    {
        return returnedToOs_;
    }

    void SetReturnedToOS(bool value)
    {
        returnedToOs_ = value;
    }

    void SetSize(size_t size)
    {
        pool_ = Pool(size, GetMem());
    }

    void *GetMem()
    {
        return pool_.GetMem();
    }

    // A free pool is stored in freePools_, and its iterator is recorded in freePoolsIter_.
    // If freePoolsIter_ is equal to the end of freePools_, the pool is in use.
    bool IsUsed(FreePoolsIter endIter)
    {
        return freePoolsIter_ == endIter;
    }

    FreePoolsIter GetFreePoolsIter()
    {
        return freePoolsIter_;
    }

    void SetFreePoolsIter(FreePoolsIter freePoolsIter)
    {
        freePoolsIter_ = freePoolsIter;
    }

private:
    Pool pool_;
    bool returnedToOs_;
    // Iterator to this pool's entry in the freePools_ multimap
    FreePoolsIter freePoolsIter_;
};
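
// Illustrative sketch (editorial, not part of the original header): how freePoolsIter_ doubles as
// the "used" flag against the freePools_ multimap owned by MmapPoolMap below. The identifiers are
// taken from this file; the snippet itself is hypothetical:
//
//   std::multimap<size_t, MmapPool *> freePools;
//   MmapPool *mmapPool = /* some pool tracked by MmapPoolMap */;
//   // Mark the pool free: remember its position in the multimap
//   mmapPool->SetFreePoolsIter(freePools.insert({mmapPool->GetSize(), mmapPool}));
//   // Mark the pool used again: erase the entry and reset the iterator to end()
//   freePools.erase(mmapPool->GetFreePoolsIter());
//   mmapPool->SetFreePoolsIter(freePools.end());
//   ASSERT(mmapPool->IsUsed(freePools.end()));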

/// @brief Represents the current pool whose pages are being returned to the OS
class UnreturnedToOSPool {
public:
    UnreturnedToOSPool() = default;

    explicit UnreturnedToOSPool(MmapPool *pool) : pool_(pool)
    {
        ASSERT(pool_ == nullptr || !pool_->IsReturnedToOS());
    }

    /**
     * Separates a part of the not-yet-returned-to-OS memory from the MmapPool
     * and creates a pool of @param size that needs to be cleared
     * @param size size of the unreturned pool
     * @return pool that needs to be cleared
     */
    Pool GetAndClearUnreturnedPool(size_t size)
    {
        ASSERT(pool_ != nullptr);
        ASSERT(returnedToOsSize_ + size <= pool_->GetSize());
        auto unreturnedMem = ToVoidPtr(ToUintPtr(pool_->GetMem()) + returnedToOsSize_);
        returnedToOsSize_ += size;
        return Pool(size, unreturnedMem);
    }

    void SetReturnedToOS()
    {
        ASSERT(pool_ != nullptr);
        ASSERT(returnedToOsSize_ == pool_->GetSize());
        pool_->SetReturnedToOS(true);
    }

    bool IsEmpty() const
    {
        return pool_ == nullptr;
    }

    size_t GetUnreturnedSize() const
    {
        ASSERT(pool_ != nullptr);
        return pool_->GetSize() - returnedToOsSize_;
    }

    MmapPool *GetMmapPool() const
    {
        return pool_;
    }

private:
    MmapPool *pool_ {nullptr};
    size_t returnedToOsSize_ {0};
};

class MmapPoolMap {
public:
    MmapPoolMap() = default;

    ~MmapPoolMap()
    {
        for (auto &pool : poolMap_) {
            delete pool.second;
        }
    }

    DEFAULT_COPY_SEMANTIC(MmapPoolMap);
    DEFAULT_MOVE_SEMANTIC(MmapPoolMap);

    // Find a free pool of sufficient size in the map. Split the pool if it is larger than the required size.
    template <OSPagesAllocPolicy OS_ALLOC_POLICY>
    Pool PopFreePool(size_t size);

    // Push the unused pool to the map.
    // If it is the last pool, the method returns its size and pages policy.
    // Otherwise, the method returns 0.
    template <OSPagesPolicy OS_PAGES_POLICY>
    [[nodiscard]] std::pair<size_t, OSPagesPolicy> PushFreePool(Pool pool);

    // Add a new pool to the map. This pool will be marked as used.
    void AddNewPool(Pool pool);

    // Get the total size of all free pools.
    size_t GetAllSize() const;

    /**
     * Iterate over all free pools
     * @param visitor function for pool visit
     */
    void IterateOverFreePools(const std::function<void(size_t, MmapPool *)> &visitor);

    /**
     * Check whether enough pools can be allocated from the free pools
     * @param poolsNum the number of pools we need
     * @param poolSize the size of each pool we need
     * @return true if the free pools are guaranteed to have enough space for the required pools
     */
    bool HaveEnoughFreePools(size_t poolsNum, size_t poolSize) const;

    bool FindAndSetUnreturnedFreePool();

    void ReleasePagesInUnreturnedPool(size_t poolSize);

    void ReleasePagesInFreePools();

    size_t GetUnreturnedToOsSize() const
    {
        return unreturnedPool_.IsEmpty() ? 0 : unreturnedPool_.GetUnreturnedSize();
    }

private:
    std::map<void *, MmapPool *> poolMap_;
    std::multimap<size_t, MmapPool *> freePools_;
    UnreturnedToOSPool unreturnedPool_;
};
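
// Illustrative usage sketch (editorial assumption; `mem` and the sizes are hypothetical, the
// identifiers come from this file): a typical lifecycle of a pool against the map:
//
//   MmapPoolMap pools;
//   pools.AddNewPool(Pool(8_MB, mem));  // freshly mmaped memory, tracked as used
//   // The owner frees the pool: cache it and keep its pages resident (NO_RETURN)
//   auto [lastPoolSize, policy] = pools.PushFreePool<OSPagesPolicy::NO_RETURN>(Pool(8_MB, mem));
//   // A later allocation reuses (and, if larger than needed, splits) the cached pool
//   Pool reused = pools.PopFreePool<OSPagesAllocPolicy::ZEROED_MEMORY>(4_MB);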

class MmapMemPool : public MemPool<MmapMemPool> {
public:
    NO_COPY_SEMANTIC(MmapMemPool);
    NO_MOVE_SEMANTIC(MmapMemPool);
    void ClearNonObjectMmapedPools();
    ~MmapMemPool() override;

    using InterruptFlag = std::atomic<ReleasePagesStatus>;

    static constexpr size_t RELEASE_MEM_SIZE = 8_MB;

    /**
     * Get the minimal address in the object pool
     * @return minimal address in the pool
     */
    uintptr_t GetMinObjectAddress() const
    {
        return minObjectMemoryAddr_;
    }

    /**
     * Get the maximal address in the object pool
     * @return maximal address in the pool
     */
    uintptr_t GetMaxObjectAddress() const
    {
        return minObjectMemoryAddr_ + mmapedObjectMemorySize_;
    }

    size_t GetTotalObjectSize() const
    {
        return mmapedObjectMemorySize_;
    }

    /**
     * Get the start address of the pool containing the input address
     * @param addr address in the pool
     * @return start address of the pool
     */
    void *GetStartAddrPoolForAddr(const void *addr) const
    {
        return GetStartAddrPoolForAddrImpl(addr);
    }

    size_t GetObjectSpaceFreeBytes() const;

    // Check whether enough pools can be allocated in the object space
    bool HaveEnoughPoolsInObjectSpace(size_t poolsNum, size_t poolSize) const;

    /// Release pages in all cached free pools
    void ReleaseFreePagesToOS();
    bool ReleaseFreePagesToOSWithInterruption(const InterruptFlag &interruptFlag);

    /// @return used bytes count in the object space (i.e. excluding bytes in free pools)
    size_t GetObjectUsedBytes() const;

private:
    template <class ArenaT = Arena, OSPagesAllocPolicy OS_ALLOC_POLICY>
    ArenaT *AllocArenaImpl(size_t size, SpaceType spaceType, AllocatorType allocatorType, const void *allocatorAddr);
    template <class ArenaT = Arena, OSPagesPolicy OS_PAGES_POLICY>
    void FreeArenaImpl(ArenaT *arena);

    template <OSPagesAllocPolicy OS_ALLOC_POLICY>
    void *AllocRawMemImpl(size_t size, SpaceType type);
    void *AllocRawMemNonObjectImpl(size_t size, SpaceType spaceType);
    template <OSPagesAllocPolicy OS_ALLOC_POLICY>
    void *AllocRawMemObjectImpl(size_t size, SpaceType type);
    void FreeRawMemImpl(void *mem, size_t size);

    template <OSPagesAllocPolicy OS_ALLOC_POLICY>
    Pool AllocPoolImpl(size_t size, SpaceType spaceType, AllocatorType allocatorType, const void *allocatorAddr);
    template <OSPagesPolicy OS_PAGES_POLICY>
    void FreePoolImpl(void *mem, size_t size);

    PANDA_PUBLIC_API AllocatorInfo GetAllocatorInfoForAddrImpl(const void *addr) const;
    PANDA_PUBLIC_API SpaceType GetSpaceTypeForAddrImpl(const void *addr) const;
    PANDA_PUBLIC_API void *GetStartAddrPoolForAddrImpl(const void *addr) const;

    template <OSPagesAllocPolicy OS_ALLOC_POLICY>
    Pool AllocPoolUnsafe(size_t size, SpaceType spaceType, AllocatorType allocatorType, const void *allocatorAddr);
    template <OSPagesPolicy OS_PAGES_POLICY>
    void FreePoolUnsafe(void *mem, size_t size);
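
    // NOTE (editorial assumption): judging by the Impl/Unsafe naming and the recursive lock_ member
    // below, the *Impl entry points are expected to take lock_ and then delegate to their *Unsafe
    // counterparts, which require the lock to be already held by the caller.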

    void AddToNonObjectPoolsMap(std::tuple<Pool, AllocatorInfo, SpaceType> poolInfo);
    void RemoveFromNonObjectPoolsMap(void *poolAddr);
    std::tuple<Pool, AllocatorInfo, SpaceType> FindAddrInNonObjectPoolsMap(const void *addr) const;

    bool ReleasePagesInUnreturnedPoolWithInterruption(const InterruptFlag &interruptFlag);
    bool ReleasePagesInFreePoolsWithInterruption(const InterruptFlag &interruptFlag);
    bool ReleasePagesInMainPoolWithInterruption(const InterruptFlag &interruptFlag);

    MmapMemPool();

    // A helper class for raw memory allocation for spaces.
    class SpaceMemory {
    public:
        void Initialize(uintptr_t minAddr, size_t maxSize)
        {
            minAddress_ = minAddr;
            maxSize_ = maxSize;
            curAllocOffset_ = 0U;
            unreturnedToOsSize_ = 0U;
        }

        uintptr_t GetMinAddress() const
        {
            return minAddress_;
        }

        size_t GetMaxSize() const
        {
            return maxSize_;
        }

        size_t GetOccupiedMemorySize() const
        {
            return curAllocOffset_;
        }

        inline size_t GetFreeSpace() const
        {
            ASSERT(maxSize_ >= curAllocOffset_);
            return maxSize_ - curAllocOffset_;
        }

        size_t GetUnreturnedToOsSize() const
        {
            return unreturnedToOsSize_;
        }

        template <OSPagesAllocPolicy OS_ALLOC_POLICY>
        void *AllocRawMem(size_t size, MmapPoolMap *poolMap)
        {
            if (UNLIKELY(GetFreeSpace() < size)) {
                return nullptr;
            }
            void *mem = ToVoidPtr(minAddress_ + curAllocOffset_);
            curAllocOffset_ += size;
            // Only the dirty prefix (bytes not yet returned to the OS) may need clearing;
            // pages beyond it were never handed out.
            size_t memoryToClear = 0;
            if (unreturnedToOsSize_ >= size) {
                unreturnedToOsSize_ -= size;
                memoryToClear = size;
            } else {
                memoryToClear = unreturnedToOsSize_;
                unreturnedToOsSize_ = 0;
            }
            if (OS_ALLOC_POLICY == OSPagesAllocPolicy::ZEROED_MEMORY && memoryToClear != 0) {
                uintptr_t poolStart = ToUintPtr(mem);
                os::mem::ReleasePages(poolStart, poolStart + memoryToClear);
            }
            poolMap->AddNewPool(Pool(size, mem));
            return mem;
        }

        void FreeMem(size_t size, OSPagesPolicy pagesPolicy)
        {
            ASSERT(curAllocOffset_ >= size);
            curAllocOffset_ -= size;
            if ((pagesPolicy == OSPagesPolicy::NO_RETURN) || (unreturnedToOsSize_ != 0)) {
                unreturnedToOsSize_ += size;
                ASSERT(unreturnedToOsSize_ <= maxSize_);
            }
        }

        void ReleasePagesInMainPool(size_t poolSize);

    private:
        Pool GetAndClearUnreturnedToOSMemory(size_t size)
        {
            ASSERT(size <= unreturnedToOsSize_);
            unreturnedToOsSize_ -= size;
            void *mem = ToVoidPtr(minAddress_ + curAllocOffset_ + unreturnedToOsSize_);
            return Pool(size, mem);
        }

        uintptr_t minAddress_ {0U};       ///< Min address for the space
        size_t maxSize_ {0U};             ///< Max size in bytes for the space
        size_t curAllocOffset_ {0U};      ///< Amount of occupied memory, counted from minAddress_
        size_t unreturnedToOsSize_ {0U};  ///< Amount of unreturned memory, counted from minAddress_ + curAllocOffset_
    };
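
    // Worked example for SpaceMemory::AllocRawMem (editorial, illustrative numbers): with
    // curAllocOffset_ == 0 and unreturnedToOsSize_ == 1_MB, a call to
    // AllocRawMem<OSPagesAllocPolicy::ZEROED_MEMORY>(4_MB, poolMap) hands out the range
    // [minAddress_, minAddress_ + 4_MB). Only the first 1_MB is dirty, so only that prefix is
    // cleared via os::mem::ReleasePages; the remaining 3_MB consists of pages the OS has never
    // handed out and therefore delivers zero-filled. unreturnedToOsSize_ drops to 0.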

    uintptr_t minObjectMemoryAddr_ {0U};  ///< Minimal address of the mmaped object memory
    size_t mmapedObjectMemorySize_ {0U};  ///< Size of the whole mmaped object memory

    SpaceMemory commonSpace_;

    /// Pool map for object pools with all required information for quick search
    PoolMap poolMap_;

    MmapPoolMap commonSpacePools_;

    std::array<size_t, SPACE_TYPE_SIZE> nonObjectSpacesCurrentSize_;

    std::array<size_t, SPACE_TYPE_SIZE> nonObjectSpacesMaxSize_;

    /// Map for non-object pools allocated via mmap
    std::map<const void *, std::tuple<Pool, AllocatorInfo, SpaceType>> nonObjectMmapedPools_;
    // AllocRawMem is called both from alloc and externally
    mutable os::memory::RecursiveMutex lock_;

    friend class PoolManager;
    friend class MemPool<MmapMemPool>;
    friend class MMapMemPoolTest;
    friend class mem::test::InternalAllocatorTest;
};

}  // namespace ark

#endif  // LIBPANDABASE_MEM_MMAP_MEM_POOL_H