/**
 * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/mem/heap_space.h"
#include "libpandabase/mem/mem.h"
#include "libpandabase/mem/pool_manager.h"
#include "libpandabase/mem/mmap_mem_pool-inl.h"
#include "libpandabase/mem/mem_pool.h"

namespace ark::mem {

void HeapSpace::Initialize(size_t initialSize, size_t maxSize, uint32_t minFreePercentage, uint32_t maxFreePercentage)
{
    ASSERT(!isInitialized_);
    memSpace_.Initialize(initialSize, maxSize);
    InitializePercentages(minFreePercentage, maxFreePercentage);
    isInitialized_ = true;
}

void HeapSpace::InitializePercentages(uint32_t minFreePercentage, uint32_t maxFreePercentage)
{
    minFreePercentage_ = static_cast<double>(std::min(minFreePercentage, MAX_FREE_PERCENTAGE)) / PERCENT_100_U32;
    maxFreePercentage_ = static_cast<double>(std::min(maxFreePercentage, MAX_FREE_PERCENTAGE)) / PERCENT_100_U32;
}

void HeapSpace::ObjectMemorySpace::Initialize(size_t initialSize, size_t maxSize)
{
    minSize_ = initialSize;
    maxSize_ = maxSize;
    ASSERT(minSize_ <= maxSize_);
    // Set the current space size to the initial size
    currentSize_ = minSize_;
}

void HeapSpace::ObjectMemorySpace::ClampNewMaxSize(size_t newMaxSize)
{
    ASSERT(newMaxSize >= currentSize_);
    maxSize_ = std::min(newMaxSize, maxSize_);
}

inline void HeapSpace::ObjectMemorySpace::IncreaseBy(uint64_t bytes)
{
    currentSize_ = std::min(AlignUp(currentSize_ + bytes, DEFAULT_ALIGNMENT_IN_BYTES), static_cast<uint64_t>(maxSize_));
}

inline void HeapSpace::ObjectMemorySpace::ReduceBy(size_t bytes)
{
    ASSERT(currentSize_ >= bytes);
    currentSize_ = AlignUp(currentSize_ - bytes, DEFAULT_ALIGNMENT_IN_BYTES);
    currentSize_ = std::max(currentSize_, minSize_);
}

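// Resizes the space so that the free-space ratio stays within [minFreePercentage, maxFreePercentage]:
// the target size for a given free ratio is usedBytes / (1 - freePercentage). For example, with
// usedBytes = 80 MB, minFreePercentage = 0.2 and maxFreePercentage = 0.5, the space is grown to at least
// 100 MB and shrunk if it exceeds 160 MB (illustrative numbers, not defaults).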
void HeapSpace::ObjectMemorySpace::ComputeNewSize(size_t freeBytes, double minFreePercentage, double maxFreePercentage)
{
    ASSERT(freeBytes <= currentSize_);
    // How many bytes are currently used in this space
    size_t usedBytes = currentSize_ - freeBytes;

    uint64_t minNeededBytes = static_cast<double>(usedBytes) / (1.0 - minFreePercentage);
    if (currentSize_ < minNeededBytes) {
        IncreaseBy(minNeededBytes - currentSize_);
        return;
    }

    uint64_t maxNeededBytes = static_cast<double>(usedBytes) / (1.0 - maxFreePercentage);
    if (currentSize_ > maxNeededBytes) {
        ReduceBy(currentSize_ - maxNeededBytes);
    }
}

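// Returns the number of free bytes in this space; bytesNotInThisSpace is subtracted from the pool
// manager's total object-used bytes to account for memory that is tracked outside this space.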
inline size_t HeapSpace::GetCurrentFreeBytes(size_t bytesNotInThisSpace) const
{
    ASSERT(isInitialized_);
    size_t usedBytes = PoolManager::GetMmapMemPool()->GetObjectUsedBytes();
    ASSERT(usedBytes >= bytesNotInThisSpace);
    size_t usedBytesInCurrentSpace = usedBytes - bytesNotInThisSpace;
    ASSERT(GetCurrentSize() >= usedBytesInCurrentSpace);
    return GetCurrentSize() - usedBytesInCurrentSpace;
}

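// Called at the end of GC: recomputes the heap size for the target free percentages and, if a previous
// pool/arena allocation failed (its size was remembered in savedPoolSize) and still would not fit, grows
// the space so that the allocation can succeed on retry.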
void HeapSpace::ComputeNewSize()
{
    os::memory::WriteLockHolder lock(heapLock_);
    memSpace_.ComputeNewSize(GetCurrentFreeBytes(), minFreePercentage_, maxFreePercentage_);
    // Get the current free byte count after computing the new size
    size_t currentFreeBytesInSpace = GetCurrentFreeBytes();
    // If the saved pool size was very big and such a pool cannot be allocated after GC,
    // then increase the space so this pool can be allocated
    if (memSpace_.savedPoolSize > currentFreeBytesInSpace) {
        memSpace_.IncreaseBy(memSpace_.savedPoolSize - currentFreeBytesInSpace);
        memSpace_.savedPoolSize = 0;
        // After growing the space for the new pool the free byte count will be 0, so grow the space once more
        memSpace_.ComputeNewSize(0, minFreePercentage_, maxFreePercentage_);
    }
    // ComputeNewSize is called at the end of GC
    SetIsWorkGC(false);
}

size_t HeapSpace::GetHeapSize() const
{
    return PoolManager::GetMmapMemPool()->GetObjectUsedBytes();
}

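// Decides whether an allocation of poolSize bytes can be served:
//  - returns 0 if the request fits into the currently free bytes of the space,
//  - returns the number of bytes the space must grow by if GC is currently working and the request fits
//    into the free bytes plus the space's non-occupied memory,
//  - returns std::nullopt if the allocation cannot be served and GC should be triggered instead.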
inline std::optional<size_t> HeapSpace::WillAlloc(size_t poolSize, size_t currentFreeBytesInSpace,
                                                  const ObjectMemorySpace *memSpace) const
{
    ASSERT(isInitialized_);
    // If the pool can be allocated (from the free pool map or from non-used memory), then just do it
    if (LIKELY(poolSize <= currentFreeBytesInSpace)) {
        // We have enough memory for the allocation, no need to increase the heap
        return {0};
    }
    // If we allocate a pool while GC is working, then the new pool must be allocated anyway, so we try to
    // increase the heap space
    if (IsWorkGC()) {
        // If the requested pool size is greater than the free bytes in the current heap space plus the
        // non-occupied memory, then such a pool cannot be allocated, so GC needs to be triggered
        if (currentFreeBytesInSpace + memSpace->GetCurrentNonOccupiedSize() < poolSize) {
            return std::nullopt;
        }
        // In this case the space needs to be increased to allocate the new pool
        return {poolSize - currentFreeBytesInSpace};
    }
    // Otherwise we need to trigger GC
    return std::nullopt;
}

size_t HeapSpace::GetCurrentSize() const
{
    return memSpace_.GetCurrentSize();
}

void HeapSpace::ClampCurrentMaxHeapSize()
{
    os::memory::WriteLockHolder lock(heapLock_);
    memSpace_.ClampNewMaxSize(
        AlignUp(memSpace_.GetCurrentSize() + PANDA_DEFAULT_POOL_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES));
    PoolManager::GetMmapMemPool()->ReleaseFreePagesToOS();
}

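// Common pool allocation path: asks WillAlloc() whether the request fits, grows the space by the returned
// amount if needed, and then allocates the pool from the mmap pool manager with the requested OS pages
// allocation policy. If the request cannot be served, the pool size is remembered in savedPoolSize
// (consumed by ComputeNewSize() after GC) and NULLPOOL is returned.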
inline Pool HeapSpace::TryAllocPoolBase(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                        void *allocatorPtr, size_t currentFreeBytesInSpace, ObjectMemorySpace *memSpace,
                                        OSPagesAllocPolicy allocPolicy)
{
    auto increaseBytesOrNotAlloc = WillAlloc(poolSize, currentFreeBytesInSpace, memSpace);
    // Increase heap space if needed and allocate pool
    if (increaseBytesOrNotAlloc) {
        memSpace->IncreaseBy(increaseBytesOrNotAlloc.value());
        if (allocPolicy == OSPagesAllocPolicy::NO_POLICY) {
            return PoolManager::GetMmapMemPool()->template AllocPool<OSPagesAllocPolicy::NO_POLICY>(
                poolSize, spaceType, allocatorType, allocatorPtr);
        }
        return PoolManager::GetMmapMemPool()->template AllocPool<OSPagesAllocPolicy::ZEROED_MEMORY>(
            poolSize, spaceType, allocatorType, allocatorPtr);
    }
    // Save pool size for computing new space size
    memSpace->savedPoolSize = poolSize;
    return NULLPOOL;
}

Pool HeapSpace::TryAllocPool(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType, void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    return TryAllocPoolBase(poolSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeBytes(), &memSpace_);
}

inline Arena *HeapSpace::TryAllocArenaBase(size_t arenaSize, SpaceType spaceType, AllocatorType allocatorType,
                                           void *allocatorPtr, size_t currentFreeBytesInSpace,
                                           ObjectMemorySpace *memSpace)
{
    auto increaseBytesOrNotAlloc = WillAlloc(arenaSize, currentFreeBytesInSpace, memSpace);
    // Increase heap space if needed and allocate arena
    if (increaseBytesOrNotAlloc.has_value()) {
        memSpace->IncreaseBy(increaseBytesOrNotAlloc.value());
        return PoolManager::AllocArena(arenaSize, spaceType, allocatorType, allocatorPtr);
    }
    // Save arena size for computing new space size
    memSpace->savedPoolSize = arenaSize;
    return nullptr;
}

Arena *HeapSpace::TryAllocArena(size_t arenaSize, SpaceType spaceType, AllocatorType allocatorType, void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    return TryAllocArenaBase(arenaSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeBytes(), &memSpace_);
}

void HeapSpace::FreePool(void *poolMem, size_t poolSize, bool releasePages)
{
    os::memory::ReadLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    // Just free pool
    if (releasePages) {
        PoolManager::GetMmapMemPool()->FreePool<OSPagesPolicy::IMMEDIATE_RETURN>(poolMem, poolSize);
    } else {
        PoolManager::GetMmapMemPool()->FreePool<OSPagesPolicy::NO_RETURN>(poolMem, poolSize);
    }
}

void HeapSpace::FreeArena(Arena *arena)
{
    os::memory::ReadLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    // Just free arena
    PoolManager::FreeArena(arena);
}

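// Sets up the young and tenured spaces. HeapSpace::Initialize() first stores the total heap parameters in
// memSpace_; after the young space has been initialized, memSpace_ is re-initialized as the tenured space,
// so its initial and maximum sizes are the total sizes minus the corresponding young sizes.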
void GenerationalSpaces::Initialize(size_t initialYoungSize, bool wasSetInitialYoungSize, size_t maxYoungSize,
                                    bool wasSetMaxYoungSize, size_t initialTotalSize, size_t maxTotalSize,
                                    uint32_t minFreePercentage, uint32_t maxFreePercentage)
{
    // Temporarily save the total heap size parameters and set the percentages
    HeapSpace::Initialize(initialTotalSize, maxTotalSize, minFreePercentage, maxFreePercentage);

    if (!wasSetInitialYoungSize && wasSetMaxYoungSize) {
        initialYoungSize = maxYoungSize;
    } else if (initialYoungSize > maxYoungSize) {
        LOG_IF(wasSetInitialYoungSize && wasSetMaxYoungSize, WARNING, RUNTIME)
            << "Initial young size (init-young-space-size=" << initialYoungSize
            << ") is larger than maximum young size (young-space-size=" << maxYoungSize
            << "). Set maximum young size to " << initialYoungSize;
        maxYoungSize = initialYoungSize;
    }
    youngSpace_.Initialize(initialYoungSize, maxYoungSize);
    ASSERT(youngSpace_.GetCurrentSize() <= memSpace_.GetCurrentSize());
    ASSERT(youngSpace_.GetMaxSize() <= memSpace_.GetMaxSize());
    // Use memSpace_ as the tenured space
    memSpace_.Initialize(memSpace_.GetCurrentSize() - youngSpace_.GetCurrentSize(),
                         memSpace_.GetMaxSize() - youngSpace_.GetMaxSize());
}

size_t GenerationalSpaces::GetCurrentFreeYoungSize() const
{
    os::memory::ReadLockHolder lock(heapLock_);
    return GetCurrentFreeYoungSizeUnsafe();
}

size_t GenerationalSpaces::GetCurrentFreeTenuredSize() const
{
    os::memory::ReadLockHolder lock(heapLock_);
    return GetCurrentFreeTenuredSizeUnsafe();
}

size_t GenerationalSpaces::GetCurrentFreeYoungSizeUnsafe() const
{
    size_t allOccupiedYoungSize = youngSizeInSeparatePools_ + youngSizeInSharedPools_;
    ASSERT(youngSpace_.GetCurrentSize() >= allOccupiedYoungSize);
    return youngSpace_.GetCurrentSize() - allOccupiedYoungSize;
}

size_t GenerationalSpaces::GetCurrentFreeTenuredSizeUnsafe() const
{
    ASSERT(sharedPoolsSize_ >= tenuredSizeInSharedPools_);
    // Bytes not in the tenured space = pool size occupied by young + non-tenured size in the shared pools
    return GetCurrentFreeBytes(youngSizeInSeparatePools_ + (sharedPoolsSize_ - tenuredSizeInSharedPools_));
}

void GenerationalSpaces::ComputeNewSize()
{
    os::memory::WriteLockHolder lock(heapLock_);
    ComputeNewYoung();
    ComputeNewTenured();
    SetIsWorkGC(false);
}

void GenerationalSpaces::ComputeNewYoung()
{
    double minFreePercentage = GetMinFreePercentage();
    double maxFreePercentage = GetMaxFreePercentage();
    youngSpace_.ComputeNewSize(GetCurrentFreeYoungSizeUnsafe(), minFreePercentage, maxFreePercentage);
    // Get the free byte count after computing the new young size
    size_t freeYoungBytesAfterComputing = GetCurrentFreeYoungSizeUnsafe();
    // If the saved pool size was very big and such a pool cannot be allocated in the young space after GC,
    // then increase the young space so this pool can be allocated
    if (youngSpace_.savedPoolSize > freeYoungBytesAfterComputing) {
        youngSpace_.IncreaseBy(youngSpace_.savedPoolSize - freeYoungBytesAfterComputing);
        youngSpace_.savedPoolSize = 0;
        // After growing the young space for the new pool the free byte count will be 0, so grow the young
        // space once more
        youngSpace_.ComputeNewSize(0, minFreePercentage, maxFreePercentage);
    }
}

void GenerationalSpaces::UpdateSize(size_t desiredYoungSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    UpdateYoungSize(desiredYoungSize);
    ComputeNewTenured();
    SetIsWorkGC(false);
}

size_t GenerationalSpaces::UpdateYoungSpaceMaxSize(size_t size)
{
    os::memory::WriteLockHolder lock(heapLock_);
    size_t oldSize = youngSpace_.GetMaxSize();
    youngSpace_.SetMaxSize(size);
    youngSpace_.UseFullSpace();
    return oldSize;
}

void GenerationalSpaces::UpdateYoungSize(size_t desiredYoungSize)
{
    if (desiredYoungSize < youngSpace_.GetCurrentSize()) {
        auto allOccupiedYoungSize = youngSizeInSharedPools_ + youngSizeInSeparatePools_;
        // We cannot reduce the young size below what is already occupied
        auto desiredSize = std::max(desiredYoungSize, allOccupiedYoungSize);
        youngSpace_.ReduceBy(youngSpace_.GetCurrentSize() - desiredSize);
    } else if (desiredYoungSize > youngSpace_.GetCurrentSize()) {
        youngSpace_.IncreaseBy(desiredYoungSize - youngSpace_.GetCurrentSize());
    }
}

void GenerationalSpaces::ComputeNewTenured()
{
    double minFreePercentage = GetMinFreePercentage();
    double maxFreePercentage = GetMaxFreePercentage();
    memSpace_.ComputeNewSize(GetCurrentFreeTenuredSizeUnsafe(), minFreePercentage, maxFreePercentage);
    // Get the free byte count after computing the new tenured size
    size_t freeTenuredBytesAfterComputing = GetCurrentFreeTenuredSizeUnsafe();
    // If the saved pool size was very big and such a pool cannot be allocated in the tenured space after GC,
    // then increase the tenured space so this pool can be allocated
    if (memSpace_.savedPoolSize > freeTenuredBytesAfterComputing) {
        memSpace_.IncreaseBy(memSpace_.savedPoolSize - freeTenuredBytesAfterComputing);
        memSpace_.savedPoolSize = 0;
        // After growing the tenured space for the new pool the free byte count will be 0, so grow the
        // tenured space once more
        memSpace_.ComputeNewSize(0, minFreePercentage, maxFreePercentage);
    }
}

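// Total heap size = object bytes used in separate (young/tenured) pools plus the young and tenured bytes
// actually occupied inside the shared pools; unused shared-pool capacity is not counted.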
size_t GenerationalSpaces::GetHeapSize() const
{
    os::memory::ReadLockHolder lock(heapLock_);
    size_t usedBytesInSeparatePools = PoolManager::GetMmapMemPool()->GetObjectUsedBytes() - sharedPoolsSize_;
    size_t usedBytesInSharedPool = youngSizeInSharedPools_ + tenuredSizeInSharedPools_;
    return usedBytesInSeparatePools + usedBytesInSharedPool;
}

bool GenerationalSpaces::CanAllocInSpace(bool isYoung, size_t chunkSize) const
{
    os::memory::ReadLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    return isYoung ? WillAlloc(chunkSize, GetCurrentFreeYoungSizeUnsafe(), &youngSpace_).has_value()
                   : WillAlloc(chunkSize, GetCurrentFreeTenuredSizeUnsafe(), &memSpace_).has_value();
}

size_t GenerationalSpaces::GetCurrentYoungSize() const
{
    os::memory::ReadLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    return youngSpace_.GetCurrentSize();
}

size_t GenerationalSpaces::GetMaxYoungSize() const
{
    ASSERT(isInitialized_);
    return youngSpace_.GetMaxSize();
}

void GenerationalSpaces::UseFullYoungSpace()
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    youngSpace_.UseFullSpace();
}

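// sharedPoolsSize_ tracks the total size of pools allocated via AllocSharedPool(); the portions of those
// pools occupied by each generation are tracked separately in youngSizeInSharedPools_ and
// tenuredSizeInSharedPools_ (see the Increase/Reduce*OccupiedInSharedPool methods below).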
Pool GenerationalSpaces::AllocSharedPool(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                         void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    auto sharedPool = PoolManager::GetMmapMemPool()->AllocPool(poolSize, spaceType, allocatorType, allocatorPtr);
    sharedPoolsSize_ += sharedPool.GetSize();
    return sharedPool;
}

Pool GenerationalSpaces::AllocAlonePoolForYoung(SpaceType spaceType, AllocatorType allocatorType, void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    auto youngPool =
        PoolManager::GetMmapMemPool()->AllocPool(youngSpace_.GetMaxSize(), spaceType, allocatorType, allocatorPtr);
    youngSizeInSeparatePools_ = youngPool.GetSize();
    return youngPool;
}

Pool GenerationalSpaces::TryAllocPoolForYoung(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                              void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    auto youngPool = TryAllocPoolBase(poolSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeYoungSizeUnsafe(),
                                      &youngSpace_);
    youngSizeInSeparatePools_ += youngPool.GetSize();
    return youngPool;
}

Pool GenerationalSpaces::TryAllocPoolForTenured(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                                void *allocatorPtr, OSPagesAllocPolicy allocPolicy)
{
    os::memory::WriteLockHolder lock(heapLock_);
    return TryAllocPoolBase(poolSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeTenuredSizeUnsafe(),
                            &memSpace_, allocPolicy);
}

Pool GenerationalSpaces::TryAllocPool(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                      void *allocatorPtr)
{
    return TryAllocPoolForTenured(poolSize, spaceType, allocatorType, allocatorPtr);
}

Arena *GenerationalSpaces::TryAllocArenaForTenured(size_t arenaSize, SpaceType spaceType, AllocatorType allocatorType,
                                                   void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    return TryAllocArenaBase(arenaSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeTenuredSizeUnsafe(),
                             &memSpace_);
}

Arena *GenerationalSpaces::TryAllocArena(size_t arenaSize, SpaceType spaceType, AllocatorType allocatorType,
                                         void *allocatorPtr)
{
    return TryAllocArenaForTenured(arenaSize, spaceType, allocatorType, allocatorPtr);
}

void GenerationalSpaces::FreeSharedPool(void *poolMem, size_t poolSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(sharedPoolsSize_ >= poolSize);
    sharedPoolsSize_ -= poolSize;
    PoolManager::GetMmapMemPool()->FreePool(poolMem, poolSize);
}

void GenerationalSpaces::FreeYoungPool(void *poolMem, size_t poolSize, bool releasePages)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(youngSizeInSeparatePools_ >= poolSize);
    youngSizeInSeparatePools_ -= poolSize;
    if (releasePages) {
        PoolManager::GetMmapMemPool()->FreePool<OSPagesPolicy::IMMEDIATE_RETURN>(poolMem, poolSize);
    } else {
        PoolManager::GetMmapMemPool()->FreePool<OSPagesPolicy::NO_RETURN>(poolMem, poolSize);
    }
}

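// Re-accounts a separate young pool as tenured memory: the pool size is removed from the young accounting
// and the tenured space is grown by the amount reported by WillAlloc(). The memory itself is not touched
// here; only the bookkeeping changes.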
void GenerationalSpaces::PromoteYoungPool(size_t poolSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(youngSizeInSeparatePools_ >= poolSize);
    auto increaseBytesOrNotAlloc = WillAlloc(poolSize, GetCurrentFreeTenuredSizeUnsafe(), &memSpace_);
    youngSizeInSeparatePools_ -= poolSize;
    ASSERT(increaseBytesOrNotAlloc.has_value());
    memSpace_.IncreaseBy(increaseBytesOrNotAlloc.value());
}

void GenerationalSpaces::FreeTenuredPool(void *poolMem, size_t poolSize, bool releasePages)
{
    // For tenured we just free pool
    HeapSpace::FreePool(poolMem, poolSize, releasePages);
}

void GenerationalSpaces::IncreaseYoungOccupiedInSharedPool(size_t chunkSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    size_t freeBytes = GetCurrentFreeYoungSizeUnsafe();
    // Here we are sure that new memory must be allocated, but if the free byte count is less than the
    // requested size (for example, during GC work), then increase the young space size
    if (freeBytes < chunkSize) {
        youngSpace_.IncreaseBy(chunkSize - freeBytes);
    }
    youngSizeInSharedPools_ += chunkSize;
    ASSERT(youngSizeInSharedPools_ + tenuredSizeInSharedPools_ <= sharedPoolsSize_);
}

void GenerationalSpaces::IncreaseTenuredOccupiedInSharedPool(size_t chunkSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    size_t freeBytes = GetCurrentFreeTenuredSizeUnsafe();
    // Here we are sure that new memory must be allocated, but if the free byte count is less than the
    // requested size (for example, during GC work), then increase the tenured space size
    if (freeBytes < chunkSize) {
        memSpace_.IncreaseBy(chunkSize - freeBytes);
    }
    tenuredSizeInSharedPools_ += chunkSize;
    ASSERT(youngSizeInSharedPools_ + tenuredSizeInSharedPools_ <= sharedPoolsSize_);
}

void GenerationalSpaces::ReduceYoungOccupiedInSharedPool(size_t chunkSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    ASSERT(youngSizeInSharedPools_ >= chunkSize);
    youngSizeInSharedPools_ -= chunkSize;
}

void GenerationalSpaces::ReduceTenuredOccupiedInSharedPool(size_t chunkSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    ASSERT(tenuredSizeInSharedPools_ >= chunkSize);
    tenuredSizeInSharedPools_ -= chunkSize;
}

}  // namespace ark::mem