/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/mem/heap_space.h"
#include "libpandabase/mem/mem.h"
#include "libpandabase/mem/pool_manager.h"
#include "libpandabase/mem/mmap_mem_pool-inl.h"
#include "libpandabase/mem/mem_pool.h"

namespace ark::mem {

void HeapSpace::Initialize(size_t initialSize, size_t maxSize, uint32_t minFreePercentage, uint32_t maxFreePercentage)
{
    ASSERT(!isInitialized_);
    memSpace_.Initialize(initialSize, maxSize);
    InitializePercentages(minFreePercentage, maxFreePercentage);
    isInitialized_ = true;
}

void HeapSpace::InitializePercentages(uint32_t minFreePercentage, uint32_t maxFreePercentage)
{
    minFreePercentage_ = static_cast<double>(std::min(minFreePercentage, MAX_FREE_PERCENTAGE)) / PERCENT_100_U32;
    maxFreePercentage_ = static_cast<double>(std::min(maxFreePercentage, MAX_FREE_PERCENTAGE)) / PERCENT_100_U32;
}

void HeapSpace::ObjectMemorySpace::Initialize(size_t initialSize, size_t maxSize)
{
    minSize_ = initialSize;
    maxSize_ = maxSize;
    ASSERT(minSize_ <= maxSize_);
    // Set the current space size to initialSize
    currentSize_ = minSize_;
}

void HeapSpace::ObjectMemorySpace::ClampNewMaxSize(size_t newMaxSize)
{
    ASSERT(newMaxSize >= currentSize_);
    maxSize_ = std::min(newMaxSize, maxSize_);
}

inline void HeapSpace::ObjectMemorySpace::IncreaseBy(uint64_t bytes)
{
    currentSize_ =
        std::min(AlignUp(currentSize_ + bytes, DEFAULT_ALIGNMENT_IN_BYTES), static_cast<uint64_t>(maxSize_));
}

inline void HeapSpace::ObjectMemorySpace::ReduceBy(size_t bytes)
{
    ASSERT(currentSize_ >= bytes);
    currentSize_ = AlignUp(currentSize_ - bytes, DEFAULT_ALIGNMENT_IN_BYTES);
    currentSize_ = std::max(currentSize_, minSize_);
}

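// Grow or shrink the space so that the share of free bytes stays within
// [minFreePercentage, maxFreePercentage]. Illustrative arithmetic (hypothetical numbers):
// with usedBytes = 75 and minFreePercentage = 0.25, at least 75 / (1 - 0.25) = 100 bytes
// are needed, so a smaller space grows; with maxFreePercentage = 0.5, any size above
// 75 / (1 - 0.5) = 150 bytes is shrunk back towards that bound.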
void HeapSpace::ObjectMemorySpace::ComputeNewSize(size_t freeBytes, double minFreePercentage, double maxFreePercentage)
{
    ASSERT(freeBytes <= currentSize_);
    // How many bytes are used in the space now
    size_t usedBytes = currentSize_ - freeBytes;

    uint64_t minNeededBytes = static_cast<double>(usedBytes) / (1.0 - minFreePercentage);
    if (currentSize_ < minNeededBytes) {
        IncreaseBy(minNeededBytes - currentSize_);
        return;
    }

    uint64_t maxNeededBytes = static_cast<double>(usedBytes) / (1.0 - maxFreePercentage);
    if (currentSize_ > maxNeededBytes) {
        ReduceBy(currentSize_ - maxNeededBytes);
    }
}

inline size_t HeapSpace::GetCurrentFreeBytes(size_t bytesNotInThisSpace) const
{
    ASSERT(isInitialized_);
    size_t usedBytes = PoolManager::GetMmapMemPool()->GetObjectUsedBytes();
    ASSERT(usedBytes >= bytesNotInThisSpace);
    size_t usedBytesInCurrentSpace = usedBytes - bytesNotInThisSpace;
    ASSERT(GetCurrentSize() >= usedBytesInCurrentSpace);
    return GetCurrentSize() - usedBytesInCurrentSpace;
}

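// Recompute the space size after GC. savedPoolSize is set by TryAllocPoolBase /
// TryAllocArenaBase when an allocation had to be rejected; if that pending request still
// does not fit into the recomputed free space, the space is grown to accommodate it.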
void HeapSpace::ComputeNewSize()
{
    os::memory::WriteLockHolder lock(heapLock_);
    memSpace_.ComputeNewSize(GetCurrentFreeBytes(), minFreePercentage_, maxFreePercentage_);
    // Get the current free bytes count after computing the new size
    size_t currentFreeBytesInSpace = GetCurrentFreeBytes();
    // If the saved pool size was very big and such a pool cannot be allocated after GC,
    // then increase the space so that this pool fits
    if (memSpace_.savedPoolSize > currentFreeBytesInSpace) {
        memSpace_.IncreaseBy(memSpace_.savedPoolSize - currentFreeBytesInSpace);
        memSpace_.savedPoolSize = 0;
        // After growing the space for the new pool the free bytes count will be 0,
        // so compute the new size once more
        memSpace_.ComputeNewSize(0, minFreePercentage_, maxFreePercentage_);
    }
    // ComputeNewSize is called at the end of GC
    SetIsWorkGC(false);
}

size_t HeapSpace::GetHeapSize() const
{
    return PoolManager::GetMmapMemPool()->GetObjectUsedBytes();
}

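// Decide whether an allocation of poolSize bytes can proceed:
//   {0}          - enough free space already, allocate without growing the space;
//   {N > 0}      - grow the space by N bytes and then allocate (only while GC is running);
//   std::nullopt - the allocation cannot be served now, GC has to be triggered.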
inline std::optional<size_t> HeapSpace::WillAlloc(size_t poolSize, size_t currentFreeBytesInSpace,
                                                  const ObjectMemorySpace *memSpace) const
{
    ASSERT(isInitialized_);
    // If we can allocate the pool (from the free pool map or unused memory) then just do it
    if (LIKELY(poolSize <= currentFreeBytesInSpace)) {
        // We have enough memory for the allocation, no need to increase the heap
        return {0};
    }
    // If we allocate a pool while GC is running then we must allocate the new pool anyway,
    // so we try to increase the heap space
    if (IsWorkGC()) {
        // If the requested pool size is greater than the free bytes in the current heap space
        // plus the non-occupied memory, then we cannot allocate such a pool and need to trigger GC
        if (currentFreeBytesInSpace + memSpace->GetCurrentNonOccupiedSize() < poolSize) {
            return std::nullopt;
        }
        // In this case we need to increase the space to allocate the new pool
        return {poolSize - currentFreeBytesInSpace};
    }
    // Otherwise we need to trigger GC
    return std::nullopt;
}

size_t HeapSpace::GetCurrentSize() const
{
    return memSpace_.GetCurrentSize();
}

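// Clamp the maximum heap size to the current size plus one default pool, so at most one
// more pool can still be allocated, and immediately return already free pages to the OS.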
void HeapSpace::ClampCurrentMaxHeapSize()
{
    os::memory::WriteLockHolder lock(heapLock_);
    memSpace_.ClampNewMaxSize(
        AlignUp(memSpace_.GetCurrentSize() + PANDA_DEFAULT_POOL_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES));
    PoolManager::GetMmapMemPool()->ReleaseFreePagesToOS();
}

inline Pool HeapSpace::TryAllocPoolBase(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                        void *allocatorPtr, size_t currentFreeBytesInSpace, ObjectMemorySpace *memSpace,
                                        OSPagesAllocPolicy allocPolicy)
{
    auto increaseBytesOrNotAlloc = WillAlloc(poolSize, currentFreeBytesInSpace, memSpace);
    // Increase heap space if needed and allocate pool
    if (increaseBytesOrNotAlloc) {
        memSpace->IncreaseBy(increaseBytesOrNotAlloc.value());
        if (allocPolicy == OSPagesAllocPolicy::NO_POLICY) {
            return PoolManager::GetMmapMemPool()->template AllocPool<OSPagesAllocPolicy::NO_POLICY>(
                poolSize, spaceType, allocatorType, allocatorPtr);
        }
        return PoolManager::GetMmapMemPool()->template AllocPool<OSPagesAllocPolicy::ZEROED_MEMORY>(
            poolSize, spaceType, allocatorType, allocatorPtr);
    }
    // Save pool size for computing new space size
    memSpace->savedPoolSize = poolSize;
    return NULLPOOL;
}

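// Hypothetical usage sketch (the enum values and the allocator pointer are illustrative,
// not prescribed by this file):
//   Pool pool = heapSpace.TryAllocPool(PANDA_DEFAULT_POOL_SIZE, SpaceType::SPACE_TYPE_OBJECT,
//                                      AllocatorType::FREELIST_ALLOCATOR, allocator);
//   if (pool.GetMem() == nullptr) {
//       // NULLPOOL was returned: not enough space outside of GC, trigger a GC cycle and retry
//   }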
Pool HeapSpace::TryAllocPool(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType, void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    return TryAllocPoolBase(poolSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeBytes(), &memSpace_);
}

inline Arena *HeapSpace::TryAllocArenaBase(size_t arenaSize, SpaceType spaceType, AllocatorType allocatorType,
                                           void *allocatorPtr, size_t currentFreeBytesInSpace,
                                           ObjectMemorySpace *memSpace)
{
    auto increaseBytesOrNotAlloc = WillAlloc(arenaSize, currentFreeBytesInSpace, memSpace);
    // Increase heap space if needed and allocate arena
    if (increaseBytesOrNotAlloc.has_value()) {
        memSpace->IncreaseBy(increaseBytesOrNotAlloc.value());
        return PoolManager::AllocArena(arenaSize, spaceType, allocatorType, allocatorPtr);
    }
    // Save arena size for computing new space size
    memSpace->savedPoolSize = arenaSize;
    return nullptr;
}

Arena *HeapSpace::TryAllocArena(size_t arenaSize, SpaceType spaceType, AllocatorType allocatorType, void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    return TryAllocArenaBase(arenaSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeBytes(), &memSpace_);
}

void HeapSpace::FreePool(void *poolMem, size_t poolSize, bool releasePages)
{
    os::memory::ReadLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    // Just free the pool
    if (releasePages) {
        PoolManager::GetMmapMemPool()->FreePool<OSPagesPolicy::IMMEDIATE_RETURN>(poolMem, poolSize);
    } else {
        PoolManager::GetMmapMemPool()->FreePool<OSPagesPolicy::NO_RETURN>(poolMem, poolSize);
    }
}

void HeapSpace::FreeArena(Arena *arena)
{
    os::memory::ReadLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    // Just free the arena
    PoolManager::FreeArena(arena);
}

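// Split the total heap between the young and tenured generations. Example with
// hypothetical numbers: initialTotalSize = 64 MB, maxTotalSize = 256 MB,
// initialYoungSize = 4 MB and maxYoungSize = 16 MB leave the tenured space (memSpace_)
// with 60 MB initial and 240 MB maximum, so young + tenured never exceed the totals.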
void GenerationalSpaces::Initialize(size_t initialYoungSize, bool wasSetInitialYoungSize, size_t maxYoungSize,
                                    bool wasSetMaxYoungSize, size_t initialTotalSize, size_t maxTotalSize,
                                    uint32_t minFreePercentage, uint32_t maxFreePercentage)
{
    // Temporarily save the total heap size parameters and set the percentages
    HeapSpace::Initialize(initialTotalSize, maxTotalSize, minFreePercentage, maxFreePercentage);

    if (!wasSetInitialYoungSize && wasSetMaxYoungSize) {
        initialYoungSize = maxYoungSize;
    } else if (initialYoungSize > maxYoungSize) {
        LOG_IF(wasSetInitialYoungSize && wasSetMaxYoungSize, WARNING, RUNTIME)
            << "Initial young size (init-young-space-size=" << initialYoungSize
            << ") is larger than maximum young size (young-space-size=" << maxYoungSize
            << "). Set maximum young size to " << initialYoungSize;
        maxYoungSize = initialYoungSize;
    }
    youngSpace_.Initialize(initialYoungSize, maxYoungSize);
    ASSERT(youngSpace_.GetCurrentSize() <= memSpace_.GetCurrentSize());
    ASSERT(youngSpace_.GetMaxSize() <= memSpace_.GetMaxSize());
    // Use memSpace_ as the tenured space
    memSpace_.Initialize(memSpace_.GetCurrentSize() - youngSpace_.GetCurrentSize(),
                         memSpace_.GetMaxSize() - youngSpace_.GetMaxSize());
}

size_t GenerationalSpaces::GetCurrentFreeYoungSize() const
{
    os::memory::ReadLockHolder lock(heapLock_);
    return GetCurrentFreeYoungSizeUnsafe();
}

size_t GenerationalSpaces::GetCurrentFreeTenuredSize() const
{
    os::memory::ReadLockHolder lock(heapLock_);
    return GetCurrentFreeTenuredSizeUnsafe();
}

size_t GenerationalSpaces::GetCurrentFreeYoungSizeUnsafe() const
{
    size_t allOccupiedYoungSize = youngSizeInSeparatePools_ + youngSizeInSharedPools_;
    ASSERT(youngSpace_.GetCurrentSize() >= allOccupiedYoungSize);
    return youngSpace_.GetCurrentSize() - allOccupiedYoungSize;
}

size_t GenerationalSpaces::GetCurrentFreeTenuredSizeUnsafe() const
{
    ASSERT(sharedPoolsSize_ >= tenuredSizeInSharedPools_);
    // bytesNotInTenuredSpace = size of separate pools occupied by young + non-tenured size in the shared pools
    return GetCurrentFreeBytes(youngSizeInSeparatePools_ + (sharedPoolsSize_ - tenuredSizeInSharedPools_));
}

void GenerationalSpaces::ComputeNewSize()
{
    os::memory::WriteLockHolder lock(heapLock_);
    ComputeNewYoung();
    ComputeNewTenured();
    SetIsWorkGC(false);
}

void GenerationalSpaces::ComputeNewYoung()
{
    double minFreePercentage = GetMinFreePercentage();
    double maxFreePercentage = GetMaxFreePercentage();
    youngSpace_.ComputeNewSize(GetCurrentFreeYoungSizeUnsafe(), minFreePercentage, maxFreePercentage);
    // Get the free bytes count after computing the new young size
    size_t freeYoungBytesAfterComputing = GetCurrentFreeYoungSizeUnsafe();
    // If the saved pool size was very big and such a pool cannot be allocated in young after GC,
    // then increase the young space so that this pool fits
    if (youngSpace_.savedPoolSize > freeYoungBytesAfterComputing) {
        youngSpace_.IncreaseBy(youngSpace_.savedPoolSize - freeYoungBytesAfterComputing);
        youngSpace_.savedPoolSize = 0;
        // After growing the young space for the new pool the free bytes count will be 0,
        // so compute the new size once more
        youngSpace_.ComputeNewSize(0, minFreePercentage, maxFreePercentage);
    }
}

void GenerationalSpaces::UpdateSize(size_t desiredYoungSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    UpdateYoungSize(desiredYoungSize);
    ComputeNewTenured();
    SetIsWorkGC(false);
}

void GenerationalSpaces::UpdateYoungSize(size_t desiredYoungSize)
{
    if (desiredYoungSize < youngSpace_.GetCurrentSize()) {
        auto allOccupiedYoungSize = youngSizeInSharedPools_ + youngSizeInSeparatePools_;
        // We cannot reduce the young size below what is already occupied
        auto desiredSize = std::max(desiredYoungSize, allOccupiedYoungSize);
        youngSpace_.ReduceBy(youngSpace_.GetCurrentSize() - desiredSize);
    } else if (desiredYoungSize > youngSpace_.GetCurrentSize()) {
        youngSpace_.IncreaseBy(desiredYoungSize - youngSpace_.GetCurrentSize());
    }
}

void GenerationalSpaces::ComputeNewTenured()
{
    double minFreePercentage = GetMinFreePercentage();
    double maxFreePercentage = GetMaxFreePercentage();
    memSpace_.ComputeNewSize(GetCurrentFreeTenuredSizeUnsafe(), minFreePercentage, maxFreePercentage);
    // Get the free bytes count after computing the new tenured size
    size_t freeTenuredBytesAfterComputing = GetCurrentFreeTenuredSizeUnsafe();
    // If the saved pool size was very big and such a pool cannot be allocated in tenured after GC,
    // then increase the tenured space so that this pool fits
    if (memSpace_.savedPoolSize > freeTenuredBytesAfterComputing) {
        memSpace_.IncreaseBy(memSpace_.savedPoolSize - freeTenuredBytesAfterComputing);
        memSpace_.savedPoolSize = 0;
        // After growing the tenured space for the new pool the free bytes count will be 0,
        // so compute the new size once more
        memSpace_.ComputeNewSize(0, minFreePercentage, maxFreePercentage);
    }
}

size_t GenerationalSpaces::GetHeapSize() const
{
    os::memory::ReadLockHolder lock(heapLock_);
    size_t usedBytesInSeparatePools = PoolManager::GetMmapMemPool()->GetObjectUsedBytes() - sharedPoolsSize_;
    size_t usedBytesInSharedPool = youngSizeInSharedPools_ + tenuredSizeInSharedPools_;
    return usedBytesInSeparatePools + usedBytesInSharedPool;
}

bool GenerationalSpaces::CanAllocInSpace(bool isYoung, size_t chunkSize) const
{
    os::memory::ReadLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    return isYoung ? WillAlloc(chunkSize, GetCurrentFreeYoungSizeUnsafe(), &youngSpace_).has_value()
                   : WillAlloc(chunkSize, GetCurrentFreeTenuredSizeUnsafe(), &memSpace_).has_value();
}

size_t GenerationalSpaces::GetCurrentYoungSize() const
{
    os::memory::ReadLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    return youngSpace_.GetCurrentSize();
}

size_t GenerationalSpaces::GetMaxYoungSize() const
{
    ASSERT(isInitialized_);
    return youngSpace_.GetMaxSize();
}

void GenerationalSpaces::UseFullYoungSpace()
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    youngSpace_.UseFullSpace();
}

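// Shared pools hold objects of both generations: sharedPoolsSize_ is the total size of
// all shared pools, while youngSizeInSharedPools_ and tenuredSizeInSharedPools_ track how
// much of that memory each generation currently occupies.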
Pool GenerationalSpaces::AllocSharedPool(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                         void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    auto sharedPool = PoolManager::GetMmapMemPool()->AllocPool(poolSize, spaceType, allocatorType, allocatorPtr);
    sharedPoolsSize_ += sharedPool.GetSize();
    return sharedPool;
}

Pool GenerationalSpaces::AllocAlonePoolForYoung(SpaceType spaceType, AllocatorType allocatorType, void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    auto youngPool =
        PoolManager::GetMmapMemPool()->AllocPool(youngSpace_.GetMaxSize(), spaceType, allocatorType, allocatorPtr);
    youngSizeInSeparatePools_ = youngPool.GetSize();
    return youngPool;
}

Pool GenerationalSpaces::TryAllocPoolForYoung(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                              void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    auto youngPool = TryAllocPoolBase(poolSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeYoungSizeUnsafe(),
                                      &youngSpace_);
    youngSizeInSeparatePools_ += youngPool.GetSize();
    return youngPool;
}

Pool GenerationalSpaces::TryAllocPoolForTenured(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                                void *allocatorPtr, OSPagesAllocPolicy allocPolicy)
{
    os::memory::WriteLockHolder lock(heapLock_);
    return TryAllocPoolBase(poolSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeTenuredSizeUnsafe(),
                            &memSpace_, allocPolicy);
}

Pool GenerationalSpaces::TryAllocPool(size_t poolSize, SpaceType spaceType, AllocatorType allocatorType,
                                      void *allocatorPtr)
{
    return TryAllocPoolForTenured(poolSize, spaceType, allocatorType, allocatorPtr);
}

Arena *GenerationalSpaces::TryAllocArenaForTenured(size_t arenaSize, SpaceType spaceType, AllocatorType allocatorType,
                                                   void *allocatorPtr)
{
    os::memory::WriteLockHolder lock(heapLock_);
    return TryAllocArenaBase(arenaSize, spaceType, allocatorType, allocatorPtr, GetCurrentFreeTenuredSizeUnsafe(),
                             &memSpace_);
}

Arena *GenerationalSpaces::TryAllocArena(size_t arenaSize, SpaceType spaceType, AllocatorType allocatorType,
                                         void *allocatorPtr)
{
    return TryAllocArenaForTenured(arenaSize, spaceType, allocatorType, allocatorPtr);
}

void GenerationalSpaces::FreeSharedPool(void *poolMem, size_t poolSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(sharedPoolsSize_ >= poolSize);
    sharedPoolsSize_ -= poolSize;
    PoolManager::GetMmapMemPool()->FreePool(poolMem, poolSize);
}

void GenerationalSpaces::FreeYoungPool(void *poolMem, size_t poolSize, bool releasePages)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(youngSizeInSeparatePools_ >= poolSize);
    youngSizeInSeparatePools_ -= poolSize;
    if (releasePages) {
        PoolManager::GetMmapMemPool()->FreePool<OSPagesPolicy::IMMEDIATE_RETURN>(poolMem, poolSize);
    } else {
        PoolManager::GetMmapMemPool()->FreePool<OSPagesPolicy::NO_RETURN>(poolMem, poolSize);
    }
}

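// Promote a whole young pool to tenured accounting: no memory is copied, the pool's bytes
// simply stop counting as young (youngSizeInSeparatePools_) and the tenured space is grown
// if the pool does not fit into its current free bytes.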
void GenerationalSpaces::PromoteYoungPool(size_t poolSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(youngSizeInSeparatePools_ >= poolSize);
    auto increaseBytesOrNotAlloc = WillAlloc(poolSize, GetCurrentFreeTenuredSizeUnsafe(), &memSpace_);
    youngSizeInSeparatePools_ -= poolSize;
    ASSERT(increaseBytesOrNotAlloc.has_value());
    memSpace_.IncreaseBy(increaseBytesOrNotAlloc.value());
}

void GenerationalSpaces::FreeTenuredPool(void *poolMem, size_t poolSize, bool releasePages)
{
    // For the tenured space we just free the pool
    HeapSpace::FreePool(poolMem, poolSize, releasePages);
}

void GenerationalSpaces::IncreaseYoungOccupiedInSharedPool(size_t chunkSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    size_t freeBytes = GetCurrentFreeYoungSizeUnsafe();
    // Here we are sure that we must allocate new memory, but if the free bytes count is less
    // than the requested size (for example, while GC is running) then we increase the young space size
    if (freeBytes < chunkSize) {
        youngSpace_.IncreaseBy(chunkSize - freeBytes);
    }
    youngSizeInSharedPools_ += chunkSize;
    ASSERT(youngSizeInSharedPools_ + tenuredSizeInSharedPools_ <= sharedPoolsSize_);
}

void GenerationalSpaces::IncreaseTenuredOccupiedInSharedPool(size_t chunkSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    size_t freeBytes = GetCurrentFreeTenuredSizeUnsafe();
    // Here we are sure that we must allocate new memory, but if the free bytes count is less
    // than the requested size (for example, while GC is running) then we increase the tenured space size
    if (freeBytes < chunkSize) {
        memSpace_.IncreaseBy(chunkSize - freeBytes);
    }
    tenuredSizeInSharedPools_ += chunkSize;
    ASSERT(youngSizeInSharedPools_ + tenuredSizeInSharedPools_ <= sharedPoolsSize_);
}

void GenerationalSpaces::ReduceYoungOccupiedInSharedPool(size_t chunkSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    ASSERT(youngSizeInSharedPools_ >= chunkSize);
    youngSizeInSharedPools_ -= chunkSize;
}

void GenerationalSpaces::ReduceTenuredOccupiedInSharedPool(size_t chunkSize)
{
    os::memory::WriteLockHolder lock(heapLock_);
    ASSERT(isInitialized_);
    ASSERT(tenuredSizeInSharedPools_ >= chunkSize);
    tenuredSizeInSharedPools_ -= chunkSize;
}

}  // namespace ark::mem