/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/mem/heap_space.h"
#include "libpandabase/mem/mem.h"
#include "libpandabase/mem/pool_manager.h"
#include "libpandabase/mem/mmap_mem_pool-inl.h"

namespace panda::mem {

void HeapSpace::Initialize(size_t initial_size, size_t max_size, uint32_t min_free_percentage,
                           uint32_t max_free_percentage)
{
    ASSERT(!is_initialized_);
    mem_space_.Initialize(initial_size, max_size);
    InitializePercentages(min_free_percentage, max_free_percentage);
    is_initialized_ = true;
}

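// A minimal usage sketch (illustrative only; the concrete sizes, space type and allocator
// type below are hypothetical and not taken from this file):
//
//   HeapSpace heap;
//   heap.Initialize(4U * 1024U * 1024U,   // initial heap size: 4 MB
//                   64U * 1024U * 1024U,  // maximum heap size: 64 MB
//                   30U, 70U);            // keep between 30% and 70% of the space free
//   Pool pool = heap.TryAllocPool(256U * 1024U, SpaceType::SPACE_TYPE_OBJECT,
//                                 AllocatorType::RUNSLOTS_ALLOCATOR, nullptr);
//   if (pool.GetMem() == nullptr) {
//       // NULLPOOL was returned: the space cannot grow right now, so trigger GC and retry
//   }
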
void HeapSpace::InitializePercentages(uint32_t min_free_percentage, uint32_t max_free_percentage)
{
    min_free_percentage_ = static_cast<double>(std::min(min_free_percentage, MAX_FREE_PERCENTAGE)) / PERCENT_100_U32;
    max_free_percentage_ = static_cast<double>(std::min(max_free_percentage, MAX_FREE_PERCENTAGE)) / PERCENT_100_U32;
}

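// For example (values purely illustrative): InitializePercentages(30U, 70U) stores
// min_free_percentage_ = 0.3 and max_free_percentage_ = 0.7; an argument above
// MAX_FREE_PERCENTAGE is clamped to it before the division by PERCENT_100_U32.
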
void HeapSpace::ObjectMemorySpace::Initialize(size_t initial_size, size_t max_size)
{
    min_size_ = initial_size;
    max_size_ = max_size;
    ASSERT(min_size_ <= max_size_);
    // Set the current space size to initial_size
    current_size_ = min_size_;
}

inline void HeapSpace::ObjectMemorySpace::IncreaseBy(uint64_t bytes)
{
    current_size_ =
        std::min(AlignUp(current_size_ + bytes, DEFAULT_ALIGNMENT_IN_BYTES), static_cast<uint64_t>(max_size_));
}

inline void HeapSpace::ObjectMemorySpace::ReduceBy(size_t bytes)
{
    ASSERT(current_size_ >= bytes);
    current_size_ = AlignUp(current_size_ - bytes, DEFAULT_ALIGNMENT_IN_BYTES);
    current_size_ = std::max(current_size_, min_size_);
}

void HeapSpace::ObjectMemorySpace::ComputeNewSize(size_t free_bytes, double min_free_percentage,
                                                  double max_free_percentage)
{
    ASSERT(free_bytes <= current_size_);
    // Number of bytes currently used in the space
    size_t used_bytes = current_size_ - free_bytes;

    uint64_t min_needed_bytes = static_cast<double>(used_bytes) / (1.0 - min_free_percentage);
    if (current_size_ < min_needed_bytes) {
        IncreaseBy(min_needed_bytes - current_size_);
        return;
    }

    uint64_t max_needed_bytes = static_cast<double>(used_bytes) / (1.0 - max_free_percentage);
    if (current_size_ > max_needed_bytes) {
        ReduceBy(current_size_ - max_needed_bytes);
    }
}

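// Worked example (numbers are illustrative): with current_size_ = 80 MB, free_bytes = 8 MB
// and min_free_percentage = 0.3, used_bytes = 72 MB and min_needed_bytes =
// 72 MB / (1.0 - 0.3) ~= 102.9 MB, so the space grows by ~22.9 MB (capped at max_size_ by
// IncreaseBy). Symmetrically, if more than max_free_percentage of the space is free, it
// shrinks towards used_bytes / (1.0 - max_free_percentage), never below min_size_.
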
inline size_t HeapSpace::GetCurrentFreeBytes(size_t bytes_not_in_this_space) const
{
    ASSERT(is_initialized_);
    size_t used_bytes = PoolManager::GetMmapMemPool()->GetObjectUsedBytes();
    ASSERT(used_bytes >= bytes_not_in_this_space);
    size_t used_bytes_in_current_space = used_bytes - bytes_not_in_this_space;
    ASSERT(GetCurrentSize() >= used_bytes_in_current_space);
    return GetCurrentSize() - used_bytes_in_current_space;
}

void HeapSpace::ComputeNewSize()
{
    os::memory::WriteLockHolder lock(heap_lock_);
    mem_space_.ComputeNewSize(GetCurrentFreeBytes(), min_free_percentage_, max_free_percentage_);
    // Get the current free bytes count after computing the new size
    size_t current_free_bytes_in_space = GetCurrentFreeBytes();
    // If the saved pool size was too big and such a pool cannot be allocated even after GC,
    // then we increase the space so that this pool can be allocated
    if (mem_space_.saved_pool_size > current_free_bytes_in_space) {
        mem_space_.IncreaseBy(mem_space_.saved_pool_size - current_free_bytes_in_space);
        mem_space_.saved_pool_size = 0;
        // After growing the space for the new pool there will be 0 free bytes, so increase the space once more
        mem_space_.ComputeNewSize(0, min_free_percentage_, max_free_percentage_);
    }
    // ComputeNewSize is called at the end of GC
    SetIsWorkGC(false);
}

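// Note on saved_pool_size: TryAllocPoolBase() below records the size of a pool request that
// could not be served. The branch above then grows the space right after GC so that the
// failed request fits, and recomputes the size once more because the freshly grown space
// would otherwise be left with zero free bytes for the configured free-percentage window.
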
size_t HeapSpace::GetHeapSize() const
{
    return PoolManager::GetMmapMemPool()->GetObjectUsedBytes();
}

inline std::optional<size_t> HeapSpace::WillAlloc(size_t pool_size, size_t current_free_bytes_in_space,
                                                  const ObjectMemorySpace *mem_space) const
{
    ASSERT(is_initialized_);
    // If the pool can be allocated (from the free pool map or from non-used memory), then just do it
    if (LIKELY(pool_size <= current_free_bytes_in_space)) {
        // We have enough memory for the allocation, no need to increase the heap
        return {0};
    }
    // If we allocate a pool during GC work, then we must allocate the new pool anyway, so we will try to increase
    // the heap space
    if (IsWorkGC()) {
        // If the requested pool size is greater than the free bytes in the current heap space plus the non-occupied
        // memory, then we cannot allocate such a pool, so we need to trigger GC
        if (current_free_bytes_in_space + mem_space->GetCurrentNonOccupiedSize() < pool_size) {
            return std::nullopt;
        }
        // In this case we need to increase the space to allocate the new pool
        return {pool_size - current_free_bytes_in_space};
    }
    // Otherwise we need to trigger GC
    return std::nullopt;
}

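// WillAlloc() thus has three possible outcomes:
//   {0}          - the request fits into the current free bytes, no growth needed;
//   {N > 0}      - GC is running, so the space must grow by N bytes to serve the request;
//   std::nullopt - the request cannot be served now and the caller should trigger GC.
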
size_t HeapSpace::GetCurrentSize() const
{
    return mem_space_.GetCurrentSize();
}

inline Pool HeapSpace::TryAllocPoolBase(size_t pool_size, SpaceType space_type, AllocatorType allocator_type,
                                        void *allocator_ptr, size_t current_free_bytes_in_space,
                                        ObjectMemorySpace *mem_space)
{
    auto increase_bytes_or_not_alloc = WillAlloc(pool_size, current_free_bytes_in_space, mem_space);
    // Increase heap space if needed and allocate pool
    if (increase_bytes_or_not_alloc) {
        mem_space->IncreaseBy(increase_bytes_or_not_alloc.value());
        return PoolManager::GetMmapMemPool()->AllocPool(pool_size, space_type, allocator_type, allocator_ptr);
    }
    // Save pool size for computing new space size
    mem_space->saved_pool_size = pool_size;
    return NULLPOOL;
}

Pool HeapSpace::TryAllocPool(size_t pool_size, SpaceType space_type, AllocatorType allocator_type, void *allocator_ptr)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    return TryAllocPoolBase(pool_size, space_type, allocator_type, allocator_ptr, GetCurrentFreeBytes(), &mem_space_);
}

inline Arena *HeapSpace::TryAllocArenaBase(size_t arena_size, SpaceType space_type, AllocatorType allocator_type,
                                           void *allocator_ptr, size_t current_free_bytes_in_space,
                                           ObjectMemorySpace *mem_space)
{
    auto increase_bytes_or_not_alloc = WillAlloc(arena_size, current_free_bytes_in_space, mem_space);
    // Increase heap space if needed and allocate arena
    if (increase_bytes_or_not_alloc.has_value()) {
        mem_space->IncreaseBy(increase_bytes_or_not_alloc.value());
        return PoolManager::AllocArena(arena_size, space_type, allocator_type, allocator_ptr);
    }
    // Save arena size for computing new space size
    mem_space->saved_pool_size = arena_size;
    return nullptr;
}

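// Note: arena requests reuse the same saved_pool_size field as pool requests, so a failed
// arena allocation also feeds into the post-GC size computation in ComputeNewSize().
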
Arena *HeapSpace::TryAllocArena(size_t arena_size, SpaceType space_type, AllocatorType allocator_type,
                                void *allocator_ptr)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    return TryAllocArenaBase(arena_size, space_type, allocator_type, allocator_ptr, GetCurrentFreeBytes(), &mem_space_);
}

void HeapSpace::FreePool(void *pool_mem, size_t pool_size)
{
    os::memory::ReadLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    // Just free pool
    PoolManager::GetMmapMemPool()->FreePool(pool_mem, pool_size);
}

void HeapSpace::FreeArena(Arena *arena)
{
    os::memory::ReadLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    // Just free arena
    PoolManager::FreeArena(arena);
}

void GenerationalSpaces::Initialize(size_t initial_young_size, bool was_set_initial_young_size, size_t max_young_size,
                                    bool was_set_max_young_size, size_t initial_total_size, size_t max_total_size,
                                    uint32_t min_free_percentage, uint32_t max_free_percentage)
{
    // Temporarily save the total heap size parameters and set the percentages
    HeapSpace::Initialize(initial_total_size, max_total_size, min_free_percentage, max_free_percentage);

    if (!was_set_initial_young_size && was_set_max_young_size) {
        initial_young_size = max_young_size;
    } else if (initial_young_size > max_young_size) {
        LOG_IF(was_set_initial_young_size && was_set_max_young_size, WARNING, RUNTIME)
            << "Initial young size (init-young-space-size=" << initial_young_size
            << ") is larger than maximum young size (young-space-size=" << max_young_size
            << "). Set maximum young size to " << initial_young_size;
        max_young_size = initial_young_size;
    }
    young_space_.Initialize(initial_young_size, max_young_size);
    ASSERT(young_space_.GetCurrentSize() <= mem_space_.GetCurrentSize());
    ASSERT(young_space_.GetMaxSize() <= mem_space_.GetMaxSize());
    // Use mem_space_ as the tenured space
    mem_space_.Initialize(mem_space_.GetCurrentSize() - young_space_.GetCurrentSize(),
                          mem_space_.GetMaxSize() - young_space_.GetMaxSize());
}

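// Worked example (sizes are illustrative): Initialize(initial_young_size = 4 MB,
// max_young_size = 8 MB, ..., initial_total_size = 64 MB, max_total_size = 256 MB) leaves
// mem_space_ - used as the tenured space from here on - with an initial size of
// 64 MB - 4 MB = 60 MB and a maximum size of 256 MB - 8 MB = 248 MB.
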
size_t GenerationalSpaces::GetCurrentFreeYoungSize() const
{
    os::memory::ReadLockHolder lock(heap_lock_);
    return GetCurrentFreeYoungSizeUnsafe();
}

size_t GenerationalSpaces::GetCurrentFreeTenuredSize() const
{
    os::memory::ReadLockHolder lock(heap_lock_);
    return GetCurrentFreeTenuredSizeUnsafe();
}

size_t GenerationalSpaces::GetCurrentFreeYoungSizeUnsafe() const
{
    size_t all_occupied_young_size = young_size_in_separate_pools_ + young_size_in_shared_pools_;
    ASSERT(young_space_.GetCurrentSize() >= all_occupied_young_size);
    return young_space_.GetCurrentSize() - all_occupied_young_size;
}

size_t GenerationalSpaces::GetCurrentFreeTenuredSizeUnsafe() const
{
    ASSERT(shared_pools_size_ >= tenured_size_in_shared_pools_);
    // bytes_not_in_tenured_space = size of the pools occupied by young + the non-tenured size in the shared pools
    return GetCurrentFreeBytes(young_size_in_separate_pools_ + (shared_pools_size_ - tenured_size_in_shared_pools_));
}

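// Put differently (a description of the call above): free tenured bytes =
//   tenured current size - (object used bytes - young size in separate pools
//                           - non-tenured part of the shared pools),
// where the non-tenured part of the shared pools covers both their young-occupied
// and their not-yet-occupied bytes.
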
void GenerationalSpaces::ComputeNewSize()
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ComputeNewYoung();
    ComputeNewTenured();
    SetIsWorkGC(false);
}

void GenerationalSpaces::ComputeNewYoung()
{
    double min_free_percentage = GetMinFreePercentage();
    double max_free_percentage = GetMaxFreePercentage();
    young_space_.ComputeNewSize(GetCurrentFreeYoungSizeUnsafe(), min_free_percentage, max_free_percentage);
    // Get the free bytes count after computing the new young size
    size_t free_young_bytes_after_computing = GetCurrentFreeYoungSizeUnsafe();
    // If the saved pool size was too big and such a pool cannot be allocated in young even after GC,
    // then we increase the young space so that this pool can be allocated
    if (young_space_.saved_pool_size > free_young_bytes_after_computing) {
        young_space_.IncreaseBy(young_space_.saved_pool_size - free_young_bytes_after_computing);
        young_space_.saved_pool_size = 0;
        // After growing the young space for the new pool there will be 0 free bytes, so increase the young space
        // once more
        young_space_.ComputeNewSize(0, min_free_percentage, max_free_percentage);
    }
}

void GenerationalSpaces::ComputeNewTenured()
{
    double min_free_percentage = GetMinFreePercentage();
    double max_free_percentage = GetMaxFreePercentage();
    mem_space_.ComputeNewSize(GetCurrentFreeTenuredSizeUnsafe(), min_free_percentage, max_free_percentage);
    // Get the free bytes count after computing the new tenured size
    size_t free_tenured_bytes_after_computing = GetCurrentFreeTenuredSizeUnsafe();
    // If the saved pool size was too big and such a pool cannot be allocated in tenured even after GC,
    // then we increase the tenured space so that this pool can be allocated
    if (mem_space_.saved_pool_size > free_tenured_bytes_after_computing) {
        mem_space_.IncreaseBy(mem_space_.saved_pool_size - free_tenured_bytes_after_computing);
        mem_space_.saved_pool_size = 0;
        // After growing the tenured space for the new pool there will be 0 free bytes, so increase the tenured
        // space once more
        mem_space_.ComputeNewSize(0, min_free_percentage, max_free_percentage);
    }
}

size_t GenerationalSpaces::GetHeapSize() const
{
    os::memory::ReadLockHolder lock(heap_lock_);
    size_t used_bytes_in_separate_pools = PoolManager::GetMmapMemPool()->GetObjectUsedBytes() - shared_pools_size_;
    size_t used_bytes_in_shared_pool = young_size_in_shared_pools_ + tenured_size_in_shared_pools_;
    return used_bytes_in_separate_pools + used_bytes_in_shared_pool;
}

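// Descriptive note on the arithmetic above: shared pools are subtracted from the mmap pool
// statistics wholesale and only their occupied parts are added back, since the mmap pool
// counts a shared pool as used in full from the moment it is allocated, regardless of how
// much of it the generations have actually occupied so far.
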
bool GenerationalSpaces::CanAllocInSpace(bool is_young, size_t chunk_size) const
{
    os::memory::ReadLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    return is_young ? WillAlloc(chunk_size, GetCurrentFreeYoungSizeUnsafe(), &young_space_).has_value()
                    : WillAlloc(chunk_size, GetCurrentFreeTenuredSizeUnsafe(), &mem_space_).has_value();
}

size_t GenerationalSpaces::GetCurrentMaxYoungSize() const
{
    os::memory::ReadLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    return young_space_.GetCurrentSize();
}

size_t GenerationalSpaces::GetMaxYoungSize() const
{
    ASSERT(is_initialized_);
    return young_space_.GetMaxSize();
}

void GenerationalSpaces::UseFullYoungSpace()
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    young_space_.UseFullSpace();
}

Pool GenerationalSpaces::AllocSharedPool(size_t pool_size, SpaceType space_type, AllocatorType allocator_type,
                                         void *allocator_ptr)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    auto shared_pool = PoolManager::GetMmapMemPool()->AllocPool(pool_size, space_type, allocator_type, allocator_ptr);
    shared_pools_size_ += shared_pool.GetSize();
    return shared_pool;
}

Pool GenerationalSpaces::AllocAlonePoolForYoung(SpaceType space_type, AllocatorType allocator_type, void *allocator_ptr)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    auto young_pool =
        PoolManager::GetMmapMemPool()->AllocPool(young_space_.GetMaxSize(), space_type, allocator_type, allocator_ptr);
    young_size_in_separate_pools_ = young_pool.GetSize();
    return young_pool;
}

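// Note that young_size_in_separate_pools_ is assigned rather than incremented above: with a
// single "alone" pool of young_space_.GetMaxSize() bytes, that one pool is expected to hold
// the entire young space.
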
Pool GenerationalSpaces::TryAllocPoolForYoung(size_t pool_size, SpaceType space_type, AllocatorType allocator_type,
                                              void *allocator_ptr)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    auto young_pool = TryAllocPoolBase(pool_size, space_type, allocator_type, allocator_ptr,
                                       GetCurrentFreeYoungSizeUnsafe(), &young_space_);
    young_size_in_separate_pools_ += young_pool.GetSize();
    return young_pool;
}

Pool GenerationalSpaces::TryAllocPoolForTenured(size_t pool_size, SpaceType space_type, AllocatorType allocator_type,
                                                void *allocator_ptr)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    return TryAllocPoolBase(pool_size, space_type, allocator_type, allocator_ptr, GetCurrentFreeTenuredSizeUnsafe(),
                            &mem_space_);
}

Pool GenerationalSpaces::TryAllocPool(size_t pool_size, SpaceType space_type, AllocatorType allocator_type,
                                      void *allocator_ptr)
{
    return TryAllocPoolForTenured(pool_size, space_type, allocator_type, allocator_ptr);
}

Arena *GenerationalSpaces::TryAllocArenaForTenured(size_t arena_size, SpaceType space_type,
                                                   AllocatorType allocator_type, void *allocator_ptr)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    return TryAllocArenaBase(arena_size, space_type, allocator_type, allocator_ptr, GetCurrentFreeTenuredSizeUnsafe(),
                             &mem_space_);
}

Arena *GenerationalSpaces::TryAllocArena(size_t arena_size, SpaceType space_type, AllocatorType allocator_type,
                                         void *allocator_ptr)
{
    return TryAllocArenaForTenured(arena_size, space_type, allocator_type, allocator_ptr);
}

void GenerationalSpaces::FreeSharedPool(void *pool_mem, size_t pool_size)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(shared_pools_size_ >= pool_size);
    shared_pools_size_ -= pool_size;
    PoolManager::GetMmapMemPool()->FreePool(pool_mem, pool_size);
}

void GenerationalSpaces::FreeYoungPool(void *pool_mem, size_t pool_size)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(young_size_in_separate_pools_ >= pool_size);
    young_size_in_separate_pools_ -= pool_size;
    PoolManager::GetMmapMemPool()->FreePool(pool_mem, pool_size);
}

void GenerationalSpaces::PromoteYoungPool(size_t pool_size)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(young_size_in_separate_pools_ >= pool_size);
    young_size_in_separate_pools_ -= pool_size;
    auto increase_bytes_or_not_alloc = WillAlloc(pool_size, GetCurrentFreeTenuredSizeUnsafe(), &mem_space_);
    ASSERT(increase_bytes_or_not_alloc.has_value());
    mem_space_.IncreaseBy(increase_bytes_or_not_alloc.value());
}

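// Promotion only moves the accounting: the pool's bytes stop being counted as young and, by
// growing mem_space_ when WillAlloc() requests it, become part of the tenured space. No
// memory is actually moved or freed here.
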
void GenerationalSpaces::FreeTenuredPool(void *pool_mem, size_t pool_size)
{
    // For the tenured space we just free the pool
    HeapSpace::FreePool(pool_mem, pool_size);
}

void GenerationalSpaces::IncreaseYoungOccupiedInSharedPool(size_t chunk_size)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    size_t free_bytes = GetCurrentFreeYoungSizeUnsafe();
    // Here we are sure that new memory must be allocated, but if the free bytes count is less than the requested
    // size (for example, during GC work), then we increase the young space size
    if (free_bytes < chunk_size) {
        young_space_.IncreaseBy(chunk_size - free_bytes);
    }
    young_size_in_shared_pools_ += chunk_size;
    ASSERT(young_size_in_shared_pools_ + tenured_size_in_shared_pools_ <= shared_pools_size_);
}

void GenerationalSpaces::IncreaseTenuredOccupiedInSharedPool(size_t chunk_size)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    size_t free_bytes = GetCurrentFreeTenuredSizeUnsafe();
    // Here we are sure that new memory must be allocated, but if the free bytes count is less than the requested
    // size (for example, during GC work), then we increase the tenured space size
    if (free_bytes < chunk_size) {
        mem_space_.IncreaseBy(chunk_size - free_bytes);
    }
    tenured_size_in_shared_pools_ += chunk_size;
    ASSERT(young_size_in_shared_pools_ + tenured_size_in_shared_pools_ <= shared_pools_size_);
}

void GenerationalSpaces::ReduceYoungOccupiedInSharedPool(size_t chunk_size)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    ASSERT(young_size_in_shared_pools_ >= chunk_size);
    young_size_in_shared_pools_ -= chunk_size;
}

void GenerationalSpaces::ReduceTenuredOccupiedInSharedPool(size_t chunk_size)
{
    os::memory::WriteLockHolder lock(heap_lock_);
    ASSERT(is_initialized_);
    ASSERT(tenured_size_in_shared_pools_ >= chunk_size);
    tenured_size_in_shared_pools_ -= chunk_size;
}

}  // namespace panda::mem