/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_REGION_SPACE_H
#define PANDA_RUNTIME_MEM_REGION_SPACE_H

#include <atomic>
#include <cstdint>

#include "libpandabase/utils/list.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/tlab.h"
#include "runtime/mem/rem_set.h"
#include "runtime/mem/heap_space.h"

namespace ark::mem {

enum RegionFlag {
    IS_UNUSED = 0U,
    IS_EDEN = 1U,
    IS_SURVIVOR = 1U << 1U,
    IS_OLD = 1U << 2U,
    IS_LARGE_OBJECT = 1U << 3U,
    IS_NONMOVABLE = 1U << 4U,
    IS_TLAB = 1U << 5U,
    IS_COLLECTION_SET = 1U << 6U,
    IS_FREE = 1U << 7U,
    IS_PROMOTED = 1U << 8U,
    IS_RESERVED = 1U << 9U,
    IS_PINNED = 1U << 10U,
    IS_MIXEDTLAB = 1U << 11U
};

constexpr bool IsYoungRegionFlag(RegionFlag flag)
{
    return flag == RegionFlag::IS_EDEN || flag == RegionFlag::IS_SURVIVOR;
}

static constexpr size_t DEFAULT_REGION_ALIGNMENT = 256_KB;
static constexpr size_t DEFAULT_REGION_SIZE = DEFAULT_REGION_ALIGNMENT;
static constexpr size_t DEFAULT_REGION_MASK = DEFAULT_REGION_ALIGNMENT - 1;
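
// Illustrative note (an assumption based on RegionPool::AddrToRegion below, not part of the original
// header): for regular, region-aligned allocations the owning Region header can be recovered from an
// object address by masking off the low bits, e.g.
//   auto *region = reinterpret_cast<Region *>(ToUintPtr(obj) & ~DEFAULT_REGION_MASK);
// Humongous objects may cross this alignment and have to go through the mmap pool lookup instead.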

using RemSetT = RemSet<>;

class RegionSpace;
class Region {
public:
    NO_THREAD_SANITIZE explicit Region(RegionSpace *space, uintptr_t begin, uintptr_t end)
        : space_(space), begin_(begin), end_(end), top_(begin)
    {
    }

    ~Region() = default;

    NO_COPY_SEMANTIC(Region);
    NO_MOVE_SEMANTIC(Region);

    void Destroy();

    RegionSpace *GetSpace()
    {
        return space_;
    }

    uintptr_t Begin() const
    {
        return begin_;
    }

    uintptr_t End() const
    {
        return end_;
    }

    bool Intersect(uintptr_t begin, uintptr_t end) const
    {
        return !(end <= begin_ || end_ <= begin);
    }

    uintptr_t Top() const
    {
        return top_;
    }

    void SetTop(uintptr_t newTop)
    {
        ASSERT(!IsTLAB() || IsMixedTLAB());
        top_ = newTop;
    }

    uint32_t GetLiveBytes() const
    {
        ASSERT(liveBytes_ != nullptr);
        // Atomic with relaxed order reason: load value without concurrency
        auto liveBytes = liveBytes_->load(std::memory_order_relaxed);
        ASSERT(liveBytes <= Size());
        return liveBytes;
    }

    uint32_t GetAllocatedBytes() const;

    double GetFragmentation() const;

    uint32_t GetGarbageBytes() const
    {
        ASSERT(GetAllocatedBytes() >= GetLiveBytes());
        return GetAllocatedBytes() - GetLiveBytes();
    }

    void SetLiveBytes(uint32_t count)
    {
        ASSERT(liveBytes_ != nullptr);
        // Atomic with relaxed order reason: store value without concurrency
        liveBytes_->store(count, std::memory_order_relaxed);
    }

    template <bool ATOMICALLY>
    void AddLiveBytes(uint32_t count)
    {
        ASSERT(liveBytes_ != nullptr);
        if constexpr (ATOMICALLY) {
            // Atomic with seq_cst order reason: store value with concurrency
            liveBytes_->fetch_add(count, std::memory_order_seq_cst);
        } else {
            auto *field = reinterpret_cast<uint32_t *>(liveBytes_);
            *field += count;
        }
    }

    uint32_t CalcLiveBytes() const;

    uint32_t CalcMarkBytes() const;

    MarkBitmap *GetLiveBitmap() const
    {
        return liveBitmap_;
    }

    void IncreaseAllocatedObjects()
    {
        // We can call it from the promoted region
        ASSERT(liveBitmap_ != nullptr);
        allocatedObjects_++;
    }

    size_t GetAllocatedObjects()
    {
        ASSERT(HasFlag(RegionFlag::IS_OLD));
        return allocatedObjects_;
    }

    MarkBitmap *GetMarkBitmap() const
    {
        return markBitmap_;
    }

    RemSetT *GetRemSet()
    {
        return remSet_;
    }

    size_t GetRemSetSize() const
    {
        return remSet_->Size();
    }

    void AddFlag(RegionFlag flag)
    {
        flags_ |= flag;
    }

    void RmvFlag(RegionFlag flag)
    {
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        flags_ &= ~flag;
    }

    bool HasFlags(RegionFlag flag) const
    {
        return (flags_ & flag) == flag;
    }

    bool HasFlag(RegionFlag flag) const
    {
        return (flags_ & flag) != 0;
    }
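
    // Illustrative note (not from the original header): HasFlags() checks that *all* bits of the
    // passed mask are set, while HasFlag() checks that *any* of them is set. Since the flags are
    // plain bit masks, a combined query needs a cast, e.g. for a region with IS_OLD and IS_PINNED:
    //   region->AddFlag(RegionFlag::IS_OLD);
    //   region->AddFlag(RegionFlag::IS_PINNED);
    //   bool pinnedOld = region->HasFlags(static_cast<RegionFlag>(IS_OLD | IS_PINNED));  // true
    //   bool anyOfThem = region->HasFlag(static_cast<RegionFlag>(IS_EDEN | IS_OLD));     // true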

    bool IsEden() const
    {
        return HasFlag(IS_EDEN);
    }

    bool IsSurvivor() const
    {
        return HasFlag(RegionFlag::IS_SURVIVOR);
    }

    bool IsYoung() const
    {
        return IsEden() || IsSurvivor();
    }

    bool IsInCollectionSet() const
    {
        return HasFlag(IS_COLLECTION_SET);
    }

    bool IsTLAB() const
    {
        ASSERT((tlabVector_ == nullptr) || (top_ == begin_) || IsMixedTLAB());
        return tlabVector_ != nullptr;
    }

    bool IsMixedTLAB() const
    {
        return HasFlag(RegionFlag::IS_MIXEDTLAB);
    }

    size_t Size() const
    {
        return end_ - ToUintPtr(this);
    }

    void PinObject()
    {
        ASSERT(pinnedObjects_ != nullptr);
        // Atomic with seq_cst order reason: add value with concurrency
        pinnedObjects_->fetch_add(1, std::memory_order_seq_cst);
    }

    void UnpinObject()
    {
        ASSERT(pinnedObjects_ != nullptr);
        // Atomic with seq_cst order reason: sub value with concurrency
        pinnedObjects_->fetch_sub(1, std::memory_order_seq_cst);
    }

    bool HasPinnedObjects() const
    {
        ASSERT(pinnedObjects_ != nullptr);
        // Atomic with seq_cst order reason: load value with concurrency
        return pinnedObjects_->load(std::memory_order_seq_cst) > 0;
    }

    template <bool ATOMIC = true>
    NO_THREAD_SANITIZE void *Alloc(size_t alignedSize);

    NO_THREAD_SANITIZE void UndoAlloc(void *addr);

    template <typename ObjectVisitor>
    void IterateOverObjects(const ObjectVisitor &visitor);

    ObjectHeader *GetLargeObject()
    {
        ASSERT(HasFlag(RegionFlag::IS_LARGE_OBJECT));
        return reinterpret_cast<ObjectHeader *>(Begin());
    }

    bool IsInRange(const ObjectHeader *object) const;

    [[nodiscard]] bool IsInAllocRange(const ObjectHeader *object) const;

    static bool IsAlignment(uintptr_t regionAddr, size_t regionSize)
    {
        ASSERT(regionSize != 0);
        return ((regionAddr - HeapStartAddress()) % regionSize) == 0;
    }

    constexpr static size_t HeadSize()
    {
        return AlignUp(sizeof(Region), DEFAULT_ALIGNMENT_IN_BYTES);
    }

    constexpr static size_t RegionSize(size_t objectSize, size_t regionSize)
    {
        return AlignUp(HeadSize() + objectSize, regionSize);
    }
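
    // Worked example (illustrative, assuming HeadSize() is far smaller than the region size): for a
    // humongous object of 300_KB with a region size of 256_KB,
    //   RegionSize(300_KB, 256_KB) == AlignUp(HeadSize() + 300_KB, 256_KB) == 512_KB,
    // i.e. the object plus the Region header together occupy two region-size units.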

    static uintptr_t HeapStartAddress()
    {
        return PoolManager::GetMmapMemPool()->GetMinObjectAddress();
    }

    InternalAllocatorPtr GetInternalAllocator();

    void CreateRemSet();

    void SetupAtomics();

    void CreateTLABSupport();

    size_t GetRemainingSizeForTLABs() const;
    TLAB *CreateTLAB(size_t size);

    TLAB *GetLastTLAB() const
    {
        ASSERT(tlabVector_ != nullptr);
        ASSERT(!tlabVector_->empty());
        return tlabVector_->back();
    };
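
    // Usage sketch (illustrative, based only on the declarations above): a region that serves
    // thread-local buffers first calls CreateTLABSupport(), then carves TLABs while space remains:
    //   region->CreateTLABSupport();
    //   if (region->GetRemainingSizeForTLABs() >= requestedSize) {
    //       TLAB *tlab = region->CreateTLAB(requestedSize);  // expected to equal GetLastTLAB()
    //   }
    // The exact failure behaviour of CreateTLAB() is defined in the implementation file.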

    MarkBitmap *CreateMarkBitmap();
    MarkBitmap *CreateLiveBitmap();

    void SwapMarkBitmap();

    void CloneMarkBitmapToLiveBitmap();

    void SetMarkBit(ObjectHeader *object);

#ifndef NDEBUG
    NO_THREAD_SANITIZE bool IsAllocating()
    {
        // Atomic with acquire order reason: data race with isAllocating_ with dependencies on reads after the load
        // which should become visible
        return reinterpret_cast<std::atomic<bool> *>(&isAllocating_)->load(std::memory_order_acquire);
    }

    NO_THREAD_SANITIZE bool IsIterating()
    {
        // Atomic with acquire order reason: data race with isIterating_ with dependencies on reads after the load
        // which should become visible
        return reinterpret_cast<std::atomic<bool> *>(&isIterating_)->load(std::memory_order_acquire);
    }

    NO_THREAD_SANITIZE bool SetAllocating(bool value)
    {
        if (IsIterating()) {
            return false;
        }
        // Atomic with release order reason: data race with isAllocating_ with dependencies on writes before the store
        // which should become visible to the acquire load
        reinterpret_cast<std::atomic<bool> *>(&isAllocating_)->store(value, std::memory_order_release);
        return true;
    }

    NO_THREAD_SANITIZE bool SetIterating(bool value)
    {
        if (IsAllocating()) {
            return false;
        }
        // Atomic with release order reason: data race with isIterating_ with dependencies on writes before the store
        // which should become visible to the acquire load
        reinterpret_cast<std::atomic<bool> *>(&isIterating_)->store(value, std::memory_order_release);
        return true;
    }
#endif

    DListNode *AsListNode()
    {
        return &node_;
    }

    static Region *AsRegion(const DListNode *node)
    {
        return reinterpret_cast<Region *>(ToUintPtr(node) - MEMBER_OFFSET(Region, node_));
    }
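
    // Illustrative note (not from the original header): Region participates in the space's DList
    // intrusively via node_, so the owning Region is recovered from a list node by subtracting the
    // member offset, e.g.
    //   Region *r = Region::AsRegion(node);  // node obtained from a DList traversal
    // The traversal API itself is provided by libpandabase/utils/list.h.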

private:
    DListNode node_;
    RegionSpace *space_;
    uintptr_t begin_;
    uintptr_t end_;
    uintptr_t top_;
    uint32_t flags_ {0};
    size_t allocatedObjects_ {0};
    std::atomic<uint32_t> *liveBytes_ {nullptr};
    std::atomic<uint32_t> *pinnedObjects_ {nullptr};
    MarkBitmap *liveBitmap_ {nullptr};           // records live objects for an old region
    MarkBitmap *markBitmap_ {nullptr};           // mark bitmap used in the current GC marking phase
    RemSetT *remSet_ {nullptr};                  // remembered set (old region -> eden/survivor region)
    PandaVector<TLAB *> *tlabVector_ {nullptr};  // pointer to a vector of thread TLABs associated with this region
#ifndef NDEBUG
    bool isAllocating_ = false;
    bool isIterating_ = false;
#endif
};

inline std::ostream &DumpRegionRange(std::ostream &out, const Region &region)
{
    std::ios_base::fmtflags flags = out.flags();
    static constexpr size_t POINTER_PRINT_WIDTH = 8;
    out << std::hex << "[0x" << std::setw(POINTER_PRINT_WIDTH) << std::setfill('0') << region.Begin() << "-0x"
        << std::setw(POINTER_PRINT_WIDTH) << std::setfill('0') << region.End() << "]";
    out.flags(flags);
    return out;
}

inline std::ostream &operator<<(std::ostream &out, const Region &region)
{
    if (region.HasFlag(RegionFlag::IS_LARGE_OBJECT)) {
        out << "H";
    } else if (region.HasFlag(RegionFlag::IS_NONMOVABLE)) {
        out << "NM";
    } else if (region.HasFlag(RegionFlag::IS_OLD)) {
        out << "T";
    } else {
        out << "Y";
    }

    return DumpRegionRange(out, region);
}
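
// Illustrative output (addresses assumed): streaming a Region prints a one- or two-letter kind prefix
// (H = humongous, NM = non-movable, T = tenured/old, Y = young) followed by its address range, e.g.
//   "T[0x02000000-0x02040000]" for a 256_KB tenured region starting at 0x02000000.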

// RegionBlock is used to allocate regions from a continuous big memory block
// |--------------------------|
// |.....RegionBlock class....|
// |--------------------------|
// |.......regionsEnd_........|--------|
// |.......regionsBegin_......|----|   |
// |--------------------------|    |   |
//                                 |   |
// |   Continuous Mem Block   |    |   |
// |--------------------------|    |   |
// |...........Region.........|<---|   |
// |...........Region.........|        |
// |...........Region.........|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |...........Region.........|<-------|
class RegionBlock {
public:
    RegionBlock(size_t regionSize, InternalAllocatorPtr allocator) : regionSize_(regionSize), allocator_(allocator) {}

    ~RegionBlock()
    {
        if (!occupied_.Empty()) {
            allocator_->Free(occupied_.Data());
        }
    }

    NO_COPY_SEMANTIC(RegionBlock);
    NO_MOVE_SEMANTIC(RegionBlock);

    void Init(uintptr_t regionsBegin, uintptr_t regionsEnd);

    Region *AllocRegion();

    Region *AllocLargeRegion(size_t largeRegionSize);

    void FreeRegion(Region *region, bool releasePages = true);

    bool IsAddrInRange(const void *addr) const
    {
        return ToUintPtr(addr) < regionsEnd_ && ToUintPtr(addr) >= regionsBegin_;
    }

    Region *GetAllocatedRegion(const void *addr) const
    {
        ASSERT(IsAddrInRange(addr));
        os::memory::LockHolder lock(lock_);
        return occupied_[RegionIndex(addr)];
    }

    size_t GetFreeRegionsNum() const
    {
        os::memory::LockHolder lock(lock_);
        return occupied_.Size() - numUsedRegions_;
    }

private:
    Region *RegionAt(size_t index) const
    {
        return reinterpret_cast<Region *>(regionsBegin_ + index * regionSize_);
    }

    size_t RegionIndex(const void *addr) const
    {
        return (ToUintPtr(addr) - regionsBegin_) / regionSize_;
    }

    size_t regionSize_;
    InternalAllocatorPtr allocator_;
    uintptr_t regionsBegin_ = 0;
    uintptr_t regionsEnd_ = 0;
    size_t numUsedRegions_ = 0;
    Span<Region *> occupied_ GUARDED_BY(lock_);
    mutable os::memory::Mutex lock_;
};
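
// Usage sketch (illustrative, not from the original header): a RegionBlock is initialized once over a
// pre-reserved range and then hands out fixed-size regions from it, e.g.
//   RegionBlock block(DEFAULT_REGION_SIZE, allocator);
//   block.Init(rangeBegin, rangeEnd);  // rangeBegin/rangeEnd are assumed to be region-aligned
//   Region *r = block.AllocRegion();
//   ...
//   block.FreeRegion(r);
// AllocLargeRegion() presumably serves larger requests that span several consecutive region slots.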

// RegionPool supports three ways of allocating regions:
// 1. alloc a region in a pre-allocated buffer (RegionBlock)
// 2. alloc a region in the mmap pool directly
// 3. a mix of the two ways above
class RegionPool {
public:
    explicit RegionPool(size_t regionSize, bool extend, GenerationalSpaces *spaces, InternalAllocatorPtr allocator)
        : block_(regionSize, allocator),
          regionSize_(regionSize),
          spaces_(spaces),
          allocator_(allocator),
          extend_(extend)
    {
    }

    Region *NewRegion(RegionSpace *space, SpaceType spaceType, AllocatorType allocatorType, size_t regionSize,
                      RegionFlag edenOrOldOrNonmovable, RegionFlag properties,
                      OSPagesAllocPolicy allocPolicy = OSPagesAllocPolicy::NO_POLICY);

    Region *NewRegion(void *region, RegionSpace *space, size_t regionSize, RegionFlag edenOrOldOrNonmovable,
                      RegionFlag properties);

    template <OSPagesPolicy OS_PAGES_POLICY = OSPagesPolicy::IMMEDIATE_RETURN>
    void FreeRegion(Region *region);

    void PromoteYoungRegion(Region *region);

    void InitRegionBlock(uintptr_t regionsBegin, uintptr_t regionsEnd)
    {
        block_.Init(regionsBegin, regionsEnd);
    }

    bool IsAddrInPoolRange(const void *addr) const
    {
        return block_.IsAddrInRange(addr) || IsAddrInExtendPoolRange(addr);
    }

    template <bool CROSS_REGION = false>
    Region *GetRegion(const void *addr) const
    {
        if (block_.IsAddrInRange(addr)) {
            return block_.GetAllocatedRegion(addr);
        }
        if (IsAddrInExtendPoolRange(addr)) {
            return AddrToRegion<CROSS_REGION>(addr);
        }
        return nullptr;
    }

    size_t GetFreeRegionsNumInRegionBlock() const
    {
        return block_.GetFreeRegionsNum();
    }

    bool HaveTenuredSize(size_t size) const;

    bool HaveFreeRegions(size_t numRegions, size_t regionSize) const;

    InternalAllocatorPtr GetInternalAllocator()
    {
        return allocator_;
    }

    ~RegionPool() = default;
    NO_COPY_SEMANTIC(RegionPool);
    NO_MOVE_SEMANTIC(RegionPool);

private:
    template <bool CROSS_REGION>
    static Region *AddrToRegion(const void *addr, size_t mask = DEFAULT_REGION_MASK)
    {
        // If (object address - region start address) can be larger than the region alignment,
        // we should get the region start address from the mmap pool, which records it in the allocator info
        if constexpr (CROSS_REGION) {  // NOLINT(readability-braces-around-statements, bugprone-suspicious-semicolon)
            ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(addr) == SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT);

            auto regionAddr = PoolManager::GetMmapMemPool()->GetStartAddrPoolForAddr(const_cast<void *>(addr));
            return reinterpret_cast<Region *>(regionAddr);
        }
        ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(addr) != SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT);

        return reinterpret_cast<Region *>(((ToUintPtr(addr)) & ~mask));
    }

    bool IsAddrInExtendPoolRange(const void *addr) const
    {
        if (extend_) {
            AllocatorInfo allocInfo = PoolManager::GetMmapMemPool()->GetAllocatorInfoForAddr(const_cast<void *>(addr));
            return allocInfo.GetAllocatorHeaderAddr() == this;
        }
        return false;
    }

    RegionBlock block_;
    size_t regionSize_;
    GenerationalSpaces *spaces_ {nullptr};
    InternalAllocatorPtr allocator_;
    bool extend_ = true;
};
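
// Usage sketch (illustrative; the space/allocator enum values are assumptions, not taken from this
// header): a GC space typically asks the pool for a region of a given kind and later returns it, e.g.
//   Region *eden = pool->NewRegion(space, SpaceType::SPACE_TYPE_OBJECT, AllocatorType::REGION_ALLOCATOR,
//                                  DEFAULT_REGION_SIZE, RegionFlag::IS_EDEN, RegionFlag::IS_UNUSED);
//   ...
//   pool->FreeRegion(eden);
// GetRegion(addr) resolves an arbitrary heap address back to its Region, preferring the RegionBlock
// lookup and falling back to the extended mmap pool.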

class RegionSpace {
public:
    explicit RegionSpace(SpaceType spaceType, AllocatorType allocatorType, RegionPool *regionPool,
                         size_t emptyTenuredRegionsMaxCount = 0)
        : spaceType_(spaceType),
          allocatorType_(allocatorType),
          regionPool_(regionPool),
          emptyTenuredRegionsMaxCount_(emptyTenuredRegionsMaxCount)
    {
    }

    virtual ~RegionSpace()
    {
        FreeAllRegions();
    }

    NO_COPY_SEMANTIC(RegionSpace);
    NO_MOVE_SEMANTIC(RegionSpace);

    enum class ReleaseRegionsPolicy : bool {
        Release,    // NOLINT(readability-identifier-naming)
        NoRelease,  // NOLINT(readability-identifier-naming)
    };

    Region *NewRegion(size_t regionSize, RegionFlag edenOrOldOrNonmovable, RegionFlag properties,
                      OSPagesAllocPolicy allocPolicy = OSPagesAllocPolicy::NO_POLICY);

    template <ReleaseRegionsPolicy REGIONS_RELEASE_POLICY = ReleaseRegionsPolicy::Release,
              OSPagesPolicy OS_PAGES_POLICY = OSPagesPolicy::IMMEDIATE_RETURN>
    void FreeRegion(Region *region);

    void PromoteYoungRegion(Region *region);

    void FreeAllRegions();

    template <typename RegionVisitor>
    void IterateRegions(RegionVisitor visitor);

    RegionPool *GetPool() const
    {
        return regionPool_;
    }

    template <bool CROSS_REGION = false>
    Region *GetRegion(const ObjectHeader *object) const
    {
        auto *region = regionPool_->GetRegion<CROSS_REGION>(object);

        // check if the region is allocated by this space
        return (region != nullptr && region->GetSpace() == this) ? region : nullptr;
    }

    template <bool CROSS_REGION = false>
    bool ContainObject(const ObjectHeader *object) const;

    template <bool CROSS_REGION = false>
    bool IsLive(const ObjectHeader *object) const;

    template <RegionFlag REGION_TYPE, OSPagesPolicy OS_PAGES_POLICY>
    void ReleaseEmptyRegions();

    void SetDesiredEdenLength(size_t edenLength)
    {
        desiredEdenLength_ = edenLength;
    }

private:
    template <typename RegionVisitor>
    void IterateRegionsList(DList &regionsList, RegionVisitor visitor);

    Region *GetRegionFromEmptyList(DList &regionList);

    SpaceType spaceType_;

    // related allocator type
    AllocatorType allocatorType_;

    // underlying shared region pool
    RegionPool *regionPool_;

    size_t emptyTenuredRegionsMaxCount_;

    // regions allocated by this space
    DList regions_;

    // Empty regions which are not returned back to the region pool yet
    DList emptyYoungRegions_;
    DList emptyTenuredRegions_;
    // Use atomic because it is updated in RegionSpace::PromoteYoungRegion without a lock
    std::atomic<size_t> youngRegionsInUse_ {0};
    // Desired eden length is not restricted initially
    size_t desiredEdenLength_ {std::numeric_limits<size_t>::max()};
};
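
// Usage sketch (illustrative, based only on the declarations above): a collector typically creates
// regions through the space and walks them with a visitor, e.g.
//   Region *r = space->NewRegion(DEFAULT_REGION_SIZE, RegionFlag::IS_EDEN, RegionFlag::IS_UNUSED);
//   space->IterateRegions([](Region *region) { /* inspect or collect the region */ });
//   space->FreeRegion(r);
// GetRegion(object) additionally filters out regions owned by other spaces, so it can be used to
// test whether an arbitrary object belongs to this space.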

}  // namespace ark::mem

#endif  // PANDA_RUNTIME_MEM_REGION_SPACE_H