/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_REGION_SPACE_H
#define PANDA_RUNTIME_MEM_REGION_SPACE_H

#include <atomic>
#include <cstdint>

#include "libpandabase/utils/list.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/tlab.h"
#include "runtime/mem/rem_set.h"
#include "runtime/mem/heap_space.h"

namespace ark::mem {

enum RegionFlag {
    IS_UNUSED = 0U,
    IS_EDEN = 1U,
    IS_SURVIVOR = 1U << 1U,
    IS_OLD = 1U << 2U,
    IS_LARGE_OBJECT = 1U << 3U,
    IS_NONMOVABLE = 1U << 4U,
    IS_TLAB = 1U << 5U,
    IS_COLLECTION_SET = 1U << 6U,
    IS_FREE = 1U << 7U,
    IS_PROMOTED = 1U << 8U,
    IS_RESERVED = 1U << 9U,
    IS_PINNED = 1U << 10U,
    IS_MIXEDTLAB = 1U << 11U
};

constexpr bool IsYoungRegionFlag(RegionFlag flag)
{
    return flag == RegionFlag::IS_EDEN || flag == RegionFlag::IS_SURVIVOR;
}

static constexpr size_t DEFAULT_REGION_ALIGNMENT = 256_KB;
static constexpr size_t DEFAULT_REGION_SIZE = DEFAULT_REGION_ALIGNMENT;
static constexpr size_t DEFAULT_REGION_MASK = DEFAULT_REGION_ALIGNMENT - 1;
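
// Illustrative note (not part of the API): because regions are aligned to
// DEFAULT_REGION_ALIGNMENT, the region owning a non-humongous object can be
// recovered by masking off the low bits of its address, e.g.
//     auto *region = reinterpret_cast<Region *>(ToUintPtr(obj) & ~DEFAULT_REGION_MASK);
// which is what RegionPool::AddrToRegion below does in the non-CROSS_REGION case.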

using RemSetT = RemSet<>;

class RegionSpace;
class Region {
public:
    NO_THREAD_SANITIZE explicit Region(RegionSpace *space, uintptr_t begin, uintptr_t end)
        : space_(space), begin_(begin), end_(end), top_(begin)
    {
    }

    ~Region() = default;

    NO_COPY_SEMANTIC(Region);
    NO_MOVE_SEMANTIC(Region);

    void Destroy();

    RegionSpace *GetSpace()
    {
        return space_;
    }

    uintptr_t Begin() const
    {
        return begin_;
    }

    uintptr_t End() const
    {
        return end_;
    }

    bool Intersect(uintptr_t begin, uintptr_t end) const
    {
        return !(end <= begin_ || end_ <= begin);
    }

    uintptr_t Top() const
    {
        return top_;
    }

    void SetTop(uintptr_t newTop)
    {
        ASSERT(!IsTLAB() || IsMixedTLAB());
        top_ = newTop;
    }

    uint32_t GetLiveBytes() const
    {
        ASSERT(liveBytes_ != nullptr);
        // Atomic with relaxed order reason: load value without concurrency
        auto liveBytes = liveBytes_->load(std::memory_order_relaxed);
        ASSERT(liveBytes <= Size());
        return liveBytes;
    }

    uint32_t GetAllocatedBytes() const;

    double GetFragmentation() const;

    uint32_t GetGarbageBytes() const
    {
        ASSERT(GetAllocatedBytes() >= GetLiveBytes());
        return GetAllocatedBytes() - GetLiveBytes();
    }

    void SetLiveBytes(uint32_t count)
    {
        ASSERT(liveBytes_ != nullptr);
        // Atomic with relaxed order reason: store value without concurrency
        liveBytes_->store(count, std::memory_order_relaxed);
    }

    template <bool ATOMICALLY>
    void AddLiveBytes(uint32_t count)
    {
        ASSERT(liveBytes_ != nullptr);
        if constexpr (ATOMICALLY) {
            // Atomic with seq_cst order reason: store value with concurrency
            liveBytes_->fetch_add(count, std::memory_order_seq_cst);
        } else {
            auto *field = reinterpret_cast<uint32_t *>(liveBytes_);
            *field += count;
        }
    }

    uint32_t CalcLiveBytes() const;

    uint32_t CalcMarkBytes() const;

    MarkBitmap *GetLiveBitmap() const
    {
        return liveBitmap_;
    }

    void IncreaseAllocatedObjects()
    {
        // We can call it from the promoted region
        ASSERT(liveBitmap_ != nullptr);
        allocatedObjects_++;
    }

    size_t UpdateAllocatedObjects()
    {
        size_t aliveCount = GetMarkBitmap()->GetSetBitCount();
        SetAllocatedObjects(aliveCount);
        return aliveCount;
    }

    size_t GetAllocatedObjects()
    {
        ASSERT(HasFlag(RegionFlag::IS_OLD));
        return allocatedObjects_;
    }

    MarkBitmap *GetMarkBitmap() const
    {
        return markBitmap_;
    }

    RemSetT *GetRemSet()
    {
        return remSet_;
    }

    size_t GetRemSetSize() const
    {
        return remSet_->Size();
    }

    void AddFlag(RegionFlag flag)
    {
        flags_ |= flag;
    }

    void RmvFlag(RegionFlag flag)
    {
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        flags_ &= ~flag;
    }

    bool HasFlags(RegionFlag flag) const
    {
        return (flags_ & flag) == flag;
    }

    bool HasFlag(RegionFlag flag) const
    {
        return (flags_ & flag) != 0;
    }
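
    // HasFlags() requires every bit of `flag` to be present in flags_, whereas
    // HasFlag() only checks for a non-empty intersection; for a single-bit
    // RegionFlag value the two behave the same.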

    bool IsEden() const
    {
        return HasFlag(IS_EDEN);
    }

    bool IsSurvivor() const
    {
        return HasFlag(RegionFlag::IS_SURVIVOR);
    }

    bool IsYoung() const
    {
        return IsEden() || IsSurvivor();
    }

    bool IsInCollectionSet() const
    {
        return HasFlag(IS_COLLECTION_SET);
    }

    bool IsTLAB() const
    {
        ASSERT((tlabVector_ == nullptr) || (top_ == begin_) || IsMixedTLAB());
        return tlabVector_ != nullptr;
    }

    bool IsMixedTLAB() const
    {
        return HasFlag(RegionFlag::IS_MIXEDTLAB);
    }

    size_t Size() const
    {
        return end_ - ToUintPtr(this);
    }

    void PinObject()
    {
        ASSERT(pinnedObjects_ != nullptr);
        // Atomic with seq_cst order reason: add value with concurrency
        pinnedObjects_->fetch_add(1, std::memory_order_seq_cst);
    }

    void UnpinObject()
    {
        ASSERT(pinnedObjects_ != nullptr);
        // Atomic with seq_cst order reason: sub value with concurrency
        pinnedObjects_->fetch_sub(1, std::memory_order_seq_cst);
    }

    bool HasPinnedObjects() const
    {
        ASSERT(pinnedObjects_ != nullptr);
        // Atomic with seq_cst order reason: load value with concurrency
        return pinnedObjects_->load(std::memory_order_seq_cst) > 0;
    }

    template <bool ATOMIC = true>
    NO_THREAD_SANITIZE void *Alloc(size_t alignedSize);

    NO_THREAD_SANITIZE void UndoAlloc(void *addr);

    template <typename ObjectVisitor>
    void IterateOverObjects(const ObjectVisitor &visitor);

    ObjectHeader *GetLargeObject()
    {
        ASSERT(HasFlag(RegionFlag::IS_LARGE_OBJECT));
        return reinterpret_cast<ObjectHeader *>(Begin());
    }

    bool IsInRange(const ObjectHeader *object) const;

    [[nodiscard]] bool IsInAllocRange(const ObjectHeader *object) const;

    static bool IsAlignment(uintptr_t regionAddr, size_t regionSize)
    {
        ASSERT(regionSize != 0);
        return ((regionAddr - HeapStartAddress()) % regionSize) == 0;
    }

    constexpr static size_t HeadSize()
    {
        return AlignUp(sizeof(Region), DEFAULT_ALIGNMENT_IN_BYTES);
    }

    constexpr static size_t RegionSize(size_t objectSize, size_t regionSize)
    {
        return AlignUp(HeadSize() + objectSize, regionSize);
    }
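
    // Worked example (illustrative only): with DEFAULT_REGION_SIZE == 256_KB, a humongous
    // object of 600_KB occupies RegionSize(600_KB, 256_KB) == AlignUp(HeadSize() + 600_KB, 256_KB)
    // == 768_KB, i.e. three region slots.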
297 
HeapStartAddress()298     static uintptr_t HeapStartAddress()
299     {
300         return PoolManager::GetMmapMemPool()->GetMinObjectAddress();
301     }
302 
303     InternalAllocatorPtr GetInternalAllocator();
304 
305     void CreateRemSet();
306 
307     void SetupAtomics();
308 
309     void CreateTLABSupport();
310 
311     size_t GetRemainingSizeForTLABs() const;
312     TLAB *CreateTLAB(size_t size);
313 
GetLastTLAB()314     TLAB *GetLastTLAB() const
315     {
316         ASSERT(tlabVector_ != nullptr);
317         ASSERT(!tlabVector_->empty());
318         return tlabVector_->back();
319     };
320 
321     MarkBitmap *CreateMarkBitmap();
322     MarkBitmap *CreateLiveBitmap();
323 
324     void SwapMarkBitmap();
325 
326     void CloneMarkBitmapToLiveBitmap();
327 
328     void SetMarkBit(ObjectHeader *object);
329 
330 #ifndef NDEBUG
    NO_THREAD_SANITIZE bool IsAllocating()
    {
        // Atomic with acquire order reason: data race with isAllocating_ with dependencies on reads after the load
        // which should become visible
        return reinterpret_cast<std::atomic<bool> *>(&isAllocating_)->load(std::memory_order_acquire);
    }

    NO_THREAD_SANITIZE bool IsIterating()
    {
        // Atomic with acquire order reason: data race with isIterating_ with dependencies on reads after the load
        // which should become visible
        return reinterpret_cast<std::atomic<bool> *>(&isIterating_)->load(std::memory_order_acquire);
    }

    NO_THREAD_SANITIZE bool SetAllocating(bool value)
    {
        if (IsIterating()) {
            return false;
        }
        // Atomic with release order reason: data race with isAllocating_ with dependencies on writes before the store
        // which should become visible to the acquire load
        reinterpret_cast<std::atomic<bool> *>(&isAllocating_)->store(value, std::memory_order_release);
        return true;
    }

    NO_THREAD_SANITIZE bool SetIterating(bool value)
    {
        if (IsAllocating()) {
            return false;
        }
        // Atomic with release order reason: data race with isIterating_ with dependencies on writes before the store
        // which should become visible to the acquire load
        reinterpret_cast<std::atomic<bool> *>(&isIterating_)->store(value, std::memory_order_release);
        return true;
    }
#endif

    DListNode *AsListNode()
    {
        return &node_;
    }

    static Region *AsRegion(const DListNode *node)
    {
        return reinterpret_cast<Region *>(ToUintPtr(node) - MEMBER_OFFSET(Region, node_));
    }
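
    // AsRegion() is the usual container_of idiom: it recovers the enclosing Region from its
    // embedded DListNode, so Region::AsRegion(region->AsListNode()) yields the original region.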

private:
    void SetAllocatedObjects(size_t allocatedObjects)
    {
        // We can call it from the promoted region
        ASSERT(liveBitmap_ != nullptr);
        allocatedObjects_ = allocatedObjects;
    }

    DListNode node_;
    RegionSpace *space_;
    uintptr_t begin_;
    uintptr_t end_;
    uintptr_t top_;
    uint32_t flags_ {0};
    size_t allocatedObjects_ {0};
    std::atomic<uint32_t> *liveBytes_ {nullptr};
    std::atomic<uint32_t> *pinnedObjects_ {nullptr};
    MarkBitmap *liveBitmap_ {nullptr};           // records live objects for an old region
    MarkBitmap *markBitmap_ {nullptr};           // mark bitmap used in the current GC marking phase
    RemSetT *remSet_ {nullptr};                  // remembered set (old region -> eden/survivor region)
    PandaVector<TLAB *> *tlabVector_ {nullptr};  // pointer to a vector of the thread TLABs associated with this region
#ifndef NDEBUG
    bool isAllocating_ = false;
    bool isIterating_ = false;
#endif
};

inline std::ostream &DumpRegionRange(std::ostream &out, const Region &region)
{
    std::ios_base::fmtflags flags = out.flags();
    static constexpr size_t POINTER_PRINT_WIDTH = 8;
    out << std::hex << "[0x" << std::setw(POINTER_PRINT_WIDTH) << std::setfill('0') << region.Begin() << "-0x"
        << std::setw(POINTER_PRINT_WIDTH) << std::setfill('0') << region.End() << "]";
    out.flags(flags);
    return out;
}

inline std::ostream &operator<<(std::ostream &out, const Region &region)
{
    if (region.HasFlag(RegionFlag::IS_LARGE_OBJECT)) {
        out << "H";
    } else if (region.HasFlag(RegionFlag::IS_NONMOVABLE)) {
        out << "NM";
    } else if (region.HasFlag(RegionFlag::IS_OLD)) {
        out << "T";
    } else {
        out << "Y";
    }

    return DumpRegionRange(out, region);
}

// RegionBlock is used to allocate regions from one big contiguous memory block
// |--------------------------|
// |.....RegionBlock class....|
// |--------------------------|
// |.......regions_end_.......|--------|
// |.......regions_begin_.....|----|   |
// |--------------------------|    |   |
//                                 |   |
// |   Contiguous Mem Block   |    |   |
// |--------------------------|    |   |
// |...........Region.........|<---|   |
// |...........Region.........|        |
// |...........Region.........|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |...........Region.........|<-------|
class RegionBlock {
public:
    RegionBlock(size_t regionSize, InternalAllocatorPtr allocator) : regionSize_(regionSize), allocator_(allocator) {}

    ~RegionBlock()
    {
        if (!occupied_.Empty()) {
            allocator_->Free(occupied_.Data());
        }
    }

    NO_COPY_SEMANTIC(RegionBlock);
    NO_MOVE_SEMANTIC(RegionBlock);

    void Init(uintptr_t regionsBegin, uintptr_t regionsEnd);

    Region *AllocRegion();

    Region *AllocLargeRegion(size_t largeRegionSize);

    void FreeRegion(Region *region, bool releasePages = true);

    bool IsAddrInRange(const void *addr) const
    {
        return ToUintPtr(addr) < regionsEnd_ && ToUintPtr(addr) >= regionsBegin_;
    }

    Region *GetAllocatedRegion(const void *addr) const
    {
        ASSERT(IsAddrInRange(addr));
        os::memory::LockHolder lock(lock_);
        return occupied_[RegionIndex(addr)];
    }

    size_t GetFreeRegionsNum() const
    {
        os::memory::LockHolder lock(lock_);
        return occupied_.Size() - numUsedRegions_;
    }

private:
    Region *RegionAt(size_t index) const
    {
        return reinterpret_cast<Region *>(regionsBegin_ + index * regionSize_);
    }

    size_t RegionIndex(const void *addr) const
    {
        return (ToUintPtr(addr) - regionsBegin_) / regionSize_;
    }

    size_t regionSize_;
    InternalAllocatorPtr allocator_;
    uintptr_t regionsBegin_ = 0;
    uintptr_t regionsEnd_ = 0;
    size_t numUsedRegions_ = 0;
    Span<Region *> occupied_ GUARDED_BY(lock_);
    mutable os::memory::Mutex lock_;
};
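
// Rough usage sketch (illustrative only; `allocator`, `poolBegin` and `poolEnd` are placeholders):
// the block is initialized once over a contiguous range and then hands out fixed-size region slots.
//
//     RegionBlock block(DEFAULT_REGION_SIZE, allocator);
//     block.Init(poolBegin, poolEnd);        // carve the range into region-sized slots
//     Region *region = block.AllocRegion();  // take one free slot
//     ...
//     block.FreeRegion(region);              // return the slot (releases pages by default)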

// RegionPool can work in three ways (see the illustrative sketch after the class):
// 1. allocate regions from a pre-allocated buffer (RegionBlock)
// 2. allocate regions directly from the mmap pool
// 3. a mix of the two
class RegionPool {
public:
    explicit RegionPool(size_t regionSize, bool extend, GenerationalSpaces *spaces, InternalAllocatorPtr allocator)
        : block_(regionSize, allocator),
          regionSize_(regionSize),
          spaces_(spaces),
          allocator_(allocator),
          extend_(extend)
    {
    }

    Region *NewRegion(RegionSpace *space, SpaceType spaceType, AllocatorType allocatorType, size_t regionSize,
                      RegionFlag edenOrOldOrNonmovable, RegionFlag properties,
                      OSPagesAllocPolicy allocPolicy = OSPagesAllocPolicy::NO_POLICY);

    Region *NewRegion(void *region, RegionSpace *space, size_t regionSize, RegionFlag edenOrOldOrNonmovable,
                      RegionFlag properties);

    template <OSPagesPolicy OS_PAGES_POLICY = OSPagesPolicy::IMMEDIATE_RETURN>
    void FreeRegion(Region *region);

    void PromoteYoungRegion(Region *region);

    void InitRegionBlock(uintptr_t regionsBegin, uintptr_t regionsEnd)
    {
        block_.Init(regionsBegin, regionsEnd);
    }

    bool IsAddrInPoolRange(const void *addr) const
    {
        return block_.IsAddrInRange(addr) || IsAddrInExtendPoolRange(addr);
    }

    template <bool CROSS_REGION = false>
    Region *GetRegion(const void *addr) const
    {
        if (block_.IsAddrInRange(addr)) {
            return block_.GetAllocatedRegion(addr);
        }
        if (IsAddrInExtendPoolRange(addr)) {
            return AddrToRegion<CROSS_REGION>(addr);
        }
        return nullptr;
    }

    size_t GetFreeRegionsNumInRegionBlock() const
    {
        return block_.GetFreeRegionsNum();
    }

    bool HaveTenuredSize(size_t size) const;

    bool HaveFreeRegions(size_t numRegions, size_t regionSize) const;

    InternalAllocatorPtr GetInternalAllocator()
    {
        return allocator_;
    }

    ~RegionPool() = default;
    NO_COPY_SEMANTIC(RegionPool);
    NO_MOVE_SEMANTIC(RegionPool);

private:
    template <bool CROSS_REGION>
    static Region *AddrToRegion(const void *addr, size_t mask = DEFAULT_REGION_MASK)
    {
        // If (object address - region start address) can be larger than the region alignment,
        // the region start address must be taken from the mmap pool, which records it in the allocator info
        if constexpr (CROSS_REGION) {  // NOLINT(readability-braces-around-statements, bugprone-suspicious-semicolon)
            ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(addr) == SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT);

            auto regionAddr = PoolManager::GetMmapMemPool()->GetStartAddrPoolForAddr(const_cast<void *>(addr));
            return reinterpret_cast<Region *>(regionAddr);
        }
        ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(addr) != SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT);

        return reinterpret_cast<Region *>(((ToUintPtr(addr)) & ~mask));
    }

    bool IsAddrInExtendPoolRange(const void *addr) const
    {
        if (extend_) {
            AllocatorInfo allocInfo = PoolManager::GetMmapMemPool()->GetAllocatorInfoForAddr(const_cast<void *>(addr));
            return allocInfo.GetAllocatorHeaderAddr() == this;
        }
        return false;
    }

    RegionBlock block_;
    size_t regionSize_;
    GenerationalSpaces *spaces_ {nullptr};
    InternalAllocatorPtr allocator_;
    bool extend_ = true;
};
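
// Rough sketch of the three modes (illustrative only; `spaces`, `allocator`, `blockBegin`
// and `blockEnd` are placeholders):
//
//     RegionPool pool(DEFAULT_REGION_SIZE, /* extend = */ true, spaces, allocator);
//     pool.InitRegionBlock(blockBegin, blockEnd);  // way 1: regions come from the pre-allocated buffer
//     // With extend == true a region may instead come directly from the mmap pool (way 2);
//     // combining a RegionBlock with extension is way 3.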

class RegionSpace {
public:
    explicit RegionSpace(SpaceType spaceType, AllocatorType allocatorType, RegionPool *regionPool,
                         size_t emptyTenuredRegionsMaxCount = 0)
        : spaceType_(spaceType),
          allocatorType_(allocatorType),
          regionPool_(regionPool),
          emptyTenuredRegionsMaxCount_(emptyTenuredRegionsMaxCount)
    {
    }

    virtual ~RegionSpace()
    {
        FreeAllRegions();
    }

    NO_COPY_SEMANTIC(RegionSpace);
    NO_MOVE_SEMANTIC(RegionSpace);

    enum class ReleaseRegionsPolicy : bool {
        Release,    // NOLINT(readability-identifier-naming)
        NoRelease,  // NOLINT(readability-identifier-naming)
    };

    Region *NewRegion(size_t regionSize, RegionFlag edenOrOldOrNonmovable, RegionFlag properties,
                      OSPagesAllocPolicy allocPolicy = OSPagesAllocPolicy::NO_POLICY);

    template <ReleaseRegionsPolicy REGIONS_RELEASE_POLICY = ReleaseRegionsPolicy::Release,
              OSPagesPolicy OS_PAGES_POLICY = OSPagesPolicy::IMMEDIATE_RETURN>
    void FreeRegion(Region *region);

    void PromoteYoungRegion(Region *region);

    void FreeAllRegions();

    template <typename RegionVisitor>
    void IterateRegions(RegionVisitor visitor);

    RegionPool *GetPool() const
    {
        return regionPool_;
    }

    template <bool CROSS_REGION = false>
    Region *GetRegion(const ObjectHeader *object) const
    {
        auto *region = regionPool_->GetRegion<CROSS_REGION>(object);

        // check if the region is allocated by this space
        return (region != nullptr && region->GetSpace() == this) ? region : nullptr;
    }

    template <bool CROSS_REGION = false>
    bool ContainObject(const ObjectHeader *object) const;

    template <bool CROSS_REGION = false>
    bool IsLive(const ObjectHeader *object) const;

    template <RegionFlag REGION_TYPE, OSPagesPolicy OS_PAGES_POLICY>
    void ReleaseEmptyRegions();

    void SetDesiredEdenLength(size_t edenLength)
    {
        desiredEdenLength_ = edenLength;
    }

private:
    template <typename RegionVisitor>
    void IterateRegionsList(DList &regionsList, RegionVisitor visitor);

    Region *GetRegionFromEmptyList(DList &regionList);

    SpaceType spaceType_;

    // related allocator type
    AllocatorType allocatorType_;

    // underlying shared region pool
    RegionPool *regionPool_;

    size_t emptyTenuredRegionsMaxCount_;

    // regions allocated by this space
    DList regions_;

    // Empty regions which have not been returned back to the pool
    DList emptyYoungRegions_;
    DList emptyTenuredRegions_;
    // Use atomic because it is updated in RegionSpace::PromoteYoungRegion without a lock
    std::atomic<size_t> youngRegionsInUse_ {0};
    // The desired eden length is not restricted initially
    size_t desiredEdenLength_ {std::numeric_limits<size_t>::max()};
};

}  // namespace ark::mem

#endif  // PANDA_RUNTIME_MEM_REGION_SPACE_H