/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_REGION_SPACE_H
#define PANDA_RUNTIME_MEM_REGION_SPACE_H

#include <atomic>
#include <cstdint>

#include "libpandabase/utils/list.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/tlab.h"
#include "runtime/mem/rem_set.h"
#include "runtime/mem/heap_space.h"

namespace panda::mem {

enum RegionFlag {
    IS_UNUSED = 0U,
    IS_EDEN = 1U,
    IS_SURVIVOR = 1U << 1U,
    IS_OLD = 1U << 2U,
    IS_LARGE_OBJECT = 1U << 3U,
    IS_NONMOVABLE = 1U << 4U,
    IS_TLAB = 1U << 5U,
    IS_COLLECTION_SET = 1U << 6U,
    IS_FREE = 1U << 7U,
    IS_PROMOTED = 1U << 8U,
    IS_RESERVED = 1U << 9U
};
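
// Usage sketch (illustrative, not part of the original header): the values
// above are distinct bits, so a region can carry several flags at once, and
// they can be set and cleared independently via Region's flag accessors:
//
//     region->AddFlag(RegionFlag::IS_OLD);
//     region->AddFlag(RegionFlag::IS_COLLECTION_SET);
//     ASSERT(region->HasFlag(RegionFlag::IS_OLD));
//     region->RmvFlag(RegionFlag::IS_COLLECTION_SET);  // IS_OLD stays set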

constexpr bool IsYoungRegionFlag(RegionFlag flag)
{
    return flag == RegionFlag::IS_EDEN || flag == RegionFlag::IS_SURVIVOR;
}

static constexpr size_t DEFAULT_REGION_ALIGNMENT = 256_KB;
static constexpr size_t DEFAULT_REGION_SIZE = DEFAULT_REGION_ALIGNMENT;
static constexpr size_t DEFAULT_REGION_MASK = DEFAULT_REGION_ALIGNMENT - 1;
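
// Worked example (illustrative): with the default 256 KB region alignment, the
// owning region of a non-humongous address can be recovered by masking off the
// low bits, which is what RegionPool::AddrToRegion() below does:
//
//     addr = region_start + 0x1230
//     (ToUintPtr(addr) & ~DEFAULT_REGION_MASK) == region_start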

using RemSetT = RemSet<>;

class RegionSpace;
class Region {
public:
    NO_THREAD_SANITIZE explicit Region(RegionSpace *space, uintptr_t begin, uintptr_t end)
        : space_(space), begin_(begin), end_(end), top_(begin)
    {
    }

    ~Region() = default;

    NO_COPY_SEMANTIC(Region);
    NO_MOVE_SEMANTIC(Region);

    void Destroy();

    RegionSpace *GetSpace()
    {
        return space_;
    }

    uintptr_t Begin() const
    {
        return begin_;
    }

    uintptr_t End() const
    {
        return end_;
    }

    bool Intersect(uintptr_t begin, uintptr_t end) const
    {
        return !(end <= begin_ || end_ <= begin);
    }

    uintptr_t Top() const
    {
        return top_;
    }

    void SetTop(uintptr_t newTop)
    {
        ASSERT(!IsTLAB());
        top_ = newTop;
    }

    uint32_t GetLiveBytes() const
    {
        ASSERT(liveBytes_ != nullptr);
        // Atomic with relaxed order reason: load value without concurrency
        auto liveBytes = liveBytes_->load(std::memory_order_relaxed);
        ASSERT(liveBytes <= Size());
        return liveBytes;
    }

    uint32_t GetAllocatedBytes() const;

    double GetFragmentation() const;

    uint32_t GetGarbageBytes() const
    {
        ASSERT(GetAllocatedBytes() >= GetLiveBytes());
        return GetAllocatedBytes() - GetLiveBytes();
    }

    void SetLiveBytes(uint32_t count)
    {
        ASSERT(liveBytes_ != nullptr);
        // Atomic with relaxed order reason: store value without concurrency
        liveBytes_->store(count, std::memory_order_relaxed);
    }

    template <bool ATOMICALLY>
    void AddLiveBytes(uint32_t count)
    {
        ASSERT(liveBytes_ != nullptr);
        if constexpr (ATOMICALLY) {
            // Atomic with seq_cst order reason: store value with concurrency
            liveBytes_->fetch_add(count, std::memory_order_seq_cst);
        } else {
            auto *field = reinterpret_cast<uint32_t *>(liveBytes_);
            *field += count;
        }
    }
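
    // Note (illustrative, not from the original header): the non-atomic branch
    // above treats the counter as a plain uint32_t, which is only safe while no
    // other thread can update it, e.g. during a stop-the-world phase; callers
    // choose the variant through the ATOMICALLY template parameter.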

    uint32_t CalcLiveBytes() const;

    uint32_t CalcMarkBytes() const;

    MarkBitmap *GetLiveBitmap() const
    {
        return liveBitmap_;
    }

    void IncreaseAllocatedObjects()
    {
        // We can call it from the promoted region
        ASSERT(liveBitmap_ != nullptr);
        allocatedObjects_++;
    }

    size_t GetAllocatedObjects()
    {
        ASSERT(HasFlag(RegionFlag::IS_OLD));
        return allocatedObjects_;
    }

    MarkBitmap *GetMarkBitmap() const
    {
        return markBitmap_;
    }

    RemSetT *GetRemSet()
    {
        return remSet_;
    }

    size_t GetRemSetSize() const
    {
        return remSet_->Size();
    }

    void AddFlag(RegionFlag flag)
    {
        flags_ |= flag;
    }

    void RmvFlag(RegionFlag flag)
    {
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        flags_ &= ~flag;
    }

    bool HasFlags(RegionFlag flag) const
    {
        return (flags_ & flag) == flag;
    }

    bool HasFlag(RegionFlag flag) const
    {
        return (flags_ & flag) != 0;
    }

    bool IsEden() const
    {
        return HasFlag(IS_EDEN);
    }

    bool IsSurvivor() const
    {
        return HasFlag(RegionFlag::IS_SURVIVOR);
    }

    bool IsYoung() const
    {
        return IsEden() || IsSurvivor();
    }

    bool IsInCollectionSet() const
    {
        return HasFlag(IS_COLLECTION_SET);
    }

    bool IsTLAB() const
    {
        ASSERT((tlabVector_ == nullptr) || (top_ == begin_));
        return tlabVector_ != nullptr;
    }

    size_t Size() const
    {
        return end_ - ToUintPtr(this);
    }
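
    // Note (illustrative): the Region header is placed at the start of the
    // region's memory (cf. RegionBlock::RegionAt() below), so Size() measures
    // from the header itself to end_ and therefore includes HeadSize().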

    void PinObject()
    {
        ASSERT(pinnedObjects_ != nullptr);
        // Atomic with seq_cst order reason: add value with concurrency
        pinnedObjects_->fetch_add(1, std::memory_order_seq_cst);
    }

    void UnpinObject()
    {
        ASSERT(pinnedObjects_ != nullptr);
        // Atomic with seq_cst order reason: sub value with concurrency
        pinnedObjects_->fetch_sub(1, std::memory_order_seq_cst);
    }

    bool HasPinnedObjects() const
    {
        ASSERT(pinnedObjects_ != nullptr);
        // Atomic with seq_cst order reason: load value with concurrency
        return pinnedObjects_->load(std::memory_order_seq_cst) > 0;
    }

    template <bool ATOMIC = true>
    NO_THREAD_SANITIZE void *Alloc(size_t alignedSize);

    template <typename ObjectVisitor>
    void IterateOverObjects(const ObjectVisitor &visitor);

    ObjectHeader *GetLargeObject()
    {
        ASSERT(HasFlag(RegionFlag::IS_LARGE_OBJECT));
        return reinterpret_cast<ObjectHeader *>(Begin());
    }

    bool IsInRange(const ObjectHeader *object) const
    {
        return ToUintPtr(object) >= begin_ && ToUintPtr(object) < end_;
    }

    [[nodiscard]] bool IsInAllocRange(const ObjectHeader *object) const
    {
        bool inRange = false;
        if (!IsTLAB()) {
            inRange = (ToUintPtr(object) >= begin_ && ToUintPtr(object) < top_);
        } else {
            for (auto i : *tlabVector_) {
                inRange = i->ContainObject(object);
                if (inRange) {
                    break;
                }
            }
        }
        return inRange;
    }

    static bool IsAlignment(uintptr_t regionAddr, size_t regionSize)
    {
        ASSERT(regionSize != 0);
        return ((regionAddr - HeapStartAddress()) % regionSize) == 0;
    }

    constexpr static size_t HeadSize()
    {
        return AlignUp(sizeof(Region), DEFAULT_ALIGNMENT_IN_BYTES);
    }

    constexpr static size_t RegionSize(size_t objectSize, size_t regionSize)
    {
        return AlignUp(HeadSize() + objectSize, regionSize);
    }
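
    // Worked example (illustrative): with 256 KB regions, a 300 KB humongous
    // object needs RegionSize(300_KB, 256_KB) = AlignUp(HeadSize() + 300_KB,
    // 256_KB) = 512 KB, i.e. two region-sized chunks.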

    static uintptr_t HeapStartAddress()
    {
        return PoolManager::GetMmapMemPool()->GetMinObjectAddress();
    }

    InternalAllocatorPtr GetInternalAllocator();

    void CreateRemSet();

    void SetupAtomics();

    void CreateTLABSupport();

    size_t GetRemainingSizeForTLABs() const;
    TLAB *CreateTLAB(size_t size);

    MarkBitmap *CreateMarkBitmap();
    MarkBitmap *CreateLiveBitmap();

    void SwapMarkBitmap()
    {
        ASSERT(liveBitmap_ != nullptr);
        ASSERT(markBitmap_ != nullptr);
        std::swap(liveBitmap_, markBitmap_);
    }
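
    // Note (illustrative assumption): swapping would typically happen once a
    // marking phase completes, so the freshly built mark bitmap becomes the
    // region's live bitmap without copying; CloneMarkBitmapToLiveBitmap()
    // below is the copying alternative that keeps the mark bitmap intact.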

    void CloneMarkBitmapToLiveBitmap()
    {
        ASSERT(liveBitmap_ != nullptr);
        ASSERT(markBitmap_ != nullptr);
        markBitmap_->CopyTo(liveBitmap_);
    }

    void SetMarkBit(ObjectHeader *object);

#ifndef NDEBUG
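    // Debug-only cross-checks: SetAllocating() refuses to mark the region as
    // being allocated into while it is being iterated, and SetIterating()
    // refuses the converse, so racing allocation and iteration over the same
    // region can be caught in debug builds.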
    NO_THREAD_SANITIZE bool IsAllocating()
    {
        // Atomic with acquire order reason: data race with is_allocating_ with dependencies on reads after the load
        // which should become visible
        return reinterpret_cast<std::atomic<bool> *>(&isAllocating_)->load(std::memory_order_acquire);
    }

    NO_THREAD_SANITIZE bool IsIterating()
    {
        // Atomic with acquire order reason: data race with is_iterating_ with dependencies on reads after the load
        // which should become visible
        return reinterpret_cast<std::atomic<bool> *>(&isIterating_)->load(std::memory_order_acquire);
    }

    NO_THREAD_SANITIZE bool SetAllocating(bool value)
    {
        if (IsIterating()) {
            return false;
        }
        // Atomic with release order reason: data race with is_allocating_ with dependencies on writes before the
        // store which should become visible to an acquire load
        reinterpret_cast<std::atomic<bool> *>(&isAllocating_)->store(value, std::memory_order_release);
        return true;
    }

    NO_THREAD_SANITIZE bool SetIterating(bool value)
    {
        if (IsAllocating()) {
            return false;
        }
        // Atomic with release order reason: data race with is_iterating_ with dependencies on writes before the
        // store which should become visible to an acquire load
        reinterpret_cast<std::atomic<bool> *>(&isIterating_)->store(value, std::memory_order_release);
        return true;
    }
#endif

    DListNode *AsListNode()
    {
        return &node_;
    }

    static Region *AsRegion(const DListNode *node)
    {
        return reinterpret_cast<Region *>(ToUintPtr(node) - MEMBER_OFFSET(Region, node_));
    }

private:
    DListNode node_;
    RegionSpace *space_;
    uintptr_t begin_;
    uintptr_t end_;
    uintptr_t top_;
    uint32_t flags_ {0};
    size_t allocatedObjects_ {0};
    std::atomic<uint32_t> *liveBytes_ {nullptr};
    std::atomic<uint32_t> *pinnedObjects_ {nullptr};
    MarkBitmap *liveBitmap_ {nullptr};           // records live objects for an old region
    MarkBitmap *markBitmap_ {nullptr};           // mark bitmap used in the current GC marking phase
    RemSetT *remSet_ {nullptr};                  // remembered set (old region -> eden/survivor region)
    PandaVector<TLAB *> *tlabVector_ {nullptr};  // pointer to a vector with thread TLABs associated with this region
#ifndef NDEBUG
    bool isAllocating_ = false;
    bool isIterating_ = false;
#endif
};

inline std::ostream &DumpRegionRange(std::ostream &out, const Region &region)
{
    std::ios_base::fmtflags flags = out.flags();
    static constexpr size_t POINTER_PRINT_WIDTH = 8;
    out << std::hex << "[0x" << std::setw(POINTER_PRINT_WIDTH) << std::setfill('0') << region.Begin() << "-0x"
        << std::setw(POINTER_PRINT_WIDTH) << std::setfill('0') << region.End() << "]";
    out.flags(flags);
    return out;
}

inline std::ostream &operator<<(std::ostream &out, const Region &region)
{
    if (region.HasFlag(RegionFlag::IS_LARGE_OBJECT)) {
        out << "H";
    } else if (region.HasFlag(RegionFlag::IS_NONMOVABLE)) {
        out << "NM";
    } else if (region.HasFlag(RegionFlag::IS_OLD)) {
        out << "T";
    } else {
        out << "Y";
    }

    return DumpRegionRange(out, region);
}
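
// Example output (illustrative): a young 256 KB region starting at 0x40000
// prints as "Y[0x00040000-0x00080000]"; the prefix is "H" for humongous
// (large object), "NM" for non-movable, and "T" for tenured (old) regions.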

// RegionBlock is used to allocate regions from one contiguous big memory block
// |--------------------------|
// |.....RegionBlock class....|
// |--------------------------|
// |.......regions_end_.......|--------|
// |.......regions_begin_.....|----|   |
// |--------------------------|    |   |
//                                 |   |
// |   Continuous Mem Block   |    |   |
// |--------------------------|    |   |
// |...........Region.........|<---|   |
// |...........Region.........|        |
// |...........Region.........|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |...........Region.........|<-------|
class RegionBlock {
public:
    RegionBlock(size_t regionSize, InternalAllocatorPtr allocator) : regionSize_(regionSize), allocator_(allocator) {}

    ~RegionBlock()
    {
        if (!occupied_.Empty()) {
            allocator_->Free(occupied_.Data());
        }
    }

    NO_COPY_SEMANTIC(RegionBlock);
    NO_MOVE_SEMANTIC(RegionBlock);

    void Init(uintptr_t regionsBegin, uintptr_t regionsEnd);

    Region *AllocRegion();

    Region *AllocLargeRegion(size_t largeRegionSize);

    void FreeRegion(Region *region, bool releasePages = true);

    bool IsAddrInRange(const void *addr) const
    {
        return ToUintPtr(addr) < regionsEnd_ && ToUintPtr(addr) >= regionsBegin_;
    }

    Region *GetAllocatedRegion(const void *addr) const
    {
        ASSERT(IsAddrInRange(addr));
        os::memory::LockHolder lock(lock_);
        return occupied_[RegionIndex(addr)];
    }

    size_t GetFreeRegionsNum() const
    {
        os::memory::LockHolder lock(lock_);
        return occupied_.Size() - numUsedRegions_;
    }

private:
    Region *RegionAt(size_t index) const
    {
        return reinterpret_cast<Region *>(regionsBegin_ + index * regionSize_);
    }

    size_t RegionIndex(const void *addr) const
    {
        return (ToUintPtr(addr) - regionsBegin_) / regionSize_;
    }

    size_t regionSize_;
    InternalAllocatorPtr allocator_;
    uintptr_t regionsBegin_ = 0;
    uintptr_t regionsEnd_ = 0;
    size_t numUsedRegions_ = 0;
    Span<Region *> occupied_ GUARDED_BY(lock_);
    mutable os::memory::Mutex lock_;
};
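
// Worked example (illustrative): with 256 KB regions, an address 600 KB past
// regionsBegin_ has RegionIndex = 600_KB / 256_KB = 2, so GetAllocatedRegion()
// returns occupied_[2], the third region of the block.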

// RegionPool can work in three ways:
// 1. allocate regions from a pre-allocated buffer (RegionBlock)
// 2. allocate regions directly from the mmap pool
// 3. a mix of the two ways above
class RegionPool {
public:
    explicit RegionPool(size_t regionSize, bool extend, GenerationalSpaces *spaces, InternalAllocatorPtr allocator)
        : block_(regionSize, allocator),
          regionSize_(regionSize),
          spaces_(spaces),
          allocator_(allocator),
          extend_(extend)
    {
    }

    Region *NewRegion(RegionSpace *space, SpaceType spaceType, AllocatorType allocatorType, size_t regionSize,
                      RegionFlag edenOrOldOrNonmovable, RegionFlag properties,
                      OSPagesAllocPolicy allocPolicy = OSPagesAllocPolicy::NO_POLICY);

    Region *NewRegion(void *region, RegionSpace *space, size_t regionSize, RegionFlag edenOrOldOrNonmovable,
                      RegionFlag properties);

    template <OSPagesPolicy OS_PAGES_POLICY = OSPagesPolicy::IMMEDIATE_RETURN>
    void FreeRegion(Region *region);

    void PromoteYoungRegion(Region *region);

    void InitRegionBlock(uintptr_t regionsBegin, uintptr_t regionsEnd)
    {
        block_.Init(regionsBegin, regionsEnd);
    }

    bool IsAddrInPoolRange(const void *addr) const
    {
        return block_.IsAddrInRange(addr) || IsAddrInExtendPoolRange(addr);
    }

    template <bool CROSS_REGION = false>
    Region *GetRegion(const void *addr) const
    {
        if (block_.IsAddrInRange(addr)) {
            return block_.GetAllocatedRegion(addr);
        }
        if (IsAddrInExtendPoolRange(addr)) {
            return AddrToRegion<CROSS_REGION>(addr);
        }
        return nullptr;
    }

    size_t GetFreeRegionsNumInRegionBlock() const
    {
        return block_.GetFreeRegionsNum();
    }

    bool HaveTenuredSize(size_t size) const;

    bool HaveFreeRegions(size_t numRegions, size_t regionSize) const;

    InternalAllocatorPtr GetInternalAllocator()
    {
        return allocator_;
    }

    ~RegionPool() = default;
    NO_COPY_SEMANTIC(RegionPool);
    NO_MOVE_SEMANTIC(RegionPool);

private:
    template <bool CROSS_REGION>
    static Region *AddrToRegion(const void *addr, size_t mask = DEFAULT_REGION_MASK)
    {
        // If (object address - region start addr) can be larger than the region alignment,
        // we should get the region start address from the mmap pool, which records it in the allocator info
        if constexpr (CROSS_REGION) {  // NOLINT(readability-braces-around-statements, bugprone-suspicious-semicolon)
            ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(addr) == SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT);

            auto regionAddr = PoolManager::GetMmapMemPool()->GetStartAddrPoolForAddr(const_cast<void *>(addr));
            return reinterpret_cast<Region *>(regionAddr);
        }
        ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(addr) != SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT);

        return reinterpret_cast<Region *>(((ToUintPtr(addr)) & ~mask));
    }

    bool IsAddrInExtendPoolRange(const void *addr) const
    {
        if (extend_) {
            AllocatorInfo allocInfo = PoolManager::GetMmapMemPool()->GetAllocatorInfoForAddr(const_cast<void *>(addr));
            return allocInfo.GetAllocatorHeaderAddr() == this;
        }
        return false;
    }

    RegionBlock block_;
    size_t regionSize_;
    GenerationalSpaces *spaces_ {nullptr};
    InternalAllocatorPtr allocator_;
    bool extend_ = true;
};
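
// Lookup sketch (illustrative summary of GetRegion() above): the pre-allocated
// RegionBlock is consulted first; failing that, if the address belongs to this
// pool's extended mmap range, the region is recovered from the address itself
// via AddrToRegion(); otherwise nullptr is returned.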

class RegionSpace {
public:
    explicit RegionSpace(SpaceType spaceType, AllocatorType allocatorType, RegionPool *regionPool,
                         size_t emptyTenuredRegionsMaxCount = 0)
        : spaceType_(spaceType),
          allocatorType_(allocatorType),
          regionPool_(regionPool),
          emptyTenuredRegionsMaxCount_(emptyTenuredRegionsMaxCount)
    {
    }

    virtual ~RegionSpace()
    {
        FreeAllRegions();
    }

    NO_COPY_SEMANTIC(RegionSpace);
    NO_MOVE_SEMANTIC(RegionSpace);

    enum class ReleaseRegionsPolicy : bool {
        Release,    // NOLINT(readability-identifier-naming)
        NoRelease,  // NOLINT(readability-identifier-naming)
    };

    Region *NewRegion(size_t regionSize, RegionFlag edenOrOldOrNonmovable, RegionFlag properties,
                      OSPagesAllocPolicy allocPolicy = OSPagesAllocPolicy::NO_POLICY);

    template <ReleaseRegionsPolicy REGIONS_RELEASE_POLICY = ReleaseRegionsPolicy::Release,
              OSPagesPolicy OS_PAGES_POLICY = OSPagesPolicy::IMMEDIATE_RETURN>
    void FreeRegion(Region *region);

    void PromoteYoungRegion(Region *region);

    void FreeAllRegions();

    template <typename RegionVisitor>
    void IterateRegions(RegionVisitor visitor);

    RegionPool *GetPool() const
    {
        return regionPool_;
    }

    template <bool CROSS_REGION = false>
    Region *GetRegion(const ObjectHeader *object) const
    {
        auto *region = regionPool_->GetRegion<CROSS_REGION>(object);

        // check that the region is allocated by this space
        return (region != nullptr && region->GetSpace() == this) ? region : nullptr;
    }

    template <bool CROSS_REGION = false>
    bool ContainObject(const ObjectHeader *object) const;

    template <bool CROSS_REGION = false>
    bool IsLive(const ObjectHeader *object) const;

    template <RegionFlag REGION_TYPE, OSPagesPolicy OS_PAGES_POLICY>
    void ReleaseEmptyRegions();

    void SetDesiredEdenLength(size_t edenLength)
    {
        desiredEdenLength_ = edenLength;
    }

private:
    template <typename RegionVisitor>
    void IterateRegionsList(DList &regionsList, RegionVisitor visitor);

    Region *GetRegionFromEmptyList(DList &regionList);

    SpaceType spaceType_;

    // related allocator type
    AllocatorType allocatorType_;

    // underlying shared region pool
    RegionPool *regionPool_;

    size_t emptyTenuredRegionsMaxCount_;

    // regions allocated by this space
    DList regions_;

    // Empty regions which have not been returned back to the pool
    DList emptyYoungRegions_;
    DList emptyTenuredRegions_;
    // Use an atomic because it is updated in RegionSpace::PromoteYoungRegion without a lock
    std::atomic<size_t> youngRegionsInUse_ {0};
    // Desired eden length is not restricted initially
    size_t desiredEdenLength_ {std::numeric_limits<size_t>::max()};
};
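
// Usage sketch (illustrative; the calls exist above, the scenario is assumed):
// a generational GC might create an eden region in this space and later promote
// it to the tenured generation in place:
//
//     Region *r = space->NewRegion(DEFAULT_REGION_SIZE, RegionFlag::IS_EDEN, RegionFlag::IS_UNUSED);
//     // ... allocate into r, let its objects survive a young collection ...
//     space->PromoteYoungRegion(r);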

}  // namespace panda::mem

#endif  // PANDA_RUNTIME_MEM_REGION_SPACE_H