/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_REGION_SPACE_H
#define PANDA_RUNTIME_MEM_REGION_SPACE_H

#include <atomic>
#include <cstdint>

#include "libpandabase/utils/list.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/tlab.h"
#include "runtime/mem/rem_set.h"
#include "runtime/mem/heap_space.h"

namespace panda::mem {

enum RegionFlag {
    IS_UNUSED = 0U,
    IS_EDEN = 1U,
    IS_SURVIVOR = 1U << 1U,
    IS_OLD = 1U << 2U,
    IS_LARGE_OBJECT = 1U << 3U,
    IS_NONMOVABLE = 1U << 4U,
    IS_TLAB = 1U << 5U,
    IS_COLLECTION_SET = 1U << 6U,
    IS_FREE = 1U << 7U,
    IS_PROMOTED = 1U << 8U,
};
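
// Every non-zero RegionFlag occupies its own bit, so a region's flags can be combined
// with bitwise OR (see Region::AddFlag/HasFlag below). The static_asserts here are an
// illustrative addition, not part of the original API; they merely document the
// bit-disjointness the flag arithmetic relies on:
static_assert((IS_EDEN & IS_SURVIVOR) == 0U, "young region flags occupy distinct bits");
static_assert((IS_OLD | IS_PROMOTED) == (IS_OLD + IS_PROMOTED), "flags combine without bit overlap");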

constexpr bool IsYoungRegionFlag(RegionFlag flag)
{
    return flag == RegionFlag::IS_EDEN || flag == RegionFlag::IS_SURVIVOR;
}

static constexpr size_t DEFAULT_REGION_ALIGNMENT = 256_KB;
static constexpr size_t DEFAULT_REGION_SIZE = DEFAULT_REGION_ALIGNMENT;
static constexpr size_t DEFAULT_REGION_MASK = DEFAULT_REGION_ALIGNMENT - 1;
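
// With the default 256_KB alignment, DEFAULT_REGION_MASK is 0x3FFFF, so clearing the
// low 18 bits of an address yields the base of the region containing it (this is what
// Region::AddrToRegion below does for non-humongous objects). A worked example with
// an arbitrary, hypothetical address:
//
//   addr            = 0x07654321
//   addr & ~0x3FFFF = 0x07640000  // region base, a multiple of 256_KB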

using RemSetT = RemSet<>;

class RegionSpace;
class Region {
public:
    NO_THREAD_SANITIZE explicit Region(RegionSpace *space, uintptr_t begin, uintptr_t end)
        : space_(space),
          begin_(begin),
          end_(end),
          top_(begin),
          flags_(0),
          live_bytes_(0),
          live_bitmap_(nullptr),
          mark_bitmap_(nullptr),
          rem_set_(nullptr),
          tlab_vector_(nullptr)
    {
    }

    ~Region() = default;

    NO_COPY_SEMANTIC(Region);
    NO_MOVE_SEMANTIC(Region);

    void Destroy();

    RegionSpace *GetSpace()
    {
        return space_;
    }

    uintptr_t Begin() const
    {
        return begin_;
    }

    uintptr_t End() const
    {
        return end_;
    }

    bool Intersect(uintptr_t begin, uintptr_t end) const
    {
        return !(end <= begin_ || end_ <= begin);
    }

    uintptr_t Top() const
    {
        return top_;
    }

    void SetTop(uintptr_t new_top)
    {
        ASSERT(!IsTLAB());
        top_ = new_top;
    }

    uint32_t GetLiveBytes() const
    {
        // Atomic with relaxed order reason: load value without concurrency
        return live_bytes_.load(std::memory_order_relaxed);
    }

    uint32_t GetAllocatedBytes() const;

    uint32_t GetGarbageBytes() const
    {
        ASSERT(GetAllocatedBytes() >= GetLiveBytes());
        return GetAllocatedBytes() - GetLiveBytes();
    }

    void SetLiveBytes(uint32_t count)
    {
        // Atomic with relaxed order reason: store value without concurrency
        live_bytes_.store(count, std::memory_order_relaxed);
    }

    void AddLiveBytesConcurrently(uint32_t count)
    {
        live_bytes_ += count;
    }

    uint32_t CalcLiveBytes() const;

    uint32_t CalcMarkBytes() const;

    MarkBitmap *GetLiveBitmap() const
    {
        return live_bitmap_;
    }

    void IncreaseAllocatedObjects()
    {
        // We can call it from the promoted region
        ASSERT(live_bitmap_ != nullptr);
        allocated_objects_++;
    }

    size_t GetAllocatedObjects()
    {
        ASSERT(HasFlag(RegionFlag::IS_OLD));
        return allocated_objects_;
    }

    MarkBitmap *GetMarkBitmap() const
    {
        return mark_bitmap_;
    }

    RemSetT *GetRemSet()
    {
        return rem_set_;
    }

    void AddFlag(RegionFlag flag)
    {
        flags_ |= flag;
    }

    void RmvFlag(RegionFlag flag)
    {
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        flags_ &= ~flag;
    }

    bool HasFlags(RegionFlag flag) const
    {
        return (flags_ & flag) == flag;
    }

    bool HasFlag(RegionFlag flag) const
    {
        return (flags_ & flag) != 0;
    }
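
    // Note the asymmetry between the two checks above: HasFlags(f) requires every bit
    // of f to be set, while HasFlag(f) is satisfied by any set bit. A hypothetical
    // illustration for a region whose flags_ equal (IS_OLD | IS_PROMOTED):
    //   HasFlag(IS_OLD)   -> true   (the IS_OLD bit is set)
    //   HasFlag(IS_TLAB)  -> false  (the IS_TLAB bit is clear)
    // The distinction only matters when f combines several bits.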

    bool IsEden() const
    {
        return HasFlag(IS_EDEN);
    }

    bool IsSurvivor() const
    {
        return HasFlag(RegionFlag::IS_SURVIVOR);
    }

    bool IsYoung() const
    {
        return IsEden() || IsSurvivor();
    }

    bool IsInCollectionSet() const
    {
        return HasFlag(IS_COLLECTION_SET);
    }

    bool IsTLAB() const
    {
        ASSERT((tlab_vector_ == nullptr) || (top_ == begin_));
        return tlab_vector_ != nullptr;
    }

    size_t Size() const
    {
        return end_ - ToUintPtr(this);
    }

    template <bool atomic = true>
    NO_THREAD_SANITIZE void *Alloc(size_t aligned_size);

    template <typename ObjectVisitor>
    void IterateOverObjects(const ObjectVisitor &visitor);

    ObjectHeader *GetLargeObject()
    {
        ASSERT(HasFlag(RegionFlag::IS_LARGE_OBJECT));
        return reinterpret_cast<ObjectHeader *>(Begin());
    }

    bool IsInRange(const ObjectHeader *object) const
    {
        return ToUintPtr(object) >= begin_ && ToUintPtr(object) < end_;
    }

    [[nodiscard]] bool IsInAllocRange(const ObjectHeader *object) const
    {
        bool in_range = false;
        if (!IsTLAB()) {
            in_range = (ToUintPtr(object) >= begin_ && ToUintPtr(object) < top_);
        } else {
            for (auto i : *tlab_vector_) {
                in_range = i->ContainObject(object);
                if (in_range) {
                    break;
                }
            }
        }
        return in_range;
    }

    static bool IsAlignment(uintptr_t region_addr, size_t region_size)
    {
        return ((region_addr - HeapStartAddress()) % region_size) == 0;
    }

    constexpr static size_t HeadSize()
    {
        return AlignUp(sizeof(Region), DEFAULT_ALIGNMENT_IN_BYTES);
    }

    constexpr static size_t RegionSize(size_t object_size, size_t region_size)
    {
        return AlignUp(HeadSize() + object_size, region_size);
    }
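
    // Worked example with hypothetical sizes: for a 500_KB humongous object and
    // 256_KB regions, RegionSize(500_KB, 256_KB) rounds HeadSize() + 500_KB up to
    // 512_KB, i.e. the Region header and the object together occupy two default-sized
    // regions (assuming HeadSize() stays under the 12_KB of slack, which holds for
    // any plausible sizeof(Region)).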

    template <bool cross_region>
    static Region *AddrToRegion(const void *addr, size_t mask = DEFAULT_REGION_MASK)
    {
        // If (object address - region start address) can be larger than the region alignment,
        // we should get the region start address from the mmap pool, which records it in the allocator info
        if constexpr (cross_region) {  // NOLINT(readability-braces-around-statements, bugprone-suspicious-semicolon)
            ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(addr) == SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT);

            auto region_addr = PoolManager::GetMmapMemPool()->GetStartAddrPoolForAddr(const_cast<void *>(addr));
            return reinterpret_cast<Region *>(region_addr);
        }
        ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(addr) != SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT);

        return reinterpret_cast<Region *>(((ToUintPtr(addr)) & ~mask));
    }

    static uintptr_t HeapStartAddress()
    {
        // see MmapMemPool about the object space start address
#if defined(PANDA_USE_32_BIT_POINTER) && !defined(PANDA_TARGET_WINDOWS)
        return PANDA_32BITS_HEAP_START_ADDRESS;
#else
        return PoolManager::GetMmapMemPool()->GetMinObjectAddress();
#endif
    }

    InternalAllocatorPtr GetInternalAllocator();

    void CreateRemSet();

    void CreateTLABSupport();

    size_t GetRemainingSizeForTLABs() const;
    TLAB *CreateTLAB(size_t size);

    MarkBitmap *CreateMarkBitmap();
    MarkBitmap *CreateLiveBitmap();

    void SwapMarkBitmap()
    {
        ASSERT(live_bitmap_ != nullptr);
        ASSERT(mark_bitmap_ != nullptr);
        std::swap(live_bitmap_, mark_bitmap_);
    }

    void CloneMarkBitmapToLiveBitmap()
    {
        ASSERT(live_bitmap_ != nullptr);
        ASSERT(mark_bitmap_ != nullptr);
        live_bitmap_->ClearAllBits();
        mark_bitmap_->IterateOverMarkedChunks([this](void *object) { this->live_bitmap_->Set(object); });
    }

    void SetMarkBit(ObjectHeader *object);

#ifndef NDEBUG
    NO_THREAD_SANITIZE bool IsAllocating()
    {
        // Atomic with acquire order reason: data race with is_allocating_ with dependencies on reads after the load
        // which should become visible
        return reinterpret_cast<std::atomic<bool> *>(&is_allocating_)->load(std::memory_order_acquire);
    }

    NO_THREAD_SANITIZE bool IsIterating()
    {
        // Atomic with acquire order reason: data race with is_iterating_ with dependencies on reads after the load
        // which should become visible
        return reinterpret_cast<std::atomic<bool> *>(&is_iterating_)->load(std::memory_order_acquire);
    }

    NO_THREAD_SANITIZE bool SetAllocating(bool value)
    {
        if (IsIterating()) {
            return false;
        }
        // Atomic with release order reason: data race with is_allocating_ with dependencies on writes before the store
        // which should become visible to the acquire load
        reinterpret_cast<std::atomic<bool> *>(&is_allocating_)->store(value, std::memory_order_release);
        return true;
    }

    NO_THREAD_SANITIZE bool SetIterating(bool value)
    {
        if (IsAllocating()) {
            return false;
        }
        // Atomic with release order reason: data race with is_iterating_ with dependencies on writes before the store
        // which should become visible to the acquire load
        reinterpret_cast<std::atomic<bool> *>(&is_iterating_)->store(value, std::memory_order_release);
        return true;
    }
#endif

    DListNode *AsListNode()
    {
        return &node_;
    }

    static Region *AsRegion(const DListNode *node)
    {
        return reinterpret_cast<Region *>(ToUintPtr(node) - MEMBER_OFFSET(Region, node_));
    }

private:
    DListNode node_;
    RegionSpace *space_;
    uintptr_t begin_;
    uintptr_t end_;
    uintptr_t top_;
    uint32_t flags_;
    size_t allocated_objects_ {0};
    std::atomic<uint32_t> live_bytes_;
    MarkBitmap *live_bitmap_;           // records live objects for an old region
    MarkBitmap *mark_bitmap_;           // mark bitmap used in the current GC marking phase
    RemSetT *rem_set_;                  // remembered set (old region -> eden/survivor region)
    PandaVector<TLAB *> *tlab_vector_;  // pointer to a vector of the thread TLABs associated with this region
#ifndef NDEBUG
    bool is_allocating_ = false;
    bool is_iterating_ = false;
#endif
};

inline std::ostream &operator<<(std::ostream &out, const Region &region)
{
    if (region.HasFlag(RegionFlag::IS_LARGE_OBJECT)) {
        out << "H";
    } else if (region.HasFlag(RegionFlag::IS_NONMOVABLE)) {
        out << "NM";
    } else if (region.HasFlag(RegionFlag::IS_OLD)) {
        out << "T";
    } else {
        out << "Y";
    }
    std::ios_base::fmtflags flags = out.flags();
    out << std::hex << "[0x" << region.Begin() << "-0x" << region.End() << "]";
    out.flags(flags);
    return out;
}
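
// Sample output of the operator above for a young region spanning
// [0x12340000, 0x12380000): "Y[0x12340000-0x12380000]". Humongous, non-movable and
// tenured regions print with the "H", "NM" and "T" prefixes respectively (the
// addresses here are illustrative only).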

// RegionBlock is used to allocate regions from a contiguous big memory block
// |--------------------------|
// |.....RegionBlock class....|
// |--------------------------|
// |.......regions_end_.......|--------|
// |.......regions_begin_.....|----|   |
// |--------------------------|    |   |
//                                 |   |
// |   Contiguous Mem Block   |    |   |
// |--------------------------|    |   |
// |...........Region.........|<---|   |
// |...........Region.........|        |
// |...........Region.........|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |..........................|        |
// |...........Region.........|<-------|
class RegionBlock {
public:
    RegionBlock(size_t region_size, InternalAllocatorPtr allocator) : region_size_(region_size), allocator_(allocator)
    {
    }

    ~RegionBlock()
    {
        if (!occupied_.Empty()) {
            allocator_->Free(occupied_.Data());
        }
    }

    NO_COPY_SEMANTIC(RegionBlock);
    NO_MOVE_SEMANTIC(RegionBlock);

    void Init(uintptr_t regions_begin, uintptr_t regions_end);

    Region *AllocRegion();

    Region *AllocLargeRegion(size_t large_region_size);

    void FreeRegion(Region *region, bool release_pages = true);

    bool IsAddrInRange(const void *addr) const
    {
        return ToUintPtr(addr) < regions_end_ && ToUintPtr(addr) >= regions_begin_;
    }

    Region *GetAllocatedRegion(const void *addr) const
    {
        ASSERT(IsAddrInRange(addr));
        os::memory::LockHolder lock(lock_);
        return occupied_[RegionIndex(addr)];
    }

    size_t GetFreeRegionsNum() const
    {
        os::memory::LockHolder lock(lock_);
        return occupied_.Size() - num_used_regions_;
    }

private:
    Region *RegionAt(size_t index) const
    {
        return reinterpret_cast<Region *>(regions_begin_ + index * region_size_);
    }

    size_t RegionIndex(const void *addr) const
    {
        return (ToUintPtr(addr) - regions_begin_) / region_size_;
    }

    size_t region_size_;
    InternalAllocatorPtr allocator_;
    uintptr_t regions_begin_ = 0;
    uintptr_t regions_end_ = 0;
    size_t num_used_regions_ = 0;
    Span<Region *> occupied_ GUARDED_BY(lock_);
    mutable os::memory::Mutex lock_;
};
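
// A minimal usage sketch for RegionBlock (the address range and the allocator value
// are hypothetical; in this codebase the wiring is done by RegionPool below, which
// owns a RegionBlock and initializes it via InitRegionBlock):
//
//   RegionBlock block(DEFAULT_REGION_SIZE, allocator);
//   block.Init(regions_begin, regions_end);  // carve [begin, end) into region slots
//   Region *r = block.AllocRegion();         // take one free slot
//   block.FreeRegion(r);                     // return the slot (optionally releasing pages)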

// RegionPool can work in three ways:
// 1. allocate regions from a pre-allocated buffer (RegionBlock)
// 2. allocate regions directly from the mmap pool
// 3. a mix of the two ways above
class RegionPool {
public:
    explicit RegionPool(size_t region_size, bool extend, GenerationalSpaces *spaces, InternalAllocatorPtr allocator)
        : block_(region_size, allocator),
          region_size_(region_size),
          spaces_(spaces),
          allocator_(allocator),
          extend_(extend)
    {
    }

    Region *NewRegion(RegionSpace *space, SpaceType space_type, AllocatorType allocator_type, size_t region_size,
                      RegionFlag eden_or_old_or_nonmovable, RegionFlag properties);

    void FreeRegion(Region *region, bool release_pages = true);

    void PromoteYoungRegion(Region *region);

    void InitRegionBlock(uintptr_t regions_begin, uintptr_t regions_end)
    {
        block_.Init(regions_begin, regions_end);
    }

    bool IsAddrInPoolRange(const void *addr) const
    {
        return block_.IsAddrInRange(addr) || IsAddrInExtendPoolRange(addr);
    }

    template <bool cross_region = false>
    Region *GetRegion(const void *addr) const
    {
        if (block_.IsAddrInRange(addr)) {
            return block_.GetAllocatedRegion(addr);
        }
        if (IsAddrInExtendPoolRange(addr)) {
            return Region::AddrToRegion<cross_region>(addr);
        }
        return nullptr;
    }

    size_t GetFreeRegionsNumInRegionBlock() const
    {
        return block_.GetFreeRegionsNum();
    }

    bool HaveTenuredSize(size_t size) const;

    bool HaveFreeRegions(size_t num_regions, size_t region_size) const;

    InternalAllocatorPtr GetInternalAllocator()
    {
        return allocator_;
    }

    ~RegionPool() = default;
    NO_COPY_SEMANTIC(RegionPool);
    NO_MOVE_SEMANTIC(RegionPool);

private:
    bool IsAddrInExtendPoolRange(const void *addr) const
    {
        if (extend_) {
            AllocatorInfo alloc_info = PoolManager::GetMmapMemPool()->GetAllocatorInfoForAddr(const_cast<void *>(addr));
            return alloc_info.GetAllocatorHeaderAddr() == this;
        }
        return false;
    }

    RegionBlock block_;
    size_t region_size_;
    GenerationalSpaces *spaces_ {nullptr};
    InternalAllocatorPtr allocator_;
    bool extend_ = true;
};
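
// Address-to-region resolution in RegionPool::GetRegion is two-step: an address inside
// the pre-allocated RegionBlock is looked up in its occupied_ table, while an address
// in the extended (mmap) pool falls back to the Region::AddrToRegion mask arithmetic.
// A hedged illustration, assuming pool and obj come from elsewhere:
//
//   Region *r = pool->GetRegion(obj);        // nullptr if obj is not pool memory
//   Region *h = pool->GetRegion<true>(obj);  // cross-region variant for humongous objects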

class RegionSpace {
public:
    explicit RegionSpace(SpaceType space_type, AllocatorType allocator_type, RegionPool *region_pool)
        : space_type_(space_type), allocator_type_(allocator_type), region_pool_(region_pool)
    {
    }

    virtual ~RegionSpace()
    {
        FreeAllRegions();
    }

    NO_COPY_SEMANTIC(RegionSpace);
    NO_MOVE_SEMANTIC(RegionSpace);

    Region *NewRegion(size_t region_size, RegionFlag eden_or_old_or_nonmovable, RegionFlag properties);

    void FreeRegion(Region *region);

    void PromoteYoungRegion(Region *region);

    void FreeAllRegions();

    template <typename RegionVisitor>
    void IterateRegions(RegionVisitor visitor);

    RegionPool *GetPool() const
    {
        return region_pool_;
    }

    template <bool cross_region = false>
    Region *GetRegion(const ObjectHeader *object) const
    {
        auto *region = region_pool_->GetRegion<cross_region>(object);

        // check whether the region was allocated by this space
        return (region != nullptr && region->GetSpace() == this) ? region : nullptr;
    }

    template <bool cross_region = false>
    bool ContainObject(const ObjectHeader *object) const;

    template <bool cross_region = false>
    bool IsLive(const ObjectHeader *object) const;

private:
    void DestroyRegion(Region *region)
    {
        region->Destroy();
        region_pool_->FreeRegion(region);
    }

    SpaceType space_type_;

    // related allocator type
    AllocatorType allocator_type_;

    // underlying shared region pool
    RegionPool *region_pool_;

    // regions allocated by this space
    DList regions_;
};
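
// A hedged end-to-end sketch of how these pieces compose (the SpaceType/AllocatorType
// values and the spaces/allocator objects are illustrative assumptions, not taken
// from this header):
//
//   RegionPool pool(DEFAULT_REGION_SIZE, false, spaces, allocator);
//   RegionSpace space(SpaceType::SPACE_TYPE_OBJECT, AllocatorType::REGION_ALLOCATOR, &pool);
//   Region *eden = space.NewRegion(DEFAULT_REGION_SIZE, RegionFlag::IS_EDEN, RegionFlag::IS_UNUSED);
//   space.IterateRegions([](Region *r) { /* visit every region owned by the space */ });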

}  // namespace panda::mem

#endif  // PANDA_RUNTIME_MEM_REGION_SPACE_H