/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_REGION_ALLOCATOR_H
#define PANDA_RUNTIME_MEM_REGION_ALLOCATOR_H

#include <atomic>
#include <cstdint>

#include "runtime/mem/region_space.h"

namespace ark {
class ManagedThread;
struct GCTask;
} // namespace ark

namespace ark::mem {

class RegionAllocatorLockConfig {
public:
    using CommonLock = os::memory::Mutex;
    using DummyLock = os::memory::DummyLock;
};

using RegionsVisitor = std::function<void(PandaVector<Region *> &vector)>;
/// Return the region which corresponds to the start of the object.
static inline Region *ObjectToRegion(const ObjectHeader *object)
{
    auto *region = reinterpret_cast<Region *>(ToUintPtr(object) & ~DEFAULT_REGION_MASK);
    ASSERT(ToUintPtr(PoolManager::GetMmapMemPool()->GetStartAddrPoolForAddr(object)) == ToUintPtr(region));
    // Getting the region from an object is a bit operation, so TSAN does not
    // see the relation between region creation and region access.
    // This annotation tells TSAN that this code always executes after
    // the region has been created.
    // See the corresponding annotation in RegionAllocatorBase::CreateAndSetUpNewRegion
    TSAN_ANNOTATE_HAPPENS_AFTER(region);
    return region;
}
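
// A worked example (editorial, illustrative only; assumes DEFAULT_REGION_MASK ==
// region size - 1 and region-size-aligned regions): with 256 KB regions, an
// object at 0x7f0012345678 masks down to its region header at 0x7f0012340000.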

static inline bool IsSameRegion(const void *o1, const void *o2, size_t regionSizeBits)
{
    return ((ToUintPtr(o1) ^ ToUintPtr(o2)) >> regionSizeBits) == 0;
}
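
// Why the XOR trick above works (editorial note): two addresses lie in the same
// 2^regionSizeBits-aligned region exactly when all bits at or above bit
// regionSizeBits agree; XOR clears the common high bits, so the shifted result
// is zero only for same-region pointers.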

/// Return the region which corresponds to the address.
static inline Region *AddrToRegion(const void *addr)
{
    auto regionAddr = PoolManager::GetMmapMemPool()->GetStartAddrPoolForAddr(addr);
    return static_cast<Region *>(regionAddr);
}
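
// Editorial assumption: unlike the mask-based ObjectToRegion above, this lookup
// goes through the pool manager, so it presumably stays correct even for regions
// whose size exceeds DEFAULT_REGION_SIZE (e.g. humongous pools).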

template <typename LockConfigT>
class RegionAllocatorBase {
public:
    NO_MOVE_SEMANTIC(RegionAllocatorBase);
    NO_COPY_SEMANTIC(RegionAllocatorBase);

    explicit RegionAllocatorBase(MemStatsType *memStats, GenerationalSpaces *spaces, SpaceType spaceType,
                                 AllocatorType allocatorType, size_t initSpaceSize, bool extend, size_t regionSize,
                                 size_t emptyTenuredRegionsMaxCount);
    explicit RegionAllocatorBase(MemStatsType *memStats, GenerationalSpaces *spaces, SpaceType spaceType,
                                 AllocatorType allocatorType, RegionPool *sharedRegionPool,
                                 size_t emptyTenuredRegionsMaxCount);

    virtual ~RegionAllocatorBase()
    {
        ClearRegionsPool();
    }

    Region *GetRegion(const ObjectHeader *object) const
    {
        return regionSpace_.GetRegion(object);
    }

    RegionSpace *GetSpace()
    {
        return &regionSpace_;
    }

    const RegionSpace *GetSpace() const
    {
        return &regionSpace_;
    }

    PandaVector<Region *> GetAllRegions();

    template <RegionFlag REGION_TYPE, OSPagesPolicy OS_PAGES_POLICY>
    void ReleaseEmptyRegions()
    {
        this->GetSpace()->template ReleaseEmptyRegions<REGION_TYPE, OS_PAGES_POLICY>();
    }

protected:
    void ClearRegionsPool()
    {
        regionSpace_.FreeAllRegions();

        if (initBlock_.GetMem() != nullptr) {
            spaces_->FreeSharedPool(initBlock_.GetMem(), initBlock_.GetSize());
            initBlock_ = NULLPOOL;
        }
    }

    template <OSPagesAllocPolicy OS_ALLOC_POLICY>
    Region *AllocRegion(size_t regionSize, RegionFlag edenOrOldOrNonmovable, RegionFlag properties)
    {
        return regionSpace_.NewRegion(regionSize, edenOrOldOrNonmovable, properties, OS_ALLOC_POLICY);
    }

    SpaceType GetSpaceType() const
    {
        return spaceType_;
    }

    template <typename AllocConfigT, OSPagesAllocPolicy OS_ALLOC_POLICY = OSPagesAllocPolicy::NO_POLICY>
    Region *CreateAndSetUpNewRegion(size_t regionSize, RegionFlag regionType, RegionFlag properties = IS_UNUSED)
        REQUIRES(regionLock_);

    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    LockConfigT regionLock_;
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    MemStatsType *memStats_;
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    SpaceType spaceType_;
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    GenerationalSpaces *spaces_;
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    RegionPool regionPool_; // self-created pool, only used by this allocator
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    RegionSpace regionSpace_; // the target region space used by this allocator
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    Pool initBlock_; // the initial memory block for region allocation
};

/// @brief A region-based bump-pointer allocator.
template <typename AllocConfigT, typename LockConfigT = RegionAllocatorLockConfig::CommonLock>
class RegionAllocator final : public RegionAllocatorBase<LockConfigT> {
public:
    static constexpr bool USE_PARTIAL_TLAB = true;
    static constexpr size_t REGION_SIZE = DEFAULT_REGION_SIZE;

    NO_MOVE_SEMANTIC(RegionAllocator);
    NO_COPY_SEMANTIC(RegionAllocator);

    /**
     * @brief Create a new region allocator.
     * @param memStats - memory statistics
     * @param spaces - generational spaces used for pool allocation
     * @param spaceType - space type
     * @param initSpaceSize - initial contiguous space size; 0 means no initial space is needed
     * @param extend - if true, allocate more regions from the mmap pool when the initial space is not enough
     */
    explicit RegionAllocator(MemStatsType *memStats, GenerationalSpaces *spaces,
                             SpaceType spaceType = SpaceType::SPACE_TYPE_OBJECT, size_t initSpaceSize = 0,
                             bool extend = true, size_t emptyTenuredRegionsMaxCount = 0);

    /**
     * @brief Create a new region allocator with a shared region pool.
     * @param memStats - memory statistics
     * @param spaces - generational spaces used for pool allocation
     * @param spaceType - space type
     * @param sharedRegionPool - a shared region pool that can be reused by multiple spaces
     */
    explicit RegionAllocator(MemStatsType *memStats, GenerationalSpaces *spaces, SpaceType spaceType,
                             RegionPool *sharedRegionPool, size_t emptyTenuredRegionsMaxCount = 0);

    ~RegionAllocator() override = default;

    template <RegionFlag REGION_TYPE = RegionFlag::IS_EDEN, bool UPDATE_MEMSTATS = true>
    void *Alloc(size_t size, Alignment align = DEFAULT_ALIGNMENT, bool pinned = false);
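
    // A minimal usage sketch (editorial, not from this codebase): allocate a
    // 64-byte object in the eden space; nullptr means no region could be provided.
    //
    //     void *mem = allocator.Alloc<RegionFlag::IS_EDEN>(64U);
    //     if (mem == nullptr) { /* out of regions: trigger GC or report OOM */ }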

    template <typename T>
    T *AllocArray(size_t arrLength)
    {
        return static_cast<T *>(Alloc(sizeof(T) * arrLength));
    }

    void Free([[maybe_unused]] void *mem) {}

    void PinObject(ObjectHeader *object);

    void UnpinObject(ObjectHeader *object);

    /**
     * @brief Create a TLAB of the specified size.
     * @param size - required size of the TLAB
     * @return the newly allocated TLAB; the TLAB is set to empty if the allocation failed
     */
    TLAB *CreateTLAB(size_t size);
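
    // Usage sketch (editorial; the size and the emptiness check are illustrative):
    //
    //     TLAB *tlab = allocator.CreateTLAB(4_KB);
    //     // per the contract above, a failed allocation yields an empty TLAB,
    //     // so check its state before bump-allocating from it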

    /**
     * @brief Create a TLAB in a new region. The TLAB will occupy the whole region.
     * @return the newly allocated TLAB; the TLAB is set to empty if the allocation failed
     */
    TLAB *CreateRegionSizeTLAB();

    /**
     * @brief Iterate over all objects allocated by this allocator.
     * @param visitor - function pointer or functor
     */
    template <typename ObjectVisitor>
    void IterateOverObjects(const ObjectVisitor &visitor)
    {
        this->GetSpace()->IterateRegions([&](Region *region) { region->IterateOverObjects(visitor); });
    }

    template <typename ObjectVisitor>
    void IterateOverObjectsInRange(const ObjectVisitor &visitor, void *begin, void *end)
    {
        this->GetSpace()->IterateRegions([&](Region *region) {
            if (region->Intersect(ToUintPtr(begin), ToUintPtr(end))) {
                region->IterateOverObjects([&visitor, begin, end](ObjectHeader *obj) {
                    if (ToUintPtr(begin) <= ToUintPtr(obj) && ToUintPtr(obj) < ToUintPtr(end)) {
                        visitor(obj);
                    }
                });
            }
        });
    }

    template <bool INCLUDE_CURRENT_REGION>
    PandaPriorityQueue<std::pair<uint32_t, Region *>> GetTopGarbageRegions();
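
    // Editorial note: std::pair ordering makes this a max-heap on the uint32_t
    // key, so the top entry is the region with the largest key; presumably the
    // key is the per-region garbage estimate, hence "top garbage regions".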

    /**
     * Return a vector of all regions with the specific type.
     * @tparam REGIONS_TYPE - type of regions to proceed with.
     * @return vector of all regions with the @p REGIONS_TYPE type
     */
    template <RegionFlag REGIONS_TYPE>
    PandaVector<Region *> GetAllSpecificRegions();

    /**
     * Iterate over all regions with type @p REGIONS_TYPE_FROM
     * and move all alive objects to regions with type @p REGIONS_TYPE_TO.
     * NOTE: @p REGIONS_TYPE_FROM and @p REGIONS_TYPE_TO can't be equal.
     * @tparam REGIONS_TYPE_FROM - type of regions to proceed with.
     * @tparam REGIONS_TYPE_TO - type of regions to which we want to move all alive objects.
     * @tparam USE_MARKED_BITMAP - whether to use the marked bitmap from the regions.
     * @param deathChecker - checker that returns the status of each iterated object.
     * @param moveHandler - called for every moved object;
     * can be used as a simple visitor if @p USE_MARKED_BITMAP is enabled
     */
    template <RegionFlag REGIONS_TYPE_FROM, RegionFlag REGIONS_TYPE_TO, bool USE_MARKED_BITMAP = false>
    void CompactAllSpecificRegions(const GCObjectVisitor &deathChecker, const ObjectVisitorEx &moveHandler);
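
    // A hedged usage sketch of a compaction pass; the lambdas and helper names
    // (IsMarked, RecordForwarding) are illustrative assumptions, not GC code
    // from this repository:
    //
    //     allocator.CompactAllSpecificRegions<RegionFlag::IS_EDEN, RegionFlag::IS_OLD>(
    //         [](ObjectHeader *obj) {
    //             return IsMarked(obj) ? ObjectStatus::ALIVE_OBJECT : ObjectStatus::DEAD_OBJECT;
    //         },
    //         [](ObjectHeader *from, ObjectHeader *to) { RecordForwarding(from, to); });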

    template <RegionFlag REGION_TYPE>
    void ClearCurrentRegion()
    {
        ResetCurrentRegion<false, REGION_TYPE>();
    }

    /**
     * Iterate over specific regions from a vector
     * and move all alive objects to regions with type @p REGIONS_TYPE_TO.
     * @tparam REGIONS_TYPE_FROM - type of regions to proceed with.
     * @tparam REGIONS_TYPE_TO - type of regions to which we want to move all alive objects.
     * @tparam USE_MARKED_BITMAP - whether to use the marked bitmap from the regions.
     * @param regions - vector of regions to proceed with.
     * @param deathChecker - checker that returns the status of each iterated object.
     * @param moveHandler - called for every moved object;
     * can be used as a simple visitor if @p USE_MARKED_BITMAP is enabled
     */
    template <RegionFlag REGIONS_TYPE_FROM, RegionFlag REGIONS_TYPE_TO, bool USE_MARKED_BITMAP = false>
    void CompactSeveralSpecificRegions(const PandaVector<Region *> &regions, const GCObjectVisitor &deathChecker,
                                       const ObjectVisitorEx &moveHandler);

    /**
     * Iterate over a specific region
     * and move all alive objects to regions with type @p REGIONS_TYPE_TO.
     * @tparam REGIONS_TYPE_FROM - type of regions to proceed with.
     * @tparam REGIONS_TYPE_TO - type of regions to which we want to move all alive objects.
     * @tparam USE_MARKED_BITMAP - whether to use the marked bitmap from the regions.
     * @param region - region to proceed with.
     * @param deathChecker - checker that returns the status of each iterated object.
     * @param moveHandler - called for every moved object;
     * can be used as a simple visitor if @p USE_MARKED_BITMAP is enabled
     */
    template <RegionFlag REGIONS_TYPE_FROM, RegionFlag REGIONS_TYPE_TO, bool USE_MARKED_BITMAP = false>
    void CompactSpecificRegion(Region *region, const GCObjectVisitor &deathChecker,
                               const ObjectVisitorEx &moveHandler);

    template <bool USE_MARKED_BITMAP = false>
    void PromoteYoungRegion(Region *region, const GCObjectVisitor &deathChecker,
                            const ObjectVisitor &aliveObjectsHandler);
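
    // Editorial note: promotion re-labels a whole young region as tenured instead
    // of copying its live objects, which is presumably why it takes a plain
    // ObjectVisitor for survivors rather than the move handler used by the
    // compaction paths above.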

    /**
     * Reset all regions with type @p REGIONS_TYPE.
     * @tparam REGIONS_TYPE - type of regions to proceed with.
     */
    template <RegionFlag REGIONS_TYPE>
    void ResetAllSpecificRegions();

    /**
     * Reset regions from a container.
     * @tparam REGIONS_TYPE - type of regions to proceed with.
     * @tparam REGIONS_RELEASE_POLICY - whether a region is placed in the free queue or returned to the mempool.
     * @tparam OS_PAGES_POLICY - whether to return region pages to the OS.
     * @tparam NEED_LOCK - whether to take the region lock. Use it if regions are allocated in parallel.
     * @tparam Container - the regions' container type
     * @param regions - container of regions to proceed with.
     */
    template <RegionFlag REGIONS_TYPE, RegionSpace::ReleaseRegionsPolicy REGIONS_RELEASE_POLICY,
              OSPagesPolicy OS_PAGES_POLICY, bool NEED_LOCK, typename Container>
    void ResetSeveralSpecificRegions(const Container &regions);

    /// Reserve one region if there is no reserved region.
    void ReserveRegionIfNeeded();

    /// Release the reserved region back to free space.
    void ReleaseReservedRegion();

    void VisitAndRemoveAllPools([[maybe_unused]] const MemVisitor &memVisitor)
    {
        this->ClearRegionsPool();
    }

    constexpr static size_t GetMaxRegularObjectSize()
    {
        return REGION_SIZE - AlignUp(sizeof(Region), DEFAULT_ALIGNMENT_IN_BYTES);
    }

    bool ContainObject(const ObjectHeader *object) const
    {
        return this->GetSpace()->ContainObject(object);
    }

    bool IsLive(const ObjectHeader *object) const
    {
        return this->GetSpace()->IsLive(object);
    }

    static constexpr AllocatorType GetAllocatorType()
    {
        return AllocatorType::REGION_ALLOCATOR;
    }

    void SetDesiredEdenLength(size_t edenLength)
    {
        this->GetSpace()->SetDesiredEdenLength(edenLength);
    }

    void AddPromotedRegionToQueueIfPinned(Region *region)
    {
        if (region->HasPinnedObjects()) {
            ASSERT(region->HasFlag(RegionFlag::IS_PROMOTED));
            PushToRegionQueue<false, RegionFlag::IS_PINNED>(region);
        }
    }

private:
    template <bool USE_ATOMIC = true, RegionFlag REGION_TYPE>
    Region *GetCurrentRegion()
    {
        Region **curRegion = GetCurrentRegionPointerUnsafe<REGION_TYPE>();
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (USE_ATOMIC) {
            // Atomic with relaxed order reason: data race with curRegion with no synchronization or ordering
            // constraints imposed on other reads or writes
            return reinterpret_cast<std::atomic<Region *> *>(curRegion)->load(std::memory_order_relaxed);
            // NOLINTNEXTLINE(readability-misleading-indentation)
        }
        return *curRegion;
    }

    template <bool USE_ATOMIC = true, RegionFlag REGION_TYPE>
    void SetCurrentRegion(Region *region)
    {
        Region **curRegion = GetCurrentRegionPointerUnsafe<REGION_TYPE>();
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (USE_ATOMIC) {
            // Atomic with relaxed order reason: data race with curRegion with no synchronization or ordering
            // constraints imposed on other reads or writes
            reinterpret_cast<std::atomic<Region *> *>(curRegion)->store(region, std::memory_order_relaxed);
            // NOLINTNEXTLINE(readability-misleading-indentation)
        } else {
            *curRegion = region;
        }
    }

    template <RegionFlag REGION_TYPE>
    Region **GetCurrentRegionPointerUnsafe()
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (REGION_TYPE == RegionFlag::IS_EDEN) {
            return &edenCurrentRegion_;
        }
        UNREACHABLE();
        return nullptr;
    }

    template <bool USE_ATOMIC = true, RegionFlag REGION_TYPE>
    void ResetCurrentRegion()
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (REGION_TYPE == RegionFlag::IS_EDEN) {
            SetCurrentRegion<USE_ATOMIC, REGION_TYPE>(&fullRegion_);
            return;
        }
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (REGION_TYPE == RegionFlag::IS_OLD) {
            os::memory::LockHolder<os::memory::Mutex, USE_ATOMIC> lock(*GetQueueLock<REGION_TYPE>());
            GetRegionQueuePointer<REGION_TYPE>()->clear();
            return;
        }
        UNREACHABLE();
    }

    template <bool USE_ATOMIC = true, RegionFlag REGION_TYPE>
    bool IsInCurrentRegion(Region *region)
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (REGION_TYPE == RegionFlag::IS_EDEN) {
            return GetCurrentRegion<USE_ATOMIC, REGION_TYPE>() == region;
        }
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (REGION_TYPE != RegionFlag::IS_OLD) {
            LOG(FATAL, ALLOC) << "Region type is neither eden nor old";
        }
        os::memory::LockHolder<os::memory::Mutex, USE_ATOMIC> lock(*GetQueueLock<REGION_TYPE>());
        for (auto i : *GetRegionQueuePointer<REGION_TYPE>()) {
            if (i == region) {
                return true;
            }
        }
        return false;
    }

public:
    template <bool USE_ATOMIC = true, RegionFlag REGION_TYPE>
    Region *PopFromRegionQueue()
    {
        PandaVector<Region *> *regionQueue = GetRegionQueuePointer<REGION_TYPE>();
        os::memory::LockHolder<os::memory::Mutex, USE_ATOMIC> lock(*GetQueueLock<REGION_TYPE>());
        if (regionQueue->empty()) {
            return nullptr;
        }
        auto *region = regionQueue->back();
        regionQueue->pop_back();
        return region;
    }

    // NOLINTNEXTLINE(readability-identifier-naming)
    template <bool USE_ATOMIC = true, RegionFlag REGION_TYPE>
    void PushToRegionQueue(Region *region)
    {
        PandaVector<Region *> *regionQueue = GetRegionQueuePointer<REGION_TYPE>();
        os::memory::LockHolder<os::memory::Mutex, USE_ATOMIC> lock(*GetQueueLock<REGION_TYPE>());
        regionQueue->push_back(region);
    }

    template <bool USE_ATOMIC = true, RegionFlag REGION_TYPE>
    Region *CreateAndSetUpNewRegionWithLock()
    {
        os::memory::LockHolder<LockConfigT, USE_ATOMIC> lock(this->regionLock_);
        Region *regionTo = this->template CreateAndSetUpNewRegion<AllocConfigT>(DEFAULT_REGION_SIZE, REGION_TYPE);
        ASSERT(regionTo != nullptr);
        return regionTo;
    }

private:
    template <RegionFlag REGION_TYPE>
    os::memory::Mutex *GetQueueLock()
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (REGION_TYPE == RegionFlag::IS_OLD || REGION_TYPE == RegionFlag::IS_PINNED) {
            return &oldQueueLock_;
        }
        UNREACHABLE();
        return nullptr;
    }

    template <RegionFlag REGION_TYPE>
    PandaVector<Region *> *GetRegionQueuePointer()
    {
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (REGION_TYPE == RegionFlag::IS_OLD) {
            return &oldRegionQueue_;
        } else if constexpr (REGION_TYPE == RegionFlag::IS_PINNED) {
            return &pinnedRegionQueue_;
        }
        UNREACHABLE();
        return nullptr;
    }

    template <RegionFlag REGION_TYPE>
    void *AllocRegular(size_t alignSize);
    void *AllocRegularPinned(size_t alignSize);
    TLAB *CreateTLABInRegion(Region *region, size_t size);

    Region fullRegion_;
    Region *edenCurrentRegion_;
    Region *reservedRegion_ = nullptr;
    os::memory::Mutex oldQueueLock_;
    PandaVector<Region *> oldRegionQueue_;
    PandaVector<Region *> pinnedRegionQueue_;
    // Stores partially used regions that can be reused later.
    ark::PandaMultiMap<size_t, Region *, std::greater<size_t>> retainedTlabs_;
    friend class test::RegionAllocatorTest;
};

template <typename AllocConfigT, typename LockConfigT, typename ObjectAllocator>
class RegionNonmovableAllocator final : public RegionAllocatorBase<LockConfigT> {
public:
    static constexpr size_t REGION_SIZE = DEFAULT_REGION_SIZE;

    NO_MOVE_SEMANTIC(RegionNonmovableAllocator);
    NO_COPY_SEMANTIC(RegionNonmovableAllocator);

    explicit RegionNonmovableAllocator(MemStatsType *memStats, GenerationalSpaces *spaces, SpaceType spaceType,
                                       size_t initSpaceSize = 0, bool extend = true);
    explicit RegionNonmovableAllocator(MemStatsType *memStats, GenerationalSpaces *spaces, SpaceType spaceType,
                                       RegionPool *sharedRegionPool);

    ~RegionNonmovableAllocator() override = default;

    void *Alloc(size_t size, Alignment align = DEFAULT_ALIGNMENT);

    void Free(void *mem);

    void Collect(const GCObjectVisitor &deathChecker);

    template <typename ObjectVisitor>
    void IterateOverObjects(const ObjectVisitor &objVisitor)
    {
        objectAllocator_.IterateOverObjects(objVisitor);
    }

    template <typename MemVisitor>
    void IterateOverObjectsInRange(const MemVisitor &memVisitor, void *begin, void *end)
    {
        objectAllocator_.IterateOverObjectsInRange(memVisitor, begin, end);
    }

    void VisitAndRemoveAllPools([[maybe_unused]] const MemVisitor &memVisitor)
    {
        objectAllocator_.VisitAndRemoveAllPools([this](void *mem, [[maybe_unused]] size_t size) {
            auto *region = AddrToRegion(mem);
            ASSERT(ToUintPtr(mem) + size == region->End());
            this->GetSpace()->FreeRegion(region);
        });
    }

    void VisitAndRemoveFreeRegions(const RegionsVisitor &regionVisitor);

    constexpr static size_t GetMaxSize()
    {
        // NOTE(yxr) : get accurate max payload size in a freelist pool
        return std::min(ObjectAllocator::GetMaxSize(), static_cast<size_t>(REGION_SIZE - 1_KB));
    }

    bool ContainObject(const ObjectHeader *object) const
    {
        return objectAllocator_.ContainObject(object);
    }

    bool IsLive(const ObjectHeader *object) const
    {
        ASSERT(this->GetRegion(object)->GetLiveBitmap() != nullptr);
        return this->GetRegion(object)->GetLiveBitmap()->AtomicTest(const_cast<ObjectHeader *>(object));
    }

private:
    void *NewRegionAndRetryAlloc(size_t objectSize, Alignment align);

    mutable ObjectAllocator objectAllocator_;
};

/// @brief A region-based humongous allocator.
template <typename AllocConfigT, typename LockConfigT = RegionAllocatorLockConfig::CommonLock>
class RegionHumongousAllocator final : public RegionAllocatorBase<LockConfigT> {
public:
    static constexpr size_t REGION_SIZE = DEFAULT_REGION_SIZE;

    NO_MOVE_SEMANTIC(RegionHumongousAllocator);
    NO_COPY_SEMANTIC(RegionHumongousAllocator);

    /**
     * @brief Create a new humongous region allocator.
     * @param memStats - memory statistics
     * @param spaces - generational spaces used for pool allocation
     * @param spaceType - space type
     */
    explicit RegionHumongousAllocator(MemStatsType *memStats, GenerationalSpaces *spaces, SpaceType spaceType);

    ~RegionHumongousAllocator() override = default;

    template <bool UPDATE_MEMSTATS = true>
    void *Alloc(size_t size, Alignment align = DEFAULT_ALIGNMENT);

    template <typename T>
    T *AllocArray(size_t arrLength)
    {
        return static_cast<T *>(Alloc(sizeof(T) * arrLength));
    }

    void Free([[maybe_unused]] void *mem) {}

    void CollectAndRemoveFreeRegions(const RegionsVisitor &regionVisitor, const GCObjectVisitor &deathChecker);

    /**
     * @brief Iterate over all objects allocated by this allocator.
     * @param visitor - function pointer or functor
     */
    template <typename ObjectVisitor>
    void IterateOverObjects(const ObjectVisitor &visitor)
    {
        this->GetSpace()->IterateRegions([&](Region *region) { region->IterateOverObjects(visitor); });
    }

    template <typename ObjectVisitor>
    void IterateOverObjectsInRange(const ObjectVisitor &visitor, void *begin, void *end)
    {
        this->GetSpace()->IterateRegions([&](Region *region) {
            if (region->Intersect(ToUintPtr(begin), ToUintPtr(end))) {
                region->IterateOverObjects([&visitor, begin, end](ObjectHeader *obj) {
                    if (ToUintPtr(begin) <= ToUintPtr(obj) && ToUintPtr(obj) < ToUintPtr(end)) {
                        visitor(obj);
                    }
                });
            }
        });
    }

    void VisitAndRemoveAllPools([[maybe_unused]] const MemVisitor &memVisitor)
    {
        this->ClearRegionsPool();
    }

    bool ContainObject(const ObjectHeader *object) const
    {
        return this->GetSpace()->template ContainObject<true>(object);
    }

    bool IsLive(const ObjectHeader *object) const
    {
        return this->GetSpace()->template IsLive<true>(object);
    }

private:
    void ResetRegion(Region *region);
    void Collect(Region *region, const GCObjectVisitor &deathChecker);

    // If we change this constant, we will increase fragmentation dramatically
    static_assert(REGION_SIZE / PANDA_POOL_ALIGNMENT_IN_BYTES == 1);
    friend class test::RegionAllocatorTest;
};

} // namespace ark::mem

#endif // PANDA_RUNTIME_MEM_REGION_ALLOCATOR_H