/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef RUNTIME_MEM_ALLOCATOR_H
#define RUNTIME_MEM_ALLOCATOR_H

#include <functional>

#include "libpandabase/mem/code_allocator.h"
#include "libpandabase/mem/mem.h"
#include "libpandabase/mem/pool_map.h"
#include "libpandabase/utils/logger.h"
#include "libpandabase/macros.h"
#include "runtime/mem/bump-allocator.h"
#include "runtime/mem/freelist_allocator.h"
#include "runtime/mem/gc/bitmap.h"
#include "runtime/mem/gc/gc_types.h"
#include "runtime/mem/humongous_obj_allocator.h"
#include "runtime/mem/internal_allocator.h"
#include "runtime/mem/runslots_allocator.h"
#include "runtime/mem/pygote_space_allocator.h"
#include "runtime/mem/heap_space.h"

namespace panda {
class ObjectHeader;
class ManagedThread;
class BaseClass;
}  // namespace panda

namespace panda::mem {

class ObjectAllocConfigWithCrossingMap;
class ObjectAllocConfig;
class TLAB;

/// AllocatorPurpose and GCCollectMode indicate whether the GC should collect garbage from a given allocator
enum class AllocatorPurpose {
    ALLOCATOR_PURPOSE_OBJECT,    // Allocator for objects
    ALLOCATOR_PURPOSE_INTERNAL,  // Space for runtime internal needs
};

template <AllocatorType>
class AllocatorTraits {
};

template <>
class AllocatorTraits<AllocatorType::RUNSLOTS_ALLOCATOR> {
    using AllocType = RunSlotsAllocator<ObjectAllocConfig>;
    static constexpr bool HAS_FREE {true};  // Indicates that the allocator can free individual allocations
};

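// A compile-time usage sketch for AllocatorTraits (hypothetical: it assumes the
// specialization's members are made public, which they are not in this header):
//
//   static_assert(AllocatorTraits<AllocatorType::RUNSLOTS_ALLOCATOR>::HAS_FREE,
//                 "RunSlots-backed allocators can free individual allocations");
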
template <typename T, AllocScope ALLOC_SCOPE_T>
class AllocatorAdapter;

class Allocator {
public:
    template <typename T, AllocScope ALLOC_SCOPE_T = AllocScope::GLOBAL>
    using AdapterType = AllocatorAdapter<T, ALLOC_SCOPE_T>;

    NO_COPY_SEMANTIC(Allocator);
    NO_MOVE_SEMANTIC(Allocator);
    explicit Allocator(MemStatsType *memStats, AllocatorPurpose purpose, GCCollectMode gcCollectMode)
        : memStats_(memStats), allocatorPurpose_(purpose), gcCollectMode_(gcCollectMode)
    {
    }
    virtual ~Allocator() = 0;

    ALWAYS_INLINE AllocatorPurpose GetPurpose() const
    {
        return allocatorPurpose_;
    }

    ALWAYS_INLINE GCCollectMode GetCollectMode() const
    {
        return gcCollectMode_;
    }

    ALWAYS_INLINE MemStatsType *GetMemStats() const
    {
        return memStats_;
    }

    [[nodiscard]] void *Alloc(size_t size)
    {
        return Allocate(size, CalculateAllocatorAlignment(alignof(uintptr_t)), nullptr);
    }

    [[nodiscard]] void *Alloc(size_t size, Alignment align)
    {
        return Allocate(size, align, nullptr);
    }

    [[nodiscard]] void *AllocLocal(size_t size)
    {
        return AllocateLocal(size, CalculateAllocatorAlignment(alignof(uintptr_t)), nullptr);
    }

    [[nodiscard]] void *AllocLocal(size_t size, Alignment align)
    {
        return AllocateLocal(size, align, nullptr);
    }

119 
120     [[nodiscard]] virtual void *Allocate(size_t size, Alignment align,
121                                          [[maybe_unused]] panda::ManagedThread *thread) = 0;
122 
123     [[nodiscard]] virtual void *AllocateLocal(size_t size, Alignment align,
124                                               [[maybe_unused]] panda::ManagedThread *thread) = 0;
125 
126     template <class T>
AllocArray(size_t size)127     [[nodiscard]] T *AllocArray(size_t size)
128     {
129         return static_cast<T *>(this->Allocate(sizeof(T) * size, CalculateAllocatorAlignment(alignof(T)), nullptr));
130     }
131 
132     template <class T>
AllocArrayLocal(size_t size)133     [[nodiscard]] T *AllocArrayLocal(size_t size)
134     {
135         return static_cast<T *>(
136             this->AllocateLocal(sizeof(T) * size, CalculateAllocatorAlignment(alignof(T)), nullptr));
137     }
138 
139     template <class T>
Delete(T * ptr)140     void Delete(T *ptr)
141     {
142         if (ptr == nullptr) {
143             return;
144         }
145         // NOLINTNEXTLINE(readability-braces-around-statements,bugprone-suspicious-semicolon)
146         if constexpr (std::is_class_v<T>) {
147             ptr->~T();
148         }
149         Free(ptr);
150     }
151 
152     template <typename T>
DeleteArray(T * data)153     void DeleteArray(T *data)
154     {
155         if (data == nullptr) {
156             return;
157         }
158         static constexpr size_t SIZE_BEFORE_DATA_OFFSET =
159             AlignUp(sizeof(size_t), GetAlignmentInBytes(GetAlignment<T>()));
160         void *p = ToVoidPtr(ToUintPtr(data) - SIZE_BEFORE_DATA_OFFSET);
161         size_t size = *static_cast<size_t *>(p);
162         // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
163         if constexpr (std::is_class_v<T>) {
164             // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
165             for (size_t i = 0; i < size; ++i, ++data) {
166                 data->~T();
167             }
168         }
169         Free(p);
170     }
171 
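    // Layout note for DeleteArray/New<T[]>: array allocations store the element count in a
    // size_t header placed SIZE_BEFORE_DATA_OFFSET bytes before the first element, which is
    // what DeleteArray reads back. A sketch of the layout (offsets are illustrative):
    //
    //   [ size_t count | padding up to T's alignment | T[0] | T[1] | ... | T[count - 1] ]
    //   ^ p (block passed to Free)                   ^ data (pointer handed to callers)
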
    virtual void Free(void *mem) = 0;

    virtual void VisitAndRemoveAllPools(const MemVisitor &memVisitor) = 0;

    virtual void VisitAndRemoveFreePools(const MemVisitor &memVisitor) = 0;

    virtual void IterateOverYoungObjects([[maybe_unused]] const ObjectVisitor &objectVisitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateOverYoungObjects" << std::endl;
    }

    virtual void IterateOverTenuredObjects([[maybe_unused]] const ObjectVisitor &objectVisitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateOverTenuredObjects" << std::endl;
    }

    /// @brief Iterates over all objects in the object allocator
    virtual void IterateRegularSizeObjects([[maybe_unused]] const ObjectVisitor &objectVisitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateRegularSizeObjects";
    }

    /// @brief Iterates over objects in all allocators except the object allocator
    virtual void IterateNonRegularSizeObjects([[maybe_unused]] const ObjectVisitor &objectVisitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateNonRegularSizeObjects";
    }

    virtual void FreeObjectsMovedToPygoteSpace()
    {
        LOG(FATAL, ALLOC) << "Allocator::FreeObjectsMovedToPygoteSpace";
    }

    virtual void IterateOverObjectsInRange(MemRange memRange, const ObjectVisitor &objectVisitor) = 0;

    virtual void IterateOverObjects(const ObjectVisitor &objectVisitor) = 0;

    template <AllocScope ALLOC_SCOPE_T = AllocScope::GLOBAL>
    AllocatorAdapter<void, ALLOC_SCOPE_T> Adapter();

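    // A usage sketch for Adapter(): the AllocatorAdapter it returns is meant to plug this
    // allocator into standard containers. The vector below is illustrative caller code, not
    // something this header defines:
    //
    //   std::vector<int, Allocator::AdapterType<int>> vec(allocator->Adapter());
    //   vec.push_back(1);  // element storage is served by `allocator`
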
    template <typename T, typename... Args>
    std::enable_if_t<!std::is_array_v<T>, T *> New(Args &&...args)
    {
        void *p = Alloc(sizeof(T), CalculateAllocatorAlignment(alignof(T)));
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        new (p) T(std::forward<Args>(args)...);  // NOLINT(bugprone-throw-keyword-missing)
        return reinterpret_cast<T *>(p);
    }

    template <typename T>
    std::enable_if_t<is_unbounded_array_v<T>, std::remove_extent_t<T> *> New(size_t size)
    {
        static constexpr size_t SIZE_BEFORE_DATA_OFFSET =
            AlignUp(sizeof(size_t), GetAlignmentInBytes(GetAlignment<T>()));
        using ElementType = std::remove_extent_t<T>;
        void *p = Alloc(SIZE_BEFORE_DATA_OFFSET + sizeof(ElementType) * size, CalculateAllocatorAlignment(alignof(T)));
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        *static_cast<size_t *>(p) = size;
        auto *data = ToNativePtr<ElementType>(ToUintPtr(p) + SIZE_BEFORE_DATA_OFFSET);
        ElementType *currentElement = data;
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        for (size_t i = 0; i < size; ++i, ++currentElement) {
            new (currentElement) ElementType();
        }
        return data;
    }

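    // A caller-side sketch (hypothetical) pairing the unbounded-array New<T[]> overload
    // with DeleteArray, which reads back the size header written above:
    //
    //   struct Node { int value = 0; };
    //   Node *nodes = allocator->New<Node[]>(8);  // header + 8 value-initialized Nodes
    //   if (nodes != nullptr) {
    //       nodes[0].value = 42;
    //       allocator->DeleteArray(nodes);  // runs ~Node() 8 times, then frees the block
    //   }
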
    template <typename T, typename... Args>
    std::enable_if_t<!std::is_array_v<T>, T *> NewLocal(Args &&...args)
    {
        void *p = AllocLocal(sizeof(T), CalculateAllocatorAlignment(alignof(T)));
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        new (p) T(std::forward<Args>(args)...);  // NOLINT(bugprone-throw-keyword-missing)
        return reinterpret_cast<T *>(p);
    }

    template <typename T>
    std::enable_if_t<is_unbounded_array_v<T>, std::remove_extent_t<T> *> NewLocal(size_t size)
    {
        static constexpr size_t SIZE_BEFORE_DATA_OFFSET =
            AlignUp(sizeof(size_t), GetAlignmentInBytes(GetAlignment<T>()));
        using ElementType = std::remove_extent_t<T>;
        void *p =
            AllocLocal(SIZE_BEFORE_DATA_OFFSET + sizeof(ElementType) * size, CalculateAllocatorAlignment(alignof(T)));
        // Check for allocation failure before writing the size header (mirrors New<T[]> above)
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        *static_cast<size_t *>(p) = size;
        auto *data = ToNativePtr<ElementType>(ToUintPtr(p) + SIZE_BEFORE_DATA_OFFSET);
        ElementType *currentElement = data;
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        for (size_t i = 0; i < size; ++i, ++currentElement) {
            new (currentElement) ElementType();
        }
        return data;
    }

    virtual void *AllocateInLargeAllocator([[maybe_unused]] size_t size, [[maybe_unused]] Alignment align,
                                           [[maybe_unused]] BaseClass *cls)
    {
        return nullptr;
    }

#if defined(TRACK_INTERNAL_ALLOCATIONS)
    virtual void Dump() {}
#endif

protected:
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    MemStatsType *memStats_;

private:
    virtual Alignment CalculateAllocatorAlignment(size_t align) = 0;

    AllocatorPurpose allocatorPurpose_;
    GCCollectMode gcCollectMode_;
};

class ObjectAllocatorBase : public Allocator {
protected:
    using PygoteAllocator = PygoteSpaceAllocator<ObjectAllocConfig>;  // Allocator for the pygote space

    /// @brief Add new memory pools to the object allocator and allocate memory in them
    template <typename AllocT, bool NEED_LOCK = true>
    inline void *AddPoolsAndAlloc(size_t size, Alignment align, AllocT *objectAllocator, size_t poolSize,
                                  SpaceType spaceType, HeapSpace *heapSpace);

    /**
     * Try to allocate memory for the object; if that fails, add new memory pools and try again
     * @param size - size of the object in bytes
     * @param align - alignment
     * @param objectAllocator - allocator for the object
     * @param poolSize - size of a memory pool for the specified allocator
     * @param spaceType - SpaceType of the object
     * @param heapSpace - heap space to take new pools from
     * @return pointer to the allocated memory, or nullptr on failure
     */
    template <typename AllocT, bool NEED_LOCK = true>
    inline void *AllocateSafe(size_t size, Alignment align, AllocT *objectAllocator, size_t poolSize,
                              SpaceType spaceType, HeapSpace *heapSpace);

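    // Illustrative pseudocode for the documented AllocateSafe contract (the real definition
    // lives outside this header; the member calls below are assumptions, not its actual body):
    //
    //   void *mem = objectAllocator->template Alloc<NEED_LOCK>(size, align);
    //   if (mem == nullptr) {
    //       mem = AddPoolsAndAlloc<AllocT, NEED_LOCK>(size, align, objectAllocator,
    //                                                 poolSize, spaceType, heapSpace);
    //   }
    //   return mem;
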
    /**
     * @brief Initialize object memory allocated by an allocator.
     *        NOTE: the object header must be zero
     * @param mem - pointer to the allocated object
     * @param size - size of the object in bytes
     */
    void ObjectMemoryInit(void *mem, size_t size) const;

    /**
     * @brief Initialize memory which will be used for objects.
     * @param mem - pointer to the allocated memory
     * @param size - size of the memory in bytes
     */
    void MemoryInitialize(void *mem, size_t size) const;

public:
    enum class ObjMemInitPolicy : bool { NO_INIT, REQUIRE_INIT };
    ObjectAllocatorBase() = delete;
    NO_COPY_SEMANTIC(ObjectAllocatorBase);
    NO_MOVE_SEMANTIC(ObjectAllocatorBase);

    explicit ObjectAllocatorBase(MemStatsType *memStats, GCCollectMode gcCollectMode, bool createPygoteSpaceAllocator);

    ~ObjectAllocatorBase() override;

    void *Allocate([[maybe_unused]] size_t size, [[maybe_unused]] Alignment align,
                   [[maybe_unused]] panda::ManagedThread *thread) final
    {
        LOG(FATAL, ALLOC)
            << "Don't use the common Allocate method for object allocation without an object initialization argument";
        return nullptr;
    }

    [[nodiscard]] virtual void *Allocate(size_t size, Alignment align, panda::ManagedThread *thread,
                                         ObjMemInitPolicy objInit) = 0;
    [[nodiscard]] virtual void *AllocateNonMovable(size_t size, Alignment align, panda::ManagedThread *thread,
                                                   ObjMemInitPolicy objInit) = 0;

    /**
     * Iterate over all objects and reclaim memory for every object for which gcObjectVisitor returns true
     * @param gcObjectVisitor - function which returns true for an ObjectHeader if the memory occupied by that
     * object can be reclaimed
     */
    virtual void Collect(const GCObjectVisitor &gcObjectVisitor, GCCollectMode collectMode) = 0;

    /**
     * Return the max size for regular size objects
     * @return max size in bytes for regular size objects
     */
    virtual size_t GetRegularObjectMaxSize() = 0;

    /**
     * Return the max size for large objects
     * @return max size in bytes for large objects
     */
    virtual size_t GetLargeObjectMaxSize() = 0;

    /**
     * Check if the object is in the young space
     * @param obj - object address
     * @return true if @param obj is in the young space
     */
    virtual bool IsObjectInYoungSpace(const ObjectHeader *obj) = 0;

    /**
     * Check if @param memRange intersects the young space
     * @param memRange - memory range to check
     * @return true if @param memRange intersects the young space
     */
    virtual bool IsIntersectedWithYoung(const MemRange &memRange) = 0;

    /**
     * Check if the object is in the non-movable space
     * @param obj - object address
     * @return true if @param obj is in the non-movable space
     */
    virtual bool IsObjectInNonMovableSpace(const ObjectHeader *obj) = 0;

    /// @return true if the allocator has a young space
    virtual bool HasYoungSpace() = 0;

    /**
     * Pin the object address in the heap space. Such an object cannot be moved to another space
     * @param object - object to pin
     */
    virtual void PinObject(ObjectHeader *object) = 0;

    /**
     * Unpin a previously pinned object in the heap space. Such an object can be moved after this operation
     * @param object - object to unpin
     */
    virtual void UnpinObject(ObjectHeader *object) = 0;

    /**
     * Get young space memory ranges
     * @note PandaVector can't be used here
     * @return young space memory ranges
     */
    virtual const std::vector<MemRange> &GetYoungSpaceMemRanges() = 0;

    virtual std::vector<MarkBitmap *> &GetYoungSpaceBitmaps() = 0;

    virtual void ResetYoungAllocator() = 0;

    virtual TLAB *CreateNewTLAB(panda::ManagedThread *thread) = 0;

    virtual size_t GetTLABMaxAllocSize() = 0;

    virtual bool IsTLABSupported() = 0;

    /// @brief Check if the object allocator contains the object starting at address obj
    virtual bool ContainObject([[maybe_unused]] const ObjectHeader *obj) const = 0;

    /**
     * @brief Check if the object obj is live: obj is already allocated and
     * has not been collected yet.
     */
    virtual bool IsLive([[maybe_unused]] const ObjectHeader *obj) = 0;

    /// @brief Check if the current allocators' allocation state is valid.
    virtual size_t VerifyAllocatorStatus() = 0;

    virtual HeapSpace *GetHeapSpace() = 0;

    PygoteAllocator *GetPygoteSpaceAllocator()
    {
        return pygoteSpaceAllocator_;
    }

    const PygoteAllocator *GetPygoteSpaceAllocator() const
    {
        return pygoteSpaceAllocator_;
    }

    void DisablePygoteAlloc()
    {
        pygoteAllocEnabled_ = false;
    }

    bool IsPygoteAllocEnabled() const
    {
        ASSERT(!pygoteAllocEnabled_ || pygoteSpaceAllocator_ != nullptr);
        return pygoteAllocEnabled_;
    }

    static size_t GetObjectSpaceFreeBytes()
    {
        return PoolManager::GetMmapMemPool()->GetObjectSpaceFreeBytes();
    }

    bool HaveEnoughPoolsInObjectSpace(size_t poolsNum) const;

protected:
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    PygoteAllocator *pygoteSpaceAllocator_ = nullptr;
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    bool pygoteAllocEnabled_ = false;

private:
    void Free([[maybe_unused]] void *mem) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorBase shouldn't have Free";
    }
};

/**
 * Template wrapper for a single underlying allocator
 * @tparam AllocT - type of the underlying allocator
 */
template <typename AllocT, AllocatorPurpose ALLOCATOR_PURPOSE>
class AllocatorSingleT final : public Allocator {
public:
    // NOLINTNEXTLINE(readability-magic-numbers)
    explicit AllocatorSingleT(MemStatsType *memStats)
        : Allocator(memStats, ALLOCATOR_PURPOSE, GCCollectMode::GC_NONE), allocator_(memStats)
    {
    }
    ~AllocatorSingleT() final = default;
    NO_COPY_SEMANTIC(AllocatorSingleT);
    DEFAULT_NOEXCEPT_MOVE_SEMANTIC(AllocatorSingleT);

    [[nodiscard]] void *Allocate(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final
    {
        return allocator_.Alloc(size, align);
    }

    [[nodiscard]] void *AllocateLocal(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final
    {
        return allocator_.AllocLocal(size, align);
    }

    void Free(void *mem) final
    {
        allocator_.Free(mem);
    }

    void VisitAndRemoveAllPools(const MemVisitor &memVisitor) final
    {
        allocator_.VisitAndRemoveAllPools(memVisitor);
    }

    void VisitAndRemoveFreePools(const MemVisitor &memVisitor) final
    {
        allocator_.VisitAndRemoveFreePools(memVisitor);
    }

    void IterateOverObjectsInRange([[maybe_unused]] MemRange memRange,
                                   [[maybe_unused]] const ObjectVisitor &objectVisitor) final
    {
        LOG(FATAL, ALLOC) << "IterateOverObjectsInRange not implemented for AllocatorSingleT";
    }

    void IterateOverObjects([[maybe_unused]] const ObjectVisitor &objectVisitor) final
    {
        LOG(FATAL, ALLOC) << "IterateOverObjects not implemented for AllocatorSingleT";
    }

#if defined(TRACK_INTERNAL_ALLOCATIONS)
    void Dump() override
    {
        allocator_.Dump();
    }
#endif

private:
    Alignment CalculateAllocatorAlignment(size_t align) final
    {
        if constexpr (ALLOCATOR_PURPOSE == AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT) {
            return GetAlignment(align);
        }
        return GetInternalAlignment(align);
    }

    AllocT allocator_;
};

/**
 * Pointer wrapper. It checks that the purpose of the wrapped allocator matches the expected one.
 * @tparam ALLOCATOR_PURPOSE - expected purpose of the wrapped allocator
 */
template <AllocatorPurpose ALLOCATOR_PURPOSE>
class AllocatorPtr {
public:
    AllocatorPtr() = default;
    // NOLINTNEXTLINE(google-explicit-constructor)
    AllocatorPtr(std::nullptr_t aNullptr) noexcept : allocatorPtr_(aNullptr) {}

    explicit AllocatorPtr(Allocator *allocator) : allocatorPtr_(allocator) {}

    Allocator *operator->()
    {
        ASSERT((allocatorPtr_ == nullptr) || (allocatorPtr_->GetPurpose() == ALLOCATOR_PURPOSE));
        return allocatorPtr_;
    }

    AllocatorPtr &operator=(std::nullptr_t aNullptr) noexcept
    {
        allocatorPtr_ = aNullptr;
        return *this;
    }

    AllocatorPtr &operator=(Allocator *allocator)
    {
        allocatorPtr_ = allocator;
        return *this;
    }

    explicit operator Allocator *()
    {
        return allocatorPtr_;
    }

    explicit operator ObjectAllocatorBase *()
    {
        ASSERT(allocatorPtr_->GetPurpose() == AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT);
        return static_cast<ObjectAllocatorBase *>(allocatorPtr_);
    }

    ALWAYS_INLINE bool operator==(const AllocatorPtr &other)
    {
        return allocatorPtr_ == static_cast<Allocator *>(other);
    }

    ALWAYS_INLINE bool operator==(std::nullptr_t) noexcept
    {
        return allocatorPtr_ == nullptr;
    }

    ALWAYS_INLINE bool operator!=(std::nullptr_t) noexcept
    {
        return allocatorPtr_ != nullptr;
    }

    ObjectAllocatorBase *AsObjectAllocator()
    {
        ASSERT(ALLOCATOR_PURPOSE == AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT);
        return this->operator panda::mem::ObjectAllocatorBase *();
    }

    ~AllocatorPtr() = default;

    DEFAULT_COPY_SEMANTIC(AllocatorPtr);
    DEFAULT_NOEXCEPT_MOVE_SEMANTIC(AllocatorPtr);

protected:
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    Allocator *allocatorPtr_ = nullptr;
};

using InternalAllocatorPtr = AllocatorPtr<AllocatorPurpose::ALLOCATOR_PURPOSE_INTERNAL>;
using ObjectAllocatorPtr = AllocatorPtr<AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT>;

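// A caller-side sketch (hypothetical) of the purpose-checked pointer wrappers declared
// above; `runtimeInternalAllocator` is an assumed Allocator* with INTERNAL purpose:
//
//   InternalAllocatorPtr internal(runtimeInternalAllocator);
//   auto *buf = internal->AllocArray<uint8_t>(64);  // operator-> asserts the purpose matches
//   internal->Free(buf);
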
template <InternalAllocatorConfig CONFIG>
using InternalAllocatorT = AllocatorSingleT<InternalAllocator<CONFIG>, AllocatorPurpose::ALLOCATOR_PURPOSE_INTERNAL>;

template <MTModeT MT_MODE = MT_MODE_MULTI>
class ObjectAllocatorNoGen final : public ObjectAllocatorBase {
    using ObjectAllocator = RunSlotsAllocator<ObjectAllocConfig>;       // Allocator used for middle size allocations
    using LargeObjectAllocator = FreeListAllocator<ObjectAllocConfig>;  // Allocator used for large objects
    using HumongousObjectAllocator = HumongousObjAllocator<ObjectAllocConfig>;  // Allocator used for humongous objects

public:
    NO_MOVE_SEMANTIC(ObjectAllocatorNoGen);
    NO_COPY_SEMANTIC(ObjectAllocatorNoGen);

    explicit ObjectAllocatorNoGen(MemStatsType *memStats, bool createPygoteSpaceAllocator);

    ~ObjectAllocatorNoGen() final;

    [[nodiscard]] void *Allocate(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread,
                                 ObjMemInitPolicy objInit) final;

    [[nodiscard]] void *AllocateNonMovable(size_t size, Alignment align, panda::ManagedThread *thread,
                                           ObjMemInitPolicy objInit) final;

    void VisitAndRemoveAllPools(const MemVisitor &memVisitor) final;

    void VisitAndRemoveFreePools(const MemVisitor &memVisitor) final;

    void IterateOverObjects(const ObjectVisitor &objectVisitor) final;

    /// @brief Iterates over all objects in the object allocator
    void IterateRegularSizeObjects(const ObjectVisitor &objectVisitor) final;

    /// @brief Iterates over objects in all allocators except the object allocator
    void IterateNonRegularSizeObjects(const ObjectVisitor &objectVisitor) final;

    void FreeObjectsMovedToPygoteSpace() final;

    void Collect(const GCObjectVisitor &gcObjectVisitor, GCCollectMode collectMode) final;

    size_t GetRegularObjectMaxSize() final;

    size_t GetLargeObjectMaxSize() final;

    bool IsObjectInYoungSpace([[maybe_unused]] const ObjectHeader *obj) final
    {
        return false;
    }

    bool IsIntersectedWithYoung([[maybe_unused]] const MemRange &memRange) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: IsIntersectedWithYoung not applicable";
        return false;
    }

    bool IsObjectInNonMovableSpace([[maybe_unused]] const ObjectHeader *obj) final
    {
        return true;
    }

    bool HasYoungSpace() final
    {
        return false;
    }

    void PinObject([[maybe_unused]] ObjectHeader *object) final {}

    void UnpinObject([[maybe_unused]] ObjectHeader *object) final {}

    const std::vector<MemRange> &GetYoungSpaceMemRanges() final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: GetYoungSpaceMemRanges not applicable";
        UNREACHABLE();
    }

    std::vector<MarkBitmap *> &GetYoungSpaceBitmaps() final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: GetYoungSpaceBitmaps not applicable";
        UNREACHABLE();
    }

    void ResetYoungAllocator() final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: ResetYoungAllocator not applicable";
    }

    TLAB *CreateNewTLAB(panda::ManagedThread *thread) final;

    size_t GetTLABMaxAllocSize() final;

    bool IsTLABSupported() final
    {
        return false;
    }

    void IterateOverObjectsInRange([[maybe_unused]] MemRange memRange,
                                   [[maybe_unused]] const ObjectVisitor &objectVisitor) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: IterateOverObjectsInRange not implemented";
    }

    bool ContainObject(const ObjectHeader *obj) const final;

    bool IsLive(const ObjectHeader *obj) final;

    size_t VerifyAllocatorStatus() final
    {
        size_t failCount = 0;
        failCount += objectAllocator_->VerifyAllocator();
        // NOTE(yyang): add verify for large/humongous allocator
        return failCount;
    }

    [[nodiscard]] void *AllocateLocal(size_t /* size */, Alignment /* align */,
                                      panda::ManagedThread * /* thread */) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: AllocateLocal not supported";
        return nullptr;
    }

    HeapSpace *GetHeapSpace() override
    {
        return &heapSpace_;
    }

private:
    Alignment CalculateAllocatorAlignment(size_t align) final;

    ObjectAllocator *objectAllocator_ = nullptr;
    LargeObjectAllocator *largeObjectAllocator_ = nullptr;
    HumongousObjectAllocator *humongousObjectAllocator_ = nullptr;
    HeapSpace heapSpace_;
};

// Base class for all generational GCs
class ObjectAllocatorGenBase : public ObjectAllocatorBase {
public:
    explicit ObjectAllocatorGenBase(MemStatsType *memStats, GCCollectMode gcCollectMode,
                                    bool createPygoteSpaceAllocator);

    GenerationalSpaces *GetHeapSpace() override
    {
        return &heapSpaces_;
    }

    ~ObjectAllocatorGenBase() override = default;

    virtual void *AllocateTenured(size_t size) = 0;
    virtual void *AllocateTenuredWithoutLocks(size_t size) = 0;

    NO_COPY_SEMANTIC(ObjectAllocatorGenBase);
    NO_MOVE_SEMANTIC(ObjectAllocatorGenBase);

    /// Updates young space memory ranges, bitmaps, etc.
    virtual void UpdateSpaceData() = 0;

    /// Invalidates space memory ranges, bitmaps, etc.
    virtual void InvalidateSpaceData() final;

protected:
    static constexpr size_t YOUNG_ALLOC_MAX_SIZE = PANDA_TLAB_MAX_ALLOC_SIZE;  // Max allocation size in young space

    ALWAYS_INLINE std::vector<MemRange> &GetYoungRanges()
    {
        return ranges_;
    }

    ALWAYS_INLINE std::vector<MarkBitmap *> &GetYoungBitmaps()
    {
        return youngBitmaps_;
    }

    GenerationalSpaces heapSpaces_;  // NOLINT(misc-non-private-member-variables-in-classes)
private:
    std::vector<MemRange> ranges_;            // Ranges for young space
    std::vector<MarkBitmap *> youngBitmaps_;  // Bitmaps for young regions
};

template <MTModeT MT_MODE = MT_MODE_MULTI>
class ObjectAllocatorGen final : public ObjectAllocatorGenBase {
    // NOTE(dtrubenkov): create a command line argument for this
    static constexpr size_t DEFAULT_YOUNG_TLAB_SIZE = 4_KB;  // TLAB size for young gen

    using YoungGenAllocator = BumpPointerAllocator<ObjectAllocConfigWithCrossingMap,
                                                   BumpPointerAllocatorLockConfig::ParameterizedLock<MT_MODE>, true>;
    using ObjectAllocator =
        RunSlotsAllocator<ObjectAllocConfigWithCrossingMap>;  // Allocator used for middle size allocations
    using LargeObjectAllocator =
        FreeListAllocator<ObjectAllocConfigWithCrossingMap>;  // Allocator used for large objects
    using HumongousObjectAllocator =
        HumongousObjAllocator<ObjectAllocConfigWithCrossingMap>;  // Allocator used for humongous objects

public:
    NO_MOVE_SEMANTIC(ObjectAllocatorGen);
    NO_COPY_SEMANTIC(ObjectAllocatorGen);

    explicit ObjectAllocatorGen(MemStatsType *memStats, bool createPygoteSpaceAllocator);

    ~ObjectAllocatorGen() final;

    [[nodiscard]] void *Allocate(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread,
                                 ObjMemInitPolicy objInit) final;

    [[nodiscard]] void *AllocateNonMovable(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread,
                                           ObjMemInitPolicy objInit) final;

    void *AllocateTenured(size_t size) final
    {
        return AllocateTenuredImpl<true>(size);
    }

    void *AllocateTenuredWithoutLocks(size_t size) final
    {
        return AllocateTenuredImpl<false>(size);
    }

    void VisitAndRemoveAllPools(const MemVisitor &memVisitor) final;

    void VisitAndRemoveFreePools(const MemVisitor &memVisitor) final;

    void IterateOverYoungObjects(const ObjectVisitor &objectVisitor) final;

    void IterateOverTenuredObjects(const ObjectVisitor &objectVisitor) final;

    void IterateOverObjects(const ObjectVisitor &objectVisitor) final;

    /// @brief Iterates over all objects in the object allocator
    void IterateRegularSizeObjects(const ObjectVisitor &objectVisitor) final;

    /// @brief Iterates over objects in all allocators except the object allocator
    void IterateNonRegularSizeObjects(const ObjectVisitor &objectVisitor) final;

    void FreeObjectsMovedToPygoteSpace() final;

    void Collect(const GCObjectVisitor &gcObjectVisitor, GCCollectMode collectMode) final;

    size_t GetRegularObjectMaxSize() final;

    size_t GetLargeObjectMaxSize() final;

    bool IsObjectInYoungSpace(const ObjectHeader *obj) final;

    void PinObject([[maybe_unused]] ObjectHeader *object) final
    {
        ASSERT(!IsObjectInYoungSpace(object));
    }

    void UnpinObject([[maybe_unused]] ObjectHeader *object) final
    {
        ASSERT(!IsObjectInYoungSpace(object));
    }

    bool IsIntersectedWithYoung(const MemRange &memRange) final;

    bool IsObjectInNonMovableSpace(const ObjectHeader *obj) final;

    bool HasYoungSpace() final;

    const std::vector<MemRange> &GetYoungSpaceMemRanges() final;

    std::vector<MarkBitmap *> &GetYoungSpaceBitmaps() final;

    void ResetYoungAllocator() final;

    TLAB *CreateNewTLAB([[maybe_unused]] panda::ManagedThread *thread) final;

    size_t GetTLABMaxAllocSize() final;

    bool IsTLABSupported() final
    {
        return true;
    }

    void IterateOverObjectsInRange(MemRange memRange, const ObjectVisitor &objectVisitor) final;

    bool ContainObject(const ObjectHeader *obj) const final;

    bool IsLive(const ObjectHeader *obj) final;

    size_t VerifyAllocatorStatus() final
    {
        size_t failCount = 0;
        failCount += objectAllocator_->VerifyAllocator();
        // NOTE(yyang): add verify for large/humongous allocator
        return failCount;
    }

    [[nodiscard]] void *AllocateLocal(size_t /* size */, Alignment /* align */,
                                      panda::ManagedThread * /* thread */) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorGen: AllocateLocal not supported";
        return nullptr;
    }

    static constexpr size_t GetYoungAllocMaxSize()
    {
        return YOUNG_ALLOC_MAX_SIZE;
    }

    void UpdateSpaceData() final;

private:
    Alignment CalculateAllocatorAlignment(size_t align) final;

    YoungGenAllocator *youngGenAllocator_ = nullptr;
    ObjectAllocator *objectAllocator_ = nullptr;
    LargeObjectAllocator *largeObjectAllocator_ = nullptr;
    HumongousObjectAllocator *humongousObjectAllocator_ = nullptr;
    MemStatsType *memStats_ = nullptr;
    ObjectAllocator *nonMovableObjectAllocator_ = nullptr;
    LargeObjectAllocator *largeNonMovableObjectAllocator_ = nullptr;
    size_t tlabSize_ = DEFAULT_YOUNG_TLAB_SIZE;

    template <bool NEED_LOCK = true>
    void *AllocateTenuredImpl(size_t size);
};

template <GCType GC_TYPE, MTModeT MT_MODE = MT_MODE_MULTI>
class AllocConfig {
};

}  // namespace panda::mem

#endif  // RUNTIME_MEM_ALLOCATOR_H