/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef RUNTIME_MEM_ALLOCATOR_H
#define RUNTIME_MEM_ALLOCATOR_H

#include <functional>

#include "libpandabase/mem/code_allocator.h"
#include "libpandabase/mem/mem.h"
#include "libpandabase/mem/pool_map.h"
#include "libpandabase/utils/logger.h"
#include "libpandabase/macros.h"
#include "runtime/mem/bump-allocator.h"
#include "runtime/mem/freelist_allocator.h"
#include "runtime/mem/gc/bitmap.h"
#include "runtime/mem/gc/gc_types.h"
#include "runtime/mem/humongous_obj_allocator.h"
#include "runtime/mem/internal_allocator.h"
#include "runtime/mem/runslots_allocator.h"
#include "runtime/mem/pygote_space_allocator.h"
#include "runtime/mem/heap_space.h"

namespace panda {
class ObjectHeader;
}  // namespace panda

namespace panda {
class ManagedThread;
}  // namespace panda

namespace panda {
class BaseClass;
}  // namespace panda

namespace panda::mem {

class ObjectAllocConfigWithCrossingMap;
class ObjectAllocConfig;
class TLAB;

/**
 * AllocatorPurpose and GCCollectMode indicate whether the GC should collect from a given allocator
 */
enum class AllocatorPurpose {
    ALLOCATOR_PURPOSE_OBJECT,    // Allocator for objects
    ALLOCATOR_PURPOSE_INTERNAL,  // Space for runtime internal needs
};

template <AllocatorType>
class AllocatorTraits {
};

template <>
class AllocatorTraits<AllocatorType::RUNSLOTS_ALLOCATOR> {
    using AllocType = RunSlotsAllocator<ObjectAllocConfig>;
    static constexpr bool HAS_FREE {true};  // indicates allocator can free
};

template <typename T, AllocScope AllocScopeT>
class AllocatorAdapter;

class Allocator {
public:
    template <typename T, AllocScope AllocScopeT = AllocScope::GLOBAL>
    using AdapterType = AllocatorAdapter<T, AllocScopeT>;

    NO_COPY_SEMANTIC(Allocator);
    NO_MOVE_SEMANTIC(Allocator);
    explicit Allocator(MemStatsType *mem_stats, AllocatorPurpose purpose, GCCollectMode gc_collect_mode)
        : mem_stats_(mem_stats), allocator_purpose_(purpose), gc_collect_mode_(gc_collect_mode)
    {
    }
    virtual ~Allocator() = 0;

    ALWAYS_INLINE AllocatorPurpose GetPurpose() const
    {
        return allocator_purpose_;
    }

    ALWAYS_INLINE GCCollectMode GetCollectMode() const
    {
        return gc_collect_mode_;
    }

    ALWAYS_INLINE MemStatsType *GetMemStats() const
    {
        return mem_stats_;
    }

    [[nodiscard]] void *Alloc(size_t size, Alignment align = DEFAULT_ALIGNMENT)
    {
        return Allocate(size, align, nullptr);
    }

    [[nodiscard]] void *AllocLocal(size_t size, Alignment align = DEFAULT_ALIGNMENT)
    {
        return AllocateLocal(size, align, nullptr);
    }

    [[nodiscard]] virtual void *Allocate(size_t size, Alignment align,
                                         [[maybe_unused]] panda::ManagedThread *thread) = 0;

    [[nodiscard]] virtual void *AllocateLocal(size_t size, Alignment align,
                                              [[maybe_unused]] panda::ManagedThread *thread) = 0;

    [[nodiscard]] virtual void *AllocateNonMovable(size_t size, Alignment align, panda::ManagedThread *thread) = 0;

    template <class T>
    [[nodiscard]] T *AllocArray(size_t size)
    {
        return static_cast<T *>(this->Allocate(sizeof(T) * size, GetAlignment<T>(), nullptr));
    }

    template <class T>
    [[nodiscard]] T *AllocArrayLocal(size_t size)
    {
        return static_cast<T *>(this->AllocateLocal(sizeof(T) * size, GetAlignment<T>(), nullptr));
    }

    template <class T>
    void Delete(T *ptr)
    {
        if (ptr == nullptr) {
            return;
        }
        // NOLINTNEXTLINE(readability-braces-around-statements,bugprone-suspicious-semicolon)
        if constexpr (std::is_class_v<T>) {
            ptr->~T();
        }
        Free(ptr);
    }

    template <typename T>
    void DeleteArray(T *data)
    {
        if (data == nullptr) {
            return;
        }
        static constexpr size_t SIZE_BEFORE_DATA_OFFSET =
            AlignUp(sizeof(size_t), GetAlignmentInBytes(GetAlignment<T>()));
        void *p = ToVoidPtr(ToUintPtr(data) - SIZE_BEFORE_DATA_OFFSET);
        size_t size = *static_cast<size_t *>(p);
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (std::is_class_v<T>) {
            for (size_t i = 0; i < size; ++i, ++data) {
                data->~T();
            }
        }
        Free(p);
    }

    virtual void Free(void *mem) = 0;

    virtual void VisitAndRemoveAllPools(const MemVisitor &mem_visitor) = 0;

    virtual void VisitAndRemoveFreePools(const MemVisitor &mem_visitor) = 0;

    virtual void IterateOverYoungObjects([[maybe_unused]] const ObjectVisitor &object_visitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateOverYoungObjects" << std::endl;
    }

    virtual void IterateOverTenuredObjects([[maybe_unused]] const ObjectVisitor &object_visitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateOverTenuredObjects" << std::endl;
    }

    /**
     * \brief Iterates over all objects in the object allocator
     */
    virtual void IterateRegularSizeObjects([[maybe_unused]] const ObjectVisitor &object_visitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateRegularSizeObjects";
    }

    /**
     * \brief Iterates over objects in all allocators except the object allocator
     */
    virtual void IterateNonRegularSizeObjects([[maybe_unused]] const ObjectVisitor &object_visitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateNonRegularSizeObjects";
    }

    virtual void FreeObjectsMovedToPygoteSpace()
    {
        LOG(FATAL, ALLOC) << "Allocator::FreeObjectsMovedToPygoteSpace";
    }

    virtual void IterateOverObjectsInRange(MemRange mem_range, const ObjectVisitor &object_visitor) = 0;

    virtual void IterateOverObjects(const ObjectVisitor &object_visitor) = 0;

    template <AllocScope AllocScopeT = AllocScope::GLOBAL>
    AllocatorAdapter<void, AllocScopeT> Adapter();

    template <typename T, typename... Args>
    std::enable_if_t<!std::is_array_v<T>, T *> New(Args &&... args)
    {
        void *p = Alloc(sizeof(T), GetAlignment<T>());
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        new (p) T(std::forward<Args>(args)...);  // NOLINT(bugprone-throw-keyword-missing)
        return reinterpret_cast<T *>(p);
    }

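    // Layout used by the unbounded-array overloads of New/NewLocal and by DeleteArray:
    // the element count (a size_t) is stored at the start of the allocation, the array
    // data follows at SIZE_BEFORE_DATA_OFFSET (sizeof(size_t) aligned up to the element
    // alignment), and DeleteArray steps back by the same offset to recover the count.
    //
    //   | size_t count | padding | element_0 | element_1 | ... |
    //   ^p                       ^data = p + SIZE_BEFORE_DATA_OFFSET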
    template <typename T>
    std::enable_if_t<is_unbounded_array_v<T>, std::remove_extent_t<T> *> New(size_t size)
    {
        static constexpr size_t SIZE_BEFORE_DATA_OFFSET =
            AlignUp(sizeof(size_t), GetAlignmentInBytes(GetAlignment<T>()));
        using element_type = std::remove_extent_t<T>;
        void *p = Alloc(SIZE_BEFORE_DATA_OFFSET + sizeof(element_type) * size, GetAlignment<T>());
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        *static_cast<size_t *>(p) = size;
        auto *data = ToNativePtr<element_type>(ToUintPtr(p) + SIZE_BEFORE_DATA_OFFSET);
        element_type *current_element = data;
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        for (size_t i = 0; i < size; ++i, ++current_element) {
            new (current_element) element_type();
        }
        return data;
    }

    template <typename T, typename... Args>
    std::enable_if_t<!std::is_array_v<T>, T *> NewLocal(Args &&... args)
    {
        void *p = AllocLocal(sizeof(T), GetAlignment<T>());
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        new (p) T(std::forward<Args>(args)...);  // NOLINT(bugprone-throw-keyword-missing)
        return reinterpret_cast<T *>(p);
    }

    template <typename T>
    std::enable_if_t<is_unbounded_array_v<T>, std::remove_extent_t<T> *> NewLocal(size_t size)
    {
        static constexpr size_t SIZE_BEFORE_DATA_OFFSET =
            AlignUp(sizeof(size_t), GetAlignmentInBytes(GetAlignment<T>()));
        using element_type = std::remove_extent_t<T>;
        void *p = AllocLocal(SIZE_BEFORE_DATA_OFFSET + sizeof(element_type) * size, GetAlignment<T>());
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        *static_cast<size_t *>(p) = size;
        auto *data = ToNativePtr<element_type>(ToUintPtr(p) + SIZE_BEFORE_DATA_OFFSET);
        element_type *current_element = data;
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        for (size_t i = 0; i < size; ++i, ++current_element) {
            new (current_element) element_type();
        }
        return data;
    }

    virtual void *AllocateInLargeAllocator([[maybe_unused]] size_t size, [[maybe_unused]] Alignment align,
                                           [[maybe_unused]] BaseClass *cls)
    {
        return nullptr;
    }

#if defined(TRACK_INTERNAL_ALLOCATIONS)
    virtual void Dump() {}
#endif

protected:
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    MemStatsType *mem_stats_;

private:
    AllocatorPurpose allocator_purpose_;
    GCCollectMode gc_collect_mode_;
};
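
// Usage sketch (illustrative only; `allocator` and `Foo` are hypothetical names, not
// symbols defined in this header). Single objects created with New<T>() are released
// with Delete(), while unbounded arrays created with New<T[]>() must be released with
// DeleteArray(), which reads back the element count stored in front of the array data:
//
//     Allocator *allocator = ...;              // some concrete Allocator implementation
//     Foo *one = allocator->New<Foo>();        // placement-constructs a single Foo
//     Foo *many = allocator->New<Foo[]>(16U);  // default-constructs 16 elements
//     allocator->Delete(one);                  // runs ~Foo() and frees the memory
//     allocator->DeleteArray(many);            // runs ~Foo() per element, then frees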

class ObjectAllocatorBase : public Allocator {
protected:
    using PygoteAllocator = PygoteSpaceAllocator<ObjectAllocConfig>;  // Allocator for pygote space

    /**
     * \brief Adds new memory pools to object_allocator and allocates memory in them
     */
    template <typename AllocT, bool need_lock = true>
    inline void *AddPoolsAndAlloc(size_t size, Alignment align, AllocT *object_allocator, size_t pool_size,
                                  SpaceType space_type, HeapSpace *heap_space);

    /**
     * Tries to allocate memory for the object; if that fails, adds new memory pools and tries again
     * @param size - size of the object in bytes
     * @param align - alignment
     * @param object_allocator - allocator for the object
     * @param pool_size - size of a memory pool for the specified allocator
     * @param space_type - SpaceType of the object
     * @return pointer to the allocated memory or nullptr on failure
     */
    template <typename AllocT, bool need_lock = true>
    inline void *AllocateSafe(size_t size, Alignment align, AllocT *object_allocator, size_t pool_size,
                              SpaceType space_type, HeapSpace *heap_space);
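    // Sketch of the intended call pattern for a derived allocator's Allocate() (illustrative
    // only; POOL_SIZE is a placeholder and the space type shown is just an example value):
    //
    //     void *mem = AllocateSafe(size, align, object_allocator_, POOL_SIZE,
    //                              SpaceType::SPACE_TYPE_OBJECT, GetHeapSpace());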

public:
    ObjectAllocatorBase() = delete;
    NO_COPY_SEMANTIC(ObjectAllocatorBase);
    NO_MOVE_SEMANTIC(ObjectAllocatorBase);

    explicit ObjectAllocatorBase(MemStatsType *mem_stats, GCCollectMode gc_collect_mode,
                                 bool create_pygote_space_allocator);

    ~ObjectAllocatorBase() override;

    /**
     * Iterates over all objects and reclaims memory for objects reported as true by gc_object_visitor
     * @param gc_object_visitor - function which returns true for an ObjectHeader whose memory can be
     * reclaimed
     */
    virtual void Collect(const GCObjectVisitor &gc_object_visitor, GCCollectMode collect_mode) = 0;

    /**
     * Returns the max size for regular size objects
     * @return max size in bytes for regular size objects
     */
    virtual size_t GetRegularObjectMaxSize() = 0;

    /**
     * Returns the max size for large objects
     * @return max size in bytes for large objects
     */
    virtual size_t GetLargeObjectMaxSize() = 0;

    /**
     * Checks if the address is in the young space
     * @param address
     * @return true if \param address is in the young space
     */
    virtual bool IsAddressInYoungSpace(uintptr_t address) = 0;

    /**
     * Checks if \param mem_range intersects the young space
     * @param mem_range
     * @return true if \param mem_range intersects the young space
     */
    virtual bool IsIntersectedWithYoung(const MemRange &mem_range) = 0;

    /**
     * Checks if the object is in the non-movable space
     * @param obj
     * @return true if \param obj is in the non-movable space
     */
    virtual bool IsObjectInNonMovableSpace(const ObjectHeader *obj) = 0;

    /**
     * @return true if the allocator has a young space
     */
    virtual bool HasYoungSpace() = 0;

    /**
     * Gets the young space memory ranges
     * \note PandaVector can't be used here
     * @return young space memory ranges
     */
    virtual const std::vector<MemRange> &GetYoungSpaceMemRanges() = 0;

    virtual std::vector<MarkBitmap *> &GetYoungSpaceBitmaps() = 0;

    virtual void ResetYoungAllocator() = 0;

    virtual TLAB *CreateNewTLAB(panda::ManagedThread *thread) = 0;

    virtual size_t GetTLABMaxAllocSize() = 0;

    virtual bool IsTLABSupported() = 0;

    /**
     * \brief Checks if the object allocator contains the object starting at address obj
     */
    virtual bool ContainObject([[maybe_unused]] const ObjectHeader *obj) const = 0;

    /**
     * \brief Checks if the object obj is live: obj is already allocated and
     * not yet collected.
     */
    virtual bool IsLive([[maybe_unused]] const ObjectHeader *obj) = 0;

    /**
     * \brief Checks if the current allocators' allocation state is valid.
     */
    virtual size_t VerifyAllocatorStatus() = 0;

    virtual HeapSpace *GetHeapSpace() = 0;

    PygoteAllocator *GetPygoteSpaceAllocator()
    {
        return pygote_space_allocator_;
    }

    const PygoteAllocator *GetPygoteSpaceAllocator() const
    {
        return pygote_space_allocator_;
    }

    void DisablePygoteAlloc()
    {
        pygote_alloc_enabled_ = false;
    }

    bool IsPygoteAllocEnabled() const
    {
        ASSERT(!pygote_alloc_enabled_ || pygote_space_allocator_ != nullptr);
        return pygote_alloc_enabled_;
    }

    static size_t GetObjectSpaceFreeBytes()
    {
        return PoolManager::GetMmapMemPool()->GetObjectSpaceFreeBytes();
    }

    bool HaveEnoughPoolsInObjectSpace(size_t pools_num) const;

protected:
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    PygoteAllocator *pygote_space_allocator_ = nullptr;
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    bool pygote_alloc_enabled_ = false;

private:
    void Free([[maybe_unused]] void *mem) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorBase shouldn't have Free";
    }
};

/**
 * Template wrapper for a single underlying allocator
 * @tparam AllocT
 */
template <typename AllocT, AllocatorPurpose allocatorPurpose>
class AllocatorSingleT final : public Allocator {
public:
    // NOLINTNEXTLINE(readability-magic-numbers)
    explicit AllocatorSingleT(MemStatsType *mem_stats)
        : Allocator(mem_stats, allocatorPurpose, GCCollectMode::GC_NONE), allocator_(mem_stats)
    {
    }
    ~AllocatorSingleT() final = default;
    NO_COPY_SEMANTIC(AllocatorSingleT);
    DEFAULT_NOEXCEPT_MOVE_SEMANTIC(AllocatorSingleT);

    [[nodiscard]] void *Allocate(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final
    {
        return allocator_.Alloc(size, align);
    }

    [[nodiscard]] void *AllocateLocal(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final
    {
        return allocator_.AllocLocal(size, align);
    }

    [[nodiscard]] void *AllocateNonMovable([[maybe_unused]] size_t size, [[maybe_unused]] Alignment align,
                                           [[maybe_unused]] panda::ManagedThread *thread) final
    {
        LOG(FATAL, ALLOC) << "AllocatorSingleT shouldn't have AllocateNonMovable";
        return nullptr;
    }

    void Free(void *mem) final
    {
        allocator_.Free(mem);
    }

    void VisitAndRemoveAllPools(const MemVisitor &mem_visitor) final
    {
        allocator_.VisitAndRemoveAllPools(mem_visitor);
    }

    void VisitAndRemoveFreePools(const MemVisitor &mem_visitor) final
    {
        allocator_.VisitAndRemoveFreePools(mem_visitor);
    }

    void IterateOverObjectsInRange([[maybe_unused]] MemRange mem_range,
                                   [[maybe_unused]] const ObjectVisitor &object_visitor) final
    {
        LOG(FATAL, ALLOC) << "IterateOverObjectsInRange not implemented for AllocatorSingleT";
    }

    void IterateOverObjects([[maybe_unused]] const ObjectVisitor &object_visitor) final
    {
        LOG(FATAL, ALLOC) << "IterateOverObjects not implemented for AllocatorSingleT";
    }

#if defined(TRACK_INTERNAL_ALLOCATIONS)
    void Dump() override
    {
        allocator_.Dump();
    }
#endif

private:
    AllocT allocator_;
};

/**
 * Pointer wrapper class. It checks that the purpose of the wrapped allocator matches the expected one.
 * @tparam allocatorPurpose - purpose of the allocator
 */
template <AllocatorPurpose allocatorPurpose>
class AllocatorPtr {
public:
    AllocatorPtr() = default;
    // NOLINTNEXTLINE(google-explicit-constructor)
    AllocatorPtr(std::nullptr_t a_nullptr) noexcept : allocator_ptr_(a_nullptr) {}

    explicit AllocatorPtr(Allocator *allocator) : allocator_ptr_(allocator) {}

    Allocator *operator->()
    {
        ASSERT((allocator_ptr_ == nullptr) || (allocator_ptr_->GetPurpose() == allocatorPurpose));
        return allocator_ptr_;
    }

    AllocatorPtr &operator=(std::nullptr_t a_nullptr) noexcept
    {
        allocator_ptr_ = a_nullptr;
        return *this;
    }

    AllocatorPtr &operator=(Allocator *allocator)
    {
        allocator_ptr_ = allocator;
        return *this;
    }

    explicit operator Allocator *()
    {
        return allocator_ptr_;
    }

    explicit operator ObjectAllocatorBase *()
    {
        ASSERT(allocator_ptr_->GetPurpose() == AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT);
        return static_cast<ObjectAllocatorBase *>(allocator_ptr_);
    }

    ALWAYS_INLINE bool operator==(const AllocatorPtr &other)
    {
        return allocator_ptr_ == static_cast<Allocator *>(other);
    }

    ALWAYS_INLINE bool operator==(std::nullptr_t) noexcept
    {
        return allocator_ptr_ == nullptr;
    }

    ALWAYS_INLINE bool operator!=(std::nullptr_t) noexcept
    {
        return allocator_ptr_ != nullptr;
    }

    ObjectAllocatorBase *AsObjectAllocator()
    {
        ASSERT(allocatorPurpose == AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT);
        return this->operator panda::mem::ObjectAllocatorBase *();
    }

    ~AllocatorPtr() = default;

    DEFAULT_COPY_SEMANTIC(AllocatorPtr);
    DEFAULT_NOEXCEPT_MOVE_SEMANTIC(AllocatorPtr);

protected:
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    Allocator *allocator_ptr_ = nullptr;
};

using InternalAllocatorPtr = AllocatorPtr<AllocatorPurpose::ALLOCATOR_PURPOSE_INTERNAL>;
using ObjectAllocatorPtr = AllocatorPtr<AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT>;

template <InternalAllocatorConfig Config>
using InternalAllocatorT = AllocatorSingleT<InternalAllocator<Config>, AllocatorPurpose::ALLOCATOR_PURPOSE_INTERNAL>;
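
// Usage sketch (illustrative only; the concrete allocator instance is hypothetical).
// AllocatorPtr asserts on every dereference that the wrapped allocator's purpose matches
// the template parameter, and an object-purpose pointer can be downcast explicitly:
//
//     ObjectAllocatorPtr object_allocator(some_object_allocator);  // Allocator * with OBJECT purpose
//     void *mem = object_allocator->Alloc(64U);                    // purpose checked in operator->
//     ObjectAllocatorBase *base = object_allocator.AsObjectAllocator();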

template <MTModeT MTMode = MT_MODE_MULTI>
class ObjectAllocatorNoGen final : public ObjectAllocatorBase {
    using ObjectAllocator = RunSlotsAllocator<ObjectAllocConfig>;       // Allocator used for middle size allocations
    using LargeObjectAllocator = FreeListAllocator<ObjectAllocConfig>;  // Allocator used for large objects
    using HumongousObjectAllocator = HumongousObjAllocator<ObjectAllocConfig>;  // Allocator used for humongous objects

public:
    NO_MOVE_SEMANTIC(ObjectAllocatorNoGen);
    NO_COPY_SEMANTIC(ObjectAllocatorNoGen);

    explicit ObjectAllocatorNoGen(MemStatsType *mem_stats, bool create_pygote_space_allocator);

    ~ObjectAllocatorNoGen() final;

    [[nodiscard]] void *Allocate(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final;

    [[nodiscard]] void *AllocateNonMovable(size_t size, Alignment align, panda::ManagedThread *thread) final;

    void VisitAndRemoveAllPools(const MemVisitor &mem_visitor) final;

    void VisitAndRemoveFreePools(const MemVisitor &mem_visitor) final;

    void IterateOverObjects(const ObjectVisitor &object_visitor) final;

    /**
     * \brief Iterates over all objects in the object allocator
     */
    void IterateRegularSizeObjects(const ObjectVisitor &object_visitor) final;

    /**
     * \brief Iterates over objects in all allocators except the object allocator
     */
    void IterateNonRegularSizeObjects(const ObjectVisitor &object_visitor) final;

    void FreeObjectsMovedToPygoteSpace() final;

    void Collect(const GCObjectVisitor &gc_object_visitor, GCCollectMode collect_mode) final;

    size_t GetRegularObjectMaxSize() final;

    size_t GetLargeObjectMaxSize() final;

    bool IsAddressInYoungSpace([[maybe_unused]] uintptr_t address) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: IsAddressInYoungSpace not applicable";
        return false;
    }

    bool IsIntersectedWithYoung([[maybe_unused]] const MemRange &mem_range) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: IsIntersectedWithYoung not applicable";
        return false;
    }

    bool IsObjectInNonMovableSpace([[maybe_unused]] const ObjectHeader *obj) final
    {
        return true;
    }

    bool HasYoungSpace() final
    {
        return false;
    }

    const std::vector<MemRange> &GetYoungSpaceMemRanges() final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: GetYoungSpaceMemRanges not applicable";
        UNREACHABLE();
    }

    std::vector<MarkBitmap *> &GetYoungSpaceBitmaps() final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: GetYoungBitmaps not applicable";
        UNREACHABLE();
    }

    void ResetYoungAllocator() final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: ResetYoungAllocator not applicable";
    }

    TLAB *CreateNewTLAB(panda::ManagedThread *thread) final;

    size_t GetTLABMaxAllocSize() final;

    bool IsTLABSupported() final
    {
        return false;
    }

    void IterateOverObjectsInRange([[maybe_unused]] MemRange mem_range,
                                   [[maybe_unused]] const ObjectVisitor &object_visitor) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: IterateOverObjectsInRange not implemented";
    }

    bool ContainObject(const ObjectHeader *obj) const final;

    bool IsLive(const ObjectHeader *obj) final;

    size_t VerifyAllocatorStatus() final
    {
        size_t fail_count = 0;
        fail_count += object_allocator_->VerifyAllocator();
        // TODO(yyang): add verify for large/humongous allocator
        return fail_count;
    }

    [[nodiscard]] void *AllocateLocal(size_t /* size */, Alignment /* align */,
                                      panda::ManagedThread * /* thread */) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: AllocateLocal not supported";
        return nullptr;
    }

    HeapSpace *GetHeapSpace() override
    {
        return &heap_space_;
    }

private:
    ObjectAllocator *object_allocator_ = nullptr;
    LargeObjectAllocator *large_object_allocator_ = nullptr;
    HumongousObjectAllocator *humongous_object_allocator_ = nullptr;
    HeapSpace heap_space_;
};

// Base class for object allocators used by generational GCs
class ObjectAllocatorGenBase : public ObjectAllocatorBase {
public:
    explicit ObjectAllocatorGenBase(MemStatsType *mem_stats, GCCollectMode gc_collect_mode,
                                    bool create_pygote_space_allocator);

    HeapSpace *GetHeapSpace() override
    {
        return &heap_spaces_;
    }

    ~ObjectAllocatorGenBase() override = default;

    virtual void *AllocateTenured(size_t size) = 0;
    virtual void *AllocateTenuredWithoutLocks(size_t size) = 0;

    NO_COPY_SEMANTIC(ObjectAllocatorGenBase);
    NO_MOVE_SEMANTIC(ObjectAllocatorGenBase);

    /**
     * Updates young space mem ranges, bitmaps etc
     */
    virtual void UpdateSpaceData() = 0;

    /**
     * Invalidates space mem ranges, bitmaps etc
     */
    virtual void InvalidateSpaceData() final;

protected:
    static constexpr size_t YOUNG_ALLOC_MAX_SIZE = PANDA_TLAB_MAX_ALLOC_SIZE;  // max size of allocation in young space

    ALWAYS_INLINE std::vector<MemRange> &GetYoungRanges()
    {
        return ranges_;
    }

    ALWAYS_INLINE std::vector<MarkBitmap *> &GetYoungBitmaps()
    {
        return young_bitmaps_;
    }

    GenerationalSpaces heap_spaces_;  // NOLINT(misc-non-private-member-variables-in-classes)
private:
    std::vector<MemRange> ranges_;             // Ranges for young space
    std::vector<MarkBitmap *> young_bitmaps_;  // Bitmaps for young regions
};

template <MTModeT MTMode = MT_MODE_MULTI>
class ObjectAllocatorGen final : public ObjectAllocatorGenBase {
    // TODO(dtrubenkov): create a command line argument for this
    static constexpr size_t YOUNG_TLAB_SIZE = 4_KB;  // TLAB size for young gen

    using YoungGenAllocator = BumpPointerAllocator<ObjectAllocConfigWithCrossingMap,
                                                   BumpPointerAllocatorLockConfig::ParameterizedLock<MTMode>, true>;
    using ObjectAllocator =
        RunSlotsAllocator<ObjectAllocConfigWithCrossingMap>;  // Allocator used for middle size allocations
    using LargeObjectAllocator =
        FreeListAllocator<ObjectAllocConfigWithCrossingMap>;  // Allocator used for large objects
    using HumongousObjectAllocator =
        HumongousObjAllocator<ObjectAllocConfigWithCrossingMap>;  // Allocator used for humongous objects

public:
    NO_MOVE_SEMANTIC(ObjectAllocatorGen);
    NO_COPY_SEMANTIC(ObjectAllocatorGen);

    explicit ObjectAllocatorGen(MemStatsType *mem_stats, bool create_pygote_space_allocator);

    ~ObjectAllocatorGen() final;

    void *Allocate(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final;

    void *AllocateNonMovable(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final;

    void *AllocateTenured(size_t size) final
    {
        return AllocateTenuredImpl<true>(size);
    }

    void *AllocateTenuredWithoutLocks(size_t size) final
    {
        return AllocateTenuredImpl<false>(size);
    }

    void VisitAndRemoveAllPools(const MemVisitor &mem_visitor) final;

    void VisitAndRemoveFreePools(const MemVisitor &mem_visitor) final;

    void IterateOverYoungObjects(const ObjectVisitor &object_visitor) final;

    void IterateOverTenuredObjects(const ObjectVisitor &object_visitor) final;

    void IterateOverObjects(const ObjectVisitor &object_visitor) final;

    /**
     * \brief Iterates over all objects in the object allocator
     */
    void IterateRegularSizeObjects(const ObjectVisitor &object_visitor) final;

    /**
     * \brief Iterates over objects in all allocators except the object allocator
     */
    void IterateNonRegularSizeObjects(const ObjectVisitor &object_visitor) final;

    void FreeObjectsMovedToPygoteSpace() final;

    void Collect(const GCObjectVisitor &gc_object_visitor, GCCollectMode collect_mode) final;

    size_t GetRegularObjectMaxSize() final;

    size_t GetLargeObjectMaxSize() final;

    bool IsAddressInYoungSpace(uintptr_t address) final;

    bool IsIntersectedWithYoung(const MemRange &mem_range) final;

    bool IsObjectInNonMovableSpace(const ObjectHeader *obj) final;

    bool HasYoungSpace() final;

    const std::vector<MemRange> &GetYoungSpaceMemRanges() final;

    std::vector<MarkBitmap *> &GetYoungSpaceBitmaps() final;

    void ResetYoungAllocator() final;

    TLAB *CreateNewTLAB([[maybe_unused]] panda::ManagedThread *thread) final;

    size_t GetTLABMaxAllocSize() final;

    bool IsTLABSupported() final
    {
        return true;
    }

    void IterateOverObjectsInRange(MemRange mem_range, const ObjectVisitor &object_visitor) final;

    bool ContainObject(const ObjectHeader *obj) const final;

    bool IsLive(const ObjectHeader *obj) final;

    size_t VerifyAllocatorStatus() final
    {
        size_t fail_count = 0;
        fail_count += object_allocator_->VerifyAllocator();
        // TODO(yyang): add verify for large/humongous allocator
        return fail_count;
    }

    [[nodiscard]] void *AllocateLocal(size_t /* size */, Alignment /* align */,
                                      panda::ManagedThread * /* thread */) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorGen: AllocateLocal not supported";
        return nullptr;
    }

    static constexpr size_t GetYoungAllocMaxSize()
    {
        return YOUNG_ALLOC_MAX_SIZE;
    }

    void UpdateSpaceData() final;

private:
    YoungGenAllocator *young_gen_allocator_ = nullptr;
    ObjectAllocator *object_allocator_ = nullptr;
    LargeObjectAllocator *large_object_allocator_ = nullptr;
    HumongousObjectAllocator *humongous_object_allocator_ = nullptr;
    MemStatsType *mem_stats_ = nullptr;
    ObjectAllocator *non_movable_object_allocator_ = nullptr;
    LargeObjectAllocator *large_non_movable_object_allocator_ = nullptr;

    template <bool need_lock = true>
    void *AllocateTenuredImpl(size_t size);
};

template <GCType gcType, MTModeT MTMode = MT_MODE_MULTI>
class AllocConfig {
};

}  // namespace panda::mem

#endif  // RUNTIME_MEM_ALLOCATOR_H