/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RUNTIME_INCLUDE_MEM_ALLOCATOR_H_
#define PANDA_RUNTIME_INCLUDE_MEM_ALLOCATOR_H_

#include <functional>

#include "libpandabase/mem/code_allocator.h"
#include "libpandabase/mem/mem.h"
#include "libpandabase/mem/pool_map.h"
#include "libpandabase/utils/logger.h"
#include "libpandabase/macros.h"
#include "runtime/mem/bump-allocator.h"
#include "runtime/mem/freelist_allocator.h"
#include "runtime/mem/gc/gc_types.h"
#include "runtime/mem/humongous_obj_allocator.h"
#include "runtime/mem/internal_allocator.h"
#include "runtime/mem/runslots_allocator.h"
#include "runtime/mem/pygote_space_allocator.h"

namespace panda {
class ObjectHeader;
class ManagedThread;
class BaseClass;
}  // namespace panda

namespace panda::mem {

class ObjectAllocConfigWithCrossingMap;
class ObjectAllocConfig;
class TLAB;

/**
 * AllocatorPurpose and GCCollectMode describe what an allocator is used for and whether the GC should collect from it
 */
enum class AllocatorPurpose {
    ALLOCATOR_PURPOSE_OBJECT,    // Allocator for objects
    ALLOCATOR_PURPOSE_INTERNAL,  // Space for runtime internal needs
};

template <AllocatorType>
class AllocatorTraits {
};

template <>
class AllocatorTraits<AllocatorType::RUNSLOTS_ALLOCATOR> {
    using AllocType = RunSlotsAllocator<ObjectAllocConfig>;
    static constexpr bool HAS_FREE {true};  // the allocator supports freeing individual allocations
};
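
// Illustrative sketch: other allocators can be described the same way by specializing
// AllocatorTraits for their AllocatorType value (assuming the corresponding enumerator,
// e.g. AllocatorType::FREELIST_ALLOCATOR, is defined in libpandabase/mem/mem.h):
//
//   template <>
//   class AllocatorTraits<AllocatorType::FREELIST_ALLOCATOR> {
//       using AllocType = FreeListAllocator<ObjectAllocConfig>;
//       static constexpr bool HAS_FREE {true};  // free-list allocator can free individual blocks
//   };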

template <typename T, AllocScope AllocScopeT>
class AllocatorAdapter;

class Allocator {
public:
    template <typename T, AllocScope AllocScopeT = AllocScope::GLOBAL>
    using AdapterType = AllocatorAdapter<T, AllocScopeT>;

    NO_COPY_SEMANTIC(Allocator);
    NO_MOVE_SEMANTIC(Allocator);
    explicit Allocator(MemStatsType *mem_stats, AllocatorPurpose purpose, GCCollectMode gc_collect_mode)
        : mem_stats_(mem_stats), allocator_purpose_(purpose), gc_collect_mode_(gc_collect_mode)
    {
    }
    virtual ~Allocator() = 0;

    ALWAYS_INLINE AllocatorPurpose GetPurpose() const
    {
        return allocator_purpose_;
    }

    ALWAYS_INLINE GCCollectMode GetCollectMode() const
    {
        return gc_collect_mode_;
    }

    ALWAYS_INLINE MemStatsType *GetMemStats() const
    {
        return mem_stats_;
    }

    [[nodiscard]] void *Alloc(size_t size)
    {
        return Allocate(size, DEFAULT_ALIGNMENT, nullptr);
    }

    [[nodiscard]] void *AllocLocal(size_t size)
    {
        return AllocateLocal(size, DEFAULT_ALIGNMENT, nullptr);
    }

    [[nodiscard]] virtual void *Allocate(size_t size, Alignment align,
                                         [[maybe_unused]] panda::ManagedThread *thread) = 0;

    [[nodiscard]] virtual void *AllocateLocal(size_t size, Alignment align,
                                              [[maybe_unused]] panda::ManagedThread *thread) = 0;

    [[nodiscard]] virtual void *AllocateNonMovable(size_t size, Alignment align, panda::ManagedThread *thread) = 0;

    virtual void *AllocateTenured([[maybe_unused]] size_t size)
    {
        LOG(FATAL, ALLOC) << "AllocTenured not implemented";
        UNREACHABLE();
    }

    template <class T>
    [[nodiscard]] T *AllocArray(size_t size)
    {
        return static_cast<T *>(this->Allocate(sizeof(T) * size, DEFAULT_ALIGNMENT, nullptr));
    }

    template <class T>
    void Delete(T *ptr)
    {
        if (ptr == nullptr) {
            return;
        }
        // NOLINTNEXTLINE(readability-braces-around-statements,bugprone-suspicious-semicolon)
        if constexpr (std::is_class_v<T>) {
            ptr->~T();
        }
        Free(ptr);
    }

    template <typename T>
    void DeleteArray(T *data)
    {
        if (data == nullptr) {
            return;
        }
        static constexpr size_t SIZE_BEFORE_DATA_OFFSET = AlignUp(sizeof(size_t), DEFAULT_ALIGNMENT_IN_BYTES);
        void *p = ToVoidPtr(ToUintPtr(data) - SIZE_BEFORE_DATA_OFFSET);
        size_t size = *static_cast<size_t *>(p);
        // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
        if constexpr (std::is_class_v<T>) {
            for (size_t i = 0; i < size; ++i, ++data) {
                data->~T();
            }
        }
        Free(p);
    }
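
    // Layout shared by DeleteArray and the unbounded-array New/NewLocal overloads below: the
    // element count is stored in a small header placed right before the first element and padded
    // up to DEFAULT_ALIGNMENT_IN_BYTES, i.e.
    //
    //   | size_t count | padding | element[0] ... element[count - 1] |
    //   ^-- block passed to Free()          ^-- pointer handed to the caller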

    virtual void Free(void *mem) = 0;

    virtual void VisitAndRemoveAllPools(const MemVisitor &mem_visitor) = 0;

    virtual void VisitAndRemoveFreePools(const MemVisitor &mem_visitor) = 0;

    virtual void IterateOverYoungObjects([[maybe_unused]] const ObjectVisitor &object_visitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateOverYoungObjects" << std::endl;
    }

    virtual void IterateOverTenuredObjects([[maybe_unused]] const ObjectVisitor &object_visitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateOverTenuredObjects" << std::endl;
    }

    /**
     * \brief Iterates over all objects in the object allocator
     */
    virtual void IterateRegularSizeObjects([[maybe_unused]] const ObjectVisitor &object_visitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateRegularSizeObjects";
    }

    /**
     * \brief Iterates over objects in all allocators except the object allocator
     */
    virtual void IterateNonRegularSizeObjects([[maybe_unused]] const ObjectVisitor &object_visitor)
    {
        LOG(FATAL, ALLOC) << "Allocator::IterateNonRegularSizeObjects";
    }

    virtual void FreeObjectsMovedToPygoteSpace()
    {
        LOG(FATAL, ALLOC) << "Allocator::FreeObjectsMovedToPygoteSpace";
    }

    virtual void IterateOverObjectsInRange(MemRange mem_range, const ObjectVisitor &object_visitor) = 0;

    virtual void IterateOverObjects(const ObjectVisitor &object_visitor) = 0;

    template <AllocScope AllocScopeT = AllocScope::GLOBAL>
    AllocatorAdapter<void, AllocScopeT> Adapter();

    template <typename T, typename... Args>
    std::enable_if_t<!std::is_array_v<T>, T *> New(Args &&... args)
    {
        void *p = Alloc(sizeof(T));
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        new (p) T(std::forward<Args>(args)...);  // NOLINT(bugprone-throw-keyword-missing)
        return reinterpret_cast<T *>(p);
    }

    template <typename T>
    std::enable_if_t<is_unbounded_array_v<T>, std::remove_extent_t<T> *> New(size_t size)
    {
        static constexpr size_t SIZE_BEFORE_DATA_OFFSET = AlignUp(sizeof(size_t), DEFAULT_ALIGNMENT_IN_BYTES);
        using element_type = std::remove_extent_t<T>;
        void *p = Alloc(SIZE_BEFORE_DATA_OFFSET + sizeof(element_type) * size);
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        *static_cast<size_t *>(p) = size;
        auto *data = ToNativePtr<element_type>(ToUintPtr(p) + SIZE_BEFORE_DATA_OFFSET);
        element_type *current_element = data;
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        for (size_t i = 0; i < size; ++i, ++current_element) {
            new (current_element) element_type();
        }
        return data;
    }

    template <typename T, typename... Args>
    std::enable_if_t<!std::is_array_v<T>, T *> NewLocal(Args &&... args)
    {
        void *p = AllocLocal(sizeof(T));
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        new (p) T(std::forward<Args>(args)...);  // NOLINT(bugprone-throw-keyword-missing)
        return reinterpret_cast<T *>(p);
    }

    template <typename T>
    std::enable_if_t<is_unbounded_array_v<T>, std::remove_extent_t<T> *> NewLocal(size_t size)
    {
        static constexpr size_t SIZE_BEFORE_DATA_OFFSET = AlignUp(sizeof(size_t), DEFAULT_ALIGNMENT_IN_BYTES);
        using element_type = std::remove_extent_t<T>;
        void *p = AllocLocal(SIZE_BEFORE_DATA_OFFSET + sizeof(element_type) * size);
        if (UNLIKELY(p == nullptr)) {
            return nullptr;
        }
        *static_cast<size_t *>(p) = size;
        auto *data = ToNativePtr<element_type>(ToUintPtr(p) + SIZE_BEFORE_DATA_OFFSET);
        element_type *current_element = data;
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        for (size_t i = 0; i < size; ++i, ++current_element) {
            new (current_element) element_type();
        }
        return data;
    }

    virtual void *AllocateInLargeAllocator([[maybe_unused]] size_t size, [[maybe_unused]] Alignment align,
                                           [[maybe_unused]] BaseClass *cls)
    {
        return nullptr;
    }

#if defined(TRACK_INTERNAL_ALLOCATIONS)
    virtual void Dump() {}
#endif

protected:
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    MemStatsType *mem_stats_;

private:
    AllocatorPurpose allocator_purpose_;
    GCCollectMode gc_collect_mode_;
};
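
// Usage sketch (names are illustrative; any concrete Allocator implementation works the same way):
//
//   Foo *foo = allocator->New<Foo>(arg1, arg2);  // allocate and placement-construct one object
//   int *buf = allocator->New<int[]>(16);        // unbounded array with the hidden size header
//   allocator->Delete(foo);                      // runs ~Foo() and frees the memory
//   allocator->DeleteArray(buf);                 // destroys every element, then frees the block
//   // STL-style containers can be bound to an Allocator through Adapter() / AllocatorAdapter.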

class ObjectAllocatorBase : public Allocator {
public:
    ObjectAllocatorBase() = delete;
    NO_COPY_SEMANTIC(ObjectAllocatorBase);
    NO_MOVE_SEMANTIC(ObjectAllocatorBase);

    explicit ObjectAllocatorBase(MemStatsType *mem_stats, GCCollectMode gc_collect_mode,
                                 bool create_pygote_space_allocator);

    ~ObjectAllocatorBase() override;

    /**
     * Iterate over all objects and reclaim memory for those reported as collectible by gc_object_visitor
     * @param gc_object_visitor - function that returns true for an ObjectHeader whose memory can be
     * reclaimed
     */
    virtual void Collect(const GCObjectVisitor &gc_object_visitor, GCCollectMode collect_mode) = 0;

    /**
     * Return max size for regular size objects
     * @return max size in bytes for regular size objects
     */
    virtual size_t GetRegularObjectMaxSize() = 0;

    /**
     * Return max size for large objects
     * @return max size in bytes for large objects
     */
    virtual size_t GetLargeObjectMaxSize() = 0;

    /**
     * Check whether an address is in the young space
     * @param address
     * @return true if \param address is in the young space
     */
    virtual bool IsAddressInYoungSpace(uintptr_t address) = 0;

    /**
     * Check whether an object is in the non-movable space
     * @param obj
     * @return true if \param obj is in the non-movable space
     */
    virtual bool IsObjectInNonMovableSpace(const ObjectHeader *obj) = 0;

    /**
     * @return true if allocator has a young space
     */
    virtual bool HasYoungSpace() = 0;

    /**
     * Get young space memory range
     * @return young space memory range
     */
    virtual MemRange GetYoungSpaceMemRange() = 0;

    virtual void ResetYoungAllocator() = 0;

    virtual TLAB *CreateNewTLAB(panda::ManagedThread *thread) = 0;

    virtual size_t GetTLABMaxAllocSize() = 0;

    virtual bool IsTLABSupported() = 0;

    /**
     * \brief Check if the object allocator contains the object starting at address obj
     */
    virtual bool ContainObject([[maybe_unused]] const ObjectHeader *obj) const = 0;

    /**
     * \brief Check if the object obj is live: obj is allocated already and
     * not collected yet.
     */
    virtual bool IsLive([[maybe_unused]] const ObjectHeader *obj) = 0;

    /**
     * \brief Check if the current allocators' allocation state is valid.
     */
    virtual size_t VerifyAllocatorStatus() = 0;

    using PygoteAllocator = PygoteSpaceAllocator<ObjectAllocConfig>;  // Allocator for pygote space
    PygoteAllocator *GetPygoteSpaceAllocator()
    {
        return pygote_space_allocator_;
    }

    const PygoteAllocator *GetPygoteSpaceAllocator() const
    {
        return pygote_space_allocator_;
    }

    void DisablePygoteAlloc()
    {
        pygote_alloc_enabled_ = false;
    }

    bool IsPygoteAllocEnabled() const
    {
        ASSERT(!pygote_alloc_enabled_ || pygote_space_allocator_ != nullptr);
        return pygote_alloc_enabled_;
    }

    static size_t GetObjectSpaceFreeBytes()
    {
        return PoolManager::GetMmapMemPool()->GetObjectSpaceFreeBytes();
    }

protected:
    /**
     * \brief Add new memory pools to object_allocator and allocate memory in them
     */
    template <typename AllocT>
    inline void *AddPoolsAndAlloc(size_t size, Alignment align, AllocT *object_allocator, size_t pool_size,
                                  SpaceType space_type);

    /**
     * Try to allocate memory for the object and if failed add new memory pools and allocate again
     * @param size - size of the object in bytes
     * @param align - alignment
     * @param object_allocator - allocator for the object
     * @param pool_size - size of a memory pool for specified allocator
     * @param space_type - SpaceType of the object
     * @return pointer to allocated memory or nullptr if failed
     */
    template <typename AllocT>
    inline void *AllocateSafe(size_t size, Alignment align, AllocT *object_allocator, size_t pool_size,
                              SpaceType space_type);
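
    // The intended pattern is roughly the following (illustrative sketch of the description above,
    // assuming the underlying allocator exposes an Alloc(size, align) that returns nullptr on failure):
    //
    //   void *mem = object_allocator->Alloc(size, align);
    //   if (mem == nullptr) {
    //       mem = AddPoolsAndAlloc(size, align, object_allocator, pool_size, space_type);
    //   }
    //   return mem;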

    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    PygoteAllocator *pygote_space_allocator_ = nullptr;
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    bool pygote_alloc_enabled_ = false;

private:
    void Free([[maybe_unused]] void *mem) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorBase shouldn't have Free";
    }
};

/**
 * Template wrapper for single underlying allocator
 * @tparam AllocT
 */
template <typename AllocT, AllocatorPurpose allocatorPurpose>
class AllocatorSingleT final : public Allocator {
public:
    // NOLINTNEXTLINE(readability-magic-numbers)
    explicit AllocatorSingleT(MemStatsType *mem_stats)
        : Allocator(mem_stats, allocatorPurpose, GCCollectMode::GC_NONE), allocator_(mem_stats)
    {
    }
    ~AllocatorSingleT() final = default;
    NO_COPY_SEMANTIC(AllocatorSingleT);
    DEFAULT_NOEXCEPT_MOVE_SEMANTIC(AllocatorSingleT);

    [[nodiscard]] void *Allocate(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final
    {
        return allocator_.Alloc(size, align);
    }

    [[nodiscard]] void *AllocateLocal(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final
    {
        return allocator_.AllocLocal(size, align);
    }

    [[nodiscard]] void *AllocateNonMovable([[maybe_unused]] size_t size, [[maybe_unused]] Alignment align,
                                           [[maybe_unused]] panda::ManagedThread *thread) final
    {
        LOG(FATAL, ALLOC) << "AllocatorSingleT shouldn't have AllocateNonMovable";
        return nullptr;
    }

    void Free(void *mem) final
    {
        allocator_.Free(mem);
    }

    void VisitAndRemoveAllPools(const MemVisitor &mem_visitor) final
    {
        allocator_.VisitAndRemoveAllPools(mem_visitor);
    }

    void VisitAndRemoveFreePools(const MemVisitor &mem_visitor) final
    {
        allocator_.VisitAndRemoveFreePools(mem_visitor);
    }

    void IterateOverObjectsInRange([[maybe_unused]] MemRange mem_range,
                                   [[maybe_unused]] const ObjectVisitor &object_visitor) final
    {
        LOG(FATAL, ALLOC) << "IterateOverObjectsInRange not implemented for AllocatorSinglet";
    }

    void IterateOverObjects([[maybe_unused]] const ObjectVisitor &object_visitor) final
    {
        LOG(FATAL, ALLOC) << "IterateOverObjects not implemented for AllocatorSinglet";
    }

#if defined(TRACK_INTERNAL_ALLOCATIONS)
    void Dump() override
    {
        allocator_.Dump();
    }
#endif

private:
    AllocT allocator_;
};

/**
 * Pointer wrapper for an allocator. It checks that the purpose of the wrapped allocator matches the expected one.
 * @tparam allocatorPurpose - expected purpose of the wrapped allocator
 */
template <AllocatorPurpose allocatorPurpose>
class AllocatorPtr {
public:
    AllocatorPtr() = default;
    // NOLINTNEXTLINE(google-explicit-constructor)
    AllocatorPtr(std::nullptr_t a_nullptr) noexcept : allocator_ptr_(a_nullptr) {}

    explicit AllocatorPtr(Allocator *allocator) : allocator_ptr_(allocator) {}

    Allocator *operator->()
    {
        ASSERT((allocator_ptr_ == nullptr) || (allocator_ptr_->GetPurpose() == allocatorPurpose));
        return allocator_ptr_;
    }

    AllocatorPtr &operator=(std::nullptr_t a_nullptr) noexcept
    {
        allocator_ptr_ = a_nullptr;
        return *this;
    }

    AllocatorPtr &operator=(Allocator *allocator)
    {
        allocator_ptr_ = allocator;
        return *this;
    }

    explicit operator Allocator *()
    {
        return allocator_ptr_;
    }

    explicit operator ObjectAllocatorBase *()
    {
        ASSERT(allocator_ptr_->GetPurpose() == AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT);
        return static_cast<ObjectAllocatorBase *>(allocator_ptr_);
    }

    ALWAYS_INLINE bool operator==(const AllocatorPtr &other)
    {
        return allocator_ptr_ == static_cast<Allocator *>(other);
    }

    ALWAYS_INLINE bool operator==(std::nullptr_t) noexcept
    {
        return allocator_ptr_ == nullptr;
    }

    ALWAYS_INLINE bool operator!=(std::nullptr_t) noexcept
    {
        return allocator_ptr_ != nullptr;
    }

    ObjectAllocatorBase *AsObjectAllocator()
    {
        ASSERT(allocatorPurpose == AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT);
        return this->operator panda::mem::ObjectAllocatorBase *();
    }

    ~AllocatorPtr() = default;

    DEFAULT_COPY_SEMANTIC(AllocatorPtr);
    DEFAULT_NOEXCEPT_MOVE_SEMANTIC(AllocatorPtr);

protected:
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    Allocator *allocator_ptr_ = nullptr;
};

using InternalAllocatorPtr = AllocatorPtr<AllocatorPurpose::ALLOCATOR_PURPOSE_INTERNAL>;
using ObjectAllocatorPtr = AllocatorPtr<AllocatorPurpose::ALLOCATOR_PURPOSE_OBJECT>;

template <InternalAllocatorConfig Config>
using InternalAllocatorT = AllocatorSingleT<InternalAllocator<Config>, AllocatorPurpose::ALLOCATOR_PURPOSE_INTERNAL>;

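// Usage sketch (names are illustrative): the wrapper is used like a raw Allocator pointer while
// asserting that the wrapped allocator was created for the expected purpose.
//
//   InternalAllocatorPtr internal {some_internal_allocator};
//   void *mem = internal->Alloc(128);                               // forwarded through operator->
//   ObjectAllocatorBase *objects = object_ptr.AsObjectAllocator();  // valid only for object allocators
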
template <MTModeT MTMode = MT_MODE_MULTI>
class ObjectAllocatorNoGen final : public ObjectAllocatorBase {
    using ObjectAllocator = RunSlotsAllocator<ObjectAllocConfig>;       // Allocator used for middle size allocations
    using LargeObjectAllocator = FreeListAllocator<ObjectAllocConfig>;  // Allocator used for large objects
    using HumongousObjectAllocator = HumongousObjAllocator<ObjectAllocConfig>;  // Allocator used for humongous objects
public:
    NO_MOVE_SEMANTIC(ObjectAllocatorNoGen);
    NO_COPY_SEMANTIC(ObjectAllocatorNoGen);

    explicit ObjectAllocatorNoGen(MemStatsType *mem_stats, bool create_pygote_space_allocator);

    ~ObjectAllocatorNoGen() final;

    [[nodiscard]] void *Allocate(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final;

    [[nodiscard]] void *AllocateNonMovable(size_t size, Alignment align, panda::ManagedThread *thread) final;

    void VisitAndRemoveAllPools(const MemVisitor &mem_visitor) final;

    void VisitAndRemoveFreePools(const MemVisitor &mem_visitor) final;

    void IterateOverObjects(const ObjectVisitor &object_visitor) final;

    /**
     * \brief Iterates over all objects in the object allocator
     */
    void IterateRegularSizeObjects(const ObjectVisitor &object_visitor) final;

    /**
     * \brief Iterates over objects in all allocators except the object allocator
     */
    void IterateNonRegularSizeObjects(const ObjectVisitor &object_visitor) final;

    void FreeObjectsMovedToPygoteSpace() final;

    void Collect(const GCObjectVisitor &gc_object_visitor, GCCollectMode collect_mode) final;

    size_t GetRegularObjectMaxSize() final;

    size_t GetLargeObjectMaxSize() final;

    bool IsAddressInYoungSpace([[maybe_unused]] uintptr_t address) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: IsAddressInYoungSpace not applicable";
        return false;
    }

    bool IsObjectInNonMovableSpace([[maybe_unused]] const ObjectHeader *obj) final
    {
        return true;
    }

    bool HasYoungSpace() final
    {
        return false;
    }

    MemRange GetYoungSpaceMemRange() final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: GetYoungSpaceMemRange not applicable";
        return MemRange(0, 0);
    }

    void ResetYoungAllocator() final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: ResetYoungAllocator not applicable";
    }

    TLAB *CreateNewTLAB(panda::ManagedThread *thread) final;

    size_t GetTLABMaxAllocSize() final;

    bool IsTLABSupported() final
    {
        return false;
    }

    void IterateOverObjectsInRange([[maybe_unused]] MemRange mem_range,
                                   [[maybe_unused]] const ObjectVisitor &object_visitor) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: IterateOverObjectsInRange not implemented";
    }

    bool ContainObject(const ObjectHeader *obj) const final;

    bool IsLive(const ObjectHeader *obj) final;

    size_t VerifyAllocatorStatus() final
    {
        size_t fail_count = 0;
        fail_count += object_allocator_->VerifyAllocator();
        return fail_count;
    }

    [[nodiscard]] void *AllocateLocal(size_t /* size */, Alignment /* align */,
                                      panda::ManagedThread * /* thread */) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorNoGen: AllocateLocal not supported";
        return nullptr;
    }

private:
    ObjectAllocator *object_allocator_ = nullptr;
    LargeObjectAllocator *large_object_allocator_ = nullptr;
    HumongousObjectAllocator *humongous_object_allocator_ = nullptr;
};

// Base class for object allocators used by generational GCs
class ObjectAllocatorGenBase : public ObjectAllocatorBase {
public:
    explicit ObjectAllocatorGenBase(MemStatsType *mem_stats, GCCollectMode gc_collect_mode,
                                    bool create_pygote_space_allocator)
        : ObjectAllocatorBase(mem_stats, gc_collect_mode, create_pygote_space_allocator)
    {
    }

    ~ObjectAllocatorGenBase() override = default;

    NO_COPY_SEMANTIC(ObjectAllocatorGenBase);
    NO_MOVE_SEMANTIC(ObjectAllocatorGenBase);

protected:
    static constexpr size_t YOUNG_ALLOC_MAX_SIZE = PANDA_TLAB_MAX_ALLOC_SIZE;  // max size of allocation in young space
};

template <MTModeT MTMode = MT_MODE_MULTI>
class ObjectAllocatorGen final : public ObjectAllocatorGenBase {
    static constexpr size_t YOUNG_TLAB_SIZE = 4_KB;  // TLAB size for young gen

    using YoungGenAllocator = BumpPointerAllocator<ObjectAllocConfigWithCrossingMap,
                                                   BumpPointerAllocatorLockConfig::ParameterizedLock<MTMode>, true>;
    using ObjectAllocator =
        RunSlotsAllocator<ObjectAllocConfigWithCrossingMap>;  // Allocator used for middle size allocations
    using LargeObjectAllocator =
        FreeListAllocator<ObjectAllocConfigWithCrossingMap>;  // Allocator used for large objects
    using HumongousObjectAllocator =
        HumongousObjAllocator<ObjectAllocConfigWithCrossingMap>;  // Allocator used for humongous objects

public:
    NO_MOVE_SEMANTIC(ObjectAllocatorGen);
    NO_COPY_SEMANTIC(ObjectAllocatorGen);

    explicit ObjectAllocatorGen(MemStatsType *mem_stats, bool create_pygote_space_allocator);

    ~ObjectAllocatorGen() final;

    void *Allocate(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final;

    void *AllocateNonMovable(size_t size, Alignment align, [[maybe_unused]] panda::ManagedThread *thread) final;

    void VisitAndRemoveAllPools(const MemVisitor &mem_visitor) final;

    void VisitAndRemoveFreePools(const MemVisitor &mem_visitor) final;

    void IterateOverYoungObjects(const ObjectVisitor &object_visitor) final;

    void IterateOverTenuredObjects(const ObjectVisitor &object_visitor) final;

    void IterateOverObjects(const ObjectVisitor &object_visitor) final;

    /**
     * \brief Iterates over all objects in the object allocator
     */
    void IterateRegularSizeObjects(const ObjectVisitor &object_visitor) final;

    /**
     * \brief Iterates over objects in all allocators except the object allocator
     */
    void IterateNonRegularSizeObjects(const ObjectVisitor &object_visitor) final;

    void FreeObjectsMovedToPygoteSpace() final;

    void Collect(const GCObjectVisitor &gc_object_visitor, GCCollectMode collect_mode) final;

    size_t GetRegularObjectMaxSize() final;

    size_t GetLargeObjectMaxSize() final;

    bool IsAddressInYoungSpace(uintptr_t address) final;

    bool IsObjectInNonMovableSpace(const ObjectHeader *obj) final;

    bool HasYoungSpace() final;

    MemRange GetYoungSpaceMemRange() final;

    void ResetYoungAllocator() final;

    TLAB *CreateNewTLAB([[maybe_unused]] panda::ManagedThread *thread) final;

    size_t GetTLABMaxAllocSize() final;

    bool IsTLABSupported() final
    {
        return true;
    }
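
    // Note on TLABs (illustrative sketch; the thread/TLAB accessor names are assumptions): a
    // thread-local allocation buffer lets a mutator thread bump-allocate without taking the
    // allocator lock. The fast path looks roughly like
    //
    //   void *mem = thread->GetTLAB()->Alloc(size);
    //   if (mem == nullptr) {
    //       // slow path: request a fresh buffer via CreateNewTLAB() or fall back to Allocate()
    //   }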

    void IterateOverObjectsInRange(MemRange mem_range, const ObjectVisitor &object_visitor) final;

    bool ContainObject(const ObjectHeader *obj) const final;

    bool IsLive(const ObjectHeader *obj) final;

    size_t VerifyAllocatorStatus() final
    {
        size_t fail_count = 0;
        fail_count += object_allocator_->VerifyAllocator();
        return fail_count;
    }

    [[nodiscard]] void *AllocateLocal(size_t /* size */, Alignment /* align */,
                                      panda::ManagedThread * /* thread */) final
    {
        LOG(FATAL, ALLOC) << "ObjectAllocatorGen: AllocateLocal not supported";
        return nullptr;
    }

    static constexpr size_t GetYoungAllocMaxSize()
    {
        return YOUNG_ALLOC_MAX_SIZE;
    }

private:
    YoungGenAllocator *young_gen_allocator_ = nullptr;
    ObjectAllocator *object_allocator_ = nullptr;
    LargeObjectAllocator *large_object_allocator_ = nullptr;
    HumongousObjectAllocator *humongous_object_allocator_ = nullptr;
    MemStatsType *mem_stats_ = nullptr;
    ObjectAllocator *non_movable_object_allocator_ = nullptr;
    LargeObjectAllocator *large_non_movable_object_allocator_ = nullptr;

    void *AllocateTenured(size_t size) final;
};

template <GCType gcType, MTModeT MTMode = MT_MODE_MULTI>
class AllocConfig {
};

}  // namespace panda::mem

#endif  // PANDA_RUNTIME_INCLUDE_MEM_ALLOCATOR_H_