/**
 * Copyright (c) 2024-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef RUNTIME_MEM_GC_GENERATIONAL_GC_BASE_H
#define RUNTIME_MEM_GC_GENERATIONAL_GC_BASE_H

#include "runtime/mem/gc/lang/gc_lang.h"
#include "runtime/include/mem/allocator.h"

namespace ark::mem {
namespace test {
class MemStatsGenGCTest;
}  // namespace test
/// Base class for generational GC
template <class LanguageConfig>
class GenerationalGC : public GCLang<LanguageConfig> {
public:
    using ReferenceCheckPredicateT = typename GC::ReferenceCheckPredicateT;

    CardTable *GetCardTable() const override
    {
        return cardTable_.get();
    }

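    /// Evaluate the trigger policy for the given task and initiate a collection when it fires.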
    bool Trigger(PandaUniquePtr<GCTask> task) override;

protected:
    GenerationalGC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings)
        : GCLang<LanguageConfig>(objectAllocator, settings)
    {
    }
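    /// Decide whether this collection should also process the tenured generation (i.e. run a major GC).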
    virtual bool ShouldRunTenuredGC(const GCTask &task);

    void DisableTenuredGC()
    {
        majorPeriod_ = DISABLED_MAJOR_PERIOD;  // Disable tenured GC temporarily.
    }

    void RestoreTenuredGC()
    {
        majorPeriod_ = DEFAULT_MAJOR_PERIOD;
    }

    ALWAYS_INLINE size_t GetMajorPeriod() const
    {
        return majorPeriod_;
    }

    void PostForkCallback([[maybe_unused]] size_t restoreLimit) override
    {
        GenerationalGC<LanguageConfig>::RestoreTenuredGC();
    }

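    /// Common marking implementation: marks objects reachable via the marking stack and, when
    /// visitCardTableRoots requests it, also visits card table roots in ranges accepted by memRangeChecker.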
    template <typename Marker>
    NO_THREAD_SAFETY_ANALYSIS void MarkImpl(Marker *marker, GCMarkingStackType *objectsStack,
                                            CardTableVisitFlag visitCardTableRoots,
                                            const ReferenceCheckPredicateT &refPred,
                                            const MemRangeChecker &memRangeChecker,
                                            const GC::MarkPreprocess &markPreprocess = GC::EmptyMarkPreprocess);

    /// Mark all objects in stack recursively
    template <typename Marker, class... ReferenceCheckPredicate>
    void MarkStack(Marker *marker, GCMarkingStackType *stack, const GC::MarkPreprocess &markPreprocess,
                   const ReferenceCheckPredicate &...refPred);

    /**
     * Update statistics in MemStats. Requires an initialized memStats_ field.
     * @param bytesInHeapBefore - bytes in the heap before the GC
     * @param updateTenuredStats - if true, also update the tenured moved and tenured deleted stats
     * @param recordAllocationForMovedObjects - if true, record an allocation for every moved object (young and
     *  tenured)
     */
    void UpdateMemStats(size_t bytesInHeapBefore, bool updateTenuredStats = false,
                        bool recordAllocationForMovedObjects = false);

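    /// Per-collection statistics: counts and byte sizes of objects freed and moved in the young and tenured spaces.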
    class MemStats {
    public:
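        // Each recorder below has an ATOMIC flavor: when several GC workers update the statistics in
        // parallel, the counters are incremented with relaxed atomic fetch_add on the same fields.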
        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordCountFreedYoung(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&youngFreeObjectCount_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                youngFreeObjectCount_ += count;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordSizeFreedYoung(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&youngFreeObjectSize_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                youngFreeObjectSize_ += size;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordCountMovedYoung(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&youngMoveObjectCount_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                youngMoveObjectCount_ += count;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordSizeMovedYoung(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&youngMoveObjectSize_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                youngMoveObjectSize_ += size;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordYoungStats(size_t youngMoveSize, size_t youngMoveCount, size_t youngDeleteSize,
                                            size_t youngDeleteCount)
        {
            // Forward ATOMIC so the individual recorders use atomic updates when requested;
            // otherwise the template parameter would be silently ignored.
            RecordSizeMovedYoung<ATOMIC>(youngMoveSize);
            RecordCountMovedYoung<ATOMIC>(youngMoveCount);
            RecordSizeFreedYoung<ATOMIC>(youngDeleteSize);
            RecordCountFreedYoung<ATOMIC>(youngDeleteCount);
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordCountMovedTenured(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&tenuredMoveObjectCount_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenuredMoveObjectCount_ += count;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordSizeMovedTenured(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&tenuredMoveObjectSize_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenuredMoveObjectSize_ += size;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordCountFreedTenured(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&tenuredFreeObjectCount_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenuredFreeObjectCount_ += count;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordSizeFreedTenured(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&tenuredFreeObjectSize_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenuredFreeObjectSize_ += size;
            }
        }

        void Reset()
        {
            youngFreeObjectCount_ = 0U;
            youngFreeObjectSize_ = 0U;
            youngMoveObjectCount_ = 0U;
            youngMoveObjectSize_ = 0U;
            tenuredFreeObjectCount_ = 0U;
            tenuredFreeObjectSize_ = 0U;
            tenuredMoveObjectCount_ = 0U;
            tenuredMoveObjectSize_ = 0U;
        }

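        /// Return the recorded statistics formatted as a human-readable string.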
        PandaString Dump();

        ALWAYS_INLINE size_t GetCountFreedYoung()
        {
            return youngFreeObjectCount_;
        }

        ALWAYS_INLINE size_t GetSizeFreedYoung()
        {
            return youngFreeObjectSize_;
        }

        ALWAYS_INLINE size_t GetCountMovedYoung()
        {
            return youngMoveObjectCount_;
        }

        ALWAYS_INLINE size_t GetSizeMovedYoung()
        {
            return youngMoveObjectSize_;
        }

        ALWAYS_INLINE size_t GetCountFreedTenured()
        {
            return tenuredFreeObjectCount_;
        }

        ALWAYS_INLINE size_t GetSizeFreedTenured()
        {
            return tenuredFreeObjectSize_;
        }

        ALWAYS_INLINE size_t GetCountMovedTenured()
        {
            return tenuredMoveObjectCount_;
        }

        ALWAYS_INLINE size_t GetSizeMovedTenured()
        {
            return tenuredMoveObjectSize_;
        }

    private:
        uint32_t youngFreeObjectCount_ {0U};
        uint64_t youngFreeObjectSize_ {0U};
        uint32_t youngMoveObjectCount_ {0U};
        uint64_t youngMoveObjectSize_ {0U};
        uint32_t tenuredFreeObjectCount_ {0U};
        uint64_t tenuredFreeObjectSize_ {0U};
        uint32_t tenuredMoveObjectCount_ {0U};
        uint64_t tenuredMoveObjectSize_ {0U};

        friend class GenerationalGC;
        friend class test::MemStatsGenGCTest;
    };

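    /// The allocator of a generational GC is expected to be an ObjectAllocatorGenBase, hence the static downcast.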
    ALWAYS_INLINE ObjectAllocatorGenBase *GetObjectGenAllocator()
    {
        return static_cast<ObjectAllocatorGenBase *>(this->GetObjectAllocator());
    }

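    /// Allocate and initialize the card table covering [minAddress, minAddress + size).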
    void CreateCardTable(InternalAllocatorPtr internalAllocatorPtr, uintptr_t minAddress, size_t size);

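    /// Concurrently process marked cards, re-scanning objects in the ranges accepted by memRangeChecker.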
    template <typename Marker>
    void VisitCardTableConcurrent(Marker *marker, GCMarkingStackType *objectsStack,
                                  const ReferenceCheckPredicateT &refPred, const MemRangeChecker &memRangeChecker,
                                  const GC::MarkPreprocess &markPreprocess);

    void PrintDetailedLog() override
    {
        LOG(INFO, GC) << memStats_.Dump();
        GC::PrintDetailedLog();
    }

    MemStats memStats_;  // NOLINT(misc-non-private-member-variables-in-classes)
private:
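    // Roughly, a tenured (major) GC is allowed once per majorPeriod_ collections;
    // setting the period to DISABLED_MAJOR_PERIOD effectively disables it.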
    static constexpr size_t DEFAULT_MAJOR_PERIOD = 3;
    static constexpr size_t DISABLED_MAJOR_PERIOD = 65535;
    size_t majorPeriod_ {DEFAULT_MAJOR_PERIOD};
    PandaUniquePtr<CardTable> cardTable_ {nullptr};
    friend class test::MemStatsGenGCTest;
};

}  // namespace ark::mem
#endif  // RUNTIME_MEM_GC_GENERATIONAL_GC_BASE_H