/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef RUNTIME_MEM_GC_GENERATIONAL_GC_BASE_H
#define RUNTIME_MEM_GC_GENERATIONAL_GC_BASE_H

#include "runtime/mem/gc/lang/gc_lang.h"
#include "runtime/include/mem/allocator.h"

namespace panda::mem {
namespace test {
class MemStatsGenGCTest;
} // namespace test
/// Base class for generational GC
template <class LanguageConfig>
class GenerationalGC : public GCLang<LanguageConfig> {
public:
    using ReferenceCheckPredicateT = typename GC::ReferenceCheckPredicateT;

    CardTable *GetCardTable() const override
    {
        return cardTable_.get();
    }

    bool Trigger(PandaUniquePtr<GCTask> task) override;

protected:
    GenerationalGC(ObjectAllocatorBase *objectAllocator, const GCSettings &settings)
        : GCLang<LanguageConfig>(objectAllocator, settings)
    {
    }
    virtual bool ShouldRunTenuredGC(const GCTask &task);

    void DisableTenuredGC()
    {
        majorPeriod_ = DISABLED_MAJOR_PERIOD; // Disable tenured GC temporarily.
    }

    void RestoreTenuredGC()
    {
        majorPeriod_ = DEFAULT_MAJOR_PERIOD;
    }

    ALWAYS_INLINE size_t GetMajorPeriod() const
    {
        return majorPeriod_;
    }

    void PostForkCallback() override
    {
        GenerationalGC<LanguageConfig>::RestoreTenuredGC();
    }

    template <typename Marker>
    NO_THREAD_SAFETY_ANALYSIS void MarkImpl(Marker *marker, GCMarkingStackType *objectsStack,
                                            CardTableVisitFlag visitCardTableRoots,
                                            const ReferenceCheckPredicateT &refPred,
                                            const MemRangeChecker &memRangeChecker,
                                            const GC::MarkPreprocess &markPreprocess = GC::EmptyMarkPreprocess);

    /// Mark all objects in stack recursively
    template <typename Marker, class... ReferenceCheckPredicate>
    void MarkStack(Marker *marker, GCMarkingStackType *stack, const GC::MarkPreprocess &markPreprocess,
                   const ReferenceCheckPredicate &...refPred);
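    // Illustrative usage sketch (an assumption, not part of this header): a derived collector is
    // expected to push roots onto a GCMarkingStackType, let MarkImpl() process them (optionally
    // visiting card table roots as well), and then drain any remaining gray objects with
    // MarkStack(). Construction of the marker, stack, predicate and range checker is elided;
    // only MarkImpl and MarkStack are real members of this class:
    //
    //     MarkImpl(&marker, &objectsStack, visitCardTableRoots, refPred, memRangeChecker);
    //     MarkStack(&marker, &objectsStack, GC::EmptyMarkPreprocess, refPred);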
    /**
     * Update statistics in MemStats. Requires an initialized memStats_ field.
     * @param bytesInHeapBefore - bytes in heap before the GC
     * @param updateTenuredStats - if true, we will update tenured moved and tenured deleted memstats too
     * @param recordAllocationForMovedObjects - if true, we will record allocation for all moved objects (young and
     * tenured)
     */
    void UpdateMemStats(size_t bytesInHeapBefore, bool updateTenuredStats = false,
                        bool recordAllocationForMovedObjects = false);

    class MemStats {
    public:
        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordCountFreedYoung(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&youngFreeObjectCount_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                youngFreeObjectCount_ += count;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordSizeFreedYoung(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&youngFreeObjectSize_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                youngFreeObjectSize_ += size;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordCountMovedYoung(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&youngMoveObjectCount_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                youngMoveObjectCount_ += count;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordSizeMovedYoung(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&youngMoveObjectSize_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                youngMoveObjectSize_ += size;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordCountMovedTenured(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&tenuredMoveObjectCount_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenuredMoveObjectCount_ += count;
            }
        }
        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordSizeMovedTenured(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&tenuredMoveObjectSize_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenuredMoveObjectSize_ += size;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordCountFreedTenured(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&tenuredFreeObjectCount_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenuredFreeObjectCount_ += count;
            }
        }

        template <bool ATOMIC = false>
        ALWAYS_INLINE void RecordSizeFreedTenured(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (ATOMIC) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&tenuredFreeObjectSize_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenuredFreeObjectSize_ += size;
            }
        }

        void Reset()
        {
            youngFreeObjectCount_ = 0U;
            youngFreeObjectSize_ = 0U;
            youngMoveObjectCount_ = 0U;
            youngMoveObjectSize_ = 0U;
            tenuredFreeObjectCount_ = 0U;
            tenuredFreeObjectSize_ = 0U;
            tenuredMoveObjectCount_ = 0U;
            tenuredMoveObjectSize_ = 0U;
        }

        PandaString Dump();

        ALWAYS_INLINE size_t GetCountFreedYoung()
        {
            return youngFreeObjectCount_;
        }

        ALWAYS_INLINE size_t GetSizeFreedYoung()
        {
            return youngFreeObjectSize_;
        }

        ALWAYS_INLINE size_t GetCountMovedYoung()
        {
            return youngMoveObjectCount_;
        }

        ALWAYS_INLINE size_t GetSizeMovedYoung()
        {
            return youngMoveObjectSize_;
        }

        ALWAYS_INLINE size_t GetCountFreedTenured()
        {
            return tenuredFreeObjectCount_;
        }

        ALWAYS_INLINE size_t GetSizeFreedTenured()
        {
            return tenuredFreeObjectSize_;
        }

        ALWAYS_INLINE size_t GetCountMovedTenured()
        {
            return tenuredMoveObjectCount_;
        }

        ALWAYS_INLINE size_t GetSizeMovedTenured()
        {
            return tenuredMoveObjectSize_;
        }

    private:
        uint32_t youngFreeObjectCount_ {0U};
        uint64_t youngFreeObjectSize_ {0U};
        uint32_t youngMoveObjectCount_ {0U};
        uint64_t youngMoveObjectSize_ {0U};
        uint32_t tenuredFreeObjectCount_ {0U};
        uint64_t tenuredFreeObjectSize_ {0U};
        uint32_t tenuredMoveObjectCount_ {0U};
        uint64_t tenuredMoveObjectSize_ {0U};

        friend class GenerationalGC;
        friend class test::MemStatsGenGCTest;
    };

    ALWAYS_INLINE ObjectAllocatorGenBase *GetObjectGenAllocator()
    {
        return static_cast<ObjectAllocatorGenBase *>(this->GetObjectAllocator());
    }

    void CreateCardTable(InternalAllocatorPtr internalAllocatorPtr, uintptr_t minAddress, size_t size);
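    // Illustrative sketch (an assumption, not code from this runtime): a concrete collector is
    // expected to account freed and promoted objects through memStats_ while it sweeps/evacuates,
    // using the ATOMIC flavour from concurrent or parallel phases and the plain flavour under
    // stop-the-world. The counts and sizes below are hypothetical local variables:
    //
    //     memStats_.RecordCountFreedYoung(deadCount);                // single-threaded phase
    //     memStats_.RecordSizeFreedYoung(deadSize);
    //     memStats_.template RecordCountMovedTenured<true>(1U);      // concurrent phase: atomic
    //     memStats_.template RecordSizeMovedTenured<true>(objectSize);
    //
    // PrintDetailedLog() below then dumps these counters via MemStats::Dump().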
    void PrintDetailedLog() override
    {
        LOG(INFO, GC) << memStats_.Dump();
        GC::PrintDetailedLog();
    }

    MemStats memStats_; // NOLINT(misc-non-private-member-variables-in-classes)
private:
    static constexpr size_t DEFAULT_MAJOR_PERIOD = 3;
    static constexpr size_t DISABLED_MAJOR_PERIOD = 65535;
    size_t majorPeriod_ {DEFAULT_MAJOR_PERIOD};
    PandaUniquePtr<CardTable> cardTable_ {nullptr};
    friend class test::MemStatsGenGCTest;
};

} // namespace panda::mem
#endif // RUNTIME_MEM_GC_GENERATIONAL_GC_BASE_H