/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef RUNTIME_MEM_GC_GENERATIONAL_GC_BASE_H
#define RUNTIME_MEM_GC_GENERATIONAL_GC_BASE_H

#include "runtime/mem/gc/lang/gc_lang.h"
#include "runtime/include/mem/allocator.h"

namespace panda::mem {
namespace test {
class MemStatsGenGCTest;
}  // namespace test

/**
 * Base class for generational GCs
 */
template <class LanguageConfig>
class GenerationalGC : public GCLang<LanguageConfig> {
public:
    using ConcurrentMarkPredicateT = std::function<bool()>;
    using ReferenceCheckPredicateT = typename GC::ReferenceCheckPredicateT;

    CardTable *GetCardTable() override
    {
        return card_table_.get();
    }

protected:
    GenerationalGC(ObjectAllocatorBase *object_allocator, const GCSettings &settings)
        : GCLang<LanguageConfig>(object_allocator, settings)
    {
    }

    virtual bool ShouldRunTenuredGC(const GCTask &task);

    void DisableTenuredGC()
    {
        major_period_ = DISABLED_MAJOR_PERIOD;  // Disable tenured GC temporarily.
    }

    void RestoreTenuredGC()
    {
        major_period_ = DEFAULT_MAJOR_PERIOD;
    }

    ALWAYS_INLINE size_t GetMajorPeriod() const
    {
        return major_period_;
    }

    void PostForkCallback() override
    {
        GenerationalGC<LanguageConfig>::RestoreTenuredGC();
    }

    void WaitForGC(GCTask task) override;

    /**
     * Concurrently mark all objects
     * @param objects_stack
     */
    template <typename Marker>
    NO_THREAD_SAFETY_ANALYSIS void ConcurrentMark(Marker *marker, GCMarkingStackType *objects_stack,
                                                  CardTableVisitFlag visit_card_table_roots,
                                                  const ConcurrentMarkPredicateT &pred,
                                                  const ReferenceCheckPredicateT &ref_pred,
                                                  const MemRangeChecker &mem_range_checker);

    /**
     * Mark all objects on pause
     * @param objects_stack
     */
    template <typename Marker>
    NO_THREAD_SAFETY_ANALYSIS void OnPauseMark(Marker *marker, GCMarkingStackType *objects_stack,
                                               CardTableVisitFlag visit_card_table_roots,
                                               const ConcurrentMarkPredicateT &pred,
                                               const ReferenceCheckPredicateT &ref_pred,
                                               const MemRangeChecker &mem_range_checker);

    template <typename Marker>
    NO_THREAD_SAFETY_ANALYSIS void MarkImpl(Marker *marker, GCMarkingStackType *objects_stack,
                                            CardTableVisitFlag visit_card_table_roots,
                                            const ConcurrentMarkPredicateT &pred,
                                            const ReferenceCheckPredicateT &ref_pred,
                                            const MemRangeChecker &mem_range_checker);

    /**
     * Mark all objects in stack recursively for Full GC.
     */
    template <typename Marker>
    void MarkStack(Marker *marker, GCMarkingStackType *stack, const ReferenceCheckPredicateT &ref_pred,
                   const GC::MarkPredicate &markPredicate);

    /**
     * Mark all objects in stack recursively for Full GC while the concurrent mark predicate holds.
     */
    template <typename Marker>
    void MarkStackCond(Marker *marker, GCMarkingStackType *stack, const ConcurrentMarkPredicateT &pred,
                       const ReferenceCheckPredicateT &ref_pred, const GC::MarkPredicate &markPredicate);
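    // Usage sketch (illustrative only, not part of this header): a derived collector might drive
    // the two-phase mark roughly like this. `marker`, `ref_pred`, `range_checker`, and the
    // predicate body are hypothetical locals, and the CardTableVisitFlag values are assumed
    // from the GC interface.
    //
    //   GCMarkingStackType objects_stack(this);
    //   ConcurrentMarkPredicateT pred = []() { return true; };  // e.g. "keep marking until interrupted"
    //   ConcurrentMark(&marker, &objects_stack, CardTableVisitFlag::VISIT_ENABLED, pred, ref_pred,
    //                  range_checker);
    //   // Finish on pause: re-scan roots that mutated during the concurrent phase.
    //   OnPauseMark(&marker, &objects_stack, CardTableVisitFlag::VISIT_DISABLED, pred, ref_pred,
    //               range_checker);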
    /**
     * Update statistics in MemStats. Requires an initialized mem_stats_ field.
     * @param bytes_in_heap_before - bytes in heap before the GC
     * @param update_tenured_stats - if true, we will update tenured moved and tenured deleted memstats too
     * @param record_allocation_for_moved_objects - if true, we will record allocation for all moved objects
     * (young and tenured)
     */
    void UpdateMemStats(size_t bytes_in_heap_before, bool update_tenured_stats = false,
                        bool record_allocation_for_moved_objects = false);

    class MemStats {
    public:
        template <bool atomic = false>
        ALWAYS_INLINE void RecordCountFreedYoung(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (atomic) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&young_free_object_count_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                young_free_object_count_ += count;
            }
        }

        template <bool atomic = false>
        ALWAYS_INLINE void RecordSizeFreedYoung(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (atomic) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&young_free_object_size_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                young_free_object_size_ += size;
            }
        }

        template <bool atomic = false>
        ALWAYS_INLINE void RecordCountMovedYoung(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (atomic) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&young_move_object_count_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                young_move_object_count_ += count;
            }
        }

        template <bool atomic = false>
        ALWAYS_INLINE void RecordSizeMovedYoung(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (atomic) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&young_move_object_size_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                young_move_object_size_ += size;
            }
        }

        template <bool atomic = false>
        ALWAYS_INLINE void RecordCountMovedTenured(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (atomic) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&tenured_move_object_count_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenured_move_object_count_ += count;
            }
        }
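    // Usage sketch (illustrative only): parallel GC workers record into the shared mem_stats_
    // with atomic = true, while stop-the-world phases can use the cheaper non-atomic default.
    // The counts and sizes below are made up, and LOG(INFO, GC) is assumed from the runtime's
    // logging macros.
    //
    //   mem_stats_.Reset();                            // at the start of a collection
    //   mem_stats_.RecordCountFreedYoung<true>(16U);   // from a parallel worker thread
    //   mem_stats_.RecordSizeFreedYoung<true>(4096U);
    //   mem_stats_.RecordCountMovedYoung(8U);          // from the GC thread, on pause
    //   mem_stats_.RecordSizeMovedYoung(2048U);
    //   LOG(INFO, GC) << mem_stats_.Dump();            // after the collection finishes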
        template <bool atomic = false>
        ALWAYS_INLINE void RecordSizeMovedTenured(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (atomic) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&tenured_move_object_size_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenured_move_object_size_ += size;
            }
        }

        template <bool atomic = false>
        ALWAYS_INLINE void RecordCountFreedTenured(size_t count)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (atomic) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint32_t> *>(&tenured_free_object_count_)
                    ->fetch_add(count, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenured_free_object_count_ += count;
            }
        }

        template <bool atomic = false>
        ALWAYS_INLINE void RecordSizeFreedTenured(size_t size)
        {
            // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon)
            if constexpr (atomic) {
                // Atomic with relaxed order reason: memory accesses from different threads
                reinterpret_cast<std::atomic<uint64_t> *>(&tenured_free_object_size_)
                    ->fetch_add(size, std::memory_order_relaxed);
                // NOLINTNEXTLINE(readability-misleading-indentation)
            } else {
                tenured_free_object_size_ += size;
            }
        }

        void Reset()
        {
            young_free_object_count_ = 0U;
            young_free_object_size_ = 0U;
            young_move_object_count_ = 0U;
            young_move_object_size_ = 0U;
            tenured_free_object_count_ = 0U;
            tenured_free_object_size_ = 0U;
            tenured_move_object_count_ = 0U;
            tenured_move_object_size_ = 0U;
        }

        PandaString Dump();

        ALWAYS_INLINE size_t GetCountFreedYoung()
        {
            return young_free_object_count_;
        }

        ALWAYS_INLINE size_t GetSizeFreedYoung()
        {
            return young_free_object_size_;
        }

        ALWAYS_INLINE size_t GetCountMovedYoung()
        {
            return young_move_object_count_;
        }

        ALWAYS_INLINE size_t GetSizeMovedYoung()
        {
            return young_move_object_size_;
        }

        ALWAYS_INLINE size_t GetCountFreedTenured()
        {
            return tenured_free_object_count_;
        }

        ALWAYS_INLINE size_t GetSizeFreedTenured()
        {
            return tenured_free_object_size_;
        }

        ALWAYS_INLINE size_t GetCountMovedTenured()
        {
            return tenured_move_object_count_;
        }

        ALWAYS_INLINE size_t GetSizeMovedTenured()
        {
            return tenured_move_object_size_;
        }

    private:
        uint32_t young_free_object_count_ {0U};
        uint64_t young_free_object_size_ {0U};
        uint32_t young_move_object_count_ {0U};
        uint64_t young_move_object_size_ {0U};
        uint32_t tenured_free_object_count_ {0U};
        uint64_t tenured_free_object_size_ {0U};
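    // Usage sketch (illustrative only, placed before SweepStringTableYoung below): the
    // young_checker typically reports whether an entry still points into the young space that
    // is about to be collected. IsObjectInYoungSpace() is an assumed allocator helper, not
    // something this header guarantees.
    //
    //   SweepStringTableYoung([this](ObjectHeader *obj) {
    //       return this->GetObjectGenAllocator()->IsObjectInYoungSpace(obj);
    //   });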
        uint32_t tenured_move_object_count_ {0U};
        uint64_t tenured_move_object_size_ {0U};

        friend class GenerationalGC;
        friend class test::MemStatsGenGCTest;
    };

    ALWAYS_INLINE ObjectAllocatorGenBase *GetObjectGenAllocator()
    {
        return static_cast<ObjectAllocatorGenBase *>(this->GetObjectAllocator());
    }

    /**
     * Sweep the string table, clearing references that are about to become dangling pointers
     * into the young generation
     */
    void SweepStringTableYoung(const std::function<bool(ObjectHeader *)> &young_checker);

    void CreateCardTable(InternalAllocatorPtr internal_allocator_ptr, uintptr_t min_address, size_t size);

    MemStats mem_stats_;  // NOLINT(misc-non-private-member-variables-in-classes)

private:
    static constexpr size_t DEFAULT_MAJOR_PERIOD = 3;
    static constexpr size_t DISABLED_MAJOR_PERIOD = 65535;
    size_t major_period_ {DEFAULT_MAJOR_PERIOD};
    PandaUniquePtr<CardTable> card_table_ {nullptr};
    friend class test::MemStatsGenGCTest;
};

}  // namespace panda::mem
#endif  // RUNTIME_MEM_GC_GENERATIONAL_GC_BASE_H