/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MEM_GC_G1_G1_GC_H
#define PANDA_RUNTIME_MEM_GC_G1_G1_GC_H

#include <functional>

#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/mem/gc/card_table.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/gc/gc_marker.h"
#include "runtime/mem/gc/gc_workers_thread_pool.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "runtime/mem/gc/lang/gc_lang.h"
#include "runtime/mem/gc/g1/g1-allocator.h"
#include "runtime/mem/gc/g1/collection_set.h"
#include "runtime/mem/gc/generational-gc-base.h"
#include "runtime/mem/heap_verifier.h"
32 namespace panda {
33 class ManagedThread;
34 }  // namespace panda
35 namespace panda::mem {
36 template <typename T>
37 class UpdateRemsetThread;
38 
39 template <LangTypeT LANG_TYPE, bool HAS_VALUE_OBJECT_TYPES>
40 class G1GCMarker : public GCMarker<G1GCMarker<LANG_TYPE, HAS_VALUE_OBJECT_TYPES>, LANG_TYPE, HAS_VALUE_OBJECT_TYPES> {
41 public:
G1GCMarker(GC * gc)42     explicit G1GCMarker(GC *gc)
43         : GCMarker<G1GCMarker<LANG_TYPE, HAS_VALUE_OBJECT_TYPES>, LANG_TYPE, HAS_VALUE_OBJECT_TYPES>(gc)
44     {
45     }
46 
MarkIfNotMarked(ObjectHeader * object)47     bool MarkIfNotMarked(ObjectHeader *object) const
48     {
49         MarkBitmap *bitmap = ObjectToRegion(object)->GetMarkBitmap();
50         ASSERT(bitmap != nullptr);
51         return !bitmap->AtomicTestAndSet(object);
52     }
53 
IsMarked(const ObjectHeader * object)54     bool IsMarked(const ObjectHeader *object) const
55     {
56         MarkBitmap *bitmap = ObjectToRegion(object)->GetMarkBitmap();
57         ASSERT(bitmap != nullptr);
58         return bitmap->AtomicTest(object);
59     }
60 
61     template <bool atomic = true>
Mark(ObjectHeader * object)62     void Mark(ObjectHeader *object)
63     {
64         MarkBitmap *bitmap = ObjectToRegion(object)->GetMarkBitmap();
65         ASSERT(bitmap != nullptr);
66         bitmap->AtomicTestAndSet(object);
67     }
68 };
69 
70 /**
71  * \brief G1 alike GC
72  */
73 template <class LanguageConfig>
74 class G1GC final : public GenerationalGC<LanguageConfig> {
75     using Marker = G1GCMarker<LanguageConfig::LANG_TYPE, LanguageConfig::HAS_VALUE_OBJECT_TYPES>;
76     using ConcurrentMarkPredicateT = typename GenerationalGC<LanguageConfig>::ConcurrentMarkPredicateT;
77     using RefVector = PandaVector<RefInfo>;
78     using ReferenceCheckPredicateT = typename GC::ReferenceCheckPredicateT;
79 
80 public:
81     explicit G1GC(ObjectAllocatorBase *object_allocator, const GCSettings &settings);
82 
83     ~G1GC() override;
84 
StopGC()85     void StopGC() override
86     {
87         GC::StopGC();
88         // GC is using update_remset_thread so we need to stop GC first before we destroy the thread
89         update_remset_thread_->DestroyThread();
90     }
91 
92     NO_MOVE_SEMANTIC(G1GC);
93     NO_COPY_SEMANTIC(G1GC);
94 
95     void WaitForGC(GCTask task) override;
96 
97     void InitGCBits(panda::ObjectHeader *obj_header) override;
98 
99     void InitGCBitsForAllocationInTLAB(panda::ObjectHeader *object) override;
100 
101     void Trigger() override;
102 
103     bool InitWorker(void **worker_data) override;
104 
105     void DestroyWorker(void *worker_data) override;
106 
107     void WorkerTaskProcessing(GCWorkersTask *task, void *worker_data) override;
108 
109     void MarkReferences(GCMarkingStackType *references, GCPhase gc_phase) override;
110 
111     void MarkObject(ObjectHeader *object) override;
112 
113     bool MarkObjectIfNotMarked(ObjectHeader *object) override;
114 
115     bool InGCSweepRange(const ObjectHeader *object) const override;
116 
117     void OnThreadTerminate(ManagedThread *thread) override;
118 
119     void PreZygoteFork() override;
120     void PostZygoteFork() override;
121 
122     void OnWaitForIdleFail() override;
123 
StartGC()124     void StartGC() override
125     {
126         GC::StartGC();
127         InternalAllocatorPtr allocator = this->GetInternalAllocator();
128         update_remset_thread_->CreateThread(allocator);
129     }
130 
131 private:
132     bool HaveGarbageRegions();
133 
134     template <RegionFlag region_type>
135     void DoRegionCompacting(Region *region, bool use_gc_workers,
136                             PandaVector<PandaVector<ObjectHeader *> *> *moved_objects_vector);
137 
138     template <bool atomic, bool concurrently>
139     void CollectNonRegularObjects(GCTask &task);
140 
141     bool NeedToPromote(const Region *region) const;
142 
143     template <bool atomic, RegionFlag region_type>
144     void RegionCompactingImpl(PandaVector<ObjectHeader *> *moved_objects, Region *region);
145 
146     template <bool atomic>
147     void RegionPromotionImpl(PandaVector<ObjectHeader *> *moved_objects, Region *region);
148 
149     void CollectRefsFromCard(CardTable::CardPtr card, Region *region, RefVector *refs_from_remsets);
150 
151     void InitializeImpl() override;
152 
153     void RunPhasesImpl(GCTask &task) override;
154 
155     void RunPhasesForRegions([[maybe_unused]] panda::GCTask &task, const CollectionSet &collectible_regions);
156 
157     void RunFullForTenured(panda::GCTask &task);
158 
159     void RunFullMarkAndProcessRefs(panda::GCTask &task, const CollectionSet &collectible_regions);
160 
161     void RunFullProcessRefsNoCollect(panda::GCTask &task);
162 
163     void PreStartupImp() override;
164 
165     void VisitCard(CardTable::CardPtr card, const ObjectVisitor &object_visitor, const CardVisitor &card_visitor);
166 
167     /**
168      * GC for young generation. Runs with STW.
169      */
170     void RunGC(GCTask &task, const CollectionSet &collectible_regions);
171 
172     /**
173      * GC for tenured generation.
174      */
175     void RunTenuredGC(const GCTask &task);
176 
177     /**
178      * Marks objects in collection set (young-generation + maybe some tenured regions).
179      */
180     void MixedMark(const GCTask &task, const CollectionSet &collectible_regions);
181 
182     /**
183      * Mark roots and add them to the stack
184      * @param objects_stack
185      * @param visit_class_roots
186      * @param visit_card_table_roots
187      */
188     void MarkRoots(GCMarkingStackType *objects_stack, CardTableVisitFlag visit_card_table_roots,
189                    VisitGCRootFlags flags = VisitGCRootFlags::ACCESS_ROOT_ALL);
190 
191     /**
192      * Initial marks roots and fill in 1st level from roots into stack.
193      * STW
194      * @param objects_stack
195      */
196     void InitialMark(GCMarkingStackType *objects_stack);
197 
198     /**
199      * ReMarks objects after Concurrent marking
200      * @param objects_stack
201      */
202     void ReMark(GCMarkingStackType *objects_stack, GCTask task);
203 
204     void MarkStackMixed(GCMarkingStackType *stack);
205 
206     void MarkStackFull(GCMarkingStackType *stack);
207 
208     bool IsInCollectionSet(ObjectHeader *object);
209 
210     /**
211      * Collect dead objects in young generation and move survivors
212      * @return true if moving was success, false otherwise
213      */
214     bool CollectAndMove(const CollectionSet &collection_set);
215 
216     /**
217      * Collect verification info for CollectAndMove phase
218      * @param collection_set collection set for the current phase
219      * @return instance of verifier to be used to verify for updated references
220      */
221     [[nodiscard]] HeapVerifierIntoGC<LanguageConfig> CollectVerificationInfo(const CollectionSet &collection_set);
222 
223     /**
224      * Verify updted references
225      * @param collect_verifier instance of the verifier that was obtained before references were updated
226      * @param collection_set collection set for the current phase
227      *
228      * @see CollectVerificationInfo
229      * @see UpdateRefsToMovedObjects
230      */
231     void VerifyCollectAndMove(HeapVerifierIntoGC<LanguageConfig> &&collect_verifier,
232                               const CollectionSet &collection_set);
233 
234     /**
235      * Update all refs to moved objects
236      */
237     void UpdateRefsToMovedObjects(PandaVector<PandaVector<ObjectHeader *> *> *moved_objects_vector);
238 
239     void Sweep();
240 
241     bool IsMarked(const ObjectHeader *object) const override;
242 
GetG1ObjectAllocator()243     ALWAYS_INLINE ObjectAllocatorG1<LanguageConfig::MT_MODE> *GetG1ObjectAllocator() const
244     {
245         return static_cast<ObjectAllocatorG1<LanguageConfig::MT_MODE> *>(this->GetObjectAllocator());
246     }
247 
248     /**
249      * Start process of concurrent marking
250      */
251     template <bool is_concurrent>
252     void StartMarking(panda::GCTask &task);
253 
254     /*
255      * Mark the heap in concurrent mode and calculate live bytes
256      */
257     void ConcurrentMark(Marker *marker, GCMarkingStackType *objects_stack, CardTableVisitFlag visit_card_table_roots,
258                         const ConcurrentMarkPredicateT &pred, const ReferenceCheckPredicateT &ref_pred,
259                         const MemRangeChecker &mem_range_checker);
260 
261     /**
262      * ReMarks objects after Concurrent marking and actualize information about live bytes
263      */
264     void Remark(panda::GCTask const &task);
265 
266     /**
267      * Return collectible regions
268      */
269     CollectionSet GetCollectibleRegions(panda::GCTask const &task, bool is_mixed);
270 
271     void UpdateCollectionSet(const CollectionSet &collectible_regions);
272 
273     void CalcLiveBytesForMovableTenuredRegions();
274 
275     /**
276      * Estimate space in tenured to objects from collectible regions
277      */
278     bool HaveEnoughSpaceToMove(const CollectionSet &collectible_regions);
279 
280     /**
281      * Check if we have enough free regions in tenured space
282      */
283     bool HaveEnoughRegionsToMove(size_t num);
284 
285     /**
286      * Add data from SATB buffer to the object stack
287      * @param object_stack - stack to add data to
288      */
289     void DrainSatb(GCAdaptiveStack *object_stack);
290 
291     void WaitForUpdateRemsetThread();
292 
293     void ClearSatb();
294 
295     /**
296      * Iterate over object references in rem sets.
297      * The Visitor is a functor which accepts an object (referee), the reference value,
298      * offset of the reference in the object and the flag whether the reference is volatile.
299      * The visitor can be called for the references to the collection set in the object or
300      * for all references in an object which has at least one reference to the collection set.
301      * The decision is implementation dependent.
302      */
303     template <class Visitor>
304     void VisitRemSets(const Visitor &visitor);
305 
306     void CacheRefsFromRemsets();
307 
308     void ClearRefsFromRemsetsCache();
309 
310     // Issue 8183: Remove unnessesary SetYoungFullGC methods after refactoring Full GC
SetYoungFullGC(bool value)311     void SetYoungFullGC(bool value)
312     {
313         is_young_full_gc_ = value;
314     }
315 
316     // Issue 8183: Remove unnessesary IsYoungFullGC methods after refactoring Full GC
IsYoungFullGC()317     bool IsYoungFullGC() const
318     {
319         return is_young_full_gc_;
320     }
321 
322     void ActualizeRemSets();
323 
324     bool ShouldRunTenuredGC(const GCTask &task) override;
325 
326     Marker marker_;
327     std::atomic<bool> concurrent_marking_flag_ {false};  //! flag indicates if we currently in concurrent marking phase
328     std::atomic<bool> interrupt_concurrent_flag_ {false};  //! flag indicates if we need to interrupt concurrent marking
329     std::function<void(const void *, const void *)> post_queue_func_ {nullptr};  //! function called in the post WRB
330     /**
331      * After first process it stores humongous objects only, after marking them it's still store them for updating
332      * pointers from Humongous
333      */
GUARDED_BY(satb_and_newobj_buf_lock_)334     PandaList<PandaVector<ObjectHeader *> *> satb_buff_list_ GUARDED_BY(satb_and_newobj_buf_lock_) {};
335     PandaVector<ObjectHeader *> newobj_buffer_ GUARDED_BY(satb_and_newobj_buf_lock_);
336     // The lock guards both variables: satb_buff_list_ and newobj_buffer_
337     os::memory::Mutex satb_and_newobj_buf_lock_;
338     UpdateRemsetThread<LanguageConfig> *update_remset_thread_ {nullptr};
339     GCMarkingStackType concurrent_marking_stack_;
340     std::atomic<bool> is_mixed_gc_required_ {false};
341     // TODO(agrebenkin): Remove unnessesary is_young_full_gc_ field after refactoring Full GC
342     bool is_young_full_gc_ {false};
343     size_t number_of_mixed_tenured_regions_ {2};  //! number of tenured regions added at the young GC
344     double region_garbage_rate_threshold_ {0.0};
345     double g1_promotion_region_alive_rate_ {0.0};
346     bool g1_track_freed_objects_ {false};
347     CollectionSet collection_set_;
348     // Max size of unique_refs_from_remsets_ buffer. It should be enough to store
349     // almost all references to the collection set.
350     // But any way there may be humongous arrays which contains a lot of references to the collection set.
351     // For such objects GC don't store each reference. It just put the whole object into unique_objects_from_remsets_.
352     static constexpr size_t MAX_REFS = 1024;
353     // Storages for references from remsets to the collection set.
354     // The list has the same number of elements as number of GC workers + GC thread.
355     // Each vector is thread specific.
356     // unique_refs_from_remsets_ contains an object from the remset and the offset of
357     // the field which refers to the collection set.
358     // Total number of collected references is limited by MAX_REFS and
359     // divided between threads uniformly.
360     PandaList<RefVector> unique_refs_from_remsets_;
361     // unique_objects_from_remsets_ contains objects from remsets which have a reference to the collection set.
362     // It is used when the limit of unique_refs_from_remsets_ is reached.
363     PandaVector<ObjectHeader *> unique_objects_from_remsets_;
364     os::memory::Mutex objects_from_remsets_lock_;
365     PandaVector<RefVector *> unassigned_buffers_ GUARDED_BY(unassigned_buffers_lock_);
366     os::memory::Mutex unassigned_buffers_lock_;
367     GC::MarkPredicate calc_live_bytes_;
368 #ifndef NDEBUG
369     bool unique_cards_initialized_ = false;
370 #endif                                                                     // NDEBUG
371     GCG1BarrierSet::ThreadLocalCardQueues *updated_refs_queue_ {nullptr};  //! queue with updated refs info
372     os::memory::Mutex queue_lock_;
373 
374     template <class LC>
375     friend class RefCacheBuilder;
376 };
377 
378 template <MTModeT MTMode>
379 class AllocConfig<GCType::G1_GC, MTMode> {
380 public:
381     using ObjectAllocatorType = ObjectAllocatorG1<MTMode>;
382     using CodeAllocatorType = CodeAllocator;
383 };
384 
385 }  // namespace panda::mem
386 
387 #endif  // PANDA_RUNTIME_MEM_GC_G1_G1_GC_H
388