/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RUNTIME_MEM_GC_GC_H_
#define PANDA_RUNTIME_MEM_GC_GC_H_

#include <atomic>
#include <map>
#include <string_view>
#include <vector>

#include "libpandabase/os/mutex.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/trace/trace.h"
#include "libpandabase/utils/expected.h"
#include "runtime/include/gc_task.h"
#include "runtime/include/language_config.h"
#include "runtime/include/locks.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/mem/panda_string.h"
#include "runtime/mem/allocator_adapter.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "runtime/mem/gc/gc_phase.h"
#include "runtime/mem/gc/gc_root.h"
#include "runtime/mem/gc/gc_scoped_phase.h"
#include "runtime/mem/gc/gc_stats.h"
#include "runtime/mem/gc/gc_types.h"
#include "runtime/mem/refstorage/reference.h"
#include "runtime/mem/gc/bitmap.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/timing.h"

namespace panda {
class BaseClass;
class Class;
class HClass;
class PandaVM;
class Timing;
namespace java {
class JClass;
class JReference;
} // namespace java
namespace mem {
class GlobalObjectStorage;
class ReferenceProcessor;
namespace test {
class ReferenceStorageTest;
class RemSetTest;
} // namespace test
namespace java {
class ReferenceQueue;
class JavaReferenceProcessor;
namespace test {
class ReferenceProcessorBaseTest;
} // namespace test
} // namespace java
} // namespace mem
} // namespace panda

namespace panda::coretypes {
class Array;
class DynClass;
} // namespace panda::coretypes

namespace panda::mem {

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_DEBUG_GC LOG(DEBUG, GC) << this->GetLogPrefix()
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_INFO_GC LOG(INFO, GC) << this->GetLogPrefix()

// forward declarations:
class GCListener;
class HybridObjectAllocator;
class GCScopedPhase;
class GCQueueInterface;
class GCDynamicObjectHelpers;

enum class GCError { GC_ERROR_NO_ROOTS, GC_ERROR_NO_FRAMES, GC_ERROR_LAST = GC_ERROR_NO_FRAMES };

enum ClassRootsVisitFlag : bool {
    ENABLED = true,
    DISABLED = false,
};

enum CardTableVisitFlag : bool {
    VISIT_ENABLED = true,
    VISIT_DISABLED = false,
};

enum class NativeGcTriggerType { INVALID_NATIVE_GC_TRIGGER, NO_NATIVE_GC_TRIGGER, SIMPLE_STRATEGY };
inline NativeGcTriggerType NativeGcTriggerTypeFromString(std::string_view native_gc_trigger_type_str)
{
    if (native_gc_trigger_type_str == "no-native-gc-trigger") {
        return NativeGcTriggerType::NO_NATIVE_GC_TRIGGER;
    }
    if (native_gc_trigger_type_str == "simple-strategy") {
        return NativeGcTriggerType::SIMPLE_STRATEGY;
    }
    return NativeGcTriggerType::INVALID_NATIVE_GC_TRIGGER;
}
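
// Usage sketch (illustrative, not part of this header): mapping an option string,
// e.g. the value of a native-gc-trigger command-line option, to a trigger type;
// any unrecognized string yields INVALID_NATIVE_GC_TRIGGER.
//
//   NativeGcTriggerType trigger = NativeGcTriggerTypeFromString("simple-strategy");
//   ASSERT(trigger == NativeGcTriggerType::SIMPLE_STRATEGY);
//   ASSERT(NativeGcTriggerTypeFromString("bogus") == NativeGcTriggerType::INVALID_NATIVE_GC_TRIGGER);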

class GCListener {
public:
    GCListener() = default;
    NO_COPY_SEMANTIC(GCListener);
    DEFAULT_MOVE_SEMANTIC(GCListener);
    virtual ~GCListener() = 0;
    virtual void GCStarted(size_t heap_size) = 0;
    virtual void GCFinished(const GCTask &task, size_t heap_size_before_gc, size_t heap_size) = 0;
};
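
// Implementation sketch (illustrative): a listener that logs how much the heap
// shrank during a collection; `HeapShrinkLogger` is a hypothetical name, and
// registration via GC::AddListener is shown in the trailing comment.
//
//   class HeapShrinkLogger : public GCListener {
//   public:
//       void GCStarted(size_t heap_size) override
//       {
//           LOG(INFO, GC) << "GC started, heap size = " << heap_size;
//       }
//       void GCFinished([[maybe_unused]] const GCTask &task, size_t heap_size_before_gc, size_t heap_size) override
//       {
//           LOG(INFO, GC) << "GC finished, heap size " << heap_size_before_gc << " -> " << heap_size;
//       }
//   };
//
//   // gc->AddListener(&logger);  // `logger` must outlive the GC's use of it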

struct GCSettings {
    bool is_gc_enable_tracing = false;       /// if true then tracing is enabled
    NativeGcTriggerType native_gc_trigger_type = {
        NativeGcTriggerType::INVALID_NATIVE_GC_TRIGGER};  /// type of native trigger
    bool is_dump_heap = false;               /// dump heap at the beginning and the end of GC
    bool is_concurrency_enabled = true;      /// true if concurrency is enabled
    bool run_gc_in_place = false;            /// true if GC should run in place
    bool pre_gc_heap_verification = false;   /// true if heap verification before GC is enabled
    bool post_gc_heap_verification = false;  /// true if heap verification after GC is enabled
    bool fail_on_heap_verification = false;  /// if true then fail execution if the heap verifier finds corruption
    uint64_t young_space_size = 0;           /// size of young-space for gen-gc
};
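
// Usage sketch (illustrative): settings for a deterministic debug run, with the
// GC executed in place and the heap verified around each collection.
//
//   GCSettings settings;
//   settings.run_gc_in_place = true;
//   settings.is_concurrency_enabled = false;
//   settings.pre_gc_heap_verification = true;
//   settings.post_gc_heap_verification = true;
//   settings.fail_on_heap_verification = true;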

class GCExtensionData;

using UpdateRefInObject = std::function<void(ObjectHeader *)>;
using UpdateRefInAllocator = std::function<void(const UpdateRefInObject &)>;

class GCMarker {
public:
    template <bool reversed_mark = false, bool atomic_mark = true>
    void MarkObjectHeader(ObjectHeader *object) const
    {
        // NOLINTNEXTLINE(readability-braces-around-statements)
        if constexpr (reversed_mark) {  // NOLINT(bugprone-suspicious-semicolon)
            object->SetUnMarkedForGC<atomic_mark>();
            return;
        }
        object->SetMarkedForGC<atomic_mark>();
    }

    template <bool reversed_mark = false, bool atomic_mark = true>
    bool IsObjectHeaderMarked(ObjectHeader *object) const
    {
        // NOLINTNEXTLINE(readability-braces-around-statements)
        if constexpr (reversed_mark) {  // NOLINT(bugprone-suspicious-semicolon)
            return !object->IsMarkedForGC<atomic_mark>();
        }
        return object->IsMarkedForGC<atomic_mark>();
    }

    template <bool reversed_mark = false>
    bool MarkIfNotMarked(ObjectHeader *object) const
    {
        MarkBitmap *bitmap = GetMarkBitMap(object);
        if (bitmap != nullptr) {
            if (bitmap->Test(object)) {
                return false;
            }
            bitmap->Set(object);
            return true;
        }
        if (atomic_mark_flag_) {
            if (IsObjectHeaderMarked<reversed_mark, true>(object)) {
                return false;
            }
            MarkObjectHeader<reversed_mark, true>(object);
        } else {
            if (IsObjectHeaderMarked<reversed_mark, false>(object)) {
                return false;
            }
            MarkObjectHeader<reversed_mark, false>(object);
        }
        return true;
    }

    template <bool reversed_mark = false>
    void Mark(ObjectHeader *object) const
    {
        MarkBitmap *bitmap = GetMarkBitMap(object);
        if (bitmap != nullptr) {
            bitmap->Set(object);
            return;
        }
        if constexpr (reversed_mark) {  // NOLINTNEXTLINE(readability-braces-around-statements)
            if (atomic_mark_flag_) {
                object->SetUnMarkedForGC<true>();
            } else {
                object->SetUnMarkedForGC<false>();
            }
            return;
        }
        if (atomic_mark_flag_) {
            object->SetMarkedForGC<true>();
        } else {
            object->SetMarkedForGC<false>();
        }
    }

    template <bool reversed_mark = false>
    void UnMark(ObjectHeader *object) const
    {
        MarkBitmap *bitmap = GetMarkBitMap(object);
        if (bitmap != nullptr) {
            return;  // no need to unmark objects tracked by a mark bitmap
        }
        if constexpr (reversed_mark) {  // NOLINTNEXTLINE(readability-braces-around-statements)
            if (atomic_mark_flag_) {
                object->SetMarkedForGC<true>();
            } else {
                object->SetMarkedForGC<false>();
            }
            return;
        }
        if (atomic_mark_flag_) {
            object->SetUnMarkedForGC<true>();
        } else {
            object->SetUnMarkedForGC<false>();
        }
    }

    template <bool reversed_mark = false>
    bool IsMarked(const ObjectHeader *object) const
    {
        MarkBitmap *bitmap = GetMarkBitMap(object);
        if (bitmap != nullptr) {
            return bitmap->Test(object);
        }
        bool is_marked = atomic_mark_flag_ ? object->IsMarkedForGC<true>() : object->IsMarkedForGC<false>();
        if constexpr (reversed_mark) {  // NOLINTNEXTLINE(readability-braces-around-statements)
            return !is_marked;
        }
        return is_marked;
    }

    template <bool reversed_mark = false>
    ObjectStatus MarkChecker(const ObjectHeader *object) const
    {
        if constexpr (!reversed_mark) {  // NOLINTNEXTLINE(readability-braces-around-statements)
            // If ClassAddr is not set, object header initialization is still in progress
            if (object->AtomicClassAddr<Class>() == nullptr) {
                return ObjectStatus::ALIVE_OBJECT;
            }
        }
        ObjectStatus object_status =
            IsMarked<reversed_mark>(object) ? ObjectStatus::ALIVE_OBJECT : ObjectStatus::DEAD_OBJECT;
        LOG(DEBUG, GC) << " Mark check for " << std::hex << object << std::dec
                       << " object is alive: " << static_cast<bool>(object_status);
        return object_status;
    }

    MarkBitmap *GetMarkBitMap(const void *object) const
    {
        for (auto bitmap : mark_bitmaps_) {
            if (bitmap->IsAddrInRange(object)) {
                return bitmap;
            }
        }
        return nullptr;
    }

    void ClearMarkBitMaps()
    {
        mark_bitmaps_.clear();
    }

    template <typename It>
    void AddMarkBitMaps(It start, It end)
    {
        mark_bitmaps_.insert(mark_bitmaps_.end(), start, end);
    }

    void SetAtomicMark(bool flag)
    {
        atomic_mark_flag_ = flag;
    }

    bool GetAtomicMark() const
    {
        return atomic_mark_flag_;
    }

private:
    // Bitmaps for object marking
    PandaVector<MarkBitmap *> mark_bitmaps_;
    bool atomic_mark_flag_ = true;
};
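
// Usage sketch (illustrative): the core of a tracing loop marks each reachable
// object exactly once and pushes newly marked objects for further scanning;
// `VisitReference` is a hypothetical helper.
//
//   void VisitReference(GCMarker *marker, PandaStackTL<ObjectHeader *> *objects_stack, ObjectHeader *ref)
//   {
//       if (marker->MarkIfNotMarked(ref)) {  // returns false if `ref` was already marked
//           objects_stack->push(ref);
//       }
//   }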

class NoAtomicGCMarkerScope {
public:
    explicit NoAtomicGCMarkerScope(GCMarker *marker)
    {
        ASSERT(marker != nullptr);
        gc_marker_ = marker;
        old_state_ = gc_marker_->GetAtomicMark();
        if (old_state_) {
            gc_marker_->SetAtomicMark(false);
        }
    }

    NO_COPY_SEMANTIC(NoAtomicGCMarkerScope);
    NO_MOVE_SEMANTIC(NoAtomicGCMarkerScope);

    ~NoAtomicGCMarkerScope()
    {
        if (old_state_) {
            gc_marker_->SetAtomicMark(old_state_);
        }
    }

private:
    GCMarker *gc_marker_;
    bool old_state_ = false;
};
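
// Usage sketch (illustrative, assuming a GCMarker member `marker_` and an
// ObjectHeader pointer `object`): switch to non-atomic marking for a
// single-threaded (e.g. stop-the-world) stretch of code; the previous mode is
// restored automatically when the scope is destroyed.
//
//   {
//       NoAtomicGCMarkerScope no_atomic_scope(&marker_);
//       marker_.Mark(object);  // plain, non-atomic header update inside the scope
//   }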

// Base class for all GCs
class GC {
public:
    explicit GC(ObjectAllocatorBase *object_allocator, const GCSettings &settings);
    NO_COPY_SEMANTIC(GC);
    NO_MOVE_SEMANTIC(GC);
    virtual ~GC() = 0;

    GCType GetType();

    /**
     * \brief Initialize GC
     */
    void Initialize();

    /**
     * \brief Starts GC after initialization
     * Creates worker thread, sets gc_running_ to true
     */
    virtual void StartGC();

    /**
     * \brief Stops GC for runtime destruction
     * Joins GC thread, clears queue
     */
    virtual void StopGC();

    /**
     * Should be used to wait while GC works exclusively
     * Note: for non-MT STW GC it can be used to run the GC
     */
    virtual void WaitForGC(const GCTask &task) = 0;

    /**
     * Should be used to wait for a GC that must be executed in the managed scope
     */
    void WaitForGCInManaged(const GCTask &task) NO_THREAD_SAFETY_ANALYSIS;

    /**
     * Should only be used at the first pygote fork
     */
    void WaitForGCOnPygoteFork(const GCTask &task);

    bool IsOnPygoteFork();

    /**
     * Initialize GC bits on object creation.
     * Required only for GCs with switched bits
     */
    virtual void InitGCBits(panda::ObjectHeader *obj_header) = 0;

    /**
     * Initialize GC bits on object creation for the TLAB allocation.
     */
    virtual void InitGCBitsForAllocationInTLAB(panda::ObjectHeader *obj_header) = 0;

    bool IsTLABsSupported()
    {
        return tlabs_supported_;
    }

    /**
     * Triggers GC
     */
    virtual void Trigger() = 0;

    /**
     * Return true if GC has generations, false otherwise
     */
    bool IsGenerational() const;

    PandaString DumpStatistics()
    {
        return instance_stats_.GetDump(gc_type_);
    }

    void AddListener(GCListener *listener)
    {
        ASSERT(gc_listeners_ptr_ != nullptr);
        gc_listeners_ptr_->push_back(listener);
    }

    GCBarrierSet *GetBarrierSet()
    {
        ASSERT(gc_barrier_set_ != nullptr);
        return gc_barrier_set_;
    }

    // Additional NativeGC
    void NotifyNativeAllocations();

    void RegisterNativeAllocation(size_t bytes);

    void RegisterNativeFree(size_t bytes);

    int32_t GetNotifyNativeInterval()
    {
        return NOTIFY_NATIVE_INTERVAL;
    }

    // Call CheckGCForNative immediately for every NOTIFY_NATIVE_INTERVAL-th allocation
    static constexpr int32_t NOTIFY_NATIVE_INTERVAL = 32;

    // Call CheckGCForNative immediately if the allocation size exceeds the following threshold
    static constexpr size_t CHECK_IMMEDIATELY_THRESHOLD = 300000;

    inline GCPhase GetGCPhase() const
    {
        return phase_;
    }

    inline bool IsGCRunning()
    {
        return gc_running_.load();
    }

    void PreStartup();

    InternalAllocatorPtr GetInternalAllocator() const
    {
        return internal_allocator_;
    }

    /**
     * Enqueue all references in ReferenceQueue. Should be done after GC to avoid deadlock (lock in
     * ReferenceQueue.class)
     */
    void EnqueueReferences();

    /**
     * Process all references which GC found in the marking phase.
     */
    void ProcessReferences(GCPhase gc_phase, const GCTask &task);

    size_t GetNativeBytesRegistered()
    {
        return native_bytes_registered_.load(std::memory_order_relaxed);
    }

    virtual void SetPandaVM(PandaVM *vm);

    PandaVM *GetPandaVm() const
    {
        return vm_;
    }

    virtual void PreZygoteFork()
    {
        JoinWorker();
    }

    virtual void PostZygoteFork()
    {
        CreateWorker();
    }

    void SetCanAddGCTask(bool can_add_task)
    {
        can_add_gc_task_.store(can_add_task, std::memory_order_relaxed);
    }

    void SetGCAtomicFlag(bool atomic_flag)
    {
        marker_.SetAtomicMark(atomic_flag);
    }

    GCExtensionData *GetExtensionData() const
    {
        return extension_data_;
    }

    void SetExtensionData(GCExtensionData *data)
    {
        extension_data_ = data;
    }

    virtual void PostForkCallback() {}

    uint64_t GetLastGCReclaimedBytes();

    /**
     * Check if the object addr is in the GC sweep range
     */
    virtual bool InGCSweepRange([[maybe_unused]] uintptr_t addr) const
    {
        return true;
    }
protected:
    /**
     * \brief Runs all phases
     */
    void RunPhases(const GCTask &task);

    template <LangTypeT LANG_TYPE, bool HAS_VALUE_OBJECT_TYPES>
    void MarkInstance(PandaStackTL<ObjectHeader *> *objects_stack, const ObjectHeader *object, BaseClass *cls);

    /**
     * Add task to the GC queue to be run by the GC thread (or run in place)
     */
    void AddGCTask(bool is_managed, PandaUniquePtr<GCTask> task, bool triggered_by_threshold);

    virtual void InitializeImpl() = 0;
    virtual void PreRunPhasesImpl() = 0;
    virtual void RunPhasesImpl(const GCTask &task) = 0;
    virtual void PreStartupImp() {}

    void BindBitmaps(bool clear_pygote_space_bitmaps);

    inline bool IsTracingEnabled() const
    {
        return gc_settings_.is_gc_enable_tracing;
    }

    inline void BeginTracePoint(const PandaString &trace_point_name) const
    {
        if (IsTracingEnabled()) {
            trace::BeginTracePoint(trace_point_name.c_str());
        }
    }

    inline void EndTracePoint() const
    {
        if (IsTracingEnabled()) {
            trace::EndTracePoint();
        }
    }

    virtual void VisitRoots(const GCRootVisitor &gc_root_visitor, VisitGCRootFlags flags) = 0;
    virtual void VisitClassRoots(const GCRootVisitor &gc_root_visitor) = 0;
    virtual void VisitCardTableRoots(CardTable *card_table, const GCRootVisitor &gc_root_visitor,
                                     const MemRangeChecker &range_checker, const ObjectChecker &range_object_checker,
                                     const ObjectChecker &from_object_checker, uint32_t processed_flag) = 0;

    inline void SetGCPhase(GCPhase gc_phase)
    {
        phase_ = gc_phase;
    }

    inline bool CASGCPhase(GCPhase expected, GCPhase set)
    {
        return phase_.compare_exchange_strong(expected, set);
    }

    GCInstanceStats *GetStats()
    {
        return &instance_stats_;
    }

    inline void SetType(GCType gc_type)
    {
        gc_type_ = gc_type;
    }

    inline void SetTLABsSupported()
    {
        tlabs_supported_ = true;
    }

    void SetGCBarrierSet(GCBarrierSet *barrier_set)
    {
        ASSERT(gc_barrier_set_ == nullptr);
        gc_barrier_set_ = barrier_set;
    }

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set GC bit to 1.
     * @param object_header
     */
    virtual void MarkObject(ObjectHeader *object_header);

    /**
     * Mark object.
     * Note: for some GCs it is not necessary to set GC bit to 1.
     * @param object_header
     * @return true if the object was not marked before
     */
    virtual bool MarkObjectIfNotMarked(ObjectHeader *object_header);

    /**
     * UnMark object
     * @param object_header
     */
    virtual void UnMarkObject(ObjectHeader *object_header);

    /**
     * Check if the object is marked for GC (alive)
     * @param object
     * @return true if object marked for GC
     */
    virtual bool IsMarked(const ObjectHeader *object) const;

    /**
     * Return true if ref is an instance of Reference or its ancestor, false otherwise
     */
    bool IsReference(BaseClass *cls, const ObjectHeader *ref);

    void ProcessReference(PandaStackTL<ObjectHeader *> *objects_stack, BaseClass *cls, const ObjectHeader *object);

    /**
     * Add reference for later processing in the marking phase
     * @param object - object from which we start to mark
     */
    void AddReference(ObjectHeader *object);

    /**
     * Mark all references which were added by the AddReference method
     */
    virtual void MarkReferences(PandaStackTL<ObjectHeader *> *references, GCPhase gc_phase) = 0;

    ObjectAllocatorBase *GetObjectAllocator() const
    {
        return object_allocator_;
    }

    friend class HeapRootVisitor;

    /**
     * Update all refs to moved objects
     */
    virtual void CommonUpdateRefsToMovedObjects(const UpdateRefInAllocator &update_allocator) = 0;

    virtual void UpdateVmRefs() = 0;

    virtual void UpdateGlobalObjectStorage() = 0;

    virtual void UpdateClassLinkerContextRoots() = 0;

    void UpdateRefsInVRegs(ManagedThread *thread);

    void AddToStack(PandaStackTL<ObjectHeader *> *objects_stack, ObjectHeader *object);

    ObjectHeader *PopObjectFromStack(PandaStackTL<ObjectHeader *> *objects_stack);

    Timing *GetTiming()
    {
        return &timing_;
    }

    void SetForwardAddress(ObjectHeader *src, ObjectHeader *dst);

    // A vector is used here because we can add some references during young-gc and get new refs during old-gc;
    // this is possible if we run 2 GCs within one safepoint.
    // Max length of this vector is 2.
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    PandaVector<panda::mem::Reference *> *cleared_references_ GUARDED_BY(cleared_references_lock_) {nullptr};

    os::memory::Mutex *cleared_references_lock_ {nullptr};  // NOLINT(misc-non-private-member-variables-in-classes)

    std::atomic<size_t> gc_counter_ {0};                // NOLINT(misc-non-private-member-variables-in-classes)
    std::atomic<uint64_t> last_gc_reclaimed_bytes {0};  // NOLINT(misc-non-private-member-variables-in-classes)
    // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes)
    std::atomic<GCTaskCause> last_cause_ {GCTaskCause::INVALID_CAUSE};

    GCSettings *GetSettings()
    {
        return &gc_settings_;
    }

    /**
     * @return true if GC can work in concurrent mode
     */
    bool IsConcurrencyAllowed() const
    {
        return gc_settings_.is_concurrency_enabled;
    }

    PandaString GetLogPrefix() const
    {
        PandaOStringStream ss;
        ss << "[" << gc_counter_.load(std::memory_order_acquire) << ", " << GCScopedPhase::GetPhaseAbbr(GetGCPhase())
           << "]: ";
        return ss.str();
    }

    GCMarker marker_;  // NOLINT(misc-non-private-member-variables-in-classes)
    Timing timing_;    // NOLINT(misc-non-private-member-variables-in-classes)

private:
    /**
     * Entrypoint for the GC worker thread
     * @param gc pointer to GC structure
     * @param vm pointer to VM structure
     */
    static void GCWorkerEntry(GC *gc, PandaVM *vm);

    /**
     * Iterate over all reference fields of the object and add all non-null object references to the objects_stack
     * @param objects_stack - stack with objects
     * @param object
     * @param base_cls - class of the object (passed for performance when it has already been obtained)
     */
    template <LangTypeT LANG_TYPE, bool HAS_VALUE_OBJECT_TYPES>
    void HandleObject(PandaStackTL<ObjectHeader *> *objects_stack, const ObjectHeader *object, BaseClass *base_cls);

    /**
     * Iterate over class data and add all found non-null object references to the objects_stack
     * @param objects_stack - stack with objects
     * @param cls - class
     */
    template <LangTypeT LANG_TYPE, bool HAS_VALUE_OBJECT_TYPES, class ClassT>
    void HandleClass(PandaStackTL<ObjectHeader *> *objects_stack, ClassT *cls);

    /**
     * For arrays of objects, add all non-null object references to the objects_stack
     * @param objects_stack - stack with objects
     * @param array_object - array object
     * @param cls - class of the array object (passed for performance)
     */
    template <LangTypeT LANG_TYPE, bool HAS_VALUE_OBJECT_TYPES>
    void HandleArrayClass(PandaStackTL<ObjectHeader *> *objects_stack, const coretypes::Array *array_object,
                          const BaseClass *cls);

    void JoinWorker();
    void CreateWorker();

    /**
     * Move small objects to the pygote space at the first pygote fork
     */
    void MoveObjectsToPygoteSpace();

    size_t GetNativeBytesFromMallinfoAndRegister() const;
    virtual void UpdateThreadLocals() = 0;
    virtual size_t VerifyHeap() = 0;
    NativeGcTriggerType GetNativeGcTriggerType();

    volatile std::atomic<GCPhase> phase_ {GCPhase::GC_PHASE_IDLE};
    GCType gc_type_ {GCType::INVALID_GC};
    GCSettings gc_settings_;
    PandaVector<GCListener *> *gc_listeners_ptr_ {nullptr};
    GCBarrierSet *gc_barrier_set_ {nullptr};
    ObjectAllocatorBase *object_allocator_ {nullptr};
    InternalAllocatorPtr internal_allocator_ {nullptr};
    GCInstanceStats instance_stats_;

    // Additional NativeGC
    std::atomic<size_t> native_bytes_registered_ = 0;
    std::atomic<size_t> native_objects_notified_ = 0;

    ReferenceProcessor *reference_processor_ {nullptr};
    std::atomic_bool allow_soft_reference_processing_ = false;

    GCQueueInterface *gc_queue_ = nullptr;
    std::thread *worker_ = nullptr;
    std::atomic_bool gc_running_ = false;
    std::atomic<bool> can_add_gc_task_ = true;
    bool tlabs_supported_ = false;

    // Additional data for extensions
    GCExtensionData *extension_data_ {nullptr};

    class PostForkGCTask;

    friend class java::ReferenceQueue;
    friend class java::JavaReferenceProcessor;
    friend class java::test::ReferenceProcessorBaseTest;
    friend class panda::mem::test::ReferenceStorageTest;
    friend class panda::mem::test::RemSetTest;
    friend class GCScopedPhase;
    friend class GlobalObjectStorage;
    friend class GCDynamicObjectHelpers;
    friend class GCStaticObjectHelpers;
    void TriggerGCForNative();
    size_t SimpleNativeAllocationGcWatermark();
    /**
     * Waits until the current GC task (if any) is processed
     */
    void WaitForIdleGC() NO_THREAD_SAFETY_ANALYSIS;

    friend class ConcurrentScope;

    PandaVM *vm_ {nullptr};
};

template <MTModeT MTMode>
class AllocConfig<GCType::STW_GC, MTMode> {
public:
    using ObjectAllocatorType = ObjectAllocatorNoGen<MTMode>;
    using CodeAllocatorType = CodeAllocator;
};

template <MTModeT MTMode>
class AllocConfig<GCType::EPSILON_GC, MTMode> {
public:
    using ObjectAllocatorType = ObjectAllocatorNoGen<MTMode>;
    using CodeAllocatorType = CodeAllocator;
};

template <MTModeT MTMode>
class AllocConfig<GCType::GEN_GC, MTMode> {
public:
    using ObjectAllocatorType = ObjectAllocatorGen<MTMode>;
    using CodeAllocatorType = CodeAllocator;
};

template <MTModeT MTMode>
class AllocConfig<GCType::HYBRID_GC, MTMode> {
public:
    using ObjectAllocatorType = HybridObjectAllocator;
    using CodeAllocatorType = CodeAllocator;
};
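
// Usage sketch (illustrative): a GC type / MT mode pair selects its allocator
// types through this config; `GenGCObjectAllocator` is a hypothetical alias.
//
//   template <MTModeT MTMode>
//   using GenGCObjectAllocator = typename AllocConfig<GCType::GEN_GC, MTMode>::ObjectAllocatorType;
//   // GenGCObjectAllocator<MTMode> resolves to ObjectAllocatorGen<MTMode>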

/**
 * \brief Create GC with \param gc_type
 * @param gc_type - type of GC to create
 * @return pointer to the created GC on success, nullptr on failure
 */
template <class LanguageConfig>
GC *CreateGC(GCType gc_type, ObjectAllocatorBase *object_allocator, const GCSettings &settings);
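
// Usage sketch (illustrative; `LangConfig` stands for a concrete LanguageConfig
// and `object_allocator` for an existing allocator):
//
//   GCSettings settings;
//   GC *gc = CreateGC<LangConfig>(GCType::GEN_GC, object_allocator, settings);
//   if (gc == nullptr) {
//       LOG(FATAL, GC) << "Failed to create GC";
//   }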

/**
 * Enable concurrent mode. Should be used only from STW code.
 */
class ConcurrentScope final {
public:
    explicit ConcurrentScope(GC *gc, bool auto_start = true);
    NO_COPY_SEMANTIC(ConcurrentScope);
    NO_MOVE_SEMANTIC(ConcurrentScope);
    ~ConcurrentScope();
    void Start();

private:
    GC *gc_;
    bool started_ = false;
};
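
// Usage sketch (illustrative): allow concurrent execution around a long-running
// step of an otherwise stop-the-world phase; concurrent mode ends when the scope
// is destroyed. `SomeGC` and `ConcurrentMark` are hypothetical.
//
//   void SomeGC::RunPhasesImpl(const GCTask &task)
//   {
//       // ... exclusive (STW) work ...
//       {
//           ConcurrentScope concurrent_scope(this);  // auto_start = true starts concurrency here
//           ConcurrentMark(task);
//       }
//       // ... exclusive (STW) work resumes ...
//   }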

} // namespace panda::mem

#endif // PANDA_RUNTIME_MEM_GC_GC_H_