/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
#define ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_

#include <stdint.h>
#include <list>

#include "base/histogram.h"
#include "base/macros.h"
#include "base/metrics/metrics.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc_root.h"
#include "gc_type.h"
#include "iteration.h"
#include "object_byte_pair.h"
#include "object_callbacks.h"

namespace art HIDDEN {

namespace mirror {
class Class;
class Object;
class Reference;
}  // namespace mirror

namespace gc {

class Heap;

namespace collector {

class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor {
 public:
  class SCOPED_LOCKABLE ScopedPause {
   public:
    explicit ScopedPause(GarbageCollector* collector, bool with_reporting = true)
        EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
    ~ScopedPause() UNLOCK_FUNCTION();

   private:
    const uint64_t start_time_;
    GarbageCollector* const collector_;
    bool with_reporting_;
  };
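  // Usage sketch (illustrative, based on the lock annotations above): collector code creates a
  // ScopedPause on the stack; the constructor acquires the mutator lock exclusively, pausing
  // mutator threads for the lifetime of the scope, and the destructor releases it.
  //
  //   {
  //     ScopedPause pause(this);
  //     // Work that must not race with mutators goes here.
  //   }  // Mutators resume when `pause` is destroyed.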

  GarbageCollector(Heap* heap, const std::string& name);
  virtual ~GarbageCollector() { }
  const char* GetName() const {
    return name_.c_str();
  }
  virtual GcType GetGcType() const = 0;
  virtual CollectorType GetCollectorType() const = 0;
  // Run the garbage collector.
  void Run(GcCause gc_cause, bool clear_soft_references) REQUIRES(!pause_histogram_lock_);
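  // Illustrative call site (a sketch; in practice the Heap drives collections). kGcCauseExplicit
  // is one of the GcCause values declared in gc/gc_cause.h:
  //
  //   collector->Run(kGcCauseExplicit, /*clear_soft_references=*/ false);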
  Heap* GetHeap() const {
    return heap_;
  }
  void RegisterPause(uint64_t nano_length);
  const CumulativeLogger& GetCumulativeTimings() const {
    return cumulative_timings_;
  }
  // Swap the live and mark bitmaps of spaces that are active for the collector. For a partial GC
  // this is the allocation space; for a full GC we also swap the zygote bitmaps.
  void SwapBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  uint64_t GetTotalCpuTime() const {
    return total_thread_cpu_time_ns_;
  }
  uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
  int64_t GetTotalFreedBytes() const {
    return total_freed_bytes_;
  }
  uint64_t GetTotalFreedObjects() const {
    return total_freed_objects_;
  }
  uint64_t GetTotalScannedBytes() const {
    return total_scanned_bytes_;
  }
  // Reset the cumulative timings and pause histogram.
  void ResetMeasurements() REQUIRES(!pause_histogram_lock_);
  // Returns the estimated throughput in bytes / second.
  uint64_t GetEstimatedMeanThroughput() const;
  // Returns how many GC iterations have been run.
  size_t NumberOfIterations() const {
    return GetCumulativeTimings().GetIterations();
  }
  // Returns the current GC iteration and associated info.
  Iteration* GetCurrentIteration();
  const Iteration* GetCurrentIteration() const;
  TimingLogger* GetTimings() {
    return &GetCurrentIteration()->timings_;
  }
  // Record a free of normal objects.
  void RecordFree(const ObjectBytePair& freed);
  // Record a free of large objects.
  void RecordFreeLOS(const ObjectBytePair& freed);
  virtual void DumpPerformanceInfo(std::ostream& os) REQUIRES(!pause_histogram_lock_);

  // Extract RSS for GC-specific memory ranges using mincore().
  uint64_t ExtractRssFromMincore(std::list<std::pair<void*, void*>>* gc_ranges);
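  // Sketch of the general mincore(2) approach (illustrative only; `begin`, `end`, `num_pages`,
  // and `page_size` are placeholder names): the kernel fills one byte per page of the queried
  // range, with the low bit set when that page is resident, so the range's RSS is roughly the
  // number of resident pages times the page size.
  //
  //   std::vector<unsigned char> vec(num_pages);
  //   if (mincore(begin, end - begin, vec.data()) == 0) {
  //     for (unsigned char byte : vec) {
  //       rss += (byte & 1) ? page_size : 0;
  //     }
  //   }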

  // Helper functions for querying if objects are marked. These are used for processing references,
  // and will be used for reading system weaks while the GC is running.
  virtual mirror::Object* IsMarked(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  // Returns true if the given heap reference is null or is already marked. If it's already marked,
  // update the reference (uses a CAS if do_atomic_update is true). Otherwise, returns false.
  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
                                           bool do_atomic_update)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  // Used by reference processor.
  virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  // Force mark an object.
  virtual mirror::Object* MarkObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
                                 bool do_atomic_update)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                      ObjPtr<mirror::Reference> reference)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  bool IsTransactionActive() const {
    return is_transaction_active_;
  }

  bool ShouldEagerlyReleaseMemoryToOS() const;

 protected:
  // Run all of the GC phases.
  virtual void RunPhases() REQUIRES(!Locks::mutator_lock_) = 0;
  // Revoke all the thread-local buffers.
  virtual void RevokeAllThreadLocalBuffers() = 0;

  static constexpr size_t kPauseBucketSize = 500;
  static constexpr size_t kPauseBucketCount = 32;
  static constexpr size_t kMemBucketSize = 10;
  static constexpr size_t kMemBucketCount = 16;

  Heap* const heap_;
  std::string name_;
  // Cumulative statistics.
  Histogram<uint64_t> pause_histogram_ GUARDED_BY(pause_histogram_lock_);
  Histogram<uint64_t> rss_histogram_;
  Histogram<size_t> freed_bytes_histogram_;
  metrics::MetricsBase<int64_t>* gc_time_histogram_;
  metrics::MetricsBase<uint64_t>* metrics_gc_count_;
  metrics::MetricsBase<uint64_t>* metrics_gc_count_delta_;
  metrics::MetricsBase<int64_t>* gc_throughput_histogram_;
  metrics::MetricsBase<int64_t>* gc_tracing_throughput_hist_;
  metrics::MetricsBase<uint64_t>* gc_throughput_avg_;
  metrics::MetricsBase<uint64_t>* gc_tracing_throughput_avg_;
  metrics::MetricsBase<uint64_t>* gc_scanned_bytes_;
  metrics::MetricsBase<uint64_t>* gc_scanned_bytes_delta_;
  metrics::MetricsBase<uint64_t>* gc_freed_bytes_;
  metrics::MetricsBase<uint64_t>* gc_freed_bytes_delta_;
  metrics::MetricsBase<uint64_t>* gc_duration_;
  metrics::MetricsBase<uint64_t>* gc_duration_delta_;
  uint64_t total_thread_cpu_time_ns_;
  uint64_t total_time_ns_;
  uint64_t total_freed_objects_;
  int64_t total_freed_bytes_;
  uint64_t total_scanned_bytes_;
  CumulativeLogger cumulative_timings_;
  mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  bool is_transaction_active_;
  // The garbage collector algorithms will either have all the metrics pointers
  // (above) initialized, or none of them. So instead of checking each time, we
  // use this flag.
  bool are_metrics_initialized_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(GarbageCollector);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_