/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <stdio.h>
#include <string.h>  // For strerror.
#include <sys/mman.h>
#include <unistd.h>

#include "garbage_collector.h"

#include "android-base/stringprintf.h"

#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG_IS_ON.
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

Iteration::Iteration()
    : duration_ns_(0), timings_("GC iteration timing logger", true, VLOG_IS_ON(heap)) {
  Reset(kGcCauseBackground, false);  // Reset to some placeholder values.
}

void Iteration::Reset(GcCause gc_cause, bool clear_soft_references) {
  timings_.Reset();
  pause_times_.clear();
  duration_ns_ = 0;
  bytes_scanned_ = 0;
  clear_soft_references_ = clear_soft_references;
  gc_cause_ = gc_cause;
  freed_ = ObjectBytePair();
  freed_los_ = ObjectBytePair();
  freed_bytes_revoke_ = 0;
}

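// Estimated throughput of this GC iteration in bytes/s: bytes freed divided by the
// iteration's duration.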
uint64_t Iteration::GetEstimatedThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (static_cast<uint64_t>(freed_.bytes) * 1000) / (NsToMs(GetDurationNs()) + 1);
}

GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      rss_histogram_((name_ + " peak-rss").c_str(), kMemBucketSize, kMemBucketCount),
      freed_bytes_histogram_((name_ + " freed-bytes").c_str(), kMemBucketSize, kMemBucketCount),
      gc_time_histogram_(nullptr),
      metrics_gc_count_(nullptr),
      metrics_gc_count_delta_(nullptr),
      gc_throughput_histogram_(nullptr),
      gc_tracing_throughput_hist_(nullptr),
      gc_throughput_avg_(nullptr),
      gc_tracing_throughput_avg_(nullptr),
      gc_scanned_bytes_(nullptr),
      gc_scanned_bytes_delta_(nullptr),
      gc_freed_bytes_(nullptr),
      gc_freed_bytes_delta_(nullptr),
      gc_duration_(nullptr),
      gc_duration_delta_(nullptr),
      cumulative_timings_(name),
      pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true),
      is_transaction_active_(false),
      are_metrics_initialized_(false) {
  ResetCumulativeStatistics();
}

void GarbageCollector::RegisterPause(uint64_t nano_length) {
  GetCurrentIteration()->pause_times_.push_back(nano_length);
}

void GarbageCollector::ResetCumulativeStatistics() {
  cumulative_timings_.Reset();
  total_thread_cpu_time_ns_ = 0u;
  total_time_ns_ = 0u;
  total_freed_objects_ = 0u;
  total_freed_bytes_ = 0;
  total_scanned_bytes_ = 0;
  rss_histogram_.Reset();
  freed_bytes_histogram_.Reset();
  MutexLock mu(Thread::Current(), pause_histogram_lock_);
  pause_histogram_.Reset();
}

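// Computes the resident set size (RSS), in bytes, of the given address ranges by
// querying per-page residency with mincore(2). Returns 0 on non-Linux systems.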
uint64_t GarbageCollector::ExtractRssFromMincore(
    std::list<std::pair<void*, void*>>* gc_ranges) {
  uint64_t rss = 0;
  if (gc_ranges->empty()) {
    return 0;
  }
  // mincore() is a Linux-specific syscall.
#if defined(__linux__)
  using range_t = std::pair<void*, void*>;
  // Sort gc_ranges by start address.
  gc_ranges->sort([](const range_t& a, const range_t& b) {
    return std::less()(a.first, b.first);
  });
  // Merge gc_ranges. This is necessary because the kernel may merge contiguous
  // regions if their properties match. Merging only adjoining ranges is
  // sufficient, as the kernel doesn't merge adjoining ranges that differ only
  // in name.
  size_t vec_len = 0;
  for (auto it = gc_ranges->begin(); it != gc_ranges->end(); it++) {
    auto next_it = it;
    next_it++;
    while (next_it != gc_ranges->end()) {
      if (it->second == next_it->first) {
        it->second = next_it->second;
        next_it = gc_ranges->erase(next_it);
      } else {
        break;
      }
    }
    size_t length = static_cast<uint8_t*>(it->second) - static_cast<uint8_t*>(it->first);
    // Compute max length for vector allocation later.
    vec_len = std::max(vec_len, length / kPageSize);
  }
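  // mincore() writes one status byte per page, so the buffer must be large enough
  // for the longest merged range.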
  std::unique_ptr<unsigned char[]> vec(new unsigned char[vec_len]);
  for (const auto it : *gc_ranges) {
    size_t length = static_cast<uint8_t*>(it.second) - static_cast<uint8_t*>(it.first);
    if (mincore(it.first, length, vec.get()) == 0) {
      for (size_t i = 0; i < length / kPageSize; i++) {
        // Least significant bit represents residency of a page. Other bits are
        // reserved.
        rss += vec[i] & 0x1;
      }
    } else {
      LOG(WARNING) << "Call to mincore() on memory range [0x" << std::hex << it.first
                   << ", 0x" << it.second << std::dec << ") failed: " << strerror(errno);
    }
  }
  rss *= kPageSize;
  rss_histogram_.AddValue(rss / KB);
#endif
  return rss;
}

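// Runs one full garbage collection: executes the collector's phases and records
// timing, pause, throughput, and freed-memory statistics and metrics.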
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
  ScopedTrace trace(android::base::StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()));
  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();
  uint64_t start_time = NanoTime();
  uint64_t thread_cpu_start_time = ThreadCpuNanoTime();
  GetHeap()->CalculatePreGcWeightedAllocatedBytes();
  Iteration* current_iteration = GetCurrentIteration();
  current_iteration->Reset(gc_cause, clear_soft_references);
  // Note: transaction mode is single-threaded and there's no asynchronous GC, so this flag
  // can't change in the middle of a GC.
  is_transaction_active_ = runtime->IsActiveTransaction();
  RunPhases();  // Run all the GC phases.
  GetHeap()->CalculatePostGcWeightedAllocatedBytes();
  // Add the current timings to the cumulative timings.
  cumulative_timings_.AddLogger(*GetTimings());
  // Update cumulative statistics with how many objects and bytes the GC iteration freed.
  total_freed_objects_ += current_iteration->GetFreedObjects() +
      current_iteration->GetFreedLargeObjects();
  total_scanned_bytes_ += current_iteration->GetScannedBytes();
  int64_t freed_bytes = current_iteration->GetFreedBytes() +
      current_iteration->GetFreedLargeObjectBytes();
  total_freed_bytes_ += freed_bytes;
  // Round negative freed-byte counts up to 0, as we are not interested in such corner cases.
  freed_bytes_histogram_.AddValue(std::max<int64_t>(freed_bytes / KB, 0));
  uint64_t end_time = NanoTime();
  uint64_t thread_cpu_end_time = ThreadCpuNanoTime();
  total_thread_cpu_time_ns_ += thread_cpu_end_time - thread_cpu_start_time;
  uint64_t duration_ns = end_time - start_time;
  current_iteration->SetDurationNs(duration_ns);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The entire GC was paused: clear any fake pauses that might be in the pause times and add
    // the whole GC duration instead.
    current_iteration->pause_times_.clear();
    RegisterPause(duration_ns);
  }
  total_time_ns_ += duration_ns;
  uint64_t total_pause_time_ns = 0;
  for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
    MutexLock mu(self, pause_histogram_lock_);
    pause_histogram_.AdjustAndAddValue(pause_time);
    total_pause_time_ns += pause_time;
  }
  metrics::ArtMetrics* metrics = runtime->GetMetrics();
  // Report STW pause time in microseconds.
  const uint64_t total_pause_time_us = total_pause_time_ns / 1'000;
  metrics->WorldStopTimeDuringGCAvg()->Add(total_pause_time_us);
  metrics->GcWorldStopTime()->Add(total_pause_time_us);
  metrics->GcWorldStopTimeDelta()->Add(total_pause_time_us);
  metrics->GcWorldStopCount()->AddOne();
  metrics->GcWorldStopCountDelta()->AddOne();
  // Report total collection time of all GCs put together.
  metrics->TotalGcCollectionTime()->Add(NsToMs(duration_ns));
  metrics->TotalGcCollectionTimeDelta()->Add(NsToMs(duration_ns));
  if (are_metrics_initialized_) {
    metrics_gc_count_->Add(1);
    metrics_gc_count_delta_->Add(1);
    // Report GC time in milliseconds.
    gc_time_histogram_->Add(NsToMs(duration_ns));
    // Throughput in bytes/s. Add 1us to prevent possible division by 0.
    uint64_t throughput = (current_iteration->GetScannedBytes() * 1'000'000)
        / (NsToUs(duration_ns) + 1);
    // Report in MB/s.
    throughput /= MB;
    gc_tracing_throughput_hist_->Add(throughput);
    gc_tracing_throughput_avg_->Add(throughput);

    // Report GC throughput in MB/s.
    throughput = current_iteration->GetEstimatedThroughput() / MB;
    gc_throughput_histogram_->Add(throughput);
    gc_throughput_avg_->Add(throughput);

    gc_scanned_bytes_->Add(current_iteration->GetScannedBytes());
    gc_scanned_bytes_delta_->Add(current_iteration->GetScannedBytes());
    gc_freed_bytes_->Add(current_iteration->GetFreedBytes());
    gc_freed_bytes_delta_->Add(current_iteration->GetFreedBytes());
    gc_duration_->Add(NsToMs(current_iteration->GetDurationNs()));
    gc_duration_delta_->Add(NsToMs(current_iteration->GetDurationNs()));
  }
  is_transaction_active_ = false;
}

void GarbageCollector::SwapBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the
  // live bits of dead objects in the live bitmap.
  const GcType gc_type = GetGcType();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We never allocate into zygote spaces.
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
        (gc_type == kGcTypeFull &&
         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
      if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->SwapBitmaps();
      }
    }
  }
  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
    disc_space->AsLargeObjectSpace()->SwapBitmaps();
  }
}

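// Mean throughput in bytes/s, averaged over all iterations since the last reset.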
uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
}

void GarbageCollector::ResetMeasurements() {
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    pause_histogram_.Reset();
  }
  cumulative_timings_.Reset();
  rss_histogram_.Reset();
  freed_bytes_histogram_.Reset();
  total_thread_cpu_time_ns_ = 0u;
  total_time_ns_ = 0u;
  total_freed_objects_ = 0u;
  total_freed_bytes_ = 0;
  total_scanned_bytes_ = 0u;
}

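// Suspends all mutator threads for the lifetime of this object. If with_reporting
// is set, also notifies the heap's GcPauseListener (if any) of the pause boundaries.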
GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector, bool with_reporting)
    : start_time_(NanoTime()), collector_(collector), with_reporting_(with_reporting) {
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
  if (with_reporting) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->StartPause();
    }
  }
}

GarbageCollector::ScopedPause::~ScopedPause() {
  collector_->RegisterPause(NanoTime() - start_time_);
  Runtime* runtime = Runtime::Current();
  if (with_reporting_) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->EndPause();
    }
  }
  runtime->GetThreadList()->ResumeAll();
}

// Returns the current GC iteration and associated info.
Iteration* GarbageCollector::GetCurrentIteration() {
  return heap_->GetCurrentGcIteration();
}
const Iteration* GarbageCollector::GetCurrentIteration() const {
  return heap_->GetCurrentGcIteration();
}

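// Record objects and bytes freed by the GC, for the regular and large-object spaces
// respectively, and propagate the counts to the heap.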
void GarbageCollector::RecordFree(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}
void GarbageCollector::RecordFreeLOS(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_los_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}

uint64_t GarbageCollector::GetTotalPausedTimeNs() {
  MutexLock mu(Thread::Current(), pause_histogram_lock_);
  return pause_histogram_.AdjustedSum();
}

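// Dumps cumulative timing, pause, RSS, and throughput statistics for this collector.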
void GarbageCollector::DumpPerformanceInfo(std::ostream& os) {
  const CumulativeLogger& logger = GetCumulativeTimings();
  const size_t iterations = logger.GetIterations();
  if (iterations == 0) {
    return;
  }
  os << Dumpable<CumulativeLogger>(logger);
  const uint64_t total_ns = logger.GetTotalNs();
  const double seconds = NsToMs(total_ns) / 1000.0;
  const uint64_t freed_bytes = GetTotalFreedBytes();
  const uint64_t freed_objects = GetTotalFreedObjects();
  const uint64_t scanned_bytes = GetTotalScannedBytes();
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    if (pause_histogram_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData cumulative_data;
      pause_histogram_.CreateHistogram(&cumulative_data);
      pause_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
    }
  }
#if defined(__linux__)
  if (rss_histogram_.SampleSize() > 0) {
    os << rss_histogram_.Name()
       << ": Avg: " << PrettySize(rss_histogram_.Mean() * KB)
       << " Max: " << PrettySize(rss_histogram_.Max() * KB)
       << " Min: " << PrettySize(rss_histogram_.Min() * KB) << "\n";
    os << "Peak-rss Histogram: ";
    rss_histogram_.DumpBins(os);
    os << "\n";
  }
#endif
  if (freed_bytes_histogram_.SampleSize() > 0) {
    os << freed_bytes_histogram_.Name()
       << ": Avg: " << PrettySize(freed_bytes_histogram_.Mean() * KB)
       << " Max: " << PrettySize(freed_bytes_histogram_.Max() * KB)
       << " Min: " << PrettySize(freed_bytes_histogram_.Min() * KB) << "\n";
    os << "Freed-bytes histogram: ";
    freed_bytes_histogram_.DumpBins(os);
    os << "\n";
  }
  const double cpu_seconds = NsToMs(GetTotalCpuTime()) / 1000.0;
  os << GetName() << " total time: " << PrettyDuration(total_ns)
     << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
     << GetName() << " freed: " << freed_objects
     << " objects with total size " << PrettySize(freed_bytes) << "\n"
     << GetName() << " throughput: " << freed_objects / seconds << "/s / "
     << PrettySize(freed_bytes / seconds) << "/s"
     << " per cpu-time: "
     << static_cast<uint64_t>(freed_bytes / cpu_seconds) << "/s / "
     << PrettySize(freed_bytes / cpu_seconds) << "/s\n"
     << GetName() << " tracing throughput: "
     << PrettySize(scanned_bytes / seconds) << "/s "
     << " per cpu-time: "
     << PrettySize(scanned_bytes / cpu_seconds) << "/s\n";
}

}  // namespace collector
}  // namespace gc
}  // namespace art