/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

#include "garbage_collector.h"

#include "android-base/stringprintf.h"

#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG_IS_ON.
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread_list.h"

namespace art HIDDEN {
namespace gc {
namespace collector {

namespace {

// Report a GC metric via the ATrace interface.
void TraceGCMetric(const char* name, int64_t value) {
  // ART's interface with systrace (through libartpalette) only supports
  // reporting 32-bit (signed) integer values at the moment. Upon
  // underflows/overflows, clamp metric values at `int32_t` min/max limits and
  // report these events via a corresponding underflow/overflow counter; also
  // log a warning about the first underflow/overflow occurrence.
  //
  // TODO(b/300015145): Consider extending libartpalette to allow reporting this
  // value as a 64-bit (signed) integer (instead of a 32-bit (signed) integer).
  // Note that this is likely unnecessary at the moment (November 2023) for any
  // size-related GC metric, given the maximum theoretical size of a managed
  // heap (4 GiB).
  if (UNLIKELY(value < std::numeric_limits<int32_t>::min())) {
    ATraceIntegerValue(name, std::numeric_limits<int32_t>::min());
    std::string underflow_counter_name = std::string(name) + " int32_t underflow";
    ATraceIntegerValue(underflow_counter_name.c_str(), 1);
    static bool int32_underflow_reported = false;
    if (!int32_underflow_reported) {
      LOG(WARNING) << "GC Metric \"" << name << "\" with value " << value
                   << " causing a 32-bit integer underflow";
      int32_underflow_reported = true;
    }
    return;
  }
  if (UNLIKELY(value > std::numeric_limits<int32_t>::max())) {
    ATraceIntegerValue(name, std::numeric_limits<int32_t>::max());
    std::string overflow_counter_name = std::string(name) + " int32_t overflow";
    ATraceIntegerValue(overflow_counter_name.c_str(), 1);
    static bool int32_overflow_reported = false;
    if (!int32_overflow_reported) {
      LOG(WARNING) << "GC Metric \"" << name << "\" with value " << value
                   << " causing a 32-bit integer overflow";
      int32_overflow_reported = true;
    }
    return;
  }
  ATraceIntegerValue(name, value);
}

}  // namespace

Iteration::Iteration()
    : duration_ns_(0), timings_("GC iteration timing logger", true, VLOG_IS_ON(heap)) {
  Reset(kGcCauseBackground, false);  // Reset to some placeholder values.
}

void Iteration::Reset(GcCause gc_cause, bool clear_soft_references) {
  timings_.Reset();
  pause_times_.clear();
  duration_ns_ = 0;
  app_slow_path_duration_ms_ = 0;
  bytes_scanned_ = 0;
  clear_soft_references_ = clear_soft_references;
  gc_cause_ = gc_cause;
  freed_ = ObjectBytePair();
  freed_los_ = ObjectBytePair();
  freed_bytes_revoke_ = 0;
}

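// Estimated throughput of this iteration, in freed bytes per second (based on freed_,
// i.e. excluding the large object space).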
uint64_t Iteration::GetEstimatedThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (static_cast<uint64_t>(freed_.bytes) * 1000) / (NsToMs(GetDurationNs()) + 1);
}

GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      rss_histogram_((name_ + " peak-rss").c_str(), kMemBucketSize, kMemBucketCount),
      freed_bytes_histogram_((name_ + " freed-bytes").c_str(), kMemBucketSize, kMemBucketCount),
      gc_time_histogram_(nullptr),
      metrics_gc_count_(nullptr),
      metrics_gc_count_delta_(nullptr),
      gc_throughput_histogram_(nullptr),
      gc_tracing_throughput_hist_(nullptr),
      gc_throughput_avg_(nullptr),
      gc_tracing_throughput_avg_(nullptr),
      gc_scanned_bytes_(nullptr),
      gc_scanned_bytes_delta_(nullptr),
      gc_freed_bytes_(nullptr),
      gc_freed_bytes_delta_(nullptr),
      gc_duration_(nullptr),
      gc_duration_delta_(nullptr),
      cumulative_timings_(name),
      pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true),
      is_transaction_active_(false),
      are_metrics_initialized_(false) {
  ResetMeasurements();
}

void GarbageCollector::RegisterPause(uint64_t nano_length) {
  GetCurrentIteration()->pause_times_.push_back(nano_length);
}

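// Computes the resident set size (RSS), in bytes, of the given list of address ranges by
// querying the kernel with mincore(), and records it (in KB) in rss_histogram_. Returns 0
// when the list is empty or on non-Linux platforms.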
uint64_t GarbageCollector::ExtractRssFromMincore(
    std::list<std::pair<void*, void*>>* gc_ranges) {
  uint64_t rss = 0;
  if (gc_ranges->empty()) {
    return 0;
  }
  // mincore() is a Linux-specific syscall.
#if defined(__linux__)
  using range_t = std::pair<void*, void*>;
  // Sort gc_ranges.
  gc_ranges->sort([](const range_t& a, const range_t& b) {
    return std::less()(a.first, b.first);
  });
  // Merge gc_ranges. This is necessary because the kernel may merge contiguous
  // regions if their properties match. It is sufficient as the kernel doesn't
  // merge adjoining ranges which differ only in name.
  size_t vec_len = 0;
  for (auto it = gc_ranges->begin(); it != gc_ranges->end(); it++) {
    auto next_it = it;
    next_it++;
    while (next_it != gc_ranges->end()) {
      if (it->second == next_it->first) {
        it->second = next_it->second;
        next_it = gc_ranges->erase(next_it);
      } else {
        break;
      }
    }
    size_t length = static_cast<uint8_t*>(it->second) - static_cast<uint8_t*>(it->first);
    // Compute max length for vector allocation later.
    vec_len = std::max(vec_len, DivideByPageSize(length));
  }
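  // mincore() fills in one byte per page of the queried range, so size the vector for the
  // largest merged range.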
  std::unique_ptr<unsigned char[]> vec(new unsigned char[vec_len]);
  for (const auto it : *gc_ranges) {
    size_t length = static_cast<uint8_t*>(it.second) - static_cast<uint8_t*>(it.first);
    if (mincore(it.first, length, vec.get()) == 0) {
      for (size_t i = 0; i < DivideByPageSize(length); i++) {
        // Least significant bit represents residency of a page. Other bits are
        // reserved.
        rss += vec[i] & 0x1;
      }
    } else {
      LOG(WARNING) << "Call to mincore() on memory range [0x" << std::hex << it.first
                   << ", 0x" << it.second << std::dec << ") failed: " << strerror(errno);
    }
  }
  rss *= gPageSize;
  rss_histogram_.AddValue(rss / KB);
#endif
  return rss;
}

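// Top-level driver for a single GC: resets the current iteration, runs the collector-specific
// phases (RunPhases), and then updates cumulative statistics, runtime metrics and ATrace
// counters.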
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
  ScopedTrace trace(android::base::StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()));
  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();
  uint64_t start_time = NanoTime();
  uint64_t thread_cpu_start_time = ThreadCpuNanoTime();
  GetHeap()->CalculatePreGcWeightedAllocatedBytes();
  Iteration* current_iteration = GetCurrentIteration();
  current_iteration->Reset(gc_cause, clear_soft_references);
  // Note that transaction mode is single-threaded with no asynchronous GC, so this flag doesn't
  // change in the middle of a GC.
  is_transaction_active_ = runtime->IsActiveTransaction();
  RunPhases();  // Run all the GC phases.
  GetHeap()->CalculatePostGcWeightedAllocatedBytes();
  // Add the current timings to the cumulative timings.
  cumulative_timings_.AddLogger(*GetTimings());
  // Update cumulative statistics with how many bytes the GC iteration freed.
  total_freed_objects_ += current_iteration->GetFreedObjects() +
      current_iteration->GetFreedLargeObjects();
  total_scanned_bytes_ += current_iteration->GetScannedBytes();
  int64_t freed_bytes = current_iteration->GetFreedBytes() +
      current_iteration->GetFreedLargeObjectBytes();
  total_freed_bytes_ += freed_bytes;
  // Rounding negative freed bytes to 0 as we are not interested in such corner cases.
  freed_bytes_histogram_.AddValue(std::max<int64_t>(freed_bytes / KB, 0));
  uint64_t end_time = NanoTime();
  uint64_t thread_cpu_end_time = ThreadCpuNanoTime();
  total_thread_cpu_time_ns_ += thread_cpu_end_time - thread_cpu_start_time;
  uint64_t duration_ns = end_time - start_time;
  current_iteration->SetDurationNs(duration_ns);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The entire GC was paused; clear any fake pauses which might be in the pause times and
    // register the whole GC duration as a single pause.
    current_iteration->pause_times_.clear();
    RegisterPause(duration_ns);
  }
  total_time_ns_ += duration_ns;
  uint64_t total_pause_time_ns = 0;
  for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
    MutexLock mu(self, pause_histogram_lock_);
    pause_histogram_.AdjustAndAddValue(pause_time);
    total_pause_time_ns += pause_time;
  }
  metrics::ArtMetrics* metrics = runtime->GetMetrics();
  // Report STW pause time in microseconds.
  const uint64_t total_pause_time_us = total_pause_time_ns / 1'000;
  metrics->WorldStopTimeDuringGCAvg()->Add(total_pause_time_us);
  metrics->GcWorldStopTime()->Add(total_pause_time_us);
  metrics->GcWorldStopTimeDelta()->Add(total_pause_time_us);
  metrics->GcWorldStopCount()->AddOne();
  metrics->GcWorldStopCountDelta()->AddOne();
  // Report total collection time of all GCs put together.
  metrics->TotalGcCollectionTime()->Add(NsToMs(duration_ns));
  metrics->TotalGcCollectionTimeDelta()->Add(NsToMs(duration_ns));
  if (are_metrics_initialized_) {
    metrics_gc_count_->Add(1);
    metrics_gc_count_delta_->Add(1);
    // Report GC time in milliseconds.
    gc_time_histogram_->Add(NsToMs(duration_ns));
    // Throughput in bytes/s. Add 1us to prevent possible division by 0.
    uint64_t throughput = (current_iteration->GetScannedBytes() * 1'000'000)
            / (NsToUs(duration_ns) + 1);
    // Report in MB/s.
    throughput /= MB;
    gc_tracing_throughput_hist_->Add(throughput);
    gc_tracing_throughput_avg_->Add(throughput);

    // Report GC throughput in MB/s.
    throughput = current_iteration->GetEstimatedThroughput() / MB;
    gc_throughput_histogram_->Add(throughput);
    gc_throughput_avg_->Add(throughput);

    gc_scanned_bytes_->Add(current_iteration->GetScannedBytes());
    gc_scanned_bytes_delta_->Add(current_iteration->GetScannedBytes());
    gc_freed_bytes_->Add(current_iteration->GetFreedBytes());
    gc_freed_bytes_delta_->Add(current_iteration->GetFreedBytes());
    gc_duration_->Add(NsToMs(current_iteration->GetDurationNs()));
    gc_duration_delta_->Add(NsToMs(current_iteration->GetDurationNs()));
    gc_app_slow_path_during_gc_duration_delta_->Add(current_iteration->GetAppSlowPathDurationMs());
  }

  // Report some metrics via the ATrace interface, to surface them in Perfetto.
  TraceGCMetric("freed_normal_object_bytes", current_iteration->GetFreedBytes());
  TraceGCMetric("freed_large_object_bytes", current_iteration->GetFreedLargeObjectBytes());
  TraceGCMetric("freed_bytes", freed_bytes);

  is_transaction_active_ = false;
}

void GarbageCollector::SwapBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
  // bits of dead objects in the live bitmap.
  const GcType gc_type = GetGcType();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We never allocate into zygote spaces.
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
        (gc_type == kGcTypeFull &&
         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
      if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->SwapBitmaps();
      }
    }
  }
  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
    disc_space->AsLargeObjectSpace()->SwapBitmaps();
  }
}

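// Sweeps objects recorded in the given allocation stack. Unmarked objects belonging to the given
// spaces (and to the large object space) are freed, batching frees through a chunked buffer; the
// allocation stack is reset afterwards.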
void GarbageCollector::SweepArray(accounting::ObjectStack* allocations,
                                  bool swap_bitmaps,
                                  std::vector<space::ContinuousSpace*>* sweep_spaces) {
  Thread* self = Thread::Current();
  static constexpr size_t kSweepArrayChunkFreeSize = 1024;
  mirror::Object** chunk_free_buffer = new mirror::Object*[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array, modified after each space is swept.
  StackReference<mirror::Object>* objects = allocations->Begin();
  size_t count = allocations->Size();
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : *sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    StackReference<mirror::Object>* out = objects;
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      if (obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space; remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          // Handle the case where buffer allocation failed.
          if (LIKELY(chunk_free_buffer != nullptr)) {
            if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
              TimingLogger::ScopedTiming t2("FreeList", GetTimings());
              freed.objects += chunk_free_pos;
              freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
              chunk_free_pos = 0;
            }
            chunk_free_buffer[chunk_free_pos++] = obj;
          } else {
            freed.objects++;
            freed.bytes += alloc_space->Free(self, obj);
          }
        }
      } else {
        (out++)->Assign(obj);
      }
    }
    if (chunk_free_pos > 0) {
      TimingLogger::ScopedTiming t2("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  if (chunk_free_buffer != nullptr) {
    delete[] chunk_free_buffer;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  if (large_object_space != nullptr) {
    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(large_live_objects, large_mark_objects);
    }
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      // Handle large objects.
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (!large_mark_objects->Test(obj)) {
        ++freed_los.objects;
        freed_los.bytes += large_object_space->Free(self, obj);
      }
    }
  }
  {
    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t2.NewTiming("ResetStack");
    allocations->Reset();
  }
}

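// Mean throughput across all GC iterations since the last ResetMeasurements(), in total freed
// bytes (including large objects) per second.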
uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
}

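// Clears the cumulative statistics (pause, RSS and freed-bytes histograms, timings and totals)
// collected so far.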
void GarbageCollector::ResetMeasurements() {
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    pause_histogram_.Reset();
  }
  cumulative_timings_.Reset();
  rss_histogram_.Reset();
  freed_bytes_histogram_.Reset();
  total_thread_cpu_time_ns_ = 0u;
  total_time_ns_ = 0u;
  total_freed_objects_ = 0u;
  total_freed_bytes_ = 0;
  total_scanned_bytes_ = 0u;
}

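// Suspends all mutator threads for the lifetime of this object, optionally notifying the heap's
// GcPauseListener at the start and end of the pause; the pause duration is registered with the
// collector on destruction.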
GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector, bool with_reporting)
    : start_time_(NanoTime()), collector_(collector), with_reporting_(with_reporting) {
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
  if (with_reporting) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->StartPause();
    }
  }
}

GarbageCollector::ScopedPause::~ScopedPause() {
  collector_->RegisterPause(NanoTime() - start_time_);
  Runtime* runtime = Runtime::Current();
  if (with_reporting_) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->EndPause();
    }
  }
  runtime->GetThreadList()->ResumeAll();
}

// Returns the current GC iteration and associated info.
Iteration* GarbageCollector::GetCurrentIteration() {
  return heap_->GetCurrentGcIteration();
}
const Iteration* GarbageCollector::GetCurrentIteration() const {
  return heap_->GetCurrentGcIteration();
}

bool GarbageCollector::ShouldEagerlyReleaseMemoryToOS() const {
  // We have seen old kernels and custom kernel features misbehave in the
  // presence of too much usage of MADV_FREE. So only release memory eagerly
  // on platforms we know do not have the bug.
  static const bool gEnableLazyRelease = !kIsTargetBuild || IsKernelVersionAtLeast(6, 0);
  if (!gEnableLazyRelease) {
    return true;
  }
  Runtime* runtime = Runtime::Current();
  // Zygote isn't a memory-heavy process, so we should always instantly release memory to the OS.
  if (runtime->IsZygote()) {
    return true;
  }
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit &&
      !runtime->IsEagerlyReleaseExplicitGcDisabled()) {
    // Our behavior with explicit GCs is to always release any available memory.
    return true;
  }
  // Hold on to the memory if the app is in the foreground. If it is in the background or
  // goes into the background (see invocation with cause kGcCauseCollectorTransition),
  // release the memory.
  return !runtime->InJankPerceptibleProcessState();
}

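// Adds the given (objects, bytes) pair freed in regular spaces to the current iteration's totals
// and forwards it to the heap's bookkeeping; RecordFreeLOS does the same for the large object
// space.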
void GarbageCollector::RecordFree(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}
void GarbageCollector::RecordFreeLOS(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_los_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}

uint64_t GarbageCollector::GetTotalPausedTimeNs() {
  MutexLock mu(Thread::Current(), pause_histogram_lock_);
  return pause_histogram_.AdjustedSum();
}

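// Dumps cumulative timing, pause, RSS and freed-bytes statistics for this collector, along with
// derived throughput figures, to the given stream.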
void GarbageCollector::DumpPerformanceInfo(std::ostream& os) {
  const CumulativeLogger& logger = GetCumulativeTimings();
  const size_t iterations = logger.GetIterations();
  if (iterations == 0) {
    return;
  }
  os << Dumpable<CumulativeLogger>(logger);
  const uint64_t total_ns = logger.GetTotalNs();
  const double seconds = NsToMs(total_ns) / 1000.0;
  const uint64_t freed_bytes = GetTotalFreedBytes();
  const uint64_t scanned_bytes = GetTotalScannedBytes();
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    if (pause_histogram_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData cumulative_data;
      pause_histogram_.CreateHistogram(&cumulative_data);
      pause_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
    }
  }
#if defined(__linux__)
  if (rss_histogram_.SampleSize() > 0) {
    os << rss_histogram_.Name()
       << ": Avg: " << PrettySize(rss_histogram_.Mean() * KB)
       << " Max: " << PrettySize(rss_histogram_.Max() * KB)
       << " Min: " << PrettySize(rss_histogram_.Min() * KB) << "\n";
    os << "Peak-rss Histogram: ";
    rss_histogram_.DumpBins(os);
    os << "\n";
  }
#endif
  if (freed_bytes_histogram_.SampleSize() > 0) {
    os << freed_bytes_histogram_.Name()
       << ": Avg: " << PrettySize(freed_bytes_histogram_.Mean() * KB)
       << " Max: " << PrettySize(freed_bytes_histogram_.Max() * KB)
       << " Min: " << PrettySize(freed_bytes_histogram_.Min() * KB) << "\n";
    os << "Freed-bytes histogram: ";
    freed_bytes_histogram_.DumpBins(os);
    os << "\n";
  }
  const double cpu_seconds = NsToMs(GetTotalCpuTime()) / 1000.0;
  os << GetName() << " total time: " << PrettyDuration(total_ns)
     << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
     << GetName() << " freed: " << PrettySize(freed_bytes) << "\n"
     << GetName() << " throughput: " << PrettySize(freed_bytes / seconds) << "/s"
     << "  per cpu-time: "
     << static_cast<uint64_t>(freed_bytes / cpu_seconds) << "/s / "
     << PrettySize(freed_bytes / cpu_seconds) << "/s\n"
     << GetName() << " tracing throughput: "
     << PrettySize(scanned_bytes / seconds) << "/s "
     << " per cpu-time: "
     << PrettySize(scanned_bytes / cpu_seconds) << "/s\n";
}

}  // namespace collector
}  // namespace gc
}  // namespace art