• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/metrics/histogram_samples.h"
6 
7 #include <limits>
8 #include <string_view>
9 #include <utility>
10 
11 #include "base/compiler_specific.h"
12 #include "base/memory/raw_ptr.h"
13 #include "base/metrics/histogram_functions.h"
14 #include "base/metrics/histogram_macros.h"
15 #include "base/numerics/clamped_math.h"
16 #include "base/numerics/safe_conversions.h"
17 #include "base/numerics/safe_math.h"
18 #include "base/pickle.h"
19 #include "base/strings/strcat.h"
20 #include "base/strings/string_number_conversions.h"
21 #include "base/strings/stringprintf.h"
22 
23 namespace base {
24 
25 namespace {
26 
// A shorthand constant for the max value of size_t; used as a sentinel for
// "no bucket index" by SingleSampleIterator below.
constexpr size_t kSizeMax = std::numeric_limits<size_t>::max();

// A constant stored in an AtomicSingleSample (as_atomic) to indicate that the
// sample is "disabled" and no further accumulation should be done with it. The
// value (-1, i.e. all bits set) is chosen such that it will be MAX_UINT16 for
// both |bucket| & |count|, and thus less likely to conflict with real use.
// Conflicts are explicitly handled in the code but it's worth making them as
// unlikely as possible.
constexpr int32_t kDisabledSingleSample = -1;
36 
// Iterates over the (min, max, count) triples that HistogramSamples::Serialize
// wrote to a Pickle, exposing them through the generic SampleCountIterator
// interface so they can be fed to AddSubtractImpl().
class SampleCountPickleIterator : public SampleCountIterator {
 public:
  explicit SampleCountPickleIterator(PickleIterator* iter);

  // SampleCountIterator:
  bool Done() const override;
  void Next() override;
  void Get(HistogramBase::Sample* min,
           int64_t* max,
           HistogramBase::Count* count) override;

 private:
  const raw_ptr<PickleIterator> iter_;

  // The most recently read triple; only valid while |is_done_| is false.
  HistogramBase::Sample min_;
  int64_t max_;
  HistogramBase::Count count_;
  bool is_done_;
};
55 
SampleCountPickleIterator::SampleCountPickleIterator(PickleIterator* iter)
    : iter_(iter),
      is_done_(false) {
  // Prime the iterator with the first triple so Done()/Get() are immediately
  // meaningful (an empty pickle marks the iterator done right away).
  Next();
}
61 
// The iterator is exhausted once any read from the pickle has failed.
bool SampleCountPickleIterator::Done() const {
  return is_done_;
}
65 
void SampleCountPickleIterator::Next() {
  DCHECK(!Done());
  // A failed read of any of the three fields means the pickle is exhausted
  // (or malformed); in either case, stop iterating.
  if (!iter_->ReadInt(&min_) || !iter_->ReadInt64(&max_) ||
      !iter_->ReadInt(&count_)) {
    is_done_ = true;
  }
}
73 
// Returns the triple read by the most recent Next() call.
void SampleCountPickleIterator::Get(HistogramBase::Sample* min,
                                    int64_t* max,
                                    HistogramBase::Count* count) {
  DCHECK(!Done());
  *min = min_;
  *max = max_;
  *count = count_;
}
82 
83 }  // namespace
84 
// AtomicSingleSample overlays the bucket/count pair (as_parts) on one 32-bit
// atomic word (as_atomic); the overlay is only sound if both views are
// exactly the same size.
static_assert(sizeof(HistogramSamples::AtomicSingleSample) ==
                  sizeof(subtle::Atomic32),
              "AtomicSingleSample isn't 32 bits");
88 
Load() const89 HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Load()
90     const {
91   AtomicSingleSample single_sample(subtle::Acquire_Load(&as_atomic));
92 
93   // If the sample was extracted/disabled, it's still zero to the outside.
94   if (single_sample.as_atomic == kDisabledSingleSample)
95     single_sample.as_atomic = 0;
96 
97   return single_sample.as_parts;
98 }
99 
// Atomically replaces the stored single-sample with |new_value| and returns
// the previous contents. If the sample was already disabled it stays disabled
// and an empty sample is returned.
HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Extract(
    AtomicSingleSample new_value) {
  DCHECK(new_value.as_atomic != kDisabledSingleSample)
      << "Disabling an AtomicSingleSample should be done through "
         "ExtractAndDisable().";

  AtomicSingleSample old_value;

  // Because a concurrent call may modify and/or disable this object as we are
  // trying to extract its value, a compare-and-swap loop must be done to ensure
  // that the value was not changed between the reading and writing (and to
  // prevent accidentally re-enabling this object).
  while (true) {
    old_value.as_atomic = subtle::Acquire_Load(&as_atomic);

    // If this object was already disabled, return an empty sample and keep it
    // disabled.
    if (old_value.as_atomic == kDisabledSingleSample) {
      old_value.as_atomic = 0;
      return old_value.as_parts;
    }

    // Extract the single-sample from memory. |existing| is what was in that
    // memory location at the time of the call; if it doesn't match |original|
    // (i.e., the single-sample was concurrently modified during this
    // iteration), then the swap did not happen, so try again.
    subtle::Atomic32 existing = subtle::Release_CompareAndSwap(
        &as_atomic, old_value.as_atomic, new_value.as_atomic);
    if (existing == old_value.as_atomic) {
      return old_value.as_parts;
    }
  }
}
133 
134 HistogramSamples::SingleSample
ExtractAndDisable()135 HistogramSamples::AtomicSingleSample::ExtractAndDisable() {
136   AtomicSingleSample old_value(
137       subtle::NoBarrier_AtomicExchange(&as_atomic, kDisabledSingleSample));
138   // If this object was already disabled, return an empty sample.
139   if (old_value.as_atomic == kDisabledSingleSample) {
140     old_value.as_atomic = 0;
141   }
142   return old_value.as_parts;
143 }
144 
// Adds |count| to the stored single-sample for |bucket|. Returns false when
// the accumulation cannot be represented as a single-sample: the values don't
// fit in 16 bits, a different bucket is already stored, the count would
// overflow, the result would collide with the disabled sentinel, or the
// sample has been disabled.
bool HistogramSamples::AtomicSingleSample::Accumulate(
    size_t bucket,
    HistogramBase::Count count) {
  if (count == 0)
    return true;

  // Convert the parameters to 16-bit variables because it's all 16-bit below.
  // To support decrements/subtractions, divide the |count| into sign/value and
  // do the proper operation below. The alternative is to change the single-
  // sample's count to be a signed integer (int16_t) and just add an int16_t
  // |count16| but that is somewhat wasteful given that the single-sample is
  // never expected to have a count less than zero.
  if (count < -std::numeric_limits<uint16_t>::max() ||
      count > std::numeric_limits<uint16_t>::max() ||
      bucket > std::numeric_limits<uint16_t>::max()) {
    return false;
  }
  bool count_is_negative = count < 0;
  uint16_t count16 = static_cast<uint16_t>(count_is_negative ? -count : count);
  uint16_t bucket16 = static_cast<uint16_t>(bucket);

  // A local, unshared copy of the single-sample is necessary so the parts
  // can be manipulated without worrying about atomicity.
  AtomicSingleSample single_sample;

  bool sample_updated;
  do {
    subtle::Atomic32 original = subtle::Acquire_Load(&as_atomic);
    if (original == kDisabledSingleSample)
      return false;
    single_sample.as_atomic = original;
    if (single_sample.as_atomic != 0) {
      // Only the same bucket (parameter and stored) can be counted multiple
      // times.
      if (single_sample.as_parts.bucket != bucket16)
        return false;
    } else {
      // The |single_sample| was zero so becomes the |bucket| parameter, the
      // contents of which were checked above to fit in 16 bits.
      single_sample.as_parts.bucket = bucket16;
    }

    // Update count, making sure that it doesn't overflow.
    CheckedNumeric<uint16_t> new_count(single_sample.as_parts.count);
    if (count_is_negative)
      new_count -= count16;
    else
      new_count += count16;
    if (!new_count.AssignIfValid(&single_sample.as_parts.count))
      return false;

    // Don't let this become equivalent to the "disabled" value.
    if (single_sample.as_atomic == kDisabledSingleSample)
      return false;

    // Store the updated single-sample back into memory. |existing| is what
    // was in that memory location at the time of the call; if it doesn't
    // match |original| then the swap didn't happen so loop again.
    subtle::Atomic32 existing = subtle::Release_CompareAndSwap(
        &as_atomic, original, single_sample.as_atomic);
    sample_updated = (existing == original);
  } while (!sample_updated);

  return true;
}
210 
IsDisabled() const211 bool HistogramSamples::AtomicSingleSample::IsDisabled() const {
212   return subtle::Acquire_Load(&as_atomic) == kDisabledSingleSample;
213 }
214 
HistogramSamples::LocalMetadata::LocalMetadata() {
  // Zero-fill all fields in one shot. This is the same way it's done for
  // persistent metadata since no ctor is called for the data members in that
  // case.
  memset(this, 0, sizeof(*this));
}
220 
HistogramSamples(uint64_t id,Metadata * meta)221 HistogramSamples::HistogramSamples(uint64_t id, Metadata* meta)
222     : meta_(meta) {
223   DCHECK(meta_->id == 0 || meta_->id == id);
224 
225   // It's possible that |meta| is contained in initialized, read-only memory
226   // so it's essential that no write be done in that case.
227   if (!meta_->id)
228     meta_->id = id;
229 }
230 
// Same as above but additionally takes ownership of the metadata, keeping it
// alive for the lifetime of this object.
HistogramSamples::HistogramSamples(uint64_t id, std::unique_ptr<Metadata> meta)
    : HistogramSamples(id, meta.get()) {
  meta_owned_ = std::move(meta);
}
235 
// This mustn't do anything with |meta_|. It was passed to the ctor and may
// be invalid by the time this dtor gets called.
HistogramSamples::~HistogramSamples() = default;
239 
Add(const HistogramSamples & other)240 bool HistogramSamples::Add(const HistogramSamples& other) {
241   IncreaseSumAndCount(other.sum(), other.redundant_count());
242   std::unique_ptr<SampleCountIterator> it = other.Iterator();
243   return AddSubtractImpl(it.get(), ADD);
244 }
245 
AddFromPickle(PickleIterator * iter)246 bool HistogramSamples::AddFromPickle(PickleIterator* iter) {
247   int64_t sum;
248   HistogramBase::Count redundant_count;
249 
250   if (!iter->ReadInt64(&sum) || !iter->ReadInt(&redundant_count))
251     return false;
252 
253   IncreaseSumAndCount(sum, redundant_count);
254 
255   SampleCountPickleIterator pickle_iter(iter);
256   return AddSubtractImpl(&pickle_iter, ADD);
257 }
258 
Subtract(const HistogramSamples & other)259 bool HistogramSamples::Subtract(const HistogramSamples& other) {
260   IncreaseSumAndCount(-other.sum(), -other.redundant_count());
261   std::unique_ptr<SampleCountIterator> it = other.Iterator();
262   return AddSubtractImpl(it.get(), SUBTRACT);
263 }
264 
// Moves all samples out of |other| and into this object, leaving |other|
// (best-effort) empty. Returns false if the per-bucket transfer fails.
bool HistogramSamples::Extract(HistogramSamples& other) {
  static_assert(sizeof(other.meta_->sum) == 8);

#ifdef ARCH_CPU_64_BITS
  // NoBarrier_AtomicExchange() is only defined for 64-bit types if
  // the ARCH_CPU_64_BITS macro is set.
  subtle::Atomic64 other_sum =
      subtle::NoBarrier_AtomicExchange(&other.meta_->sum, 0);
#else
  // |sum| is only atomic on 64 bit archs. Make |other_sum| volatile so that
  // the following code is not optimized or rearranged to be something like:
  //     IncreaseSumAndCount(other.meta_->sum, ...);
  //     other.meta_->sum = 0;
  // Or:
  //     int64_t other_sum = other.meta_->sum;
  //     other.meta_->sum = 0;
  //     IncreaseSumAndCount(other_sum, ...);
  // Which do not guarantee eventual consistency anymore (other.meta_->sum may
  // be modified concurrently at any time). However, despite this, eventual
  // consistency is still not guaranteed here because performing 64-bit
  // operations (loading, storing, adding, etc.) on a 32-bit machine cannot be
  // done atomically, but this at least reduces the odds of inconsistencies, at
  // the cost of a few extra instructions.
  volatile int64_t other_sum = other.meta_->sum;
  other.meta_->sum -= other_sum;
#endif  // ARCH_CPU_64_BITS
  HistogramBase::AtomicCount other_redundant_count =
      subtle::NoBarrier_AtomicExchange(&other.meta_->redundant_count, 0);
  IncreaseSumAndCount(other_sum, other_redundant_count);
  // ExtractingIterator removes each sample from |other| as it is visited.
  std::unique_ptr<SampleCountIterator> it = other.ExtractingIterator();
  return AddSubtractImpl(it.get(), ADD);
}
297 
IsDefinitelyEmpty() const298 bool HistogramSamples::IsDefinitelyEmpty() const {
299   return sum() == 0 && redundant_count() == 0;
300 }
301 
Serialize(Pickle * pickle) const302 void HistogramSamples::Serialize(Pickle* pickle) const {
303   pickle->WriteInt64(sum());
304   pickle->WriteInt(redundant_count());
305 
306   HistogramBase::Sample min;
307   int64_t max;
308   HistogramBase::Count count;
309   for (std::unique_ptr<SampleCountIterator> it = Iterator(); !it->Done();
310        it->Next()) {
311     it->Get(&min, &max, &count);
312     pickle->WriteInt(min);
313     pickle->WriteInt64(max);
314     pickle->WriteInt(count);
315   }
316 }
317 
AccumulateSingleSample(HistogramBase::Sample value,HistogramBase::Count count,size_t bucket)318 bool HistogramSamples::AccumulateSingleSample(HistogramBase::Sample value,
319                                               HistogramBase::Count count,
320                                               size_t bucket) {
321   if (single_sample().Accumulate(bucket, count)) {
322     // Success. Update the (separate) sum and redundant-count.
323     IncreaseSumAndCount(strict_cast<int64_t>(value) * count, count);
324     return true;
325   }
326   return false;
327 }
328 
// Adds |sum| and |count| to the shared metadata aggregates. The 64-bit sum is
// only updated atomically on 64-bit architectures; elsewhere the plain add
// may race (see the discussion in Extract() above).
void HistogramSamples::IncreaseSumAndCount(int64_t sum,
                                           HistogramBase::Count count) {
#ifdef ARCH_CPU_64_BITS
  subtle::NoBarrier_AtomicIncrement(&meta_->sum, sum);
#else
  meta_->sum += sum;
#endif
  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, count);
}
338 
// Reports (via UMA) that a negative sample count was encountered, recording
// why, by how much, and in which histogram (by its hashed id).
void HistogramSamples::RecordNegativeSample(NegativeSampleReason reason,
                                            HistogramBase::Count increment) {
  UMA_HISTOGRAM_ENUMERATION("UMA.NegativeSamples.Reason", reason,
                            MAX_NEGATIVE_SAMPLE_REASONS);
  UMA_HISTOGRAM_CUSTOM_COUNTS("UMA.NegativeSamples.Increment", increment, 1,
                              1 << 30, 100);
  UmaHistogramSparse("UMA.NegativeSamples.Histogram",
                     static_cast<int32_t>(id()));
}
348 
ToGraphDict(std::string_view histogram_name,int32_t flags) const349 base::Value::Dict HistogramSamples::ToGraphDict(std::string_view histogram_name,
350                                                 int32_t flags) const {
351   base::Value::Dict dict;
352   dict.Set("name", histogram_name);
353   dict.Set("header", GetAsciiHeader(histogram_name, flags));
354   dict.Set("body", GetAsciiBody());
355   return dict;
356 }
357 
GetAsciiHeader(std::string_view histogram_name,int32_t flags) const358 std::string HistogramSamples::GetAsciiHeader(std::string_view histogram_name,
359                                              int32_t flags) const {
360   std::string output;
361   StrAppend(&output, {"Histogram: ", histogram_name, " recorded ",
362                       NumberToString(TotalCount()), " samples"});
363   if (flags)
364     StringAppendF(&output, " (flags = 0x%x)", flags);
365   return output;
366 }
367 
// Renders every bucket as one line of text: the bucket's range, a bar graph
// scaled to at most 72 characters, and the count with its percentage of the
// total.
std::string HistogramSamples::GetAsciiBody() const {
  HistogramBase::Count total_count = TotalCount();
  // Pre-divided by 100 so that |count / scaled_total_count| is a percentage.
  double scaled_total_count = total_count / 100.0;

  // Determine how wide the largest bucket range is (how many digits to print),
  // so that we'll be able to right-align starts for the graphical bars.
  // Determine which bucket has the largest sample count so that we can
  // normalize the graphical bar-width relative to that sample count.
  HistogramBase::Count largest_count = 0;
  HistogramBase::Sample largest_sample = 0;
  std::unique_ptr<SampleCountIterator> it = Iterator();
  while (!it->Done()) {
    HistogramBase::Sample min;
    int64_t max;
    HistogramBase::Count count;
    it->Get(&min, &max, &count);
    if (min > largest_sample)
      largest_sample = min;
    if (count > largest_count)
      largest_count = count;
    it->Next();
  }
  // Scale histogram bucket counts to take at most 72 characters.
  // Note: Keep in sync w/ kLineLength sample_vector.cc
  const double kLineLength = 72;
  double scaling_factor = 1;
  if (largest_count > kLineLength)
    scaling_factor = kLineLength / largest_count;
  // Width of the widest range label, plus one for padding.
  size_t print_width = GetSimpleAsciiBucketRange(largest_sample).size() + 1;

  // Iterate over each item a second time to render the output lines.
  it = Iterator();
  std::string output;
  while (!it->Done()) {
    HistogramBase::Sample min;
    int64_t max;
    HistogramBase::Count count;
    it->Get(&min, &max, &count);

    // The bucket is labeled by its minimum value.
    std::string range = GetSimpleAsciiBucketRange(min);
    output.append(range);
    // Pad with spaces so all bar graphs start in the same column.
    if (const auto range_size = range.size(); print_width >= range_size) {
      output.append(print_width + 1 - range_size, ' ');
    }
    HistogramBase::Count current_size = round(count * scaling_factor);
    WriteAsciiBucketGraph(current_size, kLineLength, &output);
    WriteAsciiBucketValue(count, scaled_total_count, &output);
    output.append(1, '\n');
    it->Next();
  }
  return output;
}
421 
422 // static
WriteAsciiBucketGraph(double x_count,int line_length,std::string * output)423 void HistogramSamples::WriteAsciiBucketGraph(double x_count,
424                                              int line_length,
425                                              std::string* output) {
426   output->reserve(ClampAdd(output->size(), ClampAdd(line_length, 1)));
427 
428   const size_t count = ClampRound<size_t>(x_count);
429   output->append(count, '-');
430   output->append(1, 'O');
431   if (const auto len = as_unsigned(line_length); count < len) {
432     output->append(len - count, ' ');
433   }
434 }
435 
// Appends " (count = percent%)" for one bucket. |scaled_sum| is the total
// count already divided by 100, so |current / scaled_sum| is a percentage.
void HistogramSamples::WriteAsciiBucketValue(HistogramBase::Count current,
                                             double scaled_sum,
                                             std::string* output) const {
  StringAppendF(output, " (%d = %3.1f%%)", current, current / scaled_sum);
}
441 
GetSimpleAsciiBucketRange(HistogramBase::Sample sample) const442 const std::string HistogramSamples::GetSimpleAsciiBucketRange(
443     HistogramBase::Sample sample) const {
444   return StringPrintf("%d", sample);
445 }
446 
SampleCountIterator::~SampleCountIterator() = default;

// Base implementation: iterators that don't track bucket indexes report that
// no index is available. Subclasses that do track them override this.
bool SampleCountIterator::GetBucketIndex(size_t* index) const {
  DCHECK(!Done());
  return false;
}
453 
// Wraps a single (min, max, count) sample as an iterator. |bucket_index| may
// be kSizeMax to indicate that no bucket index is known.
// |value_was_extracted| indicates the sample was removed from its underlying
// storage, in which case the iterator must be fully consumed (see dtor).
SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
                                           int64_t max,
                                           HistogramBase::Count count,
                                           size_t bucket_index,
                                           bool value_was_extracted)
    : min_(min),
      max_(max),
      bucket_index_(bucket_index),
      count_(count),
      value_was_extracted_(value_was_extracted) {}
464 
SingleSampleIterator::~SingleSampleIterator() {
  // Because this object may have been instantiated in such a way that the
  // samples it is holding were already extracted from the underlying data, we
  // add a DCHECK to ensure that in those cases, users of this iterator read the
  // samples, otherwise they may be lost.
  DCHECK(!value_was_extracted_ || Done());
}
472 
// There is at most one sample; the iterator is done once its count is gone.
bool SingleSampleIterator::Done() const {
  return count_ == 0;
}
476 
// Advancing past the only sample simply zeroes the count, making Done() true.
void SingleSampleIterator::Next() {
  DCHECK(!Done());
  count_ = 0;
}
481 
// Returns the one wrapped sample.
void SingleSampleIterator::Get(HistogramBase::Sample* min,
                               int64_t* max,
                               HistogramBase::Count* count) {
  DCHECK(!Done());
  *min = min_;
  *max = max_;
  *count = count_;
}
490 
GetBucketIndex(size_t * index) const491 bool SingleSampleIterator::GetBucketIndex(size_t* index) const {
492   DCHECK(!Done());
493   if (bucket_index_ == kSizeMax)
494     return false;
495   *index = bucket_index_;
496   return true;
497 }
498 
499 }  // namespace base
500