// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_histogram_allocator.h"

#include <stddef.h>  // offsetof
#include <string.h>  // memcpy

#include <limits>
#include <memory>

#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
#include "base/synchronization/lock.h"

namespace base {

namespace {

// Name of histogram for storing results of local operations.
const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdHistogram   = 0xF1645910 + 2,  // SHA1(Histogram)   v2
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};
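
// Illustration: the 0xF1645910 base above is the first 4 bytes of
// SHA1("Histogram") taken as an integer, and the "+ 2" encodes layout
// version 2. Changing the layout of PersistentHistogramData means bumping
// the version so that older stored records are skipped rather than misread.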

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's best: PersistentMemoryAllocator objects (which underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway because they depend on data managed
// elsewhere that could be destructed first.
GlobalHistogramAllocator* g_allocator = nullptr;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
std::unique_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}
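
// Worked example (assuming 4-byte AtomicCount and Sample): a 50-bucket
// histogram needs 50 * 2 * 4 = 400 bytes of counts storage here, plus
// (50 + 1) * 4 = 204 bytes for the boundary array that AllocateHistogram()
// below requests separately.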

}  // namespace

const Feature kPersistentHistogramsFeature{
  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
};


PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
    PersistentMemoryAllocator* allocator)
    : allocator_(allocator), record_iterator_(allocator) {}

PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() {}

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
                                                          const void* user) {
  base::AutoLock auto_lock(lock_);
  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
}

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
    uint64_t id) {
  lock_.AssertAcquired();

  auto found = sample_records_.find(id);
  if (found != sample_records_.end())
    return found->second.get();

  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
  samples = WrapUnique(new PersistentSampleMapRecords(this, id));
  return samples.get();
}

bool PersistentSparseHistogramDataManager::LoadRecords(
    PersistentSampleMapRecords* sample_map_records) {
  // DataManager must be locked in order to access the found_ field of any
  // PersistentSampleMapRecords object.
  base::AutoLock auto_lock(lock_);
  bool found = false;

  // If there are already "found" entries for the passed object, move them.
  if (!sample_map_records->found_.empty()) {
    sample_map_records->records_.reserve(sample_map_records->records_.size() +
                                         sample_map_records->found_.size());
    sample_map_records->records_.insert(sample_map_records->records_.end(),
                                        sample_map_records->found_.begin(),
                                        sample_map_records->found_.end());
    sample_map_records->found_.clear();
    found = true;
  }

  // Acquiring a lock is a semi-expensive operation so load some records with
  // each call. More than this number may be loaded if it takes longer to
  // find at least one matching record for the passed object.
  const int kMinimumNumberToLoad = 10;
  const uint64_t match_id = sample_map_records->sample_map_id_;

  // Loop while no entry is found OR we haven't yet loaded the minimum number.
  // This will continue reading even after a match is found.
  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
    // Get the next sample-record. The iterator will always resume from where
    // it left off even if it previously had nothing further to return.
    uint64_t found_id;
    PersistentMemoryAllocator::Reference ref =
        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                     &found_id);

    // Stop immediately if there are none.
    if (!ref)
      break;

    // The sample-record could be for any sparse histogram. Add the reference
    // to the appropriate collection for later use.
    if (found_id == match_id) {
      sample_map_records->records_.push_back(ref);
      found = true;
    } else {
      PersistentSampleMapRecords* samples =
          GetSampleMapRecordsWhileLocked(found_id);
      DCHECK(samples);
      samples->found_.push_back(ref);
    }
  }

  return found;
}


PersistentSampleMapRecords::PersistentSampleMapRecords(
    PersistentSparseHistogramDataManager* data_manager,
    uint64_t sample_map_id)
    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}

PersistentSampleMapRecords::~PersistentSampleMapRecords() {}

PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
    const void* user) {
  DCHECK(!user_);
  user_ = user;
  seen_ = 0;
  return this;
}

void PersistentSampleMapRecords::Release(const void* user) {
  DCHECK_EQ(user_, user);
  user_ = nullptr;
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
  DCHECK(user_);

  // If there are no unseen records, lock and swap in all the found ones.
  if (records_.size() == seen_) {
    if (!data_manager_->LoadRecords(this))
      return 0;  // Not "false"; this function returns a Reference.
  }

  // Return the next record. Records *must* be returned in the same order
  // they are found in the persistent memory in order to ensure that all
  // objects using this data always have the same state. Race conditions
  // can cause duplicate records so using the "first found" is the only
  // guarantee that all objects always access the same one.
  DCHECK_LT(seen_, records_.size());
  return records_[seen_++];
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
    HistogramBase::Sample value) {
  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
                                                     sample_map_id_, value);
}


// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  PersistentMemoryAllocator::Reference counts_ref;
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[1];
};
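
// Layout note: because the name is stored inline after the fixed fields, an
// allocation for this struct is sized as
// offsetof(PersistentHistogramData, name) + name.length() + 1, exactly as
// AllocateHistogram() below requests; char name[1] stands in for the
// flexible array mentioned above.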

PersistentHistogramAllocator::Iterator::Iterator(
    PersistentHistogramAllocator* allocator)
    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}

std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) {
    if (ref != ignore)
      return allocator_->GetHistogram(ref);
  }
  return nullptr;
}
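
// Usage sketch (hypothetical caller): walk every histogram in an allocator.
// Passing 0 as the reference to ignore visits all records.
//
//   PersistentHistogramAllocator::Iterator iter(allocator);
//   std::unique_ptr<HistogramBase> histogram;
//   while ((histogram = iter.GetNextWithIgnore(0)) != nullptr)
//     Examine(*histogram);  // Examine() is a placeholder.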


PersistentHistogramAllocator::PersistentHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)),
      sparse_histogram_data_manager_(memory_allocator_.get()) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() {}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* histogram_data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(
          ref, kTypeIdHistogram);
  size_t length = memory_allocator_->GetAllocSize(ref);
  if (!histogram_data ||
      reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
    NOTREACHED();
    return nullptr;
  }
  return CreateHistogram(histogram_data);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    const std::string& name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // that failed due to a corrupt allocator and the number of process
  // instances that have one, the latter being indicated by "newly corrupt",
  // below.
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
    return nullptr;
  }

  // Create the metadata necessary for a persistent sparse histogram. This
  // is done first because it is a small subset of what is required for
  // other histograms.
  PersistentMemoryAllocator::Reference histogram_ref =
      memory_allocator_->Allocate(
          offsetof(PersistentHistogramData, name) + name.length() + 1,
          kTypeIdHistogram);
  PersistentHistogramData* histogram_data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
                                                              kTypeIdHistogram);
  if (histogram_data) {
    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags | HistogramBase::kIsPersistent;
  }

  // Create the remaining metadata necessary for regular histograms.
  if (histogram_type != SPARSE_HISTOGRAM) {
    size_t bucket_count = bucket_ranges->bucket_count();
    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
    if (counts_bytes == 0) {
      // |bucket_count| was out-of-range.
      NOTREACHED();
      return nullptr;
    }

    size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
    PersistentMemoryAllocator::Reference counts_ref =
        memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
    PersistentMemoryAllocator::Reference ranges_ref =
        memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
    HistogramBase::Sample* ranges_data =
        memory_allocator_->GetAsObject<HistogramBase::Sample>(
            ranges_ref, kTypeIdRangesArray);

    // Only continue here if all allocations were successful. If they weren't,
    // there is no way to free the space but that's not really a problem since
    // the allocations only fail because the space is full or corrupt and so
    // any future attempts will also fail.
    if (counts_ref && ranges_data && histogram_data) {
      for (size_t i = 0; i < bucket_ranges->size(); ++i)
        ranges_data[i] = bucket_ranges->range(i);

      histogram_data->minimum = minimum;
      histogram_data->maximum = maximum;
      // |bucket_count| must fit within 32-bits or the allocation of the counts
      // array would have failed for being too large; the allocator supports
      // less than 4GB total size.
      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
      histogram_data->ranges_ref = ranges_ref;
      histogram_data->ranges_checksum = bucket_ranges->checksum();
      histogram_data->counts_ref = counts_ref;
    } else {
      histogram_data = nullptr;  // Clear this for proper handling below.
    }
  }

  if (histogram_data) {
    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it.
    // See also the comment in ImportHistogramsToStatisticsRecorder().
    subtle::NoBarrier_Store(&last_created_, histogram_ref);
    return histogram;
  }

  CreateHistogramResultType result;
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
    result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
  } else if (memory_allocator_->IsFull()) {
    result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
  } else {
    result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
  }
  RecordCreateHistogramResult(result);
  NOTREACHED() << "error=" << result;

  return nullptr;
}

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  // If the created persistent histogram was registered then it needs to
  // be marked as "iterable" in order to be found by other processes.
  if (registered)
    memory_allocator_->MakeIterable(ref);
  // If it wasn't registered then a race condition must have caused
  // two to be created. The allocator does not support releasing the
  // acquired memory so just change the type to be empty.
  else
    memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram);
}
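
// Typical creation flow (sketch; names are illustrative, not from this file):
//
//   PersistentHistogramAllocator::Reference ref = 0;
//   std::unique_ptr<HistogramBase> histogram = allocator->AllocateHistogram(
//       HISTOGRAM, "Hypothetical.Metric", 1, 1000, ranges, 0, &ref);
//   bool registered = RegisterSomewhere(histogram.get());  // placeholder
//   allocator->FinalizeHistogram(ref, registered);
//
// Finalizing with registered == true makes the record iterable so other
// processes can import it; false retires the record by clearing its type
// (the underlying memory is not reclaimed).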

void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
    HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // The data won't be merged but it also won't be recorded as merged
    // so a future try, if successful, will get what was missed. If it
    // continues to fail, some metric data will be lost but that is better
    // than crashing.
    NOTREACHED();
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotDelta());
}

void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
    const HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    NOTREACHED();
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotFinalDelta());
}

PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
    uint64_t id,
    const void* user) {
  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
  subtle::NoBarrier_Store(&last_created_, 0);
}

// static
HistogramBase*
PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
  // Get the histogram in which create-results are stored. This is copied
  // almost exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with
  // added code to prevent recursion (a likely occurrence because the creation
  // of a new histogram can end up calling this.)
  static base::subtle::AtomicWord atomic_histogram_pointer = 0;
  HistogramBase* histogram_pointer =
      reinterpret_cast<HistogramBase*>(
          base::subtle::Acquire_Load(&atomic_histogram_pointer));
  if (!histogram_pointer) {
    // It's possible for multiple threads to make it here in parallel but
    // they'll always return the same result as there is a mutex in the Get.
    // The purpose of the "initialized" variable is just to ensure that
    // the same thread doesn't recurse which is also why it doesn't have
    // to be atomic.
    static bool initialized = false;
    if (!initialized) {
      initialized = true;
      if (g_allocator) {
// Don't log in release-with-asserts builds, otherwise the test_installer step
// fails because this code writes to a log file before the installer code had a
// chance to set the log file's location.
#if !defined(DCHECK_ALWAYS_ON)
        DLOG(WARNING) << "Creating the results-histogram inside persistent"
                      << " memory can cause future allocations to crash if"
                      << " that memory is ever released (for testing).";
#endif
      }

      histogram_pointer = LinearHistogram::FactoryGet(
          kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
          HistogramBase::kUmaTargetedHistogramFlag);
      base::subtle::Release_Store(
          &atomic_histogram_pointer,
          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
    }
  }
  return histogram_pointer;
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
    NOTREACHED();
    return nullptr;
  }

  // Sparse histograms are quite different so handle them as a special case.
  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
    std::unique_ptr<HistogramBase> histogram =
        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                          &histogram_data_ptr->samples_metadata,
                                          &histogram_data_ptr->logged_metadata);
    DCHECK(histogram);
    histogram->SetFlags(histogram_data_ptr->flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
    return histogram;
  }

  // Copy the histogram_data to local storage because anything in persistent
  // memory cannot be trusted as it could be changed at any moment by a
  // malicious actor that shares access. The contents of histogram_data are
  // validated below; the local copy is to ensure that the contents cannot
  // be externally changed between validation and use.
  PersistentHistogramData histogram_data = *histogram_data_ptr;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsObject<HistogramBase::Sample>(
          histogram_data.ranges_ref, kTypeIdRangesArray);

  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
  size_t required_bytes =
      (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_data.ranges_ref);
  if (!ranges_data || histogram_data.bucket_count < 2 ||
      histogram_data.bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  std::unique_ptr<const BucketRanges> created_ranges =
      CreateRangesFromData(ranges_data, histogram_data.ranges_checksum,
                           histogram_data.bucket_count + 1);
  if (!created_ranges) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }
  const BucketRanges* ranges =
      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
          created_ranges.release());

  HistogramBase::AtomicCount* counts_data =
      memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
          histogram_data.counts_ref, kTypeIdCountsArray);
  size_t counts_bytes =
      CalculateRequiredCountsBytes(histogram_data.bucket_count);
  if (!counts_data || counts_bytes == 0 ||
      memory_allocator_->GetAllocSize(histogram_data.counts_ref) <
          counts_bytes) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  // After the main "counts" array is a second array used for storing what
  // was previously logged. This is used to calculate the "delta" during
  // snapshot operations.
  HistogramBase::AtomicCount* logged_data =
      counts_data + histogram_data.bucket_count;

  std::string name(histogram_data_ptr->name);
  std::unique_ptr<HistogramBase> histogram;
  switch (histogram_data.histogram_type) {
    case HISTOGRAM:
      histogram = Histogram::PersistentCreate(
          name, histogram_data.minimum, histogram_data.maximum, ranges,
          counts_data, logged_data, histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, histogram_data.minimum, histogram_data.maximum, ranges,
          counts_data, logged_data, histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data, histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      NOTREACHED();
  }

  if (histogram) {
    DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_data.flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
  } else {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
  }

  return histogram;
}

HistogramBase*
PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
    const HistogramBase* histogram) {
  // This should never be called on the global histogram allocator as objects
  // created there are already within the global statistics recorder.
  DCHECK_NE(g_allocator, this);
  DCHECK(histogram);

  HistogramBase* existing =
      StatisticsRecorder::FindHistogram(histogram->histogram_name());
  if (existing)
    return existing;

  // Adding the passed histogram to the SR would cause a problem if the
  // allocator that holds it eventually goes away. Instead, create a new
  // one from a serialized version.
  base::Pickle pickle;
  if (!histogram->SerializeInfo(&pickle))
    return nullptr;
  PickleIterator iter(pickle);
  existing = DeserializeHistogramInfo(&iter);
  if (!existing)
    return nullptr;

  // Make sure there is no "serialization" flag set.
  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
  // Record the newly created histogram in the SR.
  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}

// static
void PersistentHistogramAllocator::RecordCreateHistogramResult(
    CreateHistogramResultType result) {
  HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
  if (result_histogram)
    result_histogram->Add(result);
}

GlobalHistogramAllocator::~GlobalHistogramAllocator() {}

// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(new GlobalHistogramAllocator(
      WrapUnique(new PersistentMemoryAllocator(
          base, size, page_size, id, name, false)))));
}

// static
void GlobalHistogramAllocator::CreateWithLocalMemory(
    size_t size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(new GlobalHistogramAllocator(
      WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)))));
}

#if !defined(OS_NACL)
// static
void GlobalHistogramAllocator::CreateWithFile(
    const FilePath& file_path,
    size_t size,
    uint64_t id,
    StringPiece name) {
  bool exists = PathExists(file_path);
  File file(
      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
                 File::FLAG_READ | File::FLAG_WRITE);

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  if (exists) {
    mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  } else {
    mmfile->Initialize(std::move(file), {0, static_cast<int64_t>(size)},
                       MemoryMappedFile::READ_WRITE_EXTEND);
  }
  if (!mmfile->IsValid() ||
      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
    NOTREACHED();
    return;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      WrapUnique(new FilePersistentMemoryAllocator(
          std::move(mmfile), size, id, name, false)))));
}
#endif
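
// Usage sketch (hypothetical values): a browser-style process might back all
// new histograms with a memory-mapped file early in startup.
//
//   GlobalHistogramAllocator::CreateWithFile(
//       FilePath(FILE_PATH_LITERAL("metrics-active.pma")),  // made-up path
//       1 << 20 /* 1 MiB */, 0x4D5A /* made-up id */, "ExampleMetrics");
//
// Because the file is opened with FLAG_OPEN_ALWAYS, an existing file is
// mapped read/write as-is, while a fresh one is extended to |size| first.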

// static
void GlobalHistogramAllocator::CreateWithSharedMemory(
    std::unique_ptr<SharedMemory> memory,
    size_t size,
    uint64_t /*id*/,
    StringPiece /*name*/) {
  if ((!memory->memory() && !memory->Map(size)) ||
      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*memory)) {
    NOTREACHED();
    return;
  }

  DCHECK_LE(memory->mapped_size(), size);
  Set(WrapUnique(new GlobalHistogramAllocator(
      WrapUnique(new SharedPersistentMemoryAllocator(
          std::move(memory), 0, StringPiece(), /*readonly=*/false)))));
}

// static
void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
    const SharedMemoryHandle& handle,
    size_t size) {
  std::unique_ptr<SharedMemory> shm(
      new SharedMemory(handle, /*readonly=*/false));
  if (!shm->Map(size) ||
      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
    NOTREACHED();
    return;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      WrapUnique(new SharedPersistentMemoryAllocator(
          std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
}

// static
void GlobalHistogramAllocator::Set(
    std::unique_ptr<GlobalHistogramAllocator> allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is
  // also released, future accesses to those histograms will seg-fault.
  CHECK(!g_allocator);
  g_allocator = allocator.release();
  size_t existing = StatisticsRecorder::GetHistogramCount();

  DVLOG_IF(1, existing)
      << existing << " histograms were created before persistence was enabled.";
}

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
  return g_allocator;
}

// static
std::unique_ptr<GlobalHistogramAllocator>
GlobalHistogramAllocator::ReleaseForTesting() {
  GlobalHistogramAllocator* histogram_allocator = g_allocator;
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the Statistics-
  // Recorder forget about the histograms contained therein; otherwise,
  // some operations will try to access them and the released memory.
  PersistentMemoryAllocator::Iterator iter(memory_allocator);
  PersistentMemoryAllocator::Reference ref;
  while ((ref = iter.GetNextOfType(kTypeIdHistogram)) != 0) {
    PersistentHistogramData* histogram_data =
        memory_allocator->GetAsObject<PersistentHistogramData>(
            ref, kTypeIdHistogram);
    DCHECK(histogram_data);
    StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);

    // If a test breaks here then a memory region containing a histogram
    // actively used by this code is being released back to the test.
    // If that memory segment were to be deleted, future calls to create
    // persistent histograms would crash. To avoid this, have the test call
    // the method GetCreateHistogramResultHistogram() *before* setting
    // the (temporary) memory allocator via SetGlobalAllocator() so that
    // histogram is instead allocated from the process heap.
    DCHECK_NE(kResultHistogram, histogram_data->name);
  }

  g_allocator = nullptr;
  return WrapUnique(histogram_allocator);
}

void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
  persistent_location_ = location;
}

const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
  return persistent_location_;
}

bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if defined(OS_NACL)
  // NACL doesn't support file operations, including ImportantFileWriter.
  NOTREACHED();
  return false;
#else
  // Stop if no destination is set.
  if (persistent_location_.empty()) {
    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                 << " to file because no location was set.";
    return false;
  }

  StringPiece contents(static_cast<const char*>(data()), used());
  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
                                                contents)) {
    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
               << " to file: " << persistent_location_.value();
    return false;
  }

  return true;
#endif
}
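
// Snapshot sketch (hypothetical file name): pairing the two calls above lets
// an embedder periodically flush the live allocator to disk.
//
//   GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
//   if (allocator) {
//     allocator->SetPersistentLocation(
//         FilePath(FILE_PATH_LITERAL("snapshot.pma")));  // made-up name
//     allocator->WriteToPersistentLocation();
//   }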

GlobalHistogramAllocator::GlobalHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : PersistentHistogramAllocator(std::move(memory)),
      import_iterator_(this) {}

void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
  // Skip the import if it's the histogram that was last created. Should a
  // race condition cause the "last created" to be overwritten before it
  // is recognized here then the histogram will be created and then ignored
  // when it is detected as a duplicate by the statistics-recorder. This
  // simple check reduces the time of creating persistent histograms by
  // about 40%.
  Reference record_to_ignore = last_created();

  // There is no lock on this because the iterator is lock-free while still
  // guaranteed to return each entry only once. The StatisticsRecorder
  // has its own lock so the Register operation is safe.
  while (true) {
    std::unique_ptr<HistogramBase> histogram =
        import_iterator_.GetNextWithIgnore(record_to_ignore);
    if (!histogram)
      break;
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}

}  // namespace base