// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_histogram_allocator.h"

#include <limits>
#include <memory>

#include "base/atomicops.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
#include "base/synchronization/lock.h"

namespace base {

namespace {

// Name of histogram for storing results of local operations.
const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// is used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};
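
// For example, kTypeIdRangesArray starts from 0xBCEA225A, the first four
// bytes of SHA1("RangesArray") interpreted as an integer, and the trailing
// "+ 1" records layout version 1; a future layout change would bump it to
// 0xBCEA225A + 2 so older stored records are ignored.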

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway: they depend on data managed elsewhere
// that could be destructed first. An AtomicWord is used instead of
// std::atomic because the latter can create global ctors and dtors.
subtle::AtomicWord g_allocator = 0;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
std::unique_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}
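
// Illustrative call (hypothetical data, not part of the production flow):
//   HistogramBase::Sample boundaries[] = {1, 5, 10, 100};
//   std::unique_ptr<BucketRanges> ranges =
//       CreateRangesFromData(boundaries, expected_checksum, 4);
// A nullptr result means the boundaries were not strictly increasing or the
// recomputed checksum did not match |expected_checksum|.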

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}
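
// For example, with 32-bit AtomicCount values a 50-bucket histogram needs
// 50 * 2 * 4 = 400 bytes: one array of live counts plus a parallel array of
// previously-logged counts (see CreateHistogram() below).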

}  // namespace

const Feature kPersistentHistogramsFeature{
    "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
};


PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
    PersistentMemoryAllocator* allocator)
    : allocator_(allocator), record_iterator_(allocator) {}

PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() {}

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
                                                          const void* user) {
  base::AutoLock auto_lock(lock_);
  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
}

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
    uint64_t id) {
  lock_.AssertAcquired();

  auto found = sample_records_.find(id);
  if (found != sample_records_.end())
    return found->second.get();

  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
  samples = MakeUnique<PersistentSampleMapRecords>(this, id);
  return samples.get();
}

bool PersistentSparseHistogramDataManager::LoadRecords(
    PersistentSampleMapRecords* sample_map_records) {
  // DataManager must be locked in order to access the found_ field of any
  // PersistentSampleMapRecords object.
  base::AutoLock auto_lock(lock_);
  bool found = false;

  // If there are already "found" entries for the passed object, move them.
  if (!sample_map_records->found_.empty()) {
    sample_map_records->records_.reserve(sample_map_records->records_.size() +
                                         sample_map_records->found_.size());
    sample_map_records->records_.insert(sample_map_records->records_.end(),
                                        sample_map_records->found_.begin(),
                                        sample_map_records->found_.end());
    sample_map_records->found_.clear();
    found = true;
  }

  // Acquiring a lock is a semi-expensive operation so load some records with
  // each call. More than this number may be loaded if it takes longer to
  // find at least one matching record for the passed object.
  const int kMinimumNumberToLoad = 10;
  const uint64_t match_id = sample_map_records->sample_map_id_;

  // Loop while no entry is found OR we haven't yet loaded the minimum number.
  // This will continue reading even after a match is found.
  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
    // Get the next sample-record. The iterator will always resume from where
    // it left off even if it previously had nothing further to return.
    uint64_t found_id;
    PersistentMemoryAllocator::Reference ref =
        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                     &found_id);

    // Stop immediately if there are none.
    if (!ref)
      break;

    // The sample-record could be for any sparse histogram. Add the reference
    // to the appropriate collection for later use.
    if (found_id == match_id) {
      sample_map_records->records_.push_back(ref);
      found = true;
    } else {
      PersistentSampleMapRecords* samples =
          GetSampleMapRecordsWhileLocked(found_id);
      DCHECK(samples);
      samples->found_.push_back(ref);
    }
  }

  return found;
}


PersistentSampleMapRecords::PersistentSampleMapRecords(
    PersistentSparseHistogramDataManager* data_manager,
    uint64_t sample_map_id)
    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}

PersistentSampleMapRecords::~PersistentSampleMapRecords() {}

PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
    const void* user) {
  DCHECK(!user_);
  user_ = user;
  seen_ = 0;
  return this;
}

void PersistentSampleMapRecords::Release(const void* user) {
  DCHECK_EQ(user_, user);
  user_ = nullptr;
}
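
// Usage pattern (sketch): a PersistentSampleMap acquires this object through
// UseSampleMapRecords(id, user), pulls references with GetNext() or creates
// them with CreateNew(), and later calls Release(user) so another object
// tracking the same histogram id may acquire it.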

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
  DCHECK(user_);

  // If there are no unseen records, lock and swap in all the found ones.
  if (records_.size() == seen_) {
    if (!data_manager_->LoadRecords(this))
      return 0;
  }

  // Return the next record. Records *must* be returned in the same order
  // they are found in the persistent memory in order to ensure that all
  // objects using this data always have the same state. Race conditions
  // can cause duplicate records so using the "first found" is the only
  // guarantee that all objects always access the same one.
  DCHECK_LT(seen_, records_.size());
  return records_[seen_++];
}
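
// Typical consumption loop (illustrative):
//   while (PersistentMemoryAllocator::Reference ref = records->GetNext()) {
//     // Import the sample-count record located at |ref|.
//   }
// A zero return means no further records currently exist in the allocator.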

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
    HistogramBase::Sample value) {
  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
                                                     sample_map_id_, value);
}


// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  // SHA1(Histogram): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize =
      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;

  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  PersistentMemoryAllocator::Reference counts_ref;
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
};
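
// The record is allocated with tail space for the full name:
//   offsetof(PersistentHistogramData, name) + name.length() + 1
// (see AllocateHistogram() below), so |name| behaves as a variable-length
// field despite its fixed declared size.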

PersistentHistogramAllocator::Iterator::Iterator(
    PersistentHistogramAllocator* allocator)
    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}

std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
    if (ref != ignore)
      return allocator_->GetHistogram(ref);
  }
  return nullptr;
}


PersistentHistogramAllocator::PersistentHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)),
      sparse_histogram_data_manager_(memory_allocator_.get()) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() {}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* histogram_data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
  size_t length = memory_allocator_->GetAllocSize(ref);

  // Check that metadata is reasonable: name is NUL terminated and non-empty,
  // ID fields have been loaded with a hash of the name (0 is considered
  // unset/invalid).
  if (!histogram_data ||
      reinterpret_cast<char*>(histogram_data)[length - 1] != '\0' ||
      histogram_data->name[0] == '\0' ||
      histogram_data->samples_metadata.id == 0 ||
      histogram_data->logged_metadata.id == 0) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
    NOTREACHED();
    return nullptr;
  }
  return CreateHistogram(histogram_data);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    const std::string& name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // failed due to a corrupt allocator and the number of process instances
  // with one, the latter being indicated by "newly corrupt", below.
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
    return nullptr;
  }

  // Create the metadata necessary for a persistent sparse histogram. This
  // is done first because it is a small subset of what is required for
  // other histograms. The type is "under construction" so that a crash
  // during the datafill doesn't leave a bad record around that could cause
  // confusion by another process trying to read it. It will be corrected
  // once histogram construction is complete.
  PersistentHistogramData* histogram_data =
      memory_allocator_->New<PersistentHistogramData>(
          offsetof(PersistentHistogramData, name) + name.length() + 1);
  if (histogram_data) {
    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags | HistogramBase::kIsPersistent;
  }

  // Create the remaining metadata necessary for regular histograms.
  if (histogram_type != SPARSE_HISTOGRAM) {
    size_t bucket_count = bucket_ranges->bucket_count();
    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
    if (counts_bytes == 0) {
      // |bucket_count| was out-of-range.
      NOTREACHED();
      return nullptr;
    }

    size_t ranges_count = bucket_count + 1;
    size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
    PersistentMemoryAllocator::Reference counts_ref =
        memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
    PersistentMemoryAllocator::Reference ranges_ref =
        memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
    HistogramBase::Sample* ranges_data =
        memory_allocator_->GetAsArray<HistogramBase::Sample>(
            ranges_ref, kTypeIdRangesArray, ranges_count);

    // Only continue here if all allocations were successful. If they weren't,
    // there is no way to free the space but that's not really a problem since
    // the allocations only fail because the space is full or corrupt and so
    // any future attempts will also fail.
    if (counts_ref && ranges_data && histogram_data) {
      for (size_t i = 0; i < bucket_ranges->size(); ++i)
        ranges_data[i] = bucket_ranges->range(i);

      histogram_data->minimum = minimum;
      histogram_data->maximum = maximum;
      // |bucket_count| must fit within 32-bits or the allocation of the counts
      // array would have failed for being too large; the allocator supports
      // less than 4GB total size.
      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
      histogram_data->ranges_ref = ranges_ref;
      histogram_data->ranges_checksum = bucket_ranges->checksum();
      histogram_data->counts_ref = counts_ref;
    } else {
      histogram_data = nullptr;  // Clear this for proper handling below.
    }
  }

  if (histogram_data) {
    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    DCHECK_NE(0U, histogram_data->samples_metadata.id);
    DCHECK_NE(0U, histogram_data->logged_metadata.id);

    PersistentMemoryAllocator::Reference histogram_ref =
        memory_allocator_->GetAsReference(histogram_data);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it.
    // See also the comment in ImportHistogramsToStatisticsRecorder().
    subtle::NoBarrier_Store(&last_created_, histogram_ref);
    return histogram;
  }

  CreateHistogramResultType result;
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
    result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
  } else if (memory_allocator_->IsFull()) {
    result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
  } else {
    result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
  }
  RecordCreateHistogramResult(result);

  // Crash for failures caused by internal bugs but not "full" which is
  // dependent on outside code.
  if (result != CREATE_HISTOGRAM_ALLOCATOR_FULL)
    NOTREACHED() << memory_allocator_->Name() << ", error=" << result;

  return nullptr;
}

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  if (registered) {
    // If the created persistent histogram was registered then it needs to
    // be marked as "iterable" in order to be found by other processes. This
    // happens only after the histogram is fully formed so it's impossible for
    // code iterating through the allocator to read a partially created record.
    memory_allocator_->MakeIterable(ref);
  } else {
    // If it wasn't registered then a race condition must have caused two to
    // be created. The allocator does not support releasing the acquired memory
    // so just change the type to be empty.
    memory_allocator_->ChangeType(ref, 0,
                                  PersistentHistogramData::kPersistentTypeId,
                                  /*clear=*/false);
  }
}
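
// Expected call pattern (hypothetical caller):
//   PersistentHistogramAllocator::Reference ref = 0;
//   std::unique_ptr<HistogramBase> histogram = allocator->AllocateHistogram(
//       LINEAR_HISTOGRAM, "Foo.Bar", 1, 100, ranges, 0, &ref);
//   bool registered = ...;  // did this histogram win SR registration?
//   allocator->FinalizeHistogram(ref, registered);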

void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
    HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // The data won't be merged but it also won't be recorded as merged
    // so a future try, if successful, will get what was missed. If it
    // continues to fail, some metric data will be lost but that is better
    // than crashing.
    NOTREACHED();
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotDelta());
}

void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
    const HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    NOTREACHED();
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotFinalDelta());
}

PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
    uint64_t id,
    const void* user) {
  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
  subtle::NoBarrier_Store(&last_created_, 0);
}

// static
HistogramBase*
PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
  // Get the histogram in which create-results are stored. This is copied
  // almost exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with
  // added code to prevent recursion (a likely occurrence because the creation
  // of a new histogram can end up calling this.)
  static base::subtle::AtomicWord atomic_histogram_pointer = 0;
  HistogramBase* histogram_pointer =
      reinterpret_cast<HistogramBase*>(
          base::subtle::Acquire_Load(&atomic_histogram_pointer));
  if (!histogram_pointer) {
    // It's possible for multiple threads to make it here in parallel but
    // they'll always return the same result as there is a mutex in the Get.
    // The purpose of the "initialized" variable is just to ensure that
    // the same thread doesn't recurse which is also why it doesn't have
    // to be atomic.
    static bool initialized = false;
    if (!initialized) {
      initialized = true;
      if (GlobalHistogramAllocator::Get()) {
        DVLOG(1) << "Creating the results-histogram inside persistent"
                 << " memory can cause future allocations to crash if"
                 << " that memory is ever released (for testing).";
      }

      histogram_pointer = LinearHistogram::FactoryGet(
          kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
          HistogramBase::kUmaTargetedHistogramFlag);
      base::subtle::Release_Store(
          &atomic_histogram_pointer,
          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
    }
  }
  return histogram_pointer;
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
    NOTREACHED();
    return nullptr;
  }

  // Sparse histograms are quite different so handle them as a special case.
  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
    std::unique_ptr<HistogramBase> histogram =
        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                          &histogram_data_ptr->samples_metadata,
                                          &histogram_data_ptr->logged_metadata);
    DCHECK(histogram);
    histogram->SetFlags(histogram_data_ptr->flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
    return histogram;
  }

  // Copy the histogram_data to local storage because anything in persistent
  // memory cannot be trusted as it could be changed at any moment by a
  // malicious actor that shares access. The contents of histogram_data are
  // validated below; the local copy is to ensure that the contents cannot
  // be externally changed between validation and use.
  PersistentHistogramData histogram_data = *histogram_data_ptr;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsArray<HistogramBase::Sample>(
          histogram_data.ranges_ref, kTypeIdRangesArray,
          PersistentMemoryAllocator::kSizeAny);

  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
  size_t required_bytes =
      (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_data.ranges_ref);
  if (!ranges_data || histogram_data.bucket_count < 2 ||
      histogram_data.bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  std::unique_ptr<const BucketRanges> created_ranges =
      CreateRangesFromData(ranges_data, histogram_data.ranges_checksum,
                           histogram_data.bucket_count + 1);
  if (!created_ranges) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }
  const BucketRanges* ranges =
      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
          created_ranges.release());

  HistogramBase::AtomicCount* counts_data =
      memory_allocator_->GetAsArray<HistogramBase::AtomicCount>(
          histogram_data.counts_ref, kTypeIdCountsArray,
          PersistentMemoryAllocator::kSizeAny);
  size_t counts_bytes =
      CalculateRequiredCountsBytes(histogram_data.bucket_count);
  if (!counts_data || counts_bytes == 0 ||
      memory_allocator_->GetAllocSize(histogram_data.counts_ref) <
          counts_bytes) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  // After the main "counts" array is a second array used for storing what
  // was previously logged. This is used to calculate the "delta" during
  // snapshot operations.
  HistogramBase::AtomicCount* logged_data =
      counts_data + histogram_data.bucket_count;
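  // Layout sketch for bucket_count == N (illustrative):
  //   counts_data[0 .. N-1]    live counts
  //   counts_data[N .. 2N-1]   logged counts (aliased above as logged_data)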

  std::string name(histogram_data_ptr->name);
  std::unique_ptr<HistogramBase> histogram;
  switch (histogram_data.histogram_type) {
    case HISTOGRAM:
      histogram = Histogram::PersistentCreate(
          name, histogram_data.minimum, histogram_data.maximum, ranges,
          counts_data, logged_data, histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, histogram_data.minimum, histogram_data.maximum, ranges,
          counts_data, logged_data, histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data, histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      NOTREACHED();
  }

  if (histogram) {
    DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_data.flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
  } else {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
  }

  return histogram;
}

HistogramBase*
PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
    const HistogramBase* histogram) {
  // This should never be called on the global histogram allocator as objects
  // created there are already within the global statistics recorder.
  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
  DCHECK(histogram);

  HistogramBase* existing =
      StatisticsRecorder::FindHistogram(histogram->histogram_name());
  if (existing)
    return existing;

  // Adding the passed histogram to the SR would cause a problem if the
  // allocator that holds it eventually goes away. Instead, create a new
  // one from a serialized version. Deserialization calls the appropriate
  // FactoryGet() which will create the histogram in the global persistent-
  // histogram allocator if such is set.
  base::Pickle pickle;
  if (!histogram->SerializeInfo(&pickle))
    return nullptr;
  PickleIterator iter(pickle);
  existing = DeserializeHistogramInfo(&iter);
  if (!existing)
    return nullptr;

  // Make sure there is no "serialization" flag set.
  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
  // Record the newly created histogram in the SR.
  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}

// static
void PersistentHistogramAllocator::RecordCreateHistogramResult(
    CreateHistogramResultType result) {
  HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
  if (result_histogram)
    result_histogram->Add(result);
}

GlobalHistogramAllocator::~GlobalHistogramAllocator() {}

// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(
      new GlobalHistogramAllocator(MakeUnique<PersistentMemoryAllocator>(
          base, size, page_size, id, name, false))));
}

// static
void GlobalHistogramAllocator::CreateWithLocalMemory(
    size_t size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(new GlobalHistogramAllocator(
      MakeUnique<LocalPersistentMemoryAllocator>(size, id, name))));
}
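
// Illustrative setup (hypothetical values):
//   base::GlobalHistogramAllocator::CreateWithLocalMemory(
//       1 << 20 /* 1 MiB */, 0 /* id */, "TestMetrics");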

#if !defined(OS_NACL)
// static
bool GlobalHistogramAllocator::CreateWithFile(
    const FilePath& file_path,
    size_t size,
    uint64_t id,
    StringPiece name) {
  bool exists = PathExists(file_path);
  File file(file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
                           File::FLAG_READ | File::FLAG_WRITE);

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  if (exists) {
    mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  } else {
    mmfile->Initialize(std::move(file), {0, static_cast<int64_t>(size)},
                       MemoryMappedFile::READ_WRITE_EXTEND);
  }
  if (!mmfile->IsValid() ||
      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
    NOTREACHED();
    return false;
  }

  Set(WrapUnique(
      new GlobalHistogramAllocator(MakeUnique<FilePersistentMemoryAllocator>(
          std::move(mmfile), size, id, name, false))));
  Get()->SetPersistentLocation(file_path);
  return true;
}
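
// Illustrative setup (hypothetical path and values):
//   base::GlobalHistogramAllocator::CreateWithFile(
//       base::FilePath(FILE_PATH_LITERAL("metrics.pma")),
//       1 << 20 /* 1 MiB */, 0 /* id */, "BrowserMetrics");
// On success the file is mapped read/write and is also recorded via
// SetPersistentLocation() for later WriteToPersistentLocation() calls.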

// static
bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
                                                    const FilePath& active_path,
                                                    size_t size,
                                                    uint64_t id,
                                                    StringPiece name) {
  if (!base::ReplaceFile(active_path, base_path, nullptr))
    base::DeleteFile(base_path, /*recursive=*/false);

  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
                                                        name);
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
                                                         size_t size,
                                                         uint64_t id,
                                                         StringPiece name) {
  FilePath base_path, active_path;
  ConstructFilePaths(dir, name, &base_path, &active_path);
  return CreateWithActiveFile(base_path, active_path, size, id, name);
}

// static
void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
                                                  StringPiece name,
                                                  FilePath* out_base_path,
                                                  FilePath* out_active_path) {
  if (out_base_path) {
    *out_base_path = dir.AppendASCII(name).AddExtension(
        PersistentMemoryAllocator::kFileExtension);
  }
  if (out_active_path) {
    *out_active_path =
        dir.AppendASCII(name.as_string() + std::string("-active"))
            .AddExtension(PersistentMemoryAllocator::kFileExtension);
  }
}
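
// For dir "/tmp" and name "Metrics", and assuming the ".pma" extension
// defined by PersistentMemoryAllocator::kFileExtension, this produces:
//   base:   /tmp/Metrics.pma
//   active: /tmp/Metrics-active.pma
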
#endif  // !defined(OS_NACL)

// static
void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
    const SharedMemoryHandle& handle,
    size_t size) {
  std::unique_ptr<SharedMemory> shm(
      new SharedMemory(handle, /*readonly=*/false));
  if (!shm->Map(size) ||
      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
    NOTREACHED();
    return;
  }

  Set(WrapUnique(
      new GlobalHistogramAllocator(MakeUnique<SharedPersistentMemoryAllocator>(
          std::move(shm), 0, StringPiece(), /*readonly=*/false))));
}

// static
void GlobalHistogramAllocator::Set(
    std::unique_ptr<GlobalHistogramAllocator> allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is also
  // released, future accesses to those histograms will seg-fault.
  CHECK(!subtle::NoBarrier_Load(&g_allocator));
  subtle::Release_Store(&g_allocator,
                        reinterpret_cast<uintptr_t>(allocator.release()));
  size_t existing = StatisticsRecorder::GetHistogramCount();

  DVLOG_IF(1, existing)
      << existing << " histograms were created before persistence was enabled.";
}

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
  return reinterpret_cast<GlobalHistogramAllocator*>(
      subtle::Acquire_Load(&g_allocator));
}

// static
std::unique_ptr<GlobalHistogramAllocator>
GlobalHistogramAllocator::ReleaseForTesting() {
  GlobalHistogramAllocator* histogram_allocator = Get();
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the Statistics-
  // Recorder forget about the histograms contained therein; otherwise,
  // some operations will try to access them and the released memory.
  PersistentMemoryAllocator::Iterator iter(memory_allocator);
  const PersistentHistogramData* data;
  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
    StatisticsRecorder::ForgetHistogramForTesting(data->name);

    // If a test breaks here then a memory region containing a histogram
    // actively used by this code is being released back to the test.
    // If that memory segment were to be deleted, future calls to create
    // persistent histograms would crash. To avoid this, have the test call
    // the method GetCreateHistogramResultHistogram() *before* setting
    // the (temporary) memory allocator via SetGlobalAllocator() so that
    // histogram is instead allocated from the process heap.
    DCHECK_NE(kResultHistogram, data->name);
  }

  subtle::Release_Store(&g_allocator, 0);
  return WrapUnique(histogram_allocator);
}

void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
  persistent_location_ = location;
}

const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
  return persistent_location_;
}

bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if defined(OS_NACL)
  // NACL doesn't support file operations, including ImportantFileWriter.
  NOTREACHED();
  return false;
#else
  // Stop if no destination is set.
  if (persistent_location_.empty()) {
    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                 << " to file because no location was set.";
    return false;
  }

  StringPiece contents(static_cast<const char*>(data()), used());
  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
                                                contents)) {
    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
               << " to file: " << persistent_location_.value();
    return false;
  }

  return true;
#endif
}

void GlobalHistogramAllocator::DeletePersistentLocation() {
  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);

#if defined(OS_NACL)
  NOTREACHED();
#else
  if (persistent_location_.empty())
    return;

  // Open (with delete) and then immediately close the file by going out of
  // scope. This is the only cross-platform safe way to delete a file that may
  // be open elsewhere. Open handles will continue to operate normally but
  // new opens will not be possible.
  File file(persistent_location_,
            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
#endif
}

GlobalHistogramAllocator::GlobalHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : PersistentHistogramAllocator(std::move(memory)),
      import_iterator_(this) {
  // Make sure the StatisticsRecorder is initialized to prevent duplicate
  // histograms from being created. It's safe to call this multiple times.
  StatisticsRecorder::Initialize();
}

void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
  // Skip the import if it's the histogram that was last created. Should a
  // race condition cause the "last created" to be overwritten before it
  // is recognized here then the histogram will be created and be ignored
  // when it is detected as a duplicate by the statistics-recorder. This
  // simple check reduces the time of creating persistent histograms by
  // about 40%.
  Reference record_to_ignore = last_created();

  // There is no lock on this because the iterator is lock-free while still
  // guaranteed to return each entry only once. The StatisticsRecorder
  // has its own lock so the Register operation is safe.
  while (true) {
    std::unique_ptr<HistogramBase> histogram =
        import_iterator_.GetNextWithIgnore(record_to_ignore);
    if (!histogram)
      break;
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}

}  // namespace base