1 // Copyright 2016 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/metrics/persistent_histogram_allocator.h"
6
7 #include <atomic>
8 #include <limits>
9 #include <utility>
10
11 #include "base/files/file_path.h"
12 #include "base/files/file_util.h"
13 #include "base/files/important_file_writer.h"
14 #include "base/files/memory_mapped_file.h"
15 #include "base/lazy_instance.h"
16 #include "base/logging.h"
17 #include "base/memory/ptr_util.h"
18 #include "base/memory/shared_memory_mapping.h"
19 #include "base/memory/writable_shared_memory_region.h"
20 #include "base/metrics/histogram.h"
21 #include "base/metrics/histogram_base.h"
22 #include "base/metrics/histogram_samples.h"
23 #include "base/metrics/metrics_hashes.h"
24 #include "base/metrics/persistent_sample_map.h"
25 #include "base/metrics/sparse_histogram.h"
26 #include "base/metrics/statistics_recorder.h"
27 #include "base/notreached.h"
28 #include "base/numerics/safe_conversions.h"
29 #include "base/pickle.h"
30 #include "base/process/process_handle.h"
31 #include "base/strings/strcat.h"
32 #include "base/strings/string_number_conversions.h"
33 #include "base/strings/string_piece.h"
34 #include "base/strings/string_split.h"
35 #include "base/strings/stringprintf.h"
36 #include "base/synchronization/lock.h"
37 #include "build/build_config.h"
38
39 namespace base {
40
41 namespace {
42
43 // Type identifiers used when storing in persistent memory so they can be
44 // identified during extraction; the first 4 bytes of the SHA1 of the name
45 // are used as a unique integer. A "version number" is added to the base
46 // so that, if the structure of that object changes, stored older versions
47 // will be safely ignored.
48 enum : uint32_t {
49 kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1
50 kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1
51 };
52
53 // The current globally-active persistent allocator for all new histograms.
54 // The object held here will obviously not be destructed at process exit
55 // but that's best since PersistentMemoryAllocator objects (that underlie
56 // GlobalHistogramAllocator objects) are explicitly forbidden from doing
57 // anything essential at exit anyway because they depend on data
58 // managed elsewhere that could be destructed first. An AtomicWord is
59 // used instead of std::atomic because the latter can create global ctors
60 // and dtors.
61 subtle::AtomicWord g_histogram_allocator = 0;
62
63 // Take an array of range boundaries and create a proper BucketRanges object
64 // which is returned to the caller. A return of nullptr indicates that the
65 // passed boundaries are invalid.
66 std::unique_ptr<BucketRanges> CreateRangesFromData(
67 HistogramBase::Sample* ranges_data,
68 uint32_t ranges_checksum,
69 size_t count) {
70 // To avoid racy destruction at shutdown, the following may be leaked.
71 std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
72 DCHECK_EQ(count, ranges->size());
73 for (size_t i = 0; i < count; ++i) {
74 if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
75 return nullptr;
76 ranges->set_range(i, ranges_data[i]);
77 }
78
79 ranges->ResetChecksum();
80 if (ranges->checksum() != ranges_checksum)
81 return nullptr;
82
83 return ranges;
84 }
85
86 // Calculate the number of bytes required to store all of a histogram's
87 // "counts". This will return zero (0) if |bucket_count| is not valid.
88 size_t CalculateRequiredCountsBytes(size_t bucket_count) {
89 // 2 because each "sample count" also requires a backup "logged count"
90 // used for calculating the delta during snapshot operations.
91 const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
92
93 // If the |bucket_count| is such that it would overflow the return type,
94 // perhaps as the result of a malicious actor, then return zero to
95 // indicate the problem to the caller.
96 if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
97 return 0;
98
99 return bucket_count * kBytesPerBucket;
100 }
101
102 } // namespace
103
104 PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
105 PersistentMemoryAllocator* allocator)
106 : allocator_(allocator), record_iterator_(allocator) {}
107
108 PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
109 default;
110
111 PersistentSampleMapRecords*
112 PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
113 const void* user) {
114 base::AutoLock auto_lock(lock_);
115 return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
116 }
117
118 PersistentSampleMapRecords*
119 PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
120 uint64_t id) {
121 auto found = sample_records_.find(id);
122 if (found != sample_records_.end())
123 return found->second.get();
124
125 std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
126 samples = std::make_unique<PersistentSampleMapRecords>(this, id);
127 return samples.get();
128 }
129
130 bool PersistentSparseHistogramDataManager::LoadRecords(
131 PersistentSampleMapRecords* sample_map_records) {
132 // DataManager must be locked in order to access the found_ field of any
133 // PersistentSampleMapRecords object.
134 base::AutoLock auto_lock(lock_);
135 bool found = false;
136
137 // If there are already "found" entries for the passed object, move them.
138 if (!sample_map_records->found_.empty()) {
139 sample_map_records->records_.reserve(sample_map_records->records_.size() +
140 sample_map_records->found_.size());
141 sample_map_records->records_.insert(sample_map_records->records_.end(),
142 sample_map_records->found_.begin(),
143 sample_map_records->found_.end());
144 sample_map_records->found_.clear();
145 found = true;
146 }
147
148 // Acquiring a lock is a semi-expensive operation so load some records with
149 // each call. More than this number may be loaded if it takes longer to
150 // find at least one matching record for the passed object.
151 const int kMinimumNumberToLoad = 10;
152 const uint64_t match_id = sample_map_records->sample_map_id_;
153
154 // Loop while no entry is found OR we haven't yet loaded the minimum number.
155 // This will continue reading even after a match is found.
156 for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
157 // Get the next sample-record. The iterator will always resume from where
158 // it left off even if it previously had nothing further to return.
159 uint64_t found_id;
160 PersistentMemoryAllocator::Reference ref =
161 PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
162 &found_id);
163
164 // Stop immediately if there are none.
165 if (!ref)
166 break;
167
168 // The sample-record could be for any sparse histogram. Add the reference
169 // to the appropriate collection for later use.
170 if (found_id == match_id) {
171 sample_map_records->records_.push_back(ref);
172 found = true;
173 } else {
174 PersistentSampleMapRecords* samples =
175 GetSampleMapRecordsWhileLocked(found_id);
176 DCHECK(samples);
177 samples->found_.push_back(ref);
178 }
179 }
180
181 return found;
182 }
183
184
185 PersistentSampleMapRecords::PersistentSampleMapRecords(
186 PersistentSparseHistogramDataManager* data_manager,
187 uint64_t sample_map_id)
188 : data_manager_(data_manager), sample_map_id_(sample_map_id) {}
189
190 PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;
191
192 PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
193 const void* user) {
194 DCHECK(!user_);
195 user_ = user;
196 seen_ = 0;
197 return this;
198 }
199
200 void PersistentSampleMapRecords::Release(const void* user) {
201 DCHECK_EQ(user_, user);
202 user_ = nullptr;
203 }
204
205 PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
206 DCHECK(user_);
207
208 // If there are no unseen records, lock and swap in all the found ones.
209 if (records_.size() == seen_) {
210 if (!data_manager_->LoadRecords(this))
211 return false;
212 }
213
214 // Return the next record. Records *must* be returned in the same order
215 // they are found in the persistent memory in order to ensure that all
216 // objects using this data always have the same state. Race conditions
217 // can cause duplicate records so using the "first found" is the only
218 // guarantee that all objects always access the same one.
219 DCHECK_LT(seen_, records_.size());
220 return records_[seen_++];
221 }
222
223 PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
224 HistogramBase::Sample value) {
225 return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
226 sample_map_id_, value);
227 }
228
229
230 // This data will be held in persistent memory in order for processes to
231 // locate and use histograms created elsewhere.
232 struct PersistentHistogramAllocator::PersistentHistogramData {
233 // SHA1(Histogram): Increment this if structure changes!
234 static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;
235
236 // Expected size for 32/64-bit check.
237 static constexpr size_t kExpectedInstanceSize =
238 40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;
239
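// Histogram configuration captured at creation time, plus references into
// the persistent segment: |ranges_ref| points at the bucket-boundary array
// and |counts_ref| at the delayed counts/logged-counts allocation.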
240 int32_t histogram_type;
241 int32_t flags;
242 int32_t minimum;
243 int32_t maximum;
244 uint32_t bucket_count;
245 PersistentMemoryAllocator::Reference ranges_ref;
246 uint32_t ranges_checksum;
247 std::atomic<PersistentMemoryAllocator::Reference> counts_ref;
248 HistogramSamples::Metadata samples_metadata;
249 HistogramSamples::Metadata logged_metadata;
250
251 // Space for the histogram name will be added during the actual allocation
252 // request. This must be the last field of the structure. A zero-size array
253 // or a "flexible" array would be preferred but is not (yet) valid C++.
254 char name[sizeof(uint64_t)]; // Force 64-bit alignment on 32-bit builds.
255 };
256
257 PersistentHistogramAllocator::Iterator::Iterator(
258 PersistentHistogramAllocator* allocator)
259 : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}
260
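// Returns the next histogram found by the underlying memory iterator,
// skipping the record referenced by |ignore| (typically the record this
// process itself just created; see ImportHistogramsToStatisticsRecorder()).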
261 std::unique_ptr<HistogramBase>
262 PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
263 PersistentMemoryAllocator::Reference ref;
264 while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
265 if (ref != ignore)
266 return allocator_->GetHistogram(ref);
267 }
268 return nullptr;
269 }
270
271 PersistentHistogramAllocator::PersistentHistogramAllocator(
272 std::unique_ptr<PersistentMemoryAllocator> memory)
273 : memory_allocator_(std::move(memory)),
274 sparse_histogram_data_manager_(memory_allocator_.get()) {}
275
276 PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;
277
278 std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
279 Reference ref) {
280 // Unfortunately, the histogram "pickle" methods cannot be used as part of
281 // the persistence because the deserialization methods always create local
282 // count data (while these must reference the persistent counts) and always
283 // add it to the local list of known histograms (while these may be simple
284 // references to histograms in other processes).
285 PersistentHistogramData* data =
286 memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
287 const size_t length = memory_allocator_->GetAllocSize(ref);
288
289 // Check that metadata is reasonable: name is null-terminated and non-empty,
290 // ID fields have been loaded with a hash of the name (0 is considered
291 // unset/invalid).
292 if (!data || data->name[0] == '\0' ||
293 reinterpret_cast<char*>(data)[length - 1] != '\0' ||
294 data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
295 // Note: Sparse histograms use |id + 1| in |logged_metadata|.
296 (data->logged_metadata.id != data->samples_metadata.id &&
297 data->logged_metadata.id != data->samples_metadata.id + 1) ||
298 // Most non-matching values happen due to truncated names. Ideally, we
299 // could just verify the name length based on the overall alloc length,
300 // but that doesn't work because the allocated block may have been
301 // aligned to the next boundary value.
302 HashMetricName(data->name) != data->samples_metadata.id) {
303 return nullptr;
304 }
305 return CreateHistogram(data);
306 }
307
308 std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
309 HistogramType histogram_type,
310 const std::string& name,
311 int minimum,
312 int maximum,
313 const BucketRanges* bucket_ranges,
314 int32_t flags,
315 Reference* ref_ptr) {
316 // If the allocator is corrupt, don't waste time trying anything else.
317 // This also allows differentiating on the dashboard between allocations
318 // failed due to a corrupt allocator and the number of process instances
319 // with one, the latter being indicated by "newly corrupt", below.
320 if (memory_allocator_->IsCorrupt())
321 return nullptr;
322
323 // Create the metadata necessary for a persistent sparse histogram. This
324 // is done first because it is a small subset of what is required for
325 // other histograms. The type is "under construction" so that a crash
326 // during the datafill doesn't leave a bad record around that could cause
327 // confusion by another process trying to read it. It will be corrected
328 // once histogram construction is complete.
329 PersistentHistogramData* histogram_data =
330 memory_allocator_->New<PersistentHistogramData>(
331 offsetof(PersistentHistogramData, name) + name.length() + 1);
332 if (histogram_data) {
333 memcpy(histogram_data->name, name.c_str(), name.size() + 1);
334 histogram_data->histogram_type = histogram_type;
335 histogram_data->flags = flags | HistogramBase::kIsPersistent;
336 }
337
338 // Create the remaining metadata necessary for regular histograms.
339 if (histogram_type != SPARSE_HISTOGRAM) {
340 size_t bucket_count = bucket_ranges->bucket_count();
341 size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
342 if (counts_bytes == 0) {
343 // |bucket_count| was out-of-range.
344 return nullptr;
345 }
346
347 // Since the StatisticsRecorder keeps a global collection of BucketRanges
348 // objects for re-use, it would be dangerous for one to hold a reference
349 // from a persistent allocator that is not the global one (which is
350 // permanent once set). If this stops being the case, this check can
351 // become an "if" condition beside "!ranges_ref" below and before
352 // set_persistent_reference() farther down.
353 DCHECK_EQ(this, GlobalHistogramAllocator::Get());
354
355 // Re-use an existing BucketRanges persistent allocation if one is known;
356 // otherwise, create one.
357 PersistentMemoryAllocator::Reference ranges_ref =
358 bucket_ranges->persistent_reference();
359 if (!ranges_ref) {
360 size_t ranges_count = bucket_count + 1;
361 size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
362 ranges_ref =
363 memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
364 if (ranges_ref) {
365 HistogramBase::Sample* ranges_data =
366 memory_allocator_->GetAsArray<HistogramBase::Sample>(
367 ranges_ref, kTypeIdRangesArray, ranges_count);
368 if (ranges_data) {
369 for (size_t i = 0; i < bucket_ranges->size(); ++i)
370 ranges_data[i] = bucket_ranges->range(i);
371 bucket_ranges->set_persistent_reference(ranges_ref);
372 } else {
373 // This should never happen but be tolerant if it does.
374 ranges_ref = PersistentMemoryAllocator::kReferenceNull;
375 }
376 }
377 } else {
378 DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
379 }
380
381
382 // Only continue here if all allocations were successful. If they weren't,
383 // there is no way to free the space but that's not really a problem since
384 // the allocations only fail because the space is full or corrupt and so
385 // any future attempts will also fail.
386 if (ranges_ref && histogram_data) {
387 histogram_data->minimum = minimum;
388 histogram_data->maximum = maximum;
389 // |bucket_count| must fit within 32-bits or the allocation of the counts
390 // array would have failed for being too large; the allocator supports
391 // less than 4GB total size.
392 histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
393 histogram_data->ranges_ref = ranges_ref;
394 histogram_data->ranges_checksum = bucket_ranges->checksum();
395 } else {
396 histogram_data = nullptr; // Clear this for proper handling below.
397 }
398 }
399
400 if (histogram_data) {
401 // Create the histogram using resources in persistent memory. This ends up
402 // resolving the "ref" values stored in histogram_data instead of just
403 // using what is already known above but avoids duplicating the switch
404 // statement here and serves as a double-check that everything is
405 // correct before committing the new histogram to persistent space.
406 std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
407 DCHECK(histogram);
408 DCHECK_NE(0U, histogram_data->samples_metadata.id);
409 DCHECK_NE(0U, histogram_data->logged_metadata.id);
410
411 PersistentMemoryAllocator::Reference histogram_ref =
412 memory_allocator_->GetAsReference(histogram_data);
413 if (ref_ptr != nullptr)
414 *ref_ptr = histogram_ref;
415
416 // By storing the reference within the allocator to this histogram, the
417 // next import (which will happen before the next histogram creation)
418 // will know to skip it.
419 // See also the comment in ImportHistogramsToStatisticsRecorder().
420 last_created_.store(histogram_ref, std::memory_order_relaxed);
421 return histogram;
422 }
423
424 return nullptr;
425 }
426
427 void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
428 bool registered) {
429 if (registered) {
430 // If the created persistent histogram was registered then it needs to
431 // be marked as "iterable" in order to be found by other processes. This
432 // happens only after the histogram is fully formed so it's impossible for
433 // code iterating through the allocator to read a partially created record.
434 memory_allocator_->MakeIterable(ref);
435 } else {
436 // If it wasn't registered then a race condition must have caused two to
437 // be created. The allocator does not support releasing the acquired memory
438 // so just change the type to be empty.
439 memory_allocator_->ChangeType(ref, 0,
440 PersistentHistogramData::kPersistentTypeId,
441 /*clear=*/false);
442 }
443 }
444
445 void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
446 HistogramBase* histogram) {
447 DCHECK(histogram);
448
449 HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
450 if (!existing) {
451 // The above should never fail but if it does, no real harm is done.
452 // The data won't be merged but it also won't be recorded as merged
453 // so a future try, if successful, will get what was missed. If it
454 // continues to fail, some metric data will be lost but that is better
455 // than crashing.
456 return;
457 }
458
459 // Merge the delta from the passed object to the one in the SR.
460 existing->AddSamples(*histogram->SnapshotDelta());
461 }
462
463 void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
464 const HistogramBase* histogram) {
465 DCHECK(histogram);
466
467 HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
468 if (!existing) {
469 // The above should never fail but if it does, no real harm is done.
470 // Some metric data will be lost but that is better than crashing.
471 return;
472 }
473
474 // Merge the delta from the passed object to the one in the SR.
475 existing->AddSamples(*histogram->SnapshotFinalDelta());
476 }
477
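// Sample-map records for sparse histograms are managed centrally by the
// per-allocator data manager; this simply forwards the acquisition request.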
478 PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
479 uint64_t id,
480 const void* user) {
481 return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
482 }
483
484 void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
485 memory_allocator_->CreateTrackingHistograms(name);
486 }
487
488 void PersistentHistogramAllocator::UpdateTrackingHistograms() {
489 memory_allocator_->UpdateTrackingHistograms();
490 }
491
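// Takes ownership of |ranges_manager|. Once set, BucketRanges recreated from
// persistent data are registered with it rather than with the global
// StatisticsRecorder (see CreateHistogram() below).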
492 void PersistentHistogramAllocator::SetRangesManager(
493 RangesManager* ranges_manager) {
494 ranges_manager_.reset(ranges_manager);
495 }
496
497 void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
498 last_created_.store(0, std::memory_order_relaxed);
499 }
500
501 std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
502 PersistentHistogramData* histogram_data_ptr) {
503 if (!histogram_data_ptr)
504 return nullptr;
505
506 // Sparse histograms are quite different so handle them as a special case.
507 if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
508 std::unique_ptr<HistogramBase> histogram =
509 SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
510 &histogram_data_ptr->samples_metadata,
511 &histogram_data_ptr->logged_metadata);
512 DCHECK(histogram);
513 histogram->SetFlags(histogram_data_ptr->flags);
514 return histogram;
515 }
516
517 // Copy the configuration fields from histogram_data_ptr to local storage
518 // because anything in persistent memory cannot be trusted as it could be
519 // changed at any moment by a malicious actor that shares access. The local
520 // values are validated below and then used to create the histogram, knowing
521 // they haven't changed between validation and use.
522 int32_t histogram_type = histogram_data_ptr->histogram_type;
523 int32_t histogram_flags = histogram_data_ptr->flags;
524 int32_t histogram_minimum = histogram_data_ptr->minimum;
525 int32_t histogram_maximum = histogram_data_ptr->maximum;
526 uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
527 uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
528 uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;
529
530 HistogramBase::Sample* ranges_data =
531 memory_allocator_->GetAsArray<HistogramBase::Sample>(
532 histogram_ranges_ref, kTypeIdRangesArray,
533 PersistentMemoryAllocator::kSizeAny);
534
535 const uint32_t max_buckets =
536 std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
537 size_t required_bytes =
538 (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
539 size_t allocated_bytes =
540 memory_allocator_->GetAllocSize(histogram_ranges_ref);
541 if (!ranges_data || histogram_bucket_count < 2 ||
542 histogram_bucket_count >= max_buckets ||
543 allocated_bytes < required_bytes) {
544 return nullptr;
545 }
546
547 std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
548 ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
549 if (!created_ranges)
550 return nullptr;
551 DCHECK_EQ(created_ranges->size(), histogram_bucket_count + 1);
552 DCHECK_EQ(created_ranges->range(1), histogram_minimum);
553 DCHECK_EQ(created_ranges->range(histogram_bucket_count - 1),
554 histogram_maximum);
555 const BucketRanges* ranges;
556 if (ranges_manager_) {
557 ranges = ranges_manager_->RegisterOrDeleteDuplicateRanges(
558 created_ranges.release());
559 } else {
560 ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
561 created_ranges.release());
562 }
563
564 size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
565 PersistentMemoryAllocator::Reference counts_ref =
566 histogram_data_ptr->counts_ref.load(std::memory_order_acquire);
567 if (counts_bytes == 0 ||
568 (counts_ref != 0 &&
569 memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
570 return nullptr;
571 }
572
573 // The "counts" data (including both samples and logged samples) is a delayed
574 // persistent allocation meaning that though its size and storage for a
575 // reference is defined, no space is reserved until actually needed. When
576 // it is needed, memory will be allocated from the persistent segment and
577 // a reference to it stored at the passed address. Other threads can then
578 // notice the valid reference and access the same data.
579 DelayedPersistentAllocation counts_data(memory_allocator_.get(),
580 &histogram_data_ptr->counts_ref,
581 kTypeIdCountsArray, counts_bytes);
582
583 // A second delayed allocation is defined using the same reference storage
584 // location as the first so the allocation of one will automatically be found
585 // by the other. Within the block, the first half of the space is for "counts"
586 // and the second half is for "logged counts".
587 DelayedPersistentAllocation logged_data(
588 memory_allocator_.get(), &histogram_data_ptr->counts_ref,
589 kTypeIdCountsArray, counts_bytes, counts_bytes / 2);
590
591 // Create the right type of histogram.
592 const char* name = histogram_data_ptr->name;
593 std::unique_ptr<HistogramBase> histogram;
594 switch (histogram_type) {
595 case HISTOGRAM:
596 histogram =
597 Histogram::PersistentCreate(name, ranges, counts_data, logged_data,
598 &histogram_data_ptr->samples_metadata,
599 &histogram_data_ptr->logged_metadata);
600 DCHECK(histogram);
601 break;
602 case LINEAR_HISTOGRAM:
603 histogram = LinearHistogram::PersistentCreate(
604 name, ranges, counts_data, logged_data,
605 &histogram_data_ptr->samples_metadata,
606 &histogram_data_ptr->logged_metadata);
607 DCHECK(histogram);
608 break;
609 case BOOLEAN_HISTOGRAM:
610 histogram = BooleanHistogram::PersistentCreate(
611 name, ranges, counts_data, logged_data,
612 &histogram_data_ptr->samples_metadata,
613 &histogram_data_ptr->logged_metadata);
614 DCHECK(histogram);
615 break;
616 case CUSTOM_HISTOGRAM:
617 histogram = CustomHistogram::PersistentCreate(
618 name, ranges, counts_data, logged_data,
619 &histogram_data_ptr->samples_metadata,
620 &histogram_data_ptr->logged_metadata);
621 DCHECK(histogram);
622 break;
623 default:
624 return nullptr;
625 }
626
627 if (histogram) {
628 DCHECK_EQ(histogram_type, histogram->GetHistogramType());
629 histogram->SetFlags(histogram_flags);
630 }
631
632 return histogram;
633 }
634
635 HistogramBase*
636 PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
637 const HistogramBase* histogram) {
638 // This should never be called on the global histogram allocator as objects
639 // created there are already within the global statistics recorder.
640 DCHECK_NE(GlobalHistogramAllocator::Get(), this);
641 DCHECK(histogram);
642
643 HistogramBase* existing =
644 StatisticsRecorder::FindHistogram(histogram->histogram_name());
645 if (existing)
646 return existing;
647
648 // Adding the passed histogram to the SR would cause a problem if the
649 // allocator that holds it eventually goes away. Instead, create a new
650 // one from a serialized version. Deserialization calls the appropriate
651 // FactoryGet() which will create the histogram in the global persistent-
652 // histogram allocator if such is set.
653 base::Pickle pickle;
654 histogram->SerializeInfo(&pickle);
655 PickleIterator iter(pickle);
656 existing = DeserializeHistogramInfo(&iter);
657 if (!existing)
658 return nullptr;
659
660 // Make sure there is no "serialization" flag set.
661 DCHECK(!existing->HasFlags(HistogramBase::kIPCSerializationSourceFlag));
662 // Record the newly created histogram in the SR.
663 return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
664 }
665
666 GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;
667
668 // static
669 void GlobalHistogramAllocator::CreateWithPersistentMemory(
670 void* base,
671 size_t size,
672 size_t page_size,
673 uint64_t id,
674 StringPiece name) {
675 Set(WrapUnique(
676 new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
677 base, size, page_size, id, name, false))));
678 }
679
680 // static
681 void GlobalHistogramAllocator::CreateWithLocalMemory(
682 size_t size,
683 uint64_t id,
684 StringPiece name) {
685 Set(WrapUnique(new GlobalHistogramAllocator(
686 std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
687 }
688
689 #if !BUILDFLAG(IS_NACL)
690 // static
691 bool GlobalHistogramAllocator::CreateWithFile(const FilePath& file_path,
692 size_t size,
693 uint64_t id,
694 StringPiece name,
695 bool exclusive_write) {
696 uint32_t flags = File::FLAG_OPEN_ALWAYS | File::FLAG_WIN_SHARE_DELETE |
697 File::FLAG_READ | File::FLAG_WRITE;
698 if (exclusive_write)
699 flags |= File::FLAG_WIN_EXCLUSIVE_WRITE;
700 File file(file_path, flags);
701 if (!file.IsValid())
702 return false;
703
704 std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
705 bool success = false;
706 if (file.created()) {
707 success = mmfile->Initialize(std::move(file), {0, size},
708 MemoryMappedFile::READ_WRITE_EXTEND);
709 } else {
710 success = mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
711 }
712 if (!success ||
713 !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
714 return false;
715 }
716
717 Set(WrapUnique(new GlobalHistogramAllocator(
718 std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), 0, id,
719 name, false))));
720 Get()->SetPersistentLocation(file_path);
721 return true;
722 }
723
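// Rotates the metrics files: the previous "active" file becomes the new
// "base" file, a pre-built "spare" file (if present) is renamed to become the
// new "active" file, and that active file then backs the global allocator.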
724 // static
725 bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
726 const FilePath& active_path,
727 const FilePath& spare_path,
728 size_t size,
729 uint64_t id,
730 StringPiece name) {
731 // Old "active" becomes "base".
732 if (!base::ReplaceFile(active_path, base_path, nullptr))
733 base::DeleteFile(base_path);
734 if (base::PathExists(active_path))
735 return false;
736
737 // Move any "spare" into "active". Okay to continue if file doesn't exist.
738 if (!spare_path.empty())
739 base::ReplaceFile(spare_path, active_path, nullptr);
740
741 return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
742 name);
743 }
744
745 // static
746 bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
747 size_t size,
748 uint64_t id,
749 StringPiece name) {
750 FilePath base_path = ConstructFilePath(dir, name);
751 FilePath active_path = ConstructFilePathForActiveFile(dir, name);
752 FilePath spare_path = ConstructFilePath(dir, std::string(name) + "-spare");
753 return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
754 name);
755 }
756
757 // static
758 FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
759 StringPiece name) {
760 return dir.AppendASCII(name).AddExtension(
761 PersistentMemoryAllocator::kFileExtension);
762 }
763
764 // static
765 FilePath GlobalHistogramAllocator::ConstructFilePathForActiveFile(
766 const FilePath& dir,
767 StringPiece name) {
768 return ConstructFilePath(dir, std::string(name) + "-active");
769 }
770
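// Builds a path of the form "<dir>/<name>-<timestamp>-<pid>" plus the
// allocator's file extension, with the timestamp and PID rendered as
// uppercase hex; ParseFilePath() below performs the inverse operation.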
771 // static
772 FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
773 const FilePath& dir,
774 StringPiece name,
775 base::Time stamp,
776 ProcessId pid) {
777 return ConstructFilePath(
778 dir,
779 StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
780 static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
781 }
782
783 // static
784 FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
785 const FilePath& dir,
786 StringPiece name) {
787 return ConstructFilePathForUploadDir(dir, name, Time::Now(),
788 GetCurrentProcId());
789 }
790
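// Recovers the name, timestamp, and PID from a file name produced by
// ConstructFilePathForUploadDir(); returns false if the name does not split
// into exactly those parts plus the file extension.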
791 // static
792 bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
793 std::string* out_name,
794 Time* out_stamp,
795 ProcessId* out_pid) {
796 std::string filename = path.BaseName().AsUTF8Unsafe();
797 std::vector<base::StringPiece> parts = base::SplitStringPiece(
798 filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
799 if (parts.size() != 4)
800 return false;
801
802 if (out_name)
803 *out_name = std::string(parts[0]);
804
805 if (out_stamp) {
806 int64_t stamp;
807 if (!HexStringToInt64(parts[1], &stamp))
808 return false;
809 *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
810 }
811
812 if (out_pid) {
813 int64_t pid;
814 if (!HexStringToInt64(parts[2], &pid))
815 return false;
816 *out_pid = static_cast<ProcessId>(pid);
817 }
818
819 return true;
820 }
821
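// Builds the spare file under a temporary ".tmp" name, sizing it via a
// READ_WRITE_EXTEND mapping, and only renames it into place once fully
// created so that a partially written spare can never be picked up later.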
822 bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
823 size_t size) {
824 // If the spare file already exists, it was created in a previous session and
825 // is still unused, so do nothing.
826 if (base::PathExists(spare_path)) {
827 return false;
828 }
829 FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
830 bool success;
831 {
832 File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
833 File::FLAG_READ | File::FLAG_WRITE);
834 success = spare_file.IsValid();
835
836 if (success) {
837 MemoryMappedFile mmfile;
838 success = mmfile.Initialize(std::move(spare_file), {0, size},
839 MemoryMappedFile::READ_WRITE_EXTEND);
840 }
841 }
842
843 if (success)
844 success = ReplaceFile(temp_spare_path, spare_path, nullptr);
845
846 if (!success)
847 DeleteFile(temp_spare_path);
848
849 return success;
850 }
851 #endif // !BUILDFLAG(IS_NACL)
852
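// Maps the given writable shared-memory region and, if the mapping is usable
// as persistent storage, installs a global allocator backed by it; on failure
// this quietly does nothing.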
853 // static
854 void GlobalHistogramAllocator::CreateWithSharedMemoryRegion(
855 const WritableSharedMemoryRegion& region) {
856 base::WritableSharedMemoryMapping mapping = region.Map();
857 if (!mapping.IsValid() ||
858 !WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
859 mapping)) {
860 return;
861 }
862
863 Set(WrapUnique(new GlobalHistogramAllocator(
864 std::make_unique<WritableSharedPersistentMemoryAllocator>(
865 std::move(mapping), 0, StringPiece()))));
866 }
867
868 // static
869 void GlobalHistogramAllocator::Set(
870 std::unique_ptr<GlobalHistogramAllocator> allocator) {
871 // Releasing or changing an allocator is extremely dangerous because it
872 // likely has histograms stored within it. If the backing memory is also
873 // released, future accesses to those histograms will seg-fault.
874 CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
875 subtle::Release_Store(&g_histogram_allocator,
876 reinterpret_cast<intptr_t>(allocator.release()));
877 size_t existing = StatisticsRecorder::GetHistogramCount();
878
879 DVLOG_IF(1, existing)
880 << existing << " histograms were created before persistence was enabled.";
881 }
882
883 // static
884 GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
885 return reinterpret_cast<GlobalHistogramAllocator*>(
886 subtle::Acquire_Load(&g_histogram_allocator));
887 }
888
889 // static
890 std::unique_ptr<GlobalHistogramAllocator>
891 GlobalHistogramAllocator::ReleaseForTesting() {
892 GlobalHistogramAllocator* histogram_allocator = Get();
893 if (!histogram_allocator)
894 return nullptr;
895 PersistentMemoryAllocator* memory_allocator =
896 histogram_allocator->memory_allocator();
897
898 // Before releasing the memory, it's necessary to have the Statistics-
899 // Recorder forget about the histograms contained therein; otherwise,
900 // some operations will try to access them and the released memory.
901 PersistentMemoryAllocator::Iterator iter(memory_allocator);
902 const PersistentHistogramData* data;
903 while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
904 StatisticsRecorder::ForgetHistogramForTesting(data->name);
905 }
906
907 subtle::Release_Store(&g_histogram_allocator, 0);
908 return WrapUnique(histogram_allocator);
909 }
910
911 void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
912 persistent_location_ = location;
913 }
914
915 const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
916 return persistent_location_;
917 }
918
919 bool GlobalHistogramAllocator::HasPersistentLocation() const {
920 return !persistent_location_.empty();
921 }
922
923 bool GlobalHistogramAllocator::MovePersistentFile(const FilePath& dir) {
924 DCHECK(HasPersistentLocation());
925
926 FilePath new_file_path = dir.Append(persistent_location_.BaseName());
927
928 // Change the location of the persistent file. This is fine to do even though
929 // the file is currently "opened" by this process.
930 if (!base::ReplaceFile(persistent_location_, new_file_path, nullptr)) {
931 return false;
932 }
933
934 SetPersistentLocation(new_file_path);
935 return true;
936 }
937
938 bool GlobalHistogramAllocator::WriteToPersistentLocation() {
939 #if BUILDFLAG(IS_NACL)
940 // NACL doesn't support file operations, including ImportantFileWriter.
941 NOTREACHED();
942 return false;
943 #else
944 // Stop if no destination is set.
945 if (!HasPersistentLocation()) {
946 NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
947 << " to file because no location was set.";
948 return false;
949 }
950
951 StringPiece contents(static_cast<const char*>(data()), used());
952 if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
953 contents)) {
954 LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
955 << " to file: " << persistent_location_.value();
956 return false;
957 }
958
959 return true;
960 #endif
961 }
962
963 void GlobalHistogramAllocator::DeletePersistentLocation() {
964 memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
965
966 #if BUILDFLAG(IS_NACL)
967 NOTREACHED();
968 #else
969 if (!HasPersistentLocation()) {
970 return;
971 }
972
973 // Open (with delete) and then immediately close the file by going out of
974 // scope. This is the only cross-platform safe way to delete a file that may
975 // be open elsewhere. Open handles will continue to operate normally but
976 // new opens will not be possible.
977 File file(persistent_location_,
978 File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
979 #endif
980 }
981
982 GlobalHistogramAllocator::GlobalHistogramAllocator(
983 std::unique_ptr<PersistentMemoryAllocator> memory)
984 : PersistentHistogramAllocator(std::move(memory)),
985 import_iterator_(this) {
986 }
987
988 void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
989 // Skip the import if it's the histogram that was last created. Should a
990 // race condition cause the "last created" to be overwritten before it
991 // is recognized here then the histogram will be created and be ignored
992 // when it is detected as a duplicate by the statistics-recorder. This
993 // simple check reduces the time of creating persistent histograms by
994 // about 40%.
995 Reference record_to_ignore = last_created();
996
997 // There is no lock on this because the iterator is lock-free while still
998 // guaranteed to return each entry only once. The StatisticsRecorder
999 // has its own lock so the Register operation is safe.
1000 while (true) {
1001 std::unique_ptr<HistogramBase> histogram =
1002 import_iterator_.GetNextWithIgnore(record_to_ignore);
1003 if (!histogram)
1004 break;
1005 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
1006 }
1007 }
1008
1009 } // namespace base
1010