// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif

#include "base/metrics/persistent_histogram_allocator.h"

#include <atomic>
#include <limits>
#include <string_view>
#include <utility>

#include "base/debug/crash_logging.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/notreached.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"

namespace base {

namespace {

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway due to the fact that they depend on data
// managed elsewhere and which could be destructed first. An AtomicWord is
// used instead of std::atomic because the latter can create global ctors
// and dtors.
subtle::AtomicWord g_histogram_allocator = 0;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
std::unique_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}

bool MergeSamplesToExistingHistogram(
    HistogramBase* existing,
    const HistogramBase* histogram,
    std::unique_ptr<HistogramSamples> samples) {
  // Check if the histograms match, which is necessary for merging their data.
  HistogramType existing_type = existing->GetHistogramType();
  if (existing_type == HistogramType::DUMMY_HISTOGRAM) {
    // Merging into a dummy histogram (e.g. histogram is expired) is a no-op and
    // not considered a failure case.
    return true;
  }
  if (histogram->GetHistogramType() != existing_type) {
    return false;  // Merge failed due to different histogram types.
  }

  if (existing_type == HistogramType::HISTOGRAM ||
      existing_type == HistogramType::LINEAR_HISTOGRAM ||
      existing_type == HistogramType::BOOLEAN_HISTOGRAM ||
      existing_type == HistogramType::CUSTOM_HISTOGRAM) {
    // Only numeric histograms make use of BucketRanges.
    const BucketRanges* existing_buckets =
        static_cast<const Histogram*>(existing)->bucket_ranges();
    const BucketRanges* histogram_buckets =
        static_cast<const Histogram*>(histogram)->bucket_ranges();
    // DCHECK because HasValidChecksum() recomputes the checksum which can be
    // expensive to do in a loop.
    DCHECK(existing_buckets->HasValidChecksum());
    DCHECK(histogram_buckets->HasValidChecksum());

    if (existing_buckets->checksum() != histogram_buckets->checksum()) {
      return false;  // Merge failed due to different buckets.
    }
  }

  // Merge the delta from the passed object to the one in the SR.

  // It's possible for the buckets to differ but their checksums to match due
  // to a collision, in which case AddSamples() will return false, which we
  // propagate to the caller (indicating histogram mismatch).
  return existing->AddSamples(*samples);
}

}  // namespace

PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
    PersistentMemoryAllocator* allocator)
    : allocator_(allocator), record_iterator_(allocator) {}

PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
    default;

std::unique_ptr<PersistentSampleMapRecords>
PersistentSparseHistogramDataManager::CreateSampleMapRecords(uint64_t id) {
  base::AutoLock auto_lock(lock_);
  return std::make_unique<PersistentSampleMapRecords>(
      this, id, GetSampleMapRecordsWhileLocked(id));
}

std::vector<PersistentSparseHistogramDataManager::ReferenceAndSample>*
PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
    uint64_t id) {
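  // operator[] creates a default (null) entry for |id| on first access.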
  auto* samples = &sample_records_[id];
  if (!samples->get()) {
    *samples = std::make_unique<std::vector<ReferenceAndSample>>();
  }
  return samples->get();
}

std::vector<PersistentMemoryAllocator::Reference>
PersistentSparseHistogramDataManager::LoadRecords(
    PersistentSampleMapRecords* sample_map_records,
    std::optional<HistogramBase::Sample> until_value) {
  // DataManager must be locked in order to access the |sample_records_|
  // vectors.
  base::AutoLock auto_lock(lock_);

  // Acquiring a lock is a semi-expensive operation so load some records with
  // each call. More than this number may be loaded if it takes longer to
  // find at least one matching record for the passed object.
  const size_t kMinimumNumberToLoad = 10;
  const uint64_t match_id = sample_map_records->sample_map_id_;

  // Loop while no entry is found OR we haven't yet loaded the minimum number.
  // This will continue reading even after a match is found. Note that it is
  // possible that entries for the passed object were already found in a
  // different call.
  auto& found_records = *sample_map_records->records_;
  bool found = (found_records.size() > sample_map_records->seen_);
  size_t new_records = 0;
  while (!found || new_records < kMinimumNumberToLoad) {
    // Get the next sample-record. The iterator will always resume from where
    // it left off even if it previously had nothing further to return.
    uint64_t found_id;
    HistogramBase::Sample value;
    PersistentMemoryAllocator::Reference ref =
        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                     &found_id, &value);

    // Stop immediately if there are none.
    if (!ref) {
      break;
    }
    ++new_records;

    // The sample-record could be for any sparse histogram. Add the reference
    // to the appropriate collection for later use.
    if (found_id == match_id) {
      found_records.emplace_back(ref, value);
      found = true;
    } else {
      std::vector<ReferenceAndSample>* samples =
          GetSampleMapRecordsWhileLocked(found_id);
      CHECK(samples);
      samples->emplace_back(ref, value);
    }
  }

  // Return all references found that have not yet been seen by
  // |sample_map_records|, up until |until_value| (if applicable).
  std::vector<PersistentMemoryAllocator::Reference> new_references;
  CHECK_GE(found_records.size(), sample_map_records->seen_);
  auto new_found_records =
      span(found_records).subspan(/*offset=*/sample_map_records->seen_);
  new_references.reserve(new_found_records.size());
  for (const auto& new_record : new_found_records) {
    new_references.push_back(new_record.reference);
    // References after |until_value| may have been found. Stop here
    // immediately in such a case, since the caller will not expect any more
    // samples after |until_value|.
    if (until_value.has_value() && new_record.value == until_value.value()) {
      break;
    }
  }
  return new_references;
}

PersistentSampleMapRecords::PersistentSampleMapRecords(
    PersistentSparseHistogramDataManager* data_manager,
    uint64_t sample_map_id,
    std::vector<PersistentSparseHistogramDataManager::ReferenceAndSample>*
        records)
    : data_manager_(data_manager),
      sample_map_id_(sample_map_id),
      records_(records) {}

PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;

std::vector<PersistentMemoryAllocator::Reference>
PersistentSampleMapRecords::GetNextRecords(
    std::optional<HistogramBase::Sample> until_value) {
  auto references = data_manager_->LoadRecords(this, until_value);
  seen_ += references.size();
  return references;
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
    HistogramBase::Sample value) {
  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
                                                     sample_map_id_, value);
}

// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  // SHA1(Histogram): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize =
      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;

  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  std::atomic<PersistentMemoryAllocator::Reference> counts_ref;
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
};

PersistentHistogramAllocator::Iterator::Iterator(
    PersistentHistogramAllocator* allocator)
    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}

std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
    if (ref != ignore)
      return allocator_->GetHistogram(ref);
  }
  return nullptr;
}

PersistentHistogramAllocator::PersistentHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)),
      sparse_histogram_data_manager_(memory_allocator_.get()) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
  const size_t length = memory_allocator_->GetAllocSize(ref);

  // Check that metadata is reasonable: name is null-terminated and non-empty,
  // ID fields have been loaded with a hash of the name (0 is considered
  // unset/invalid).
  if (!data || data->name[0] == '\0' ||
      reinterpret_cast<char*>(data)[length - 1] != '\0' ||
      data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
      // Note: Sparse histograms use |id + 1| in |logged_metadata|.
      (data->logged_metadata.id != data->samples_metadata.id &&
       data->logged_metadata.id != data->samples_metadata.id + 1) ||
      // Most non-matching values happen due to truncated names. Ideally, we
      // could just verify the name length based on the overall alloc length,
      // but that doesn't work because the allocated block may have been
      // aligned to the next boundary value.
      HashMetricName(data->name) != data->samples_metadata.id) {
    return nullptr;
  }
  return CreateHistogram(data);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    std::string_view name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // failed due to a corrupt allocator and the number of process instances
  // with one, the latter being indicated by "newly corrupt", below.
  if (memory_allocator_->IsCorrupt())
    return nullptr;

  // Create the metadata necessary for a persistent sparse histogram. This
  // is done first because it is a small subset of what is required for
  // other histograms. The type is "under construction" so that a crash
  // during the datafill doesn't leave a bad record around that could cause
  // confusion by another process trying to read it. It will be corrected
  // once histogram construction is complete.
  PersistentHistogramData* histogram_data =
      memory_allocator_->New<PersistentHistogramData>(
          offsetof(PersistentHistogramData, name) + name.size() + 1);
  if (histogram_data) {
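    // Copy the name into the storage sized (above) to hold it, and append an
    // explicit NUL terminator.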
    memcpy(histogram_data->name, name.data(), name.size());
    histogram_data->name[name.size()] = '\0';
    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags | HistogramBase::kIsPersistent;

    // |counts_ref| relies on being zero'd out initially. Even though this
    // should always be the case, manually zero it out again here in case there
    // was memory corruption (e.g. if the memory was mapped from a corrupted
    // spare file).
    // TODO(crbug.com/40064026): Remove this if this has no effect, and try to
    // understand better why there is sometimes garbage written in this field.
    histogram_data->counts_ref.store(0, std::memory_order_relaxed);
  }

  // Create the remaining metadata necessary for regular histograms.
  if (histogram_type != SPARSE_HISTOGRAM) {
    size_t bucket_count = bucket_ranges->bucket_count();
    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
    if (counts_bytes == 0) {
      // |bucket_count| was out-of-range.
      return nullptr;
    }

    // Since the StatisticsRecorder keeps a global collection of BucketRanges
    // objects for re-use, it would be dangerous for one to hold a reference
    // from a persistent allocator that is not the global one (which is
    // permanent once set). If this stops being the case, this check can
    // become an "if" condition beside "!ranges_ref" below and before
    // set_persistent_reference() farther down.
    DCHECK_EQ(this, GlobalHistogramAllocator::Get());

    // Re-use an existing BucketRanges persistent allocation if one is known;
    // otherwise, create one.
    PersistentMemoryAllocator::Reference ranges_ref =
        bucket_ranges->persistent_reference();
    if (!ranges_ref) {
      size_t ranges_count = bucket_count + 1;
      size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
      ranges_ref =
          memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
      if (ranges_ref) {
        HistogramBase::Sample* ranges_data =
            memory_allocator_->GetAsArray<HistogramBase::Sample>(
                ranges_ref, kTypeIdRangesArray, ranges_count);
        if (ranges_data) {
          for (size_t i = 0; i < bucket_ranges->size(); ++i)
            ranges_data[i] = bucket_ranges->range(i);
          bucket_ranges->set_persistent_reference(ranges_ref);
        } else {
          // This should never happen but be tolerant if it does.
          ranges_ref = PersistentMemoryAllocator::kReferenceNull;
        }
      }
    } else {
      DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
    }

    // Only continue here if all allocations were successful. If they weren't,
    // there is no way to free the space but that's not really a problem since
    // the allocations only fail because the space is full or corrupt and so
    // any future attempts will also fail.
    if (ranges_ref && histogram_data) {
      histogram_data->minimum = minimum;
      histogram_data->maximum = maximum;
      // |bucket_count| must fit within 32-bits or the allocation of the counts
      // array would have failed for being too large; the allocator supports
      // less than 4GB total size.
      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
      histogram_data->ranges_ref = ranges_ref;
      histogram_data->ranges_checksum = bucket_ranges->checksum();
    } else {
      histogram_data = nullptr;  // Clear this for proper handling below.
    }
  }

  if (histogram_data) {
    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    DCHECK_NE(0U, histogram_data->samples_metadata.id);
    DCHECK_NE(0U, histogram_data->logged_metadata.id);

    PersistentMemoryAllocator::Reference histogram_ref =
        memory_allocator_->GetAsReference(histogram_data);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it.
    // See also the comment in ImportHistogramsToStatisticsRecorder().
    last_created_.store(histogram_ref, std::memory_order_relaxed);
    return histogram;
  }

  return nullptr;
}

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  if (registered) {
    // If the created persistent histogram was registered then it needs to
    // be marked as "iterable" in order to be found by other processes. This
    // happens only after the histogram is fully formed so it's impossible for
    // code iterating through the allocator to read a partially created record.
    memory_allocator_->MakeIterable(ref);
  } else {
    // If it wasn't registered then a race condition must have caused two to
    // be created. The allocator does not support releasing the acquired memory
    // so just change the type to be empty.
    memory_allocator_->ChangeType(ref, 0,
                                  PersistentHistogramData::kPersistentTypeId,
                                  /*clear=*/false);
  }
}

bool PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
    HistogramBase* histogram) {
  DCHECK(histogram);

  // Return immediately if the histogram has no samples since the last delta
  // snapshot. This is to prevent looking up or registering the histogram with
  // the StatisticsRecorder, which requires acquiring a lock.
  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
  if (samples->IsDefinitelyEmpty()) {
    return true;
  }

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    return false;
  }

  return MergeSamplesToExistingHistogram(existing, histogram,
                                         std::move(samples));
}

bool PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
    const HistogramBase* histogram) {
  DCHECK(histogram);

  // Return immediately if the histogram has no samples. This is to prevent
  // looking up or registering the histogram with the StatisticsRecorder, which
  // requires acquiring a lock.
  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotFinalDelta();
  if (samples->IsDefinitelyEmpty()) {
    return true;
  }

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    return false;
  }

  return MergeSamplesToExistingHistogram(existing, histogram,
                                         std::move(samples));
}

std::unique_ptr<PersistentSampleMapRecords>
PersistentHistogramAllocator::CreateSampleMapRecords(uint64_t id) {
  return sparse_histogram_data_manager_.CreateSampleMapRecords(id);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(
    std::string_view name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

void PersistentHistogramAllocator::SetRangesManager(
    RangesManager* ranges_manager) {
  ranges_manager_.reset(ranges_manager);
}

void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
  last_created_.store(0, std::memory_order_relaxed);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr)
    return nullptr;

  // Sparse histograms are quite different so handle them as a special case.
  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
    std::unique_ptr<HistogramBase> histogram =
        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                          &histogram_data_ptr->samples_metadata,
                                          &histogram_data_ptr->logged_metadata);
    DCHECK(histogram);
    histogram->SetFlags(histogram_data_ptr->flags);
    return histogram;
  }

  // Copy the configuration fields from histogram_data_ptr to local storage
  // because anything in persistent memory cannot be trusted as it could be
  // changed at any moment by a malicious actor that shares access. The local
  // values are validated below and then used to create the histogram, knowing
  // they haven't changed between validation and use.
  int32_t histogram_type = histogram_data_ptr->histogram_type;
  int32_t histogram_flags = histogram_data_ptr->flags;
  int32_t histogram_minimum = histogram_data_ptr->minimum;
  int32_t histogram_maximum = histogram_data_ptr->maximum;
  uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
  uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
  uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsArray<HistogramBase::Sample>(
          histogram_ranges_ref, kTypeIdRangesArray,
          PersistentMemoryAllocator::kSizeAny);

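  // Validate the ranges configuration: a histogram with N buckets needs
  // N + 1 boundary values, so the stored array must be large enough to hold
  // |histogram_bucket_count + 1| samples.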
  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
  size_t required_bytes =
      (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_ranges_ref);
  if (!ranges_data || histogram_bucket_count < 2 ||
      histogram_bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    return nullptr;
  }

  std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
      ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
  if (!created_ranges || created_ranges->size() != histogram_bucket_count + 1 ||
      created_ranges->range(1) != histogram_minimum ||
      created_ranges->range(histogram_bucket_count - 1) != histogram_maximum) {
    return nullptr;
  }
  const BucketRanges* ranges;
  if (ranges_manager_) {
    ranges =
        ranges_manager_->GetOrRegisterCanonicalRanges(created_ranges.get());
    if (ranges == created_ranges.get()) {
      // `ranges_manager_` took ownership of `created_ranges`.
      created_ranges.release();
    }
  } else {
    ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
        created_ranges.release());
  }

  size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
  PersistentMemoryAllocator::Reference counts_ref =
      histogram_data_ptr->counts_ref.load(std::memory_order_acquire);
  if (counts_bytes == 0 ||
      (counts_ref != 0 &&
       memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
    return nullptr;
  }

  // The "counts" data (including both samples and logged samples) is a delayed
  // persistent allocation meaning that though its size and storage for a
  // reference is defined, no space is reserved until actually needed. When
  // it is needed, memory will be allocated from the persistent segment and
  // a reference to it stored at the passed address. Other threads can then
  // notice the valid reference and access the same data.
  DelayedPersistentAllocation counts_data(memory_allocator_.get(),
                                          &histogram_data_ptr->counts_ref,
                                          kTypeIdCountsArray, counts_bytes);

  // A second delayed allocation is defined using the same reference storage
  // location as the first so the allocation of one will automatically be found
  // by the other. Within the block, the first half of the space is for "counts"
  // and the second half is for "logged counts".
  DelayedPersistentAllocation logged_data(
      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
      kTypeIdCountsArray, counts_bytes, counts_bytes / 2);

  // Create the right type of histogram.
  const char* name = histogram_data_ptr->name;
  std::unique_ptr<HistogramBase> histogram;
  switch (histogram_type) {
    case HISTOGRAM:
      histogram =
          Histogram::PersistentCreate(name, ranges, counts_data, logged_data,
                                      &histogram_data_ptr->samples_metadata,
                                      &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      return nullptr;
  }

  if (histogram) {
    DCHECK_EQ(histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_flags);
  }

  return histogram;
}

HistogramBase*
PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
    const HistogramBase* histogram) {
  // This should never be called on the global histogram allocator as objects
  // created there are already within the global statistics recorder.
  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
  DCHECK(histogram);

  HistogramBase* existing =
      StatisticsRecorder::FindHistogram(histogram->histogram_name());
  if (existing) {
    return existing;
  }

  // Adding the passed histogram to the SR would cause a problem if the
  // allocator that holds it eventually goes away. Instead, create a new
  // one from a serialized version. Deserialization calls the appropriate
  // FactoryGet() which will create the histogram in the global persistent-
  // histogram allocator if such is set.
  base::Pickle pickle;
  histogram->SerializeInfo(&pickle);
  PickleIterator iter(pickle);
  existing = DeserializeHistogramInfo(&iter);
  if (!existing)
    return nullptr;

  // Make sure there is no "serialization" flag set.
  DCHECK(!existing->HasFlags(HistogramBase::kIPCSerializationSourceFlag));
  // Record the newly created histogram in the SR.
  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}

GlobalHistogramAllocator::~GlobalHistogramAllocator() {
  // GlobalHistogramAllocator should never be destroyed because Histogram
  // objects may keep pointers to its memory.
  NOTREACHED();
}

// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    std::string_view name) {
  Set(new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
      base, size, page_size, id, name, PersistentMemoryAllocator::kReadWrite)));
}

// static
void GlobalHistogramAllocator::CreateWithLocalMemory(size_t size,
                                                     uint64_t id,
                                                     std::string_view name) {
  Set(new GlobalHistogramAllocator(
      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name)));
}

#if !BUILDFLAG(IS_NACL)
// static
bool GlobalHistogramAllocator::CreateWithFile(const FilePath& file_path,
                                              size_t size,
                                              uint64_t id,
                                              std::string_view name,
                                              bool exclusive_write) {
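  // FLAG_WIN_SHARE_DELETE allows the file to be marked for deletion on
  // Windows while it is still open here (it is a no-op on other platforms);
  // see DeletePersistentLocation() below.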
  uint32_t flags = File::FLAG_OPEN_ALWAYS | File::FLAG_WIN_SHARE_DELETE |
                   File::FLAG_READ | File::FLAG_WRITE;
  if (exclusive_write)
    flags |= File::FLAG_WIN_EXCLUSIVE_WRITE;
  File file(file_path, flags);
  if (!file.IsValid())
    return false;

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  bool success = false;
  const bool file_created = file.created();
  if (file_created) {
    success = mmfile->Initialize(std::move(file), {0, size},
                                 MemoryMappedFile::READ_WRITE_EXTEND);
  } else {
    success = mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  }
  if (!success ||
      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
    if (file_created) {
      // If we created the file, but it couldn't be used, delete it.
      // This could happen if we were able to create a file of all-zeroes, but
      // couldn't write to it due to lack of disk space.
      base::DeleteFile(file_path);
    }
    return false;
  }

  Set(new GlobalHistogramAllocator(
      std::make_unique<FilePersistentMemoryAllocator>(
          std::move(mmfile), 0, id, name,
          PersistentMemoryAllocator::kReadWrite)));
  Get()->SetPersistentLocation(file_path);
  return true;
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
                                                    const FilePath& active_path,
                                                    const FilePath& spare_path,
                                                    size_t size,
                                                    uint64_t id,
                                                    std::string_view name) {
  // Old "active" becomes "base".
  if (!base::ReplaceFile(active_path, base_path, nullptr))
    base::DeleteFile(base_path);
  if (base::PathExists(active_path))
    return false;

  // Move any "spare" into "active". Okay to continue if file doesn't exist.
  if (!spare_path.empty())
    base::ReplaceFile(spare_path, active_path, nullptr);

  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
                                                        name);
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFileInDir(
    const FilePath& dir,
    size_t size,
    uint64_t id,
    std::string_view name) {
  FilePath base_path = ConstructFilePath(dir, name);
  FilePath active_path = ConstructFilePathForActiveFile(dir, name);
  FilePath spare_path = ConstructFilePath(dir, std::string(name) + "-spare");
  return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
                              name);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
                                                     std::string_view name) {
  return dir.AppendASCII(name).AddExtension(
      PersistentMemoryAllocator::kFileExtension);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForActiveFile(
    const FilePath& dir,
    std::string_view name) {
  return ConstructFilePath(dir, std::string(name) + "-active");
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    std::string_view name,
    base::Time stamp,
    ProcessId pid) {
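  // The base name is "<name>-<timestamp>-<pid>" with the timestamp and PID
  // rendered in hexadecimal; ParseFilePath() depends on this layout.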
  return ConstructFilePath(
      dir,
      StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
                   static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    std::string_view name) {
  return ConstructFilePathForUploadDir(dir, name, Time::Now(),
                                       GetCurrentProcId());
}

// static
bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
                                             std::string* out_name,
                                             Time* out_stamp,
                                             ProcessId* out_pid) {
  std::string filename = path.BaseName().AsUTF8Unsafe();
  std::vector<std::string_view> parts = base::SplitStringPiece(
      filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
  if (parts.size() != 4)
    return false;

  if (out_name)
    *out_name = std::string(parts[0]);

  if (out_stamp) {
    int64_t stamp;
    if (!HexStringToInt64(parts[1], &stamp))
      return false;
    *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
  }

  if (out_pid) {
    int64_t pid;
    if (!HexStringToInt64(parts[2], &pid))
      return false;
    *out_pid = static_cast<ProcessId>(pid);
  }

  return true;
}

bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
                                               size_t size) {
  // If the spare file already exists, it was created in a previous session and
  // is still unused, so do nothing.
  if (base::PathExists(spare_path)) {
    return false;
  }
  FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
  bool success;
  {
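    // Scoped so that both the file and the mapping are closed before the
    // rename below.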
    File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
                                         File::FLAG_READ | File::FLAG_WRITE);
    success = spare_file.IsValid();

    if (success) {
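      // Initializing the mapping with READ_WRITE_EXTEND extends the file to
      // |size| zero-filled bytes.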
      MemoryMappedFile mmfile;
      success = mmfile.Initialize(std::move(spare_file), {0, size},
                                  MemoryMappedFile::READ_WRITE_EXTEND);
    }
  }

  if (success)
    success = ReplaceFile(temp_spare_path, spare_path, nullptr);

  if (!success)
    DeleteFile(temp_spare_path);

  return success;
}
#endif  // !BUILDFLAG(IS_NACL)

// static
void GlobalHistogramAllocator::CreateWithSharedMemoryRegion(
    const UnsafeSharedMemoryRegion& region) {
  CHECK_EQ(Get(), nullptr) << "Histogram allocator has already been created";

  base::WritableSharedMemoryMapping mapping = region.Map();
  if (!mapping.IsValid() ||
      !WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
          mapping)) {
    DVLOG(1) << "Shared memory region is invalid or unacceptable.";
    return;
  }

  DVLOG(1) << "Global histogram allocator initialized.";
  Set(new GlobalHistogramAllocator(
      std::make_unique<WritableSharedPersistentMemoryAllocator>(
          std::move(mapping), 0, std::string_view())));
}

// static
void GlobalHistogramAllocator::Set(GlobalHistogramAllocator* allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is also
  // released, future accesses to those histograms will seg-fault.
  CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
  subtle::Release_Store(&g_histogram_allocator,
                        reinterpret_cast<intptr_t>(allocator));

  // Record the number of histograms that were sampled before the global
  // histogram allocator was initialized.
  //
  // TODO(crbug.com/40945497): CHECK(histogram_count == 0) and remove emit of
  // early histogram count once |histogram_count| is reliably zero (0) for all
  // process types.
  size_t histogram_count = StatisticsRecorder::GetHistogramCount();
  if (histogram_count != 0) {
    DVLOG(1) << histogram_count
             << " histogram(s) created before persistence was enabled.";

    if (allocator && allocator->Name() && allocator->Name()[0]) {
      UmaHistogramCounts100(StrCat({"UMA.PersistentAllocator.EarlyHistograms.",
                                    allocator->Name()}),
                            static_cast<int>(histogram_count));
    }
  }
}

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
  return reinterpret_cast<GlobalHistogramAllocator*>(
      subtle::Acquire_Load(&g_histogram_allocator));
}

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::ReleaseForTesting() {
  GlobalHistogramAllocator* histogram_allocator = Get();
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the Statistics-
  // Recorder forget about the histograms contained therein; otherwise,
  // some operations will try to access them and the released memory.
  PersistentMemoryAllocator::Iterator iter(memory_allocator);
  const PersistentHistogramData* data;
  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
    StatisticsRecorder::ForgetHistogramForTesting(data->name);
  }

  subtle::Release_Store(&g_histogram_allocator, 0);
  ANNOTATE_LEAKING_OBJECT_PTR(histogram_allocator);
  return histogram_allocator;
}

void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
  persistent_location_ = location;
}

const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
  return persistent_location_;
}

bool GlobalHistogramAllocator::HasPersistentLocation() const {
  return !persistent_location_.empty();
}

bool GlobalHistogramAllocator::MovePersistentFile(const FilePath& dir) {
  DCHECK(HasPersistentLocation());

  FilePath new_file_path = dir.Append(persistent_location_.BaseName());

  // Change the location of the persistent file. This is fine to do even though
  // the file is currently "opened" by this process.
  if (!base::ReplaceFile(persistent_location_, new_file_path, nullptr)) {
    return false;
  }

  SetPersistentLocation(new_file_path);
  return true;
}

bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if BUILDFLAG(IS_NACL)
  // NACL doesn't support file operations, including ImportantFileWriter.
  NOTREACHED();
#else
  // Stop if no destination is set.
  if (!HasPersistentLocation()) {
    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                 << " to file because no location was set.";
  }

  std::string_view contents(static_cast<const char*>(data()), used());
  if (!ImportantFileWriter::WriteFileAtomically(
          persistent_location_, contents, "PersistentHistogramAllocator")) {
    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
               << " to file: " << persistent_location_.value();
    return false;
  }

  return true;
#endif
}

void GlobalHistogramAllocator::DeletePersistentLocation() {
  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);

#if BUILDFLAG(IS_NACL)
  NOTREACHED();
#else
  if (!HasPersistentLocation()) {
    return;
  }

  // Open (with delete) and then immediately close the file by going out of
  // scope. This is the only cross-platform safe way to delete a file that may
  // be open elsewhere. Open handles will continue to operate normally but
  // new opens will not be possible.
  File file(persistent_location_,
            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
#endif
}

GlobalHistogramAllocator::GlobalHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : PersistentHistogramAllocator(std::move(memory)),
      import_iterator_(this) {
}

void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
  // Skip the import if it's the histogram that was last created. Should a
  // race condition cause the "last created" to be overwritten before it
  // is recognized here then the histogram will be created and be ignored
  // when it is detected as a duplicate by the statistics-recorder. This
  // simple check reduces the time of creating persistent histograms by
  // about 40%.
  Reference record_to_ignore = last_created();

  // There is no lock on this because the iterator is lock-free while still
  // guaranteed to return each entry only once. The StatisticsRecorder
  // has its own lock so the Register operation is safe.
  while (true) {
    std::unique_ptr<HistogramBase> histogram =
        import_iterator_.GetNextWithIgnore(record_to_ignore);
    if (!histogram)
      break;
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}

}  // namespace base