// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_sample_map.h"

#include "base/atomicops.h"
#include "base/check_op.h"
#include "base/containers/contains.h"
#include "base/debug/crash_logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/notreached.h"
#include "base/numerics/safe_conversions.h"

namespace base {

typedef HistogramBase::Count Count;
typedef HistogramBase::Sample Sample;

namespace {

// An iterator for going through a PersistentSampleMap. The logic here is
// identical to that of the iterator for SampleMap but with different data
// structures. Changes here likely need to be duplicated there.
template <typename T, typename I>
class IteratorTemplate : public SampleCountIterator {
 public:
  explicit IteratorTemplate(T& sample_counts)
      : iter_(sample_counts.begin()), end_(sample_counts.end()) {
    SkipEmptyBuckets();
  }

  ~IteratorTemplate() override;

  // SampleCountIterator:
  bool Done() const override { return iter_ == end_; }
  void Next() override {
    DCHECK(!Done());
    ++iter_;
    SkipEmptyBuckets();
  }
  void Get(HistogramBase::Sample* min,
           int64_t* max,
           HistogramBase::Count* count) override;

 private:
  void SkipEmptyBuckets() {
    while (!Done() && subtle::NoBarrier_Load(iter_->second) == 0) {
      ++iter_;
    }
  }

  I iter_;
  const I end_;
};
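
// A typical consumption loop over one of these iterators (a sketch with a
// hypothetical |samples| object; see Iterator() below for how callers obtain
// one):
//
//   HistogramBase::Sample min;
//   int64_t max;
//   HistogramBase::Count count;
//   for (std::unique_ptr<SampleCountIterator> it = samples.Iterator();
//        !it->Done(); it->Next()) {
//     it->Get(&min, &max, &count);  // Each sparse bucket is [min, min + 1).
//   }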

typedef std::map<HistogramBase::Sample, HistogramBase::Count*> SampleToCountMap;
typedef IteratorTemplate<const SampleToCountMap,
                         SampleToCountMap::const_iterator>
    PersistentSampleMapIterator;

template <>
PersistentSampleMapIterator::~IteratorTemplate() = default;

// Get() for an iterator of a PersistentSampleMap.
template <>
void PersistentSampleMapIterator::Get(Sample* min, int64_t* max, Count* count) {
  DCHECK(!Done());
  *min = iter_->first;
  *max = strict_cast<int64_t>(iter_->first) + 1;
  // We have to do the following atomically, because even if the caller is
  // using a lock, a separate process (that is not aware of this lock) may
  // concurrently modify the value (note that iter_->second is a pointer to a
  // sample count, which may live in shared memory).
  *count = subtle::NoBarrier_Load(iter_->second);
}

typedef IteratorTemplate<SampleToCountMap, SampleToCountMap::iterator>
    ExtractingPersistentSampleMapIterator;

template <>
ExtractingPersistentSampleMapIterator::~IteratorTemplate() {
  // Ensure that the user has consumed all the samples, so that none are
  // lost.
  DCHECK(Done());
}

// Get() for an extracting iterator of a PersistentSampleMap.
template <>
void ExtractingPersistentSampleMapIterator::Get(Sample* min,
                                                int64_t* max,
                                                Count* count) {
  DCHECK(!Done());
  *min = iter_->first;
  *max = strict_cast<int64_t>(iter_->first) + 1;
  // We have to do the following atomically, because even if the caller is
  // using a lock, a separate process (that is not aware of this lock) may
  // concurrently modify the value (note that iter_->second is a pointer to a
  // sample count, which may live in shared memory).
  *count = subtle::NoBarrier_AtomicExchange(iter_->second, 0);
}
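
// Unlike the non-extracting Get() above, which merely loads the count,
// NoBarrier_AtomicExchange() reads the count and resets it to zero in a
// single atomic step, so an Accumulate() racing in from another process can
// neither be double-reported nor lost between the read and the reset.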

// This structure holds an entry for a PersistentSampleMap within a persistent
// memory allocator. The "id" must be unique across all maps held by an
// allocator or they will get attached to the wrong sample map.
struct SampleRecord {
  // SHA1(SampleRecord): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize = 16;
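  // (With natural alignment: 8-byte id + 4-byte Sample + 4-byte Count = 16
  // bytes on both 32- and 64-bit builds.)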

  uint64_t id;   // Unique identifier of owner.
  Sample value;  // The value for which this record holds a count.
  Count count;   // The count associated with the above value.
};

}  // namespace

PersistentSampleMap::PersistentSampleMap(
    uint64_t id,
    PersistentHistogramAllocator* allocator,
    Metadata* meta)
    : HistogramSamples(id, meta), allocator_(allocator) {}

PersistentSampleMap::~PersistentSampleMap() = default;

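// For example, Accumulate(/*value=*/7, /*count=*/3) bumps the persistent
// counter for bucket 7 by 3 and adds 7 * 3 = 21 to the running sum kept in
// the shared metadata.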
void PersistentSampleMap::Accumulate(Sample value, Count count) {
  // We have to do the following atomically, because even if the caller is
  // using a lock, a separate process (that is not aware of this lock) may
  // concurrently modify the value.
  subtle::NoBarrier_AtomicIncrement(GetOrCreateSampleCountStorage(value),
                                    count);
  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
}

Count PersistentSampleMap::GetCount(Sample value) const {
  // Have to override "const" to make sure all samples have been loaded before
  // being able to know what value to return.
  Count* count_pointer =
      const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
  return count_pointer ? subtle::NoBarrier_Load(count_pointer) : 0;
}

Count PersistentSampleMap::TotalCount() const {
  // Have to override "const" in order to make sure all samples have been
  // loaded before trying to iterate over the map.
  const_cast<PersistentSampleMap*>(this)->ImportSamples(
      /*until_value=*/absl::nullopt);

  Count count = 0;
  for (const auto& entry : sample_counts_) {
    count += subtle::NoBarrier_Load(entry.second);
  }
  return count;
}

std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
  // Have to override "const" in order to make sure all samples have been
  // loaded before trying to iterate over the map.
  const_cast<PersistentSampleMap*>(this)->ImportSamples(
      /*until_value=*/absl::nullopt);
  return std::make_unique<PersistentSampleMapIterator>(sample_counts_);
}

std::unique_ptr<SampleCountIterator> PersistentSampleMap::ExtractingIterator() {
  // Make sure all samples have been loaded before trying to iterate over the
  // map.
  ImportSamples(/*until_value=*/absl::nullopt);
  return std::make_unique<ExtractingPersistentSampleMapIterator>(
      sample_counts_);
}
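
// A sketch of the extract-and-report flow this enables, with a hypothetical
// |map| object (the extracting iterator's destructor DCHECKs Done(), so it
// must be drained completely):
//
//   std::unique_ptr<SampleCountIterator> it = map.ExtractingIterator();
//   for (; !it->Done(); it->Next()) {
//     it->Get(&min, &max, &count);  // Resets this bucket's shared count to 0.
//   }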

bool PersistentSampleMap::IsDefinitelyEmpty() const {
  // Not implemented.
  NOTREACHED();

  // Always return false. If we are wrong, this will just make the caller
  // perform some extra work thinking that |this| is non-empty.
  return false;
}

// static
PersistentMemoryAllocator::Reference
PersistentSampleMap::GetNextPersistentRecord(
    PersistentMemoryAllocator::Iterator& iterator,
    uint64_t* sample_map_id,
    Sample* value) {
  const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
  if (!record) {
    return 0;
  }

  *sample_map_id = record->id;
  *value = record->value;
  return iterator.GetAsReference(record);
}
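
// A sketch of the allocator-wide scan that drives the method above
// (hypothetical loop; the real caller lives in the persistent histogram
// allocator code):
//
//   PersistentMemoryAllocator::Iterator iterator(allocator);
//   uint64_t sample_map_id;
//   Sample value;
//   while (PersistentMemoryAllocator::Reference ref =
//              PersistentSampleMap::GetNextPersistentRecord(
//                  iterator, &sample_map_id, &value)) {
//     // Route |ref| to the sample map whose id() matches |sample_map_id|.
//   }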

// static
PersistentMemoryAllocator::Reference
PersistentSampleMap::CreatePersistentRecord(
    PersistentMemoryAllocator* allocator,
    uint64_t sample_map_id,
    Sample value) {
  SampleRecord* record = allocator->New<SampleRecord>();
  if (!record) {
    if (!allocator->IsFull()) {
#if !BUILDFLAG(IS_NACL)
      // TODO(crbug/1432981): Remove these. They are used to investigate
      // unexpected failures.
      SCOPED_CRASH_KEY_BOOL("PersistentSampleMap", "corrupted",
                            allocator->IsCorrupt());
#endif  // !BUILDFLAG(IS_NACL)
      NOTREACHED() << "corrupt=" << allocator->IsCorrupt();
    }
    return 0;
  }

  record->id = sample_map_id;
  record->value = value;
  record->count = 0;

  PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
  allocator->MakeIterable(ref);
  return ref;
}
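
// Note that MakeIterable() is what publishes the record to other processes:
// a record that is allocated but not yet iterable cannot be found by the
// scan sketched above, which is why all fields are initialized first.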

bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
                                          Operator op) {
  Sample min;
  int64_t max;
  Count count;
  for (; !iter->Done(); iter->Next()) {
    iter->Get(&min, &max, &count);
    if (count == 0)
      continue;
    if (strict_cast<int64_t>(min) + 1 != max)
      return false;  // SparseHistogram only supports buckets of size 1.

    // We have to do the following atomically, because even if the caller is
    // using a lock, a separate process (that is not aware of this lock) may
    // concurrently modify the value.
    subtle::Barrier_AtomicIncrement(
        GetOrCreateSampleCountStorage(min),
        (op == HistogramSamples::ADD) ? count : -count);
  }
  return true;
}

Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
  // If |value| is already in the map, just return that.
  auto it = sample_counts_.find(value);
  if (it != sample_counts_.end())
    return it->second;

  // Import any new samples from persistent memory looking for the value.
  return ImportSamples(/*until_value=*/value);
}

Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
  // Get any existing count storage.
  Count* count_pointer = GetSampleCountStorage(value);
  if (count_pointer)
    return count_pointer;

  // Create a new record in persistent memory for the value. |records_| will
  // have been initialized by the GetSampleCountStorage() call above.
  CHECK(records_);
  PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
  if (!ref) {
    // If a new record could not be created then the underlying allocator is
    // full or corrupt. Instead, allocate the counter from the heap. This
    // sample will not be persistent, will not be shared, and will leak...
    // but it's better than crashing.
    count_pointer = new Count(0);
    sample_counts_[value] = count_pointer;
    return count_pointer;
  }

  // A race condition between two independent processes (i.e. two independent
  // histogram objects sharing the same sample data) could cause two of the
  // above records to be created. The allocator, however, forces a strict
  // ordering on iterable objects so use the import method to actually add the
  // just-created record. This ensures that all PersistentSampleMap objects
  // will always use the same record, whichever was first made iterable.
  // Thread-safety within a process where multiple threads use the same
  // histogram object is delegated to the controlling histogram object which,
  // for sparse histograms, is a lock object.
  count_pointer = ImportSamples(/*until_value=*/value);
  DCHECK(count_pointer);
  return count_pointer;
}
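
// An illustration of the race described above (a sketch): processes A and B
// both call GetOrCreateSampleCountStorage(42) while no record for 42 exists.
// Each allocates its own SampleRecord, but MakeIterable() imposes a total
// order; ImportSamples() then returns whichever record became iterable first
// to both processes, and the losing record stays at count == 0 forever
// (hence the DCHECK_EQ(0, record->count) in ImportSamples() below).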

PersistentSampleMapRecords* PersistentSampleMap::GetRecords() {
  // The |records_| pointer is lazily fetched from the |allocator_| only on
  // first use. Sometimes duplicate histograms are created by race conditions
  // and if both were to grab the records object, there would be a conflict.
  // Use of a histogram, and thus a call to this method, won't occur until
  // after the histogram has been de-dup'd.
  if (!records_) {
    records_ = allocator_->CreateSampleMapRecords(id());
  }
  return records_.get();
}

Count* PersistentSampleMap::ImportSamples(absl::optional<Sample> until_value) {
  std::vector<PersistentMemoryAllocator::Reference> refs;
  PersistentSampleMapRecords* records = GetRecords();
  while (!(refs = records->GetNextRecords(until_value)).empty()) {
    // GetNextRecords() returns a list of new unseen records belonging to this
    // map. Iterate through them all and store them internally. Note that if
    // |until_value| was found, it will be the last element in |refs|.
    for (auto ref : refs) {
      SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
      if (!record) {
        continue;
      }

      DCHECK_EQ(id(), record->id);

      // Check if the record's value is already known.
      if (!Contains(sample_counts_, record->value)) {
        // No: Add it to map of known values.
        sample_counts_[record->value] = &record->count;
      } else {
        // Yes: Ignore it; it's a duplicate caused by a race condition -- see
        // code & comment in GetOrCreateSampleCountStorage() for details.
        // Check that nothing ever operated on the duplicate record.
        DCHECK_EQ(0, record->count);
      }

      // Check if it's the value being searched for and, if so, stop here.
      // Because race conditions can cause multiple records for a single value,
      // be sure to return the first one found.
      if (until_value.has_value() && record->value == until_value.value()) {
        // Ensure that this was the last value in |refs|.
        CHECK_EQ(refs.back(), ref);

        return &record->count;
      }
    }
  }

  return nullptr;
}

}  // namespace base