• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2023 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 #ifdef UNSAFE_BUFFERS_BUILD
5 // TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
6 #pragma allow_unsafe_buffers
7 #endif
8 
9 #include <memory>
10 #include <set>
11 #include <string>
12 #include <vector>
13 
14 #include "base/atomicops.h"
15 #include "base/containers/span.h"
16 #include "base/memory/raw_span.h"
17 #include "base/metrics/bucket_ranges.h"
18 #include "base/metrics/histogram.h"
19 #include "base/metrics/persistent_histogram_allocator.h"
20 #include "base/metrics/sparse_histogram.h"
21 #include "base/no_destructor.h"
22 #include "base/numerics/safe_conversions.h"
23 #include "base/strings/stringprintf.h"
24 #include "base/test/scoped_feature_list.h"
25 #include "base/threading/simple_thread.h"
26 #include "testing/gtest/include/gtest/gtest.h"
27 
28 namespace base {
29 
30 namespace {
31 
GetPermanentName(const std::string & name)32 char const* GetPermanentName(const std::string& name) {
33   // A set of histogram names that provides the "permanent" lifetime required
34   // by histogram objects for those strings that are not already code constants
35   // or held in persistent memory.
36   static base::NoDestructor<std::set<std::string>> permanent_names;
37 
38   auto result = permanent_names->insert(name);
39   return result.first->c_str();
40 }
41 
GetBucketIndex(HistogramBase::Sample value,const BucketRanges * ranges)42 size_t GetBucketIndex(HistogramBase::Sample value, const BucketRanges* ranges) {
43   size_t bucket_count = ranges->bucket_count();
44   EXPECT_GE(bucket_count, 1U);
45   for (size_t i = 0; i < bucket_count; ++i) {
46     if (ranges->range(i) > value) {
47       return i - 1;
48     }
49   }
50   return bucket_count - 1;
51 }
52 
53 // Runs a task in a thread that will emit |num_emission_| times the passed
54 // |histograms| and snapshot them. The thread will also keep track of the
55 // actual samples emitted, as well as the ones found in the snapshots taken, so
56 // that they can be compared.
57 class SnapshotDeltaThread : public SimpleThread {
58  public:
SnapshotDeltaThread(const std::string & name,size_t num_emissions,span<HistogramBase * > histograms,HistogramBase::Sample histogram_max,subtle::Atomic32 * real_total_samples_count,span<subtle::Atomic32> real_bucket_counts,subtle::Atomic32 * snapshots_total_samples_count,span<subtle::Atomic32> snapshots_bucket_counts)59   SnapshotDeltaThread(const std::string& name,
60                       size_t num_emissions,
61                       span<HistogramBase*> histograms,
62                       HistogramBase::Sample histogram_max,
63                       subtle::Atomic32* real_total_samples_count,
64                       span<subtle::Atomic32> real_bucket_counts,
65                       subtle::Atomic32* snapshots_total_samples_count,
66                       span<subtle::Atomic32> snapshots_bucket_counts)
67       : SimpleThread(name, Options()),
68         num_emissions_(num_emissions),
69         histograms_(histograms),
70         histogram_max_(histogram_max),
71         real_total_samples_count_(real_total_samples_count),
72         real_bucket_counts_(real_bucket_counts),
73         snapshots_total_samples_count_(snapshots_total_samples_count),
74         snapshots_bucket_counts_(snapshots_bucket_counts) {}
75 
76   SnapshotDeltaThread(const SnapshotDeltaThread&) = delete;
77   SnapshotDeltaThread& operator=(const SnapshotDeltaThread&) = delete;
78 
79   ~SnapshotDeltaThread() override = default;
80 
Run()81   void Run() override {
82     for (size_t i = 0; i < num_emissions_; ++i) {
83       for (HistogramBase* histogram : histograms_) {
84         // Emit a random sample. rand() is used here to generate such a sample,
85         // but the randomness does not really matter as thread-safety is what is
86         // being tested here and there is already a lot of non-determinism
87         // surrounding scheduling.
88         Histogram::Sample sample = rand() % histogram_max_;
89         histogram->Add(sample);
90 
91         // Take a snapshot of the histogram. Because of the multithreading
92         // nature of the test, this may or may not include the sample that was
93         // just emitted, and/or may include samples that came from other
94         // threads.
95         std::unique_ptr<HistogramSamples> snapshot = histogram->SnapshotDelta();
96 
97         // Store the sample that was emitted as well as the snapshot so that
98         // the totals can be compared later on.
99         StoreActualSample(histogram, sample);
100         StoreSnapshot(std::move(snapshot));
101       }
102     }
103   }
104 
105  private:
106   // Stores an actual |sample| that was emitted for |histogram|. This is done
107   // to compare what was found in histogram snapshots (see StoreSnapshot()).
StoreActualSample(HistogramBase * histogram,Histogram::Sample sample)108   void StoreActualSample(HistogramBase* histogram, Histogram::Sample sample) {
109     subtle::NoBarrier_AtomicIncrement(real_total_samples_count_, 1);
110     switch (histogram->GetHistogramType()) {
111       case HISTOGRAM: {
112         const BucketRanges* ranges =
113             static_cast<Histogram*>(histogram)->bucket_ranges();
114         size_t bucket_index = GetBucketIndex(sample, ranges);
115         size_t bucket_min = ranges->range(bucket_index);
116         subtle::NoBarrier_AtomicIncrement(&real_bucket_counts_[bucket_min], 1);
117         break;
118       }
119       case SPARSE_HISTOGRAM:
120         subtle::NoBarrier_AtomicIncrement(
121             &real_bucket_counts_[checked_cast<size_t>(sample)], 1);
122         break;
123       case LINEAR_HISTOGRAM:
124       case BOOLEAN_HISTOGRAM:
125       case CUSTOM_HISTOGRAM:
126       case DUMMY_HISTOGRAM:
127         NOTREACHED();
128     }
129   }
130 
131   // Store a |snapshot| that was taken of a histogram. This is done to compare
132   // what was actually emitted (see StoreActualSample()).
StoreSnapshot(std::unique_ptr<HistogramSamples> snapshot)133   void StoreSnapshot(std::unique_ptr<HistogramSamples> snapshot) {
134     HistogramBase::Count snapshot_samples_count = snapshot->TotalCount();
135     subtle::NoBarrier_AtomicIncrement(snapshots_total_samples_count_,
136                                       snapshot_samples_count);
137     for (auto it = snapshot->Iterator(); !it->Done(); it->Next()) {
138       HistogramBase::Sample min;
139       int64_t max;
140       HistogramBase::Count count;
141       it->Get(&min, &max, &count);
142       // Verify that the snapshot contains only positive bucket counts.
143       // This is to ensure SnapshotDelta() is fully thread-safe, not just
144       // "eventually consistent".
145       ASSERT_GE(count, 0);
146       subtle::NoBarrier_AtomicIncrement(
147           &snapshots_bucket_counts_[checked_cast<size_t>(min)], count);
148     }
149   }
150 
151   const size_t num_emissions_;
152   raw_span<HistogramBase*> histograms_;
153   const HistogramBase::Sample histogram_max_;
154   raw_ptr<subtle::Atomic32> real_total_samples_count_;
155   raw_span<subtle::Atomic32> real_bucket_counts_;
156   raw_ptr<subtle::Atomic32> snapshots_total_samples_count_;
157   raw_span<subtle::Atomic32> snapshots_bucket_counts_;
158 };
159 
160 }  // namespace
161 
// Test fixture that backs histograms with a GlobalHistogramAllocator over
// local memory and creates a second PersistentHistogramAllocator view of the
// same memory, so that "subprocess" histogram instances sharing storage with
// the main process' histograms can be simulated.
class HistogramThreadsafeTest : public testing::Test {
 public:
  HistogramThreadsafeTest() = default;

  HistogramThreadsafeTest(const HistogramThreadsafeTest&) = delete;
  HistogramThreadsafeTest& operator=(const HistogramThreadsafeTest&) = delete;

  ~HistogramThreadsafeTest() override = default;

  void SetUp() override {
    // Back all histograms created through the FactoryGet() APIs with 4 MiB of
    // local (heap-based) persistent memory.
    GlobalHistogramAllocator::CreateWithLocalMemory(4 << 20, /*id=*/0,
                                                    /*name=*/"");
    ASSERT_TRUE(GlobalHistogramAllocator::Get());

    // Create a second view of the persistent memory with a new persistent
    // histogram allocator in order to simulate a subprocess with its own view
    // of some shared memory.
    PersistentMemoryAllocator* allocator =
        GlobalHistogramAllocator::Get()->memory_allocator();
    std::unique_ptr<PersistentMemoryAllocator> memory_view =
        std::make_unique<PersistentMemoryAllocator>(
            /*base=*/const_cast<void*>(allocator->data()), allocator->size(),
            /*page_size=*/0, /*id=*/0,
            /*name=*/"GlobalHistogramAllocatorView",
            PersistentMemoryAllocator::kReadWrite);
    allocator_view_ =
        std::make_unique<PersistentHistogramAllocator>(std::move(memory_view));
  }

  void TearDown() override {
    // Drop the histogram objects before releasing the allocator that backs
    // their storage.
    histograms_.clear();
    allocator_view_.reset();
    GlobalHistogramAllocator::ReleaseForTesting();
    ASSERT_FALSE(GlobalHistogramAllocator::Get());
  }

  // Creates and returns various histograms (some that live on the persistent
  // memory, some that live on the local heap, and some that point to the same
  // underlying data as those that live on the persistent memory but are
  // different objects).
  //
  // The order of the returned vector is relied upon by the test body:
  //   [0] numeric histogram (persistent memory)
  //   [1] sparse histogram (persistent memory)
  //   [2] local-heap numeric histogram
  //   [3] local-heap sparse histogram
  //   [4] "subprocess" numeric histogram (same data as [0], other allocator)
  //   [5] "subprocess" sparse histogram (same data as [1], other allocator)
  //   [6] duplicate numeric histogram (same data as [0], same allocator)
  //   [7] duplicate sparse histogram (same data as [1], same allocator)
  std::vector<HistogramBase*> CreateHistograms(size_t suffix,
                                               HistogramBase::Sample max,
                                               size_t bucket_count) {
    // There are 4 ways histograms can store their underlying data:
    // PersistentSampleVector, PersistentSampleMap, SampleVector, and SampleMap.
    // The first two are intended for when the data may be either persisted to a
    // file or shared with another process. The last two are when the histograms
    // are to be used by the local process only.
    // Create 4 histograms that use those storage structures respectively.
    std::vector<HistogramBase*> histograms;

    // Create histograms on the persistent memory (created through the
    // GlobalHistogramAllocator, which is automatically done when using the
    // FactoryGet() API). There is no need to store them in |histograms_|
    // because these histograms are owned by the StatisticsRecorder.
    std::string numeric_histogram_name =
        StringPrintf("NumericHistogram%zu", suffix);
    Histogram* numeric_histogram = static_cast<Histogram*>(
        Histogram::FactoryGet(numeric_histogram_name, /*minimum=*/1, max,
                              bucket_count, /*flags=*/HistogramBase::kNoFlags));
    histograms.push_back(numeric_histogram);
    std::string sparse_histogram_name =
        StringPrintf("SparseHistogram%zu", suffix);
    HistogramBase* sparse_histogram =
        SparseHistogram::FactoryGet(sparse_histogram_name,
                                    /*flags=*/HistogramBase::kNoFlags);
    histograms.push_back(sparse_histogram);

    // Create histograms on the "local heap" (i.e., are not instantiated using
    // the GlobalHistogramAllocator, which is automatically done when using the
    // FactoryGet() API). Store them in |histograms_| so that they are not freed
    // during the test.
    std::string local_heap_histogram_name =
        StringPrintf("LocalHeapNumericHistogram%zu", suffix);
    auto& local_heap_histogram = histograms_.emplace_back(
        new Histogram(GetPermanentName(local_heap_histogram_name),
                      numeric_histogram->bucket_ranges()));
    histograms.push_back(local_heap_histogram.get());
    std::string local_heap_sparse_histogram_name =
        StringPrintf("LocalHeapSparseHistogram%zu", suffix);
    auto& local_heap_sparse_histogram =
        histograms_.emplace_back(new SparseHistogram(
            GetPermanentName(local_heap_sparse_histogram_name)));
    histograms.push_back(local_heap_sparse_histogram.get());

    // Furthermore, create two additional *different* histogram objects that
    // point to the same underlying data as the first two (|numeric_histogram|
    // and |sparse_histogram|). This is to simulate subprocess histograms (i.e.,
    // both the main browser process and the subprocess have their own histogram
    // instance with possibly their own lock, but they both point to the same
    // underlying storage, and they may both interact with it simultaneously).
    // There is no need to do this for the "local heap" histograms because "by
    // definition" they should only be interacted with within the same process.
    PersistentHistogramAllocator::Iterator hist_it(allocator_view_.get());
    std::unique_ptr<HistogramBase> subprocess_numeric_histogram;
    std::unique_ptr<HistogramBase> subprocess_sparse_histogram;
    while (true) {
      // GetNext() creates a new histogram instance that points to the same
      // underlying data as the histogram the iterator is pointing to.
      std::unique_ptr<HistogramBase> histogram = hist_it.GetNext();
      if (!histogram) {
        break;
      }

      // Make sure the "local heap" histograms are not in persistent memory.
      EXPECT_NE(local_heap_histogram_name, histogram->histogram_name());
      EXPECT_NE(local_heap_sparse_histogram_name, histogram->histogram_name());

      if (histogram->histogram_name() == numeric_histogram_name) {
        subprocess_numeric_histogram = std::move(histogram);
      } else if (histogram->histogram_name() == sparse_histogram_name) {
        subprocess_sparse_histogram = std::move(histogram);
      }
    }
    // Make sure we found the histograms, and ensure that they are not the same
    // histogram objects. Assertions to verify that they are actually pointing
    // to the same underlying data are not done now (to not mess up the sample
    // counts).
    EXPECT_TRUE(subprocess_numeric_histogram);
    EXPECT_TRUE(subprocess_sparse_histogram);
    histograms.push_back(subprocess_numeric_histogram.get());
    histograms.push_back(subprocess_sparse_histogram.get());
    EXPECT_NE(numeric_histogram, subprocess_numeric_histogram.get());
    EXPECT_NE(sparse_histogram, subprocess_sparse_histogram.get());

    // Store the histograms in |histograms_| so that they are not freed during
    // the test.
    histograms_.emplace_back(std::move(subprocess_numeric_histogram));
    histograms_.emplace_back(std::move(subprocess_sparse_histogram));

    // Lastly, again, create two additional *different* histogram objects that
    // point to the same underlying data as the first two (|numeric_histogram|
    // and |sparse_histogram|). Unlike above, this is not necessarily done to
    // simulate subprocess histograms, but rather to verify that different
    // histogram objects created through the *same* allocator work correctly
    // together. In particular, the sparse histogram found here will use the
    // same "data manager" (see base::PersistentSparseHistogramDataManager) as
    // the original |sparse_histogram|. This is in contrast to the "subprocess"
    // histograms above, which will use a different "data manager" since those
    // histogram objects were created through a different allocator
    // (allocator_view_). In production, this is what happens when we try to
    // merge the histograms of a child process multiple times concurrently
    // (e.g. while we are merging the histograms of a certain child process in
    // the background, the browser is backgrounded, triggering another merge but
    // on the main thread).
    PersistentHistogramAllocator::Iterator hist_it2(
        GlobalHistogramAllocator::Get());
    std::unique_ptr<HistogramBase> numeric_histogram2;
    std::unique_ptr<HistogramBase> sparse_histogram2;
    while (true) {
      // GetNext() creates a new histogram instance that points to the same
      // underlying data as the histogram the iterator is pointing to.
      std::unique_ptr<HistogramBase> histogram = hist_it2.GetNext();
      if (!histogram) {
        break;
      }

      // Make sure the "local heap" histograms are not in persistent memory.
      EXPECT_NE(local_heap_histogram_name, histogram->histogram_name());
      EXPECT_NE(local_heap_sparse_histogram_name, histogram->histogram_name());

      if (histogram->histogram_name() == numeric_histogram_name) {
        numeric_histogram2 = std::move(histogram);
      } else if (histogram->histogram_name() == sparse_histogram_name) {
        sparse_histogram2 = std::move(histogram);
      }
    }
    // Make sure we found the histograms, and ensure that they are not the same
    // histogram objects. Assertions to verify that they are actually pointing
    // to the same underlying data are not done now (to not mess up the sample
    // counts).
    EXPECT_TRUE(numeric_histogram2);
    EXPECT_TRUE(sparse_histogram2);
    histograms.push_back(numeric_histogram2.get());
    histograms.push_back(sparse_histogram2.get());
    EXPECT_NE(numeric_histogram, numeric_histogram2.get());
    EXPECT_NE(sparse_histogram, sparse_histogram2.get());

    // Store the histograms in |histograms_| so that they are not freed during
    // the test.
    histograms_.emplace_back(std::move(numeric_histogram2));
    histograms_.emplace_back(std::move(sparse_histogram2));

    return histograms;
  }

 private:
  // A view of the GlobalHistogramAllocator to simulate a subprocess having its
  // own view of some shared memory.
  std::unique_ptr<PersistentHistogramAllocator> allocator_view_;

  // Used to prevent histograms from being freed during the test.
  std::vector<std::unique_ptr<HistogramBase>> histograms_;
};
356 
// Verifies that SnapshotDelta() is thread safe. That means 1) a sample emitted
// while a snapshot is taken is not lost, and 2) concurrent calls to
// SnapshotDelta() will not return the same samples. Note that the test makes
// use of ASSERT_* instead of EXPECT_* because the test is repeated multiple
// times, and the use of EXPECT_* produces spammy outputs as it does not end
// the test immediately.
TEST_F(HistogramThreadsafeTest, SnapshotDeltaThreadsafe) {
  // We try this test |kNumIterations| times to have a coverage of different
  // scenarios. For example, for a numeric histogram, if it has only samples
  // within the same bucket, the samples will be stored in a different way than
  // if it had samples in multiple buckets for efficiency reasons (SingleSample
  // vs a vector). Hence, the goal of doing this test multiple time is to have
  // coverage of the SingleSample scenario, because once the histogram has moved
  // to using a vector, it will not use SingleSample again.
  // Note: |kNumIterations| was 100 on 4/2023, but was decreased because the
  // workload was causing flakiness (timing out).
  constexpr size_t kNumIterations = 50;
  for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
    // TL;DR of the test: multiple threads are created, which will each emit to
    // the same histograms and snapshot their delta multiple times. We keep
    // track of the actual number of samples found in the snapshots, and ensure
    // that it matches what we actually emitted.

    // Create histograms. Two histograms should live on persistent memory,
    // two should live on local heap, and two of them should be simulations of
    // subprocess histograms that point to the same underlying data as first two
    // histograms (but are different objects).
    // The max values of the histograms will alternate between 2 and 50 in order
    // to have coverage of histograms that are being emitted to with a small
    // range of values, and a large range of values.
    const HistogramBase::Sample kHistogramMax = (iteration % 2 == 0) ? 2 : 50;
    const size_t kBucketCount = (iteration % 2 == 0) ? 3 : 10;
    std::vector<HistogramBase*> histograms =
        CreateHistograms(/*suffix=*/iteration, kHistogramMax, kBucketCount);

    // Start |kNumThreads| that will each emit and snapshot the histograms (see
    // SnapshotDeltaThread). We keep track of the real samples as well as the
    // samples found in the snapshots so that we can compare that they match
    // later on.
    // The bucket-count vectors are indexed by bucket minimum for numeric
    // histograms and by raw sample value for sparse ones (see
    // SnapshotDeltaThread::StoreActualSample()); both are < kHistogramMax.
    constexpr size_t kNumThreads = 2;
    constexpr size_t kNumEmissions = 1000;
    subtle::Atomic32 real_total_samples_count = 0;
    std::vector<subtle::Atomic32> real_bucket_counts(kHistogramMax, 0);
    subtle::Atomic32 snapshots_total_samples_count = 0;
    std::vector<subtle::Atomic32> snapshots_bucket_counts(kHistogramMax, 0);
    std::unique_ptr<SnapshotDeltaThread> threads[kNumThreads];
    for (size_t i = 0; i < kNumThreads; ++i) {
      threads[i] = std::make_unique<SnapshotDeltaThread>(
          StringPrintf("SnapshotDeltaThread.%zu.%zu", iteration, i),
          kNumEmissions, histograms, kHistogramMax, &real_total_samples_count,
          real_bucket_counts, &snapshots_total_samples_count,
          snapshots_bucket_counts);
      threads[i]->Start();
    }

    // Wait until all threads have finished.
    for (auto& thread : threads) {
      thread->Join();
    }

    // Verify that the samples found in the snapshots match what we emitted.
    ASSERT_EQ(static_cast<size_t>(real_total_samples_count),
              kNumThreads * kNumEmissions * histograms.size());
    ASSERT_EQ(snapshots_total_samples_count, real_total_samples_count);
    for (HistogramBase::Sample i = 0; i < kHistogramMax; ++i) {
      ASSERT_EQ(snapshots_bucket_counts[i], real_bucket_counts[i]);
    }

    // Also verify that no more unlogged samples remain, and that the internal
    // logged samples of the histograms match what we emitted.

    HistogramBase::Count logged_total_samples_count = 0;
    std::vector<HistogramBase::Count> logged_bucket_counts(
        /*value=*/kHistogramMax, 0);
    // We ignore the last four histograms since they are the same as the first
    // two (they are simulations of histogram instances from a subprocess that
    // point to the same underlying data, and different histogram instances that
    // are created from the same allocator). Otherwise, we will be counting the
    // samples from those histograms thrice.
    for (size_t i = 0; i < histograms.size() - 4; ++i) {
      HistogramBase* histogram = histograms[i];
      // All deltas were already snapshotted by the threads; nothing unlogged
      // should remain.
      ASSERT_EQ(histogram->SnapshotDelta()->TotalCount(), 0);
      std::unique_ptr<HistogramSamples> logged_samples =
          histogram->SnapshotSamples();
      // Each individual histograms should have been emitted to a specific
      // amount of times. Non-"local heap" histograms were emitted to thrice as
      // much because they appeared thrice in the |histograms| array -- once as
      // a normal histogram, once as a simulation of a subprocess histogram, and
      // once as a duplicate histogram created from the same allocator.
      size_t expected_logged_samples_count = kNumThreads * kNumEmissions;
      if (!strstr(histogram->histogram_name(), "LocalHeap")) {
        expected_logged_samples_count *= 3;
      }
      ASSERT_EQ(static_cast<size_t>(logged_samples->TotalCount()),
                expected_logged_samples_count);

      for (auto it = logged_samples->Iterator(); !it->Done(); it->Next()) {
        HistogramBase::Sample min;
        int64_t max;
        HistogramBase::Count count;
        it->Get(&min, &max, &count);
        ASSERT_GE(count, 0);
        logged_total_samples_count += count;
        logged_bucket_counts[min] += count;
      }
    }
    ASSERT_EQ(logged_total_samples_count, real_total_samples_count);
    for (HistogramBase::Sample i = 0; i < kHistogramMax; ++i) {
      ASSERT_EQ(logged_bucket_counts[i], real_bucket_counts[i]);
    }

    // Verify that our "subprocess histograms" actually point to the same
    // underlying data as the "main browser" histograms, despite being different
    // instances (this was verified earlier). This is done at the end of the
    // test so as to not mess up the sample counts.
    // Indices follow the order documented on CreateHistograms().
    HistogramBase* numeric_histogram = histograms[0];
    HistogramBase* subprocess_numeric_histogram = histograms[4];
    HistogramBase* sparse_histogram = histograms[1];
    HistogramBase* subprocess_sparse_histogram = histograms[5];
    ASSERT_EQ(subprocess_numeric_histogram->SnapshotDelta()->TotalCount(), 0);
    ASSERT_EQ(subprocess_sparse_histogram->SnapshotDelta()->TotalCount(), 0);
    numeric_histogram->Add(0);
    sparse_histogram->Add(0);
    ASSERT_EQ(subprocess_numeric_histogram->SnapshotDelta()->TotalCount(), 1);
    ASSERT_EQ(subprocess_sparse_histogram->SnapshotDelta()->TotalCount(), 1);
    ASSERT_EQ(numeric_histogram->SnapshotDelta()->TotalCount(), 0);
    ASSERT_EQ(sparse_histogram->SnapshotDelta()->TotalCount(), 0);

    // Verify that our "duplicate histograms" created from the same allocator
    // actually point to the same underlying data as the "main" histograms,
    // despite being different instances (this was verified earlier). This is
    // done at the end of the test so as to not mess up the sample counts.
    HistogramBase* numeric_histogram2 = histograms[6];
    HistogramBase* sparse_histogram2 = histograms[7];
    ASSERT_EQ(numeric_histogram2->SnapshotDelta()->TotalCount(), 0);
    ASSERT_EQ(sparse_histogram2->SnapshotDelta()->TotalCount(), 0);
    numeric_histogram->Add(0);
    sparse_histogram->Add(0);
    ASSERT_EQ(numeric_histogram2->SnapshotDelta()->TotalCount(), 1);
    ASSERT_EQ(sparse_histogram2->SnapshotDelta()->TotalCount(), 1);
    ASSERT_EQ(numeric_histogram->SnapshotDelta()->TotalCount(), 0);
    ASSERT_EQ(sparse_histogram->SnapshotDelta()->TotalCount(), 0);
  }
}
501 
502 }  // namespace base
503