1 // Copyright 2014 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "components/metrics/metrics_service.h"
6
7 #include <stdint.h>
8
9 #include <algorithm>
10 #include <memory>
11 #include <string>
12
13 #include "base/containers/contains.h"
14 #include "base/files/file_path.h"
15 #include "base/files/file_util.h"
16 #include "base/files/scoped_temp_dir.h"
17 #include "base/functional/bind.h"
18 #include "base/memory/raw_ptr.h"
19 #include "base/metrics/field_trial.h"
20 #include "base/metrics/histogram_functions.h"
21 #include "base/metrics/histogram_snapshot_manager.h"
22 #include "base/metrics/metrics_hashes.h"
23 #include "base/metrics/statistics_recorder.h"
24 #include "base/metrics/user_metrics.h"
25 #include "base/task/single_thread_task_runner.h"
26 #include "base/test/bind.h"
27 #include "base/test/metrics/histogram_tester.h"
28 #include "base/test/scoped_feature_list.h"
29 #include "base/test/task_environment.h"
30 #include "base/threading/platform_thread.h"
31 #include "build/build_config.h"
32 #include "components/metrics/clean_exit_beacon.h"
33 #include "components/metrics/client_info.h"
34 #include "components/metrics/cloned_install_detector.h"
35 #include "components/metrics/environment_recorder.h"
36 #include "components/metrics/log_decoder.h"
37 #include "components/metrics/metrics_features.h"
38 #include "components/metrics/metrics_log.h"
39 #include "components/metrics/metrics_pref_names.h"
40 #include "components/metrics/metrics_scheduler.h"
41 #include "components/metrics/metrics_state_manager.h"
42 #include "components/metrics/metrics_upload_scheduler.h"
43 #include "components/metrics/stability_metrics_helper.h"
44 #include "components/metrics/test/test_enabled_state_provider.h"
45 #include "components/metrics/test/test_metrics_provider.h"
46 #include "components/metrics/test/test_metrics_service_client.h"
47 #include "components/metrics/unsent_log_store_metrics_impl.h"
48 #include "components/prefs/testing_pref_service.h"
49 #include "components/variations/active_field_trials.h"
50 #include "testing/gtest/include/gtest/gtest.h"
51 #include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
52 #include "third_party/metrics_proto/system_profile.pb.h"
53 #include "third_party/zlib/google/compression_utils.h"
54
55 namespace metrics {
56 namespace {
57
// Name of the list pref used by TestUnsentLogStore to persist its logs.
const char kTestPrefName[] = "TestPref";
59
// An UnsentLogStore backed by |kTestPrefName|, configured with a minimum log
// count of 3, no max-size pruning callback, and no signing key — suitable for
// tests that must not have their logs dropped.
class TestUnsentLogStore : public UnsentLogStore {
 public:
  explicit TestUnsentLogStore(PrefService* service)
      : UnsentLogStore(std::make_unique<UnsentLogStoreMetricsImpl>(),
                       service,
                       kTestPrefName,
                       nullptr,
                       // Set to 3 so logs are not dropped in the test.
                       UnsentLogStore::UnsentLogStoreLimits{
                           .min_log_count = 3,
                       },
                       /*signing_key=*/std::string(),
                       /*logs_event_manager=*/nullptr) {}
  ~TestUnsentLogStore() override = default;

  TestUnsentLogStore(const TestUnsentLogStore&) = delete;
  TestUnsentLogStore& operator=(const TestUnsentLogStore&) = delete;

  // Registers the list pref that backs this log store.
  static void RegisterPrefs(PrefRegistrySimple* registry) {
    registry->RegisterListPref(kTestPrefName);
  }
};
82
83 // Returns true if |id| is present in |proto|'s collection of FieldTrials.
IsFieldTrialPresent(const SystemProfileProto & proto,const std::string & trial_name,const std::string & group_name)84 bool IsFieldTrialPresent(const SystemProfileProto& proto,
85 const std::string& trial_name,
86 const std::string& group_name) {
87 const variations::ActiveGroupId id =
88 variations::MakeActiveGroupId(trial_name, group_name);
89
90 for (const auto& trial : proto.field_trial()) {
91 if (trial.name_id() == id.name && trial.group_id() == id.group)
92 return true;
93 }
94 return false;
95 }
96
// A MetricsService that exposes selected protected members for test
// assertions and records — rather than persists — the system profile it is
// handed via SetPersistentSystemProfile().
class TestMetricsService : public MetricsService {
 public:
  TestMetricsService(MetricsStateManager* state_manager,
                     MetricsServiceClient* client,
                     PrefService* local_state)
      : MetricsService(state_manager, client, local_state) {}

  TestMetricsService(const TestMetricsService&) = delete;
  TestMetricsService& operator=(const TestMetricsService&) = delete;

  ~TestMetricsService() override = default;

  // Expose protected state constants and accessors for tests.
  using MetricsService::INIT_TASK_DONE;
  using MetricsService::INIT_TASK_SCHEDULED;
  using MetricsService::RecordCurrentEnvironmentHelper;
  using MetricsService::SENDING_LOGS;
  using MetricsService::state;

  // MetricsService:
  // Only records that it was called and whether |complete| was true; does not
  // write anything to persistent storage.
  void SetPersistentSystemProfile(const std::string& serialized_proto,
                                  bool complete) override {
    persistent_system_profile_provided_ = true;
    persistent_system_profile_complete_ = complete;
  }

  // Whether SetPersistentSystemProfile() was called at least once.
  bool persistent_system_profile_provided() const {
    return persistent_system_profile_provided_;
  }
  // The |complete| value passed to the most recent call.
  bool persistent_system_profile_complete() const {
    return persistent_system_profile_complete_;
  }

 private:
  bool persistent_system_profile_provided_ = false;
  bool persistent_system_profile_complete_ = false;
};
133
// A MetricsLog whose log type is fixed to ONGOING_LOG.
class TestMetricsLog : public MetricsLog {
 public:
  TestMetricsLog(const std::string& client_id,
                 int session_id,
                 MetricsServiceClient* client)
      : MetricsLog(client_id, session_id, MetricsLog::ONGOING_LOG, client) {}

  TestMetricsLog(const TestMetricsLog&) = delete;
  TestMetricsLog& operator=(const TestMetricsLog&) = delete;

  ~TestMetricsLog() override = default;
};
146
// Histogram emitted on every OnDidCreateMetricsLog() call of the provider
// below, so tests can count how many times a log was created.
const char kOnDidCreateMetricsLogHistogramName[] = "Test.OnDidCreateMetricsLog";

// A provider that records a sample to |kOnDidCreateMetricsLogHistogramName|
// each time a new metrics log is created.
class TestMetricsProviderForOnDidCreateMetricsLog : public TestMetricsProvider {
 public:
  TestMetricsProviderForOnDidCreateMetricsLog() = default;
  ~TestMetricsProviderForOnDidCreateMetricsLog() override = default;

  void OnDidCreateMetricsLog() override {
    base::UmaHistogramBoolean(kOnDidCreateMetricsLogHistogramName, true);
  }
};
158
// Histogram emitted on every ProvideHistograms() call of the providers below,
// so tests can count those calls.
const char kProvideHistogramsHistogramName[] = "Test.ProvideHistograms";

// A provider that records a sample to |kProvideHistogramsHistogramName| from
// ProvideHistograms() and reports success.
class TestMetricsProviderForProvideHistograms : public TestMetricsProvider {
 public:
  TestMetricsProviderForProvideHistograms() = default;
  ~TestMetricsProviderForProvideHistograms() override = default;

  bool ProvideHistograms() override {
    base::UmaHistogramBoolean(kProvideHistogramsHistogramName, true);
    return true;
  }

  // Calls MetricsProvider's implementation directly, bypassing
  // TestMetricsProvider's override.
  void ProvideCurrentSessionData(
      ChromeUserMetricsExtension* uma_proto) override {
    MetricsProvider::ProvideCurrentSessionData(uma_proto);
  }
};
176
// Same as TestMetricsProviderForProvideHistograms but with a no-op
// OnDidCreateMetricsLog(), so ProvideHistograms() is not reached through the
// log-creation path (used to exercise the early-return behavior).
class TestMetricsProviderForProvideHistogramsEarlyReturn
    : public TestMetricsProviderForProvideHistograms {
 public:
  TestMetricsProviderForProvideHistogramsEarlyReturn() = default;
  ~TestMetricsProviderForProvideHistogramsEarlyReturn() override = default;

  void OnDidCreateMetricsLog() override {}
};
185
// A provider that offers exactly one independent log. The log it produces is
// marked with client id 123 so tests can recognize it.
class TestIndependentMetricsProvider : public MetricsProvider {
 public:
  TestIndependentMetricsProvider() = default;
  ~TestIndependentMetricsProvider() override = default;

  // MetricsProvider:
  bool HasIndependentMetrics() override {
    // Only return true the first time this is called (i.e., we only have one
    // independent log to provide).
    if (!has_independent_metrics_called_) {
      has_independent_metrics_called_ = true;
      return true;
    }
    return false;
  }
  // Stamps |uma_proto| with the recognizable client id 123 and reports
  // success synchronously via |done_callback|.
  void ProvideIndependentMetrics(
      base::OnceClosure serialize_log_callback,
      base::OnceCallback<void(bool)> done_callback,
      ChromeUserMetricsExtension* uma_proto,
      base::HistogramSnapshotManager* snapshot_manager) override {
    provide_independent_metrics_called_ = true;
    uma_proto->set_client_id(123);
    std::move(done_callback).Run(true);
  }

  // Whether HasIndependentMetrics() has been called at least once.
  bool has_independent_metrics_called() const {
    return has_independent_metrics_called_;
  }

  // Whether ProvideIndependentMetrics() has been called.
  bool provide_independent_metrics_called() const {
    return provide_independent_metrics_called_;
  }

 private:
  bool has_independent_metrics_called_ = false;
  bool provide_independent_metrics_called_ = false;
};
223
// Base fixture for MetricsService tests. Provides a mock-time task
// environment, a testing local-state PrefService with MetricsService prefs
// registered, a lazily-created MetricsStateManager, and helpers for decoding
// and inspecting serialized UMA logs.
class MetricsServiceTest : public testing::Test {
 public:
  MetricsServiceTest()
      : enabled_state_provider_(new TestEnabledStateProvider(false, false)) {
    base::SetRecordActionTaskRunner(
        task_environment_.GetMainThreadTaskRunner());
    MetricsService::RegisterPrefs(testing_local_state_.registry());
  }

  MetricsServiceTest(const MetricsServiceTest&) = delete;
  MetricsServiceTest& operator=(const MetricsServiceTest&) = delete;

  ~MetricsServiceTest() override = default;

  // Creates the temp dir returned by user_data_dir_path().
  void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); }

  // Returns the shared MetricsStateManager. |user_data_dir| and
  // |startup_visibility| only take effect on the first call, which creates
  // the manager.
  MetricsStateManager* GetMetricsStateManager(
      const base::FilePath& user_data_dir = base::FilePath(),
      StartupVisibility startup_visibility = StartupVisibility::kUnknown) {
    // Lazy-initialize the metrics_state_manager so that it correctly reads the
    // stability state from prefs after tests have a chance to initialize it.
    if (!metrics_state_manager_) {
      metrics_state_manager_ = MetricsStateManager::Create(
          GetLocalState(), enabled_state_provider_.get(), std::wstring(),
          user_data_dir, startup_visibility);
      metrics_state_manager_->InstantiateFieldTrialList();
    }
    return metrics_state_manager_.get();
  }

  // Registers the test log store's pref and returns a fresh TestUnsentLogStore
  // backed by the testing local state.
  std::unique_ptr<TestUnsentLogStore> InitializeTestLogStoreAndGet() {
    TestUnsentLogStore::RegisterPrefs(testing_local_state_.registry());
    return std::make_unique<TestUnsentLogStore>(GetLocalState());
  }

  // Returns the testing local-state PrefService.
  PrefService* GetLocalState() { return &testing_local_state_; }

  // Sets metrics reporting as enabled for testing.
  void EnableMetricsReporting() { SetMetricsReporting(true); }

  // Sets metrics reporting for testing.
  void SetMetricsReporting(bool enabled) {
    enabled_state_provider_->set_consent(enabled);
    enabled_state_provider_->set_enabled(enabled);
  }

  // Finds a histogram with the specified |name_hash| in |histograms|.
  // Returns nullptr if no histogram's hashed name matches.
  const base::HistogramBase* FindHistogram(
      const base::StatisticsRecorder::Histograms& histograms,
      uint64_t name_hash) {
    for (const base::HistogramBase* histogram : histograms) {
      if (name_hash == base::HashMetricName(histogram->histogram_name()))
        return histogram;
    }
    return nullptr;
  }

  // Checks whether |uma_log| contains any histograms that are not flagged
  // with kUmaStabilityHistogramFlag. Stability logs should only contain such
  // histograms.
  void CheckForNonStabilityHistograms(
      const ChromeUserMetricsExtension& uma_log) {
    const int kStabilityFlags = base::HistogramBase::kUmaStabilityHistogramFlag;
    const base::StatisticsRecorder::Histograms histograms =
        base::StatisticsRecorder::GetHistograms();
    for (int i = 0; i < uma_log.histogram_event_size(); ++i) {
      const uint64_t hash = uma_log.histogram_event(i).name_hash();

      // Every histogram event in the log must map back to a registered
      // histogram...
      const base::HistogramBase* histogram = FindHistogram(histograms, hash);
      EXPECT_TRUE(histogram) << hash;

      // ...and that histogram must carry the stability flag.
      EXPECT_TRUE(histogram->HasFlags(kStabilityFlags)) << hash;
    }
  }

  // Returns the number of samples logged to the specified histogram or 0 if
  // the histogram was not found.
  int GetHistogramSampleCount(const ChromeUserMetricsExtension& uma_log,
                              base::StringPiece histogram_name) {
    const auto histogram_name_hash = base::HashMetricName(histogram_name);
    int samples = 0;
    for (int i = 0; i < uma_log.histogram_event_size(); ++i) {
      const auto& histogram = uma_log.histogram_event(i);
      if (histogram.name_hash() == histogram_name_hash) {
        for (int j = 0; j < histogram.bucket_size(); ++j) {
          const auto& bucket = histogram.bucket(j);
          // Per proto comments, count field not being set means 1 sample.
          samples += (!bucket.has_count() ? 1 : bucket.count());
        }
      }
    }
    return samples;
  }

  // Returns the sampled count of the |kOnDidCreateMetricsLogHistogramName|
  // histogram in the currently staged log in |test_log_store|.
  int GetSampleCountOfOnDidCreateLogHistogram(MetricsLogStore* test_log_store) {
    ChromeUserMetricsExtension log;
    EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &log));
    return GetHistogramSampleCount(log, kOnDidCreateMetricsLogHistogramName);
  }

  // Returns the number of user-action events in the currently staged log in
  // |test_log_store|.
  int GetNumberOfUserActions(MetricsLogStore* test_log_store) {
    ChromeUserMetricsExtension log;
    EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &log));
    return log.user_action_event_size();
  }

  // Path of the temp dir created by SetUp(), used as the user data dir.
  const base::FilePath user_data_dir_path() { return temp_dir_.GetPath(); }

 protected:
  // MOCK_TIME lets tests fast-forward through scheduler/initialization
  // delays deterministically.
  base::test::TaskEnvironment task_environment_{
      base::test::TaskEnvironment::TimeSource::MOCK_TIME};
  base::test::ScopedFeatureList feature_list_;

 private:
  std::unique_ptr<TestEnabledStateProvider> enabled_state_provider_;
  TestingPrefServiceSimple testing_local_state_;
  std::unique_ptr<MetricsStateManager> metrics_state_manager_;
  base::ScopedTempDir temp_dir_;
};
345
// MetricsServiceTest parameterized on whether the
// kMetricsServiceDeltaSnapshotInBg feature is enabled.
class MetricsServiceTestWithFeatures
    : public MetricsServiceTest,
      public ::testing::WithParamInterface<std::tuple<bool>> {
 public:
  MetricsServiceTestWithFeatures() = default;
  ~MetricsServiceTestWithFeatures() override = default;

  // True when the test parameter requests enabling
  // kMetricsServiceDeltaSnapshotInBg.
  bool ShouldSnapshotInBg() { return std::get<0>(GetParam()); }

  void SetUp() override {
    MetricsServiceTest::SetUp();
    std::vector<base::test::FeatureRefAndParams> enabled_features;
    std::vector<base::test::FeatureRef> disabled_features;

    // Explicitly enable or disable the feature according to the param.
    if (ShouldSnapshotInBg()) {
      enabled_features.emplace_back(features::kMetricsServiceDeltaSnapshotInBg,
                                    base::FieldTrialParams());
    } else {
      disabled_features.emplace_back(
          features::kMetricsServiceDeltaSnapshotInBg);
    }

    feature_list_.InitWithFeaturesAndParameters(enabled_features,
                                                disabled_features);
  }

 private:
  // NOTE(review): shadows MetricsServiceTest::feature_list_; only this one is
  // initialized here.
  base::test::ScopedFeatureList feature_list_;
};
375
// Parameters for MetricsServiceTestWithStartupVisibility: the startup
// visibility to simulate, and the clean-exit beacon value the test expects
// for that visibility.
struct StartupVisibilityTestParams {
  metrics::StartupVisibility startup_visibility;
  bool expected_beacon_value;
};
380
// MetricsServiceTest parameterized on a StartupVisibilityTestParams plus
// whether kMetricsServiceDeltaSnapshotInBg is enabled.
class MetricsServiceTestWithStartupVisibility
    : public MetricsServiceTest,
      public ::testing::WithParamInterface<
          std::tuple<StartupVisibilityTestParams, bool>> {
 public:
  MetricsServiceTestWithStartupVisibility() = default;
  ~MetricsServiceTestWithStartupVisibility() override = default;

  // True when the test parameter requests enabling
  // kMetricsServiceDeltaSnapshotInBg.
  bool ShouldSnapshotInBg() { return std::get<1>(GetParam()); }

  void SetUp() override {
    MetricsServiceTest::SetUp();
    std::vector<base::test::FeatureRefAndParams> enabled_features;
    std::vector<base::test::FeatureRef> disabled_features;

    // Explicitly enable or disable the feature according to the param.
    if (ShouldSnapshotInBg()) {
      enabled_features.emplace_back(features::kMetricsServiceDeltaSnapshotInBg,
                                    base::FieldTrialParams());
    } else {
      disabled_features.emplace_back(
          features::kMetricsServiceDeltaSnapshotInBg);
    }

    feature_list_.InitWithFeaturesAndParameters(enabled_features,
                                                disabled_features);
  }

 private:
  // NOTE(review): shadows MetricsServiceTest::feature_list_; only this one is
  // initialized here.
  base::test::ScopedFeatureList feature_list_;
};
411
// A provider that activates |profile_metrics_trial| while providing system
// profile metrics and |session_data_trial| while providing current session
// data. Tests use this to check which trials end up recorded in a log.
// Both raw_ptr members are non-owning; the trials must outlive the provider.
class ExperimentTestMetricsProvider : public TestMetricsProvider {
 public:
  explicit ExperimentTestMetricsProvider(
      base::FieldTrial* profile_metrics_trial,
      base::FieldTrial* session_data_trial)
      : profile_metrics_trial_(profile_metrics_trial),
        session_data_trial_(session_data_trial) {}

  ~ExperimentTestMetricsProvider() override = default;

  void ProvideSystemProfileMetrics(
      SystemProfileProto* system_profile_proto) override {
    TestMetricsProvider::ProvideSystemProfileMetrics(system_profile_proto);
    profile_metrics_trial_->Activate();
  }

  void ProvideCurrentSessionData(
      ChromeUserMetricsExtension* uma_proto) override {
    TestMetricsProvider::ProvideCurrentSessionData(uma_proto);
    session_data_trial_->Activate();
  }

 private:
  raw_ptr<base::FieldTrial> profile_metrics_trial_;
  raw_ptr<base::FieldTrial> session_data_trial_;
};
438
HistogramExists(base::StringPiece name)439 bool HistogramExists(base::StringPiece name) {
440 return base::StatisticsRecorder::FindHistogram(name) != nullptr;
441 }
442
GetHistogramDeltaTotalCount(base::StringPiece name)443 base::HistogramBase::Count GetHistogramDeltaTotalCount(base::StringPiece name) {
444 return base::StatisticsRecorder::FindHistogram(name)
445 ->SnapshotDelta()
446 ->TotalCount();
447 }
448
449 } // namespace
450
// Run every MetricsServiceTestWithFeatures test with the background-snapshot
// feature both disabled and enabled.
INSTANTIATE_TEST_SUITE_P(All,
                         MetricsServiceTestWithFeatures,
                         ::testing::Combine(::testing::Bool()));
454
// Verifies that logs created by MetricsService receive sequential record ids
// continuing from the persisted kMetricsLogRecordId pref, regardless of log
// type.
TEST_P(MetricsServiceTestWithFeatures, RecordId) {
  EnableMetricsReporting();
  GetMetricsStateManager(user_data_dir_path())->ForceClientIdCreation();

  // Set an initial value for the record-ids, to make them predictable.
  GetLocalState()->SetInteger(prefs::kMetricsLogRecordId, 1000);

  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(user_data_dir_path()),
                             &client, GetLocalState());

  auto log1 = service.CreateLogForTesting(MetricsLog::ONGOING_LOG);
  auto log2 = service.CreateLogForTesting(MetricsLog::INITIAL_STABILITY_LOG);
  auto log3 = service.CreateLogForTesting(MetricsLog::INDEPENDENT_LOG);

  // Each created log gets the next id after the pref's starting value.
  EXPECT_EQ(1001, log1->uma_proto()->record_id());
  EXPECT_EQ(1002, log2->uma_proto()->record_id());
  EXPECT_EQ(1003, log3->uma_proto()->record_id());
}
474
// Verifies that no initial stability log is generated (and no browser-crash
// sample is emitted) when the previous session exited cleanly.
TEST_P(MetricsServiceTestWithFeatures, InitialStabilityLogAfterCleanShutDown) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();
  // Write a beacon file indicating that Chrome exited cleanly. Note that the
  // crash streak value is arbitrary.
  const base::FilePath beacon_file_path =
      user_data_dir_path().Append(kCleanExitBeaconFilename);
  ASSERT_TRUE(base::WriteFile(
      beacon_file_path, CleanExitBeacon::CreateBeaconFileContentsForTesting(
                            /*exited_cleanly=*/true, /*crash_streak=*/1)));

  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(user_data_dir_path()),
                             &client, GetLocalState());

  // Ownership is passed to |service| below.
  TestMetricsProvider* test_provider = new TestMetricsProvider();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();

  // No initial stability log should be generated.
  EXPECT_FALSE(service.has_unsent_logs());

  // Ensure that HasPreviousSessionData() is always called on providers,
  // for consistency, even if other conditions already indicate their presence.
  EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());

  // The test provider should not have been called upon to provide initial
  // stability nor regular stability metrics.
  EXPECT_FALSE(test_provider->provide_initial_stability_metrics_called());
  EXPECT_FALSE(test_provider->provide_stability_metrics_called());

  // As there wasn't an unclean shutdown, no browser crash samples should have
  // been emitted.
  histogram_tester.ExpectBucketCount("Stability.Counts2",
                                     StabilityEventType::kBrowserCrash, 0);
}
513
// Verifies that an initial stability log is generated when a provider reports
// having initial stability metrics, even though the previous session exited
// cleanly, and that the resulting log contains only stability histograms.
TEST_P(MetricsServiceTestWithFeatures, InitialStabilityLogAtProviderRequest) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();

  // Save an existing system profile to prefs, to correspond to what would be
  // saved from a previous session.
  TestMetricsServiceClient client;
  TestMetricsLog log("0a94430b-18e5-43c8-a657-580f7e855ce1", 1, &client);
  DelegatingProvider delegating_provider;
  TestMetricsService::RecordCurrentEnvironmentHelper(&log, GetLocalState(),
                                                     &delegating_provider);

  // Record stability build time and version from previous session, so that
  // stability metrics (including exited cleanly flag) won't be cleared.
  EnvironmentRecorder(GetLocalState())
      .SetBuildtimeAndVersion(MetricsLog::GetBuildTime(),
                              client.GetVersionString());

  // Write a beacon file indicating that Chrome exited cleanly. Note that the
  // crash streak value is arbitrary.
  const base::FilePath beacon_file_path =
      user_data_dir_path().Append(kCleanExitBeaconFilename);
  ASSERT_TRUE(base::WriteFile(
      beacon_file_path, CleanExitBeacon::CreateBeaconFileContentsForTesting(
                            /*exited_cleanly=*/true, /*crash_streak=*/1)));

  TestMetricsService service(GetMetricsStateManager(user_data_dir_path()),
                             &client, GetLocalState());
  // Add a metrics provider that requests a stability log. Ownership is passed
  // to |service|.
  TestMetricsProvider* test_provider = new TestMetricsProvider();
  test_provider->set_has_initial_stability_metrics(true);
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();

  // The initial stability log should be generated and persisted in unsent logs.
  MetricsLogStore* test_log_store = service.LogStoreForTest();
  EXPECT_TRUE(test_log_store->has_unsent_logs());
  EXPECT_FALSE(test_log_store->has_staged_log());

  // Ensure that HasPreviousSessionData() is always called on providers,
  // for consistency, even if other conditions already indicate their presence.
  EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());

  // The test provider should have been called upon to provide initial
  // stability and regular stability metrics.
  EXPECT_TRUE(test_provider->provide_initial_stability_metrics_called());
  EXPECT_TRUE(test_provider->provide_stability_metrics_called());

  // Stage the log and retrieve it.
  test_log_store->StageNextLog();
  EXPECT_TRUE(test_log_store->has_staged_log());

  ChromeUserMetricsExtension uma_log;
  EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));

  // The stability log carries ids and a system profile but no events, and
  // only stability-flagged histograms.
  EXPECT_TRUE(uma_log.has_client_id());
  EXPECT_TRUE(uma_log.has_session_id());
  EXPECT_TRUE(uma_log.has_system_profile());
  EXPECT_EQ(0, uma_log.user_action_event_size());
  EXPECT_EQ(0, uma_log.omnibox_event_size());
  CheckForNonStabilityHistograms(uma_log);
  EXPECT_EQ(
      1, GetHistogramSampleCount(uma_log, "UMA.InitialStabilityRecordBeacon"));

  // As there wasn't an unclean shutdown, no browser crash samples should have
  // been emitted.
  histogram_tester.ExpectBucketCount("Stability.Counts2",
                                     StabilityEventType::kBrowserCrash, 0);
}
585
// Verifies that an independent log offered by a provider is staged ahead of
// the first ongoing log, and that histograms recorded in the meantime go into
// the ongoing log rather than the independent one.
TEST_P(MetricsServiceTestWithFeatures, IndependentLogAtProviderRequest) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Create a provider that will have one independent log to provide.
  // Ownership is passed to |service|.
  auto* test_provider = new TestIndependentMetricsProvider();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  // Verify that the independent log provider has not yet been called, and emit
  // a histogram. This histogram should not be put into the independent log.
  EXPECT_FALSE(test_provider->has_independent_metrics_called());
  EXPECT_FALSE(test_provider->provide_independent_metrics_called());
  const std::string test_histogram = "Test.Histogram";
  base::UmaHistogramBoolean(test_histogram, true);

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time by another |initialization_delay|, which is when
  // metrics providers are called to provide independent logs.
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_TRUE(test_provider->has_independent_metrics_called());
  EXPECT_TRUE(test_provider->provide_independent_metrics_called());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log.
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| twice, we only need to fast forward by
  // N - 2 * |initialization_delay|.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      2 * initialization_delay);
  EXPECT_EQ(TestMetricsService::SENDING_LOGS, service.state());

  MetricsLogStore* test_log_store = service.LogStoreForTest();

  // The currently staged log should be the independent log created by the
  // independent log provider. The log should have a client id of 123. It should
  // also not contain |test_histogram|.
  ASSERT_TRUE(test_log_store->has_staged_log());
  ChromeUserMetricsExtension uma_log;
  EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
  EXPECT_EQ(uma_log.client_id(), 123UL);
  EXPECT_EQ(GetHistogramSampleCount(uma_log, test_histogram), 0);

  // Discard the staged log and stage the next one. It should be the first
  // ongoing log.
  test_log_store->DiscardStagedLog();
  ASSERT_TRUE(test_log_store->has_unsent_logs());
  test_log_store->StageNextLog();
  ASSERT_TRUE(test_log_store->has_staged_log());

  // Verify that the first ongoing log contains |test_histogram| (it should not
  // have been put into the independent log).
  EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
  EXPECT_EQ(GetHistogramSampleCount(uma_log, test_histogram), 1);
}
655
// Verifies that providers' OnDidCreateMetricsLog() runs once when the first
// ongoing log is created and once more when the service is stopped.
TEST_P(MetricsServiceTestWithFeatures, OnDidCreateMetricsLogAtShutdown) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();
  TestMetricsServiceClient client;

  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Create a provider that will log to |kOnDidCreateMetricsLogHistogramName|
  // in OnDidCreateMetricsLog(). Ownership is passed to |service|.
  auto* test_provider = new TestMetricsProviderForOnDidCreateMetricsLog();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();

  // OnDidCreateMetricsLog() is called once when the first ongoing log is
  // created.
  histogram_tester.ExpectBucketCount(kOnDidCreateMetricsLogHistogramName, true,
                                     1);
  service.Stop();

  // OnDidCreateMetricsLog() will be called during shutdown to emit histograms.
  histogram_tester.ExpectBucketCount(kOnDidCreateMetricsLogHistogramName, true,
                                     2);

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting(
      kOnDidCreateMetricsLogHistogramName);
}
688
// Verifies that providers' ProvideHistograms() runs both when a log is
// created and when the current log is staged.
TEST_P(MetricsServiceTestWithFeatures, ProvideHistograms) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();
  TestMetricsServiceClient client;

  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Create a provider that will log to |kProvideHistogramsHistogramName|
  // in ProvideHistograms(). Ownership is passed to |service|.
  auto* test_provider = new TestMetricsProviderForProvideHistograms();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();

  // ProvideHistograms() is called in OnDidCreateMetricsLog().
  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 1);

  // Staging the log triggers a second call.
  service.StageCurrentLogForTest();

  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 2);

  service.Stop();

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting(
      kProvideHistogramsHistogramName);
}
720
// Verifies that when a provider skips histogram emission at log-creation time
// (no-op OnDidCreateMetricsLog()), ProvideHistograms() is still invoked when
// the log is closed, and that this fallback resets between logs.
TEST_P(MetricsServiceTestWithFeatures, ProvideHistogramsEarlyReturn) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();
  TestMetricsServiceClient client;

  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Create a provider whose OnDidCreateMetricsLog() is a no-op, so
  // |kProvideHistogramsHistogramName| is not emitted at log creation.
  // Ownership is passed to |service|.
  auto* test_provider =
      new TestMetricsProviderForProvideHistogramsEarlyReturn();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();

  // Make sure no histogram is emitted when having an early return.
  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 0);

  service.StageCurrentLogForTest();
  // ProvideHistograms() should be called in ProvideCurrentSessionData() if
  // histograms haven't been emitted.
  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 1);

  // Try another log to make sure emission status is reset between logs.
  service.LogStoreForTest()->DiscardStagedLog();
  service.StageCurrentLogForTest();
  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 2);

  service.Stop();

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting(
      kProvideHistogramsHistogramName);
}
759
760 INSTANTIATE_TEST_SUITE_P(
761 All,
762 MetricsServiceTestWithStartupVisibility,
763 ::testing::Combine(
764 ::testing::Values(
765 StartupVisibilityTestParams{
766 .startup_visibility = StartupVisibility::kUnknown,
767 .expected_beacon_value = true},
768 StartupVisibilityTestParams{
769 .startup_visibility = StartupVisibility::kBackground,
770 .expected_beacon_value = true},
771 StartupVisibilityTestParams{
772 .startup_visibility = StartupVisibility::kForeground,
773 .expected_beacon_value = false}),
774 ::testing::Bool()));
775
TEST_P(MetricsServiceTestWithStartupVisibility,InitialStabilityLogAfterCrash)776 TEST_P(MetricsServiceTestWithStartupVisibility, InitialStabilityLogAfterCrash) {
777 base::HistogramTester histogram_tester;
778 PrefService* local_state = GetLocalState();
779 EnableMetricsReporting();
780
781 // Write a beacon file indicating that Chrome exited uncleanly. Note that the
782 // crash streak value is arbitrary.
783 const base::FilePath beacon_file_path =
784 user_data_dir_path().Append(kCleanExitBeaconFilename);
785 ASSERT_TRUE(base::WriteFile(
786 beacon_file_path, CleanExitBeacon::CreateBeaconFileContentsForTesting(
787 /*exited_cleanly=*/false, /*crash_streak=*/1)));
788
789 // Set up prefs to simulate restarting after a crash.
790
791 // Save an existing system profile to prefs, to correspond to what would be
792 // saved from a previous session.
793 TestMetricsServiceClient client;
794 const std::string kCrashedVersion = "4.0.321.0-64-devel";
795 client.set_version_string(kCrashedVersion);
796 TestMetricsLog log("0a94430b-18e5-43c8-a657-580f7e855ce1", 1, &client);
797 DelegatingProvider delegating_provider;
798 TestMetricsService::RecordCurrentEnvironmentHelper(&log, local_state,
799 &delegating_provider);
800
801 // Record stability build time and version from previous session, so that
802 // stability metrics (including exited cleanly flag) won't be cleared.
803 EnvironmentRecorder(local_state)
804 .SetBuildtimeAndVersion(MetricsLog::GetBuildTime(),
805 client.GetVersionString());
806
807 const std::string kCurrentVersion = "5.0.322.0-64-devel";
808 client.set_version_string(kCurrentVersion);
809
810 StartupVisibilityTestParams params = std::get<0>(GetParam());
811 TestMetricsService service(
812 GetMetricsStateManager(user_data_dir_path(), params.startup_visibility),
813 &client, local_state);
814 // Add a provider.
815 TestMetricsProvider* test_provider = new TestMetricsProvider();
816 service.RegisterMetricsProvider(
817 std::unique_ptr<MetricsProvider>(test_provider));
818 service.InitializeMetricsRecordingState();
819
820 // Verify that Chrome is (or is not) watching for crashes by checking the
821 // beacon value.
822 std::string beacon_file_contents;
823 ASSERT_TRUE(base::ReadFileToString(beacon_file_path, &beacon_file_contents));
824 std::string partial_expected_contents;
825 #if BUILDFLAG(IS_ANDROID)
826 // Whether Chrome is watching for crashes after
827 // InitializeMetricsRecordingState() depends on the type of Android Chrome
828 // session. See the comments in MetricsService::InitializeMetricsState() for
829 // more details.
830 const std::string beacon_value =
831 params.expected_beacon_value ? "true" : "false";
832 partial_expected_contents = "exited_cleanly\":" + beacon_value;
833 #else
834 partial_expected_contents = "exited_cleanly\":false";
835 #endif // BUILDFLAG(IS_ANDROID)
836 EXPECT_TRUE(base::Contains(beacon_file_contents, partial_expected_contents));
837
838 // The initial stability log should be generated and persisted in unsent logs.
839 MetricsLogStore* test_log_store = service.LogStoreForTest();
840 EXPECT_TRUE(test_log_store->has_unsent_logs());
841 EXPECT_FALSE(test_log_store->has_staged_log());
842
843 // Ensure that HasPreviousSessionData() is always called on providers,
844 // for consistency, even if other conditions already indicate their presence.
845 EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());
846
847 // The test provider should have been called upon to provide initial
848 // stability and regular stability metrics.
849 EXPECT_TRUE(test_provider->provide_initial_stability_metrics_called());
850 EXPECT_TRUE(test_provider->provide_stability_metrics_called());
851
852 // The test provider should have been called when the initial stability log
853 // was closed.
854 EXPECT_TRUE(test_provider->record_initial_histogram_snapshots_called());
855
856 // Stage the log and retrieve it.
857 test_log_store->StageNextLog();
858 EXPECT_TRUE(test_log_store->has_staged_log());
859
860 ChromeUserMetricsExtension uma_log;
861 EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
862
863 EXPECT_TRUE(uma_log.has_client_id());
864 EXPECT_TRUE(uma_log.has_session_id());
865 EXPECT_TRUE(uma_log.has_system_profile());
866 EXPECT_EQ(0, uma_log.user_action_event_size());
867 EXPECT_EQ(0, uma_log.omnibox_event_size());
868 CheckForNonStabilityHistograms(uma_log);
869 EXPECT_EQ(
870 1, GetHistogramSampleCount(uma_log, "UMA.InitialStabilityRecordBeacon"));
871
872 // Verify that the histograms emitted by the test provider made it into the
873 // log.
874 EXPECT_EQ(GetHistogramSampleCount(uma_log, "TestMetricsProvider.Initial"), 1);
875 EXPECT_EQ(GetHistogramSampleCount(uma_log, "TestMetricsProvider.Regular"), 1);
876
877 EXPECT_EQ(kCrashedVersion, uma_log.system_profile().app_version());
878 EXPECT_EQ(kCurrentVersion,
879 uma_log.system_profile().log_written_by_app_version());
880
881 histogram_tester.ExpectBucketCount("Stability.Counts2",
882 StabilityEventType::kBrowserCrash, 1);
883 }
884
// Verifies that each closed ongoing log (the first and subsequent ones)
// contains exactly one sample of the histogram emitted by a provider's
// OnDidCreateMetricsLog(), and that providers are asked to snapshot
// histograms every time a log is closed.
TEST_P(MetricsServiceTestWithFeatures,
       InitialLogsHaveOnDidCreateMetricsLogHistograms) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Create a provider that will log to |kOnDidCreateMetricsLogHistogramName|
  // in OnDidCreateMetricsLog().
  auto* test_provider = new TestMetricsProviderForOnDidCreateMetricsLog();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log. Also verify that the test provider
  // was called when closing the log.
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| once, we only need to fast forward by
  // N - |initialization_delay|.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      initialization_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  EXPECT_TRUE(test_provider->record_histogram_snapshots_called());

  MetricsLogStore* test_log_store = service.LogStoreForTest();

  // Stage the next log, which should be the first ongoing log.
  // Check that it has one sample in |kOnDidCreateMetricsLogHistogramName|.
  test_log_store->StageNextLog();
  EXPECT_EQ(1, GetSampleCountOfOnDidCreateLogHistogram(test_log_store));

  // Discard the staged log and close and stage the next log, which is the
  // second "ongoing log".
  // Check that it has one sample in |kOnDidCreateMetricsLogHistogramName|.
  // Also verify that the test provider was called when closing the new log.
  test_provider->set_record_histogram_snapshots_called(false);
  test_log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(1, GetSampleCountOfOnDidCreateLogHistogram(test_log_store));
  EXPECT_TRUE(test_provider->record_histogram_snapshots_called());

  // Check one more log for good measure.
  test_provider->set_record_histogram_snapshots_called(false);
  test_log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(1, GetSampleCountOfOnDidCreateLogHistogram(test_log_store));
  EXPECT_TRUE(test_provider->record_histogram_snapshots_called());

  service.Stop();

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting(
      kOnDidCreateMetricsLogHistogramName);
}
952
// Verifies that MarkCurrentHistogramsAsReported() consumes all samples
// collected so far, so that only samples emitted afterwards appear in the
// next snapshot delta.
TEST_P(MetricsServiceTestWithFeatures, MarkCurrentHistogramsAsReported) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Emit to histogram |Test.Before.Histogram|.
  ASSERT_FALSE(HistogramExists("Test.Before.Histogram"));
  base::UmaHistogramBoolean("Test.Before.Histogram", true);
  ASSERT_TRUE(HistogramExists("Test.Before.Histogram"));

  // Mark histogram data that has been collected until now (in particular, the
  // |Test.Before.Histogram| sample) as reported.
  service.MarkCurrentHistogramsAsReported();

  // Emit to histogram |Test.After.Histogram|.
  ASSERT_FALSE(HistogramExists("Test.After.Histogram"));
  base::UmaHistogramBoolean("Test.After.Histogram", true);
  ASSERT_TRUE(HistogramExists("Test.After.Histogram"));

  // Verify that the |Test.Before.Histogram| sample was marked as reported, and
  // is not included in the next snapshot.
  EXPECT_EQ(0, GetHistogramDeltaTotalCount("Test.Before.Histogram"));
  // Verify that the |Test.After.Histogram| sample was not marked as reported,
  // and is included in the next snapshot.
  EXPECT_EQ(1, GetHistogramDeltaTotalCount("Test.After.Histogram"));

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting("Test.Before.Histogram");
  base::StatisticsRecorder::ForgetHistogramForTesting("Test.After.Histogram");
}
984
TEST_P(MetricsServiceTestWithFeatures, LogHasUserActions) {
  // This test verifies that user actions are properly captured in UMA logs.
  // In particular, it checks that the first log has actions, a behavior that
  // was buggy in the past, plus additional checks for subsequent logs with
  // different numbers of actions.
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();

  // Start() will create an initial log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  // Record three actions before the first log is closed; they should all land
  // in the initial log.
  base::RecordAction(base::UserMetricsAction("TestAction"));
  base::RecordAction(base::UserMetricsAction("TestAction"));
  base::RecordAction(base::UserMetricsAction("DifferentAction"));

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log.
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| once, we only need to fast forward by
  // N - |initialization_delay|.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      initialization_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());

  MetricsLogStore* test_log_store = service.LogStoreForTest();

  // Stage the next log, which should be the initial metrics log.
  test_log_store->StageNextLog();
  EXPECT_EQ(3, GetNumberOfUserActions(test_log_store));

  // Log another action.
  base::RecordAction(base::UserMetricsAction("TestAction"));
  test_log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(1, GetNumberOfUserActions(test_log_store));

  // Check a log with no actions.
  test_log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(0, GetNumberOfUserActions(test_log_store));

  // And another one with a couple.
  base::RecordAction(base::UserMetricsAction("TestAction"));
  base::RecordAction(base::UserMetricsAction("TestAction"));
  test_log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(2, GetNumberOfUserActions(test_log_store));
}
1046
TEST_P(MetricsServiceTestWithFeatures, FirstLogCreatedBeforeUnsentLogsSent) {
  // This test checks that we will create and serialize the first ongoing log
  // before starting to send unsent logs from the past session. The latter is
  // simulated by injecting some fake ongoing logs into the MetricsLogStore.
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  MetricsLogStore* test_log_store = service.LogStoreForTest();

  // Set up the log store with an existing fake log entry. The string content
  // is never deserialized to proto, so we're just passing some dummy content.
  ASSERT_EQ(0u, test_log_store->initial_log_count());
  ASSERT_EQ(0u, test_log_store->ongoing_log_count());
  test_log_store->StoreLog("blah_blah", MetricsLog::ONGOING_LOG, LogMetadata(),
                           MetricsLogsEventManager::CreateReason::kUnknown);
  // Note: |initial_log_count()| refers to initial stability logs, so the above
  // log is counted an ongoing log (per its type).
  ASSERT_EQ(0u, test_log_store->initial_log_count());
  ASSERT_EQ(1u, test_log_store->ongoing_log_count());

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log.
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| once, we only need to fast forward by
  // N - |initialization_delay|.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      initialization_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  // When the init task is complete, the first ongoing log should be created
  // and added to the ongoing logs.
  EXPECT_EQ(0u, test_log_store->initial_log_count());
  EXPECT_EQ(2u, test_log_store->ongoing_log_count());
}
1095
// Verifies that stopping the service before it has ever been started still
// notifies providers that recording is disabled.
TEST_P(MetricsServiceTestWithFeatures,
       MetricsProviderOnRecordingDisabledCalledOnInitialStop) {
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  TestMetricsProvider* test_provider = new TestMetricsProvider();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();
  service.Stop();

  EXPECT_TRUE(test_provider->on_recording_disabled_called());
}
1111
// Verifies that registered providers are initialized when the metrics
// recording state is initialized.
TEST_P(MetricsServiceTestWithFeatures, MetricsProvidersInitialized) {
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  TestMetricsProvider* test_provider = new TestMetricsProvider();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();

  EXPECT_TRUE(test_provider->init_called());
}
1125
1126 // Verify that FieldTrials activated by a MetricsProvider are reported by the
1127 // FieldTrialsProvider.
TEST_P(MetricsServiceTestWithFeatures,ActiveFieldTrialsReported)1128 TEST_P(MetricsServiceTestWithFeatures, ActiveFieldTrialsReported) {
1129 EnableMetricsReporting();
1130 TestMetricsServiceClient client;
1131 TestMetricsService service(GetMetricsStateManager(), &client,
1132 GetLocalState());
1133
1134 // Set up FieldTrials.
1135 const std::string trial_name1 = "CoffeeExperiment";
1136 const std::string group_name1 = "Free";
1137 base::FieldTrial* trial1 =
1138 base::FieldTrialList::CreateFieldTrial(trial_name1, group_name1);
1139
1140 const std::string trial_name2 = "DonutExperiment";
1141 const std::string group_name2 = "MapleBacon";
1142 base::FieldTrial* trial2 =
1143 base::FieldTrialList::CreateFieldTrial(trial_name2, group_name2);
1144
1145 service.RegisterMetricsProvider(
1146 std::make_unique<ExperimentTestMetricsProvider>(trial1, trial2));
1147
1148 service.InitializeMetricsRecordingState();
1149 service.Start();
1150 service.StageCurrentLogForTest();
1151
1152 MetricsLogStore* test_log_store = service.LogStoreForTest();
1153 ChromeUserMetricsExtension uma_log;
1154 EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
1155
1156 // Verify that the reported FieldTrial IDs are for the trial set up by this
1157 // test.
1158 EXPECT_TRUE(
1159 IsFieldTrialPresent(uma_log.system_profile(), trial_name1, group_name1));
1160 EXPECT_TRUE(
1161 IsFieldTrialPresent(uma_log.system_profile(), trial_name2, group_name2));
1162 }
1163
// Verifies that system profile data is provided to persistent storage only
// once recording is enabled (i.e., when the service starts), not at
// initialization time.
TEST_P(MetricsServiceTestWithFeatures,
       SystemProfileDataProvidedOnEnableRecording) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  TestMetricsProvider* test_provider = new TestMetricsProvider();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();

  // ProvideSystemProfileMetrics() shouldn't be called initially.
  EXPECT_FALSE(test_provider->provide_system_profile_metrics_called());
  EXPECT_FALSE(service.persistent_system_profile_provided());

  service.Start();

  // Start should call ProvideSystemProfileMetrics().
  EXPECT_TRUE(test_provider->provide_system_profile_metrics_called());
  EXPECT_TRUE(service.persistent_system_profile_provided());
  EXPECT_FALSE(service.persistent_system_profile_complete());
}
1188
1189 // Verify that the two separate MetricsSchedulers (MetricsRotationScheduler and
1190 // MetricsUploadScheduler) function together properly.
TEST_P(MetricsServiceTestWithFeatures,SplitRotation)1191 TEST_P(MetricsServiceTestWithFeatures, SplitRotation) {
1192 EnableMetricsReporting();
1193 TestMetricsServiceClient client;
1194 TestMetricsService service(GetMetricsStateManager(), &client,
1195 GetLocalState());
1196 service.InitializeMetricsRecordingState();
1197 service.Start();
1198 ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
1199
1200 // Fast forward the time by |initialization_delay|, which is when the pending
1201 // init tasks will run.
1202 base::TimeDelta initialization_delay = service.GetInitializationDelay();
1203 task_environment_.FastForwardBy(initialization_delay);
1204 EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());
1205
1206 // Fast forward the time until the MetricsRotationScheduler first runs, which
1207 // should complete the first ongoing log. The independent-metrics upload job
1208 // will be started and always be a task. This should also mark the rotation
1209 // scheduler as idle, so that the next time we attempt to create a log, we
1210 // return early (and don't create a log).
1211 // Note: The first log is only created after N = GetInitialIntervalSeconds()
1212 // seconds since the start, and since we already fast forwarded by
1213 // |initialization_delay| once, we only need to fast forward by
1214 // N - |initialization_delay|.
1215 MetricsLogStore* log_store = service.LogStoreForTest();
1216 EXPECT_FALSE(log_store->has_unsent_logs());
1217 task_environment_.FastForwardBy(
1218 base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
1219 initialization_delay);
1220 ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
1221 EXPECT_TRUE(log_store->has_unsent_logs());
1222 EXPECT_EQ(1U, log_store->ongoing_log_count());
1223
1224 // There should be three (delayed) tasks: one for querying independent logs
1225 // from metrics providers, one for uploading the unsent log, and one for
1226 // creating the next log.
1227 EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());
1228
1229 // Fast forward the time so that the upload loop starts uploading logs.
1230 base::TimeDelta unsent_log_interval =
1231 MetricsUploadScheduler::GetUnsentLogsInterval();
1232 task_environment_.FastForwardBy(unsent_log_interval);
1233 EXPECT_TRUE(client.uploader()->is_uploading());
1234 // There should be two (delayed) tasks: one for querying independent logs from
1235 // metrics providers, and one for creating the next log. I.e., the task to
1236 // upload a log should be running, and should not be in the task queue
1237 // anymore. The uploading of this log will only be completed later on in order
1238 // to simulate an edge case here.
1239 EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1240
1241 // Fast forward the time so that the task to create another log is run. This
1242 // time, however, it should return early due to being idle (i.e., not create a
1243 // log), and it should not post another task to create another log. I.e.,
1244 // there should only be one (delayed) task: one for querying independent logs
1245 // from metrics providers.
1246 // Note: The log is only created after |rotation_scheduler_interval| seconds,
1247 // and since we already fast forwarded by |unsent_log_interval| once, we only
1248 // need to fast forward by
1249 // |rotation_scheduler_interval| - |unsent_log_interval|.
1250 base::TimeDelta rotation_scheduler_interval = client.GetUploadInterval();
1251 task_environment_.FastForwardBy(rotation_scheduler_interval -
1252 unsent_log_interval);
1253 EXPECT_EQ(1U, log_store->ongoing_log_count());
1254 EXPECT_EQ(1U, task_environment_.GetPendingMainThreadTaskCount());
1255
1256 // Simulate completing the upload. Since there is no other log to be uploaded,
1257 // no task should be re-posted. I.e., there should only be one (delayed)
1258 // task: one for querying independent logs from metrics providers.
1259 client.uploader()->CompleteUpload(200);
1260 EXPECT_FALSE(client.uploader()->is_uploading());
1261 EXPECT_FALSE(log_store->has_unsent_logs());
1262 EXPECT_EQ(1U, task_environment_.GetPendingMainThreadTaskCount());
1263
1264 // Simulate interacting with the browser, which should 1) set the rotation
1265 // scheduler to not idle, 2) queue a task to upload the next log (if there is
1266 // one), and 3) queue a task to create the next log. I.e., there should be
1267 // three (delayed) tasks: one for querying independent logs from metrics
1268 // providers, one for uploading an unsent log, and one for creating the next
1269 // log.
1270 service.OnApplicationNotIdle();
1271 EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());
1272
1273 // We now simulate a more common scenario.
1274
1275 // Fast forward the time so that the task to upload a log runs. Since there
1276 // should be no logs, it should return early, and not re-post a task. I.e.,
1277 // there should be two tasks: one for querying independent logs from metrics
1278 // providers, and one for creating the next log.
1279 task_environment_.FastForwardBy(unsent_log_interval);
1280 EXPECT_FALSE(client.uploader()->is_uploading());
1281 EXPECT_FALSE(log_store->has_unsent_logs());
1282 EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1283
1284 // Fast forward the time so that the next log is created. It should re-post
1285 // a task to create a new log, and should also re-start the upload scheduler.
1286 // I.e., there should be three (delayed) tasks: one for querying independent
1287 // logs from metrics providers, one for uploading an unsent log, and one for
1288 // creating the next log.
1289 // Note: The log is only created after |rotation_scheduler_interval| seconds,
1290 // and since we already fast forwarded by |unsent_log_interval| once, we only
1291 // need to fast forward by
1292 // |rotation_scheduler_interval| - |unsent_log_interval|.
1293 task_environment_.FastForwardBy(rotation_scheduler_interval -
1294 unsent_log_interval);
1295 EXPECT_TRUE(log_store->has_unsent_logs());
1296 EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());
1297
1298 // Fast forward the time so that the task to upload a log runs.
1299 task_environment_.FastForwardBy(unsent_log_interval);
1300 EXPECT_TRUE(client.uploader()->is_uploading());
1301 // There should be two (delayed) tasks: one for querying independent logs from
1302 // metrics providers, and one for creating the next log. I.e., the task to
1303 // upload a log should be running, and should not be in the task queue
1304 // anymore.
1305 EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1306
1307 // Simulate completing the upload. However, before doing so, add a dummy log
1308 // in order to test that when the upload task completes, if it detects another
1309 // log, it will re-post a task to upload the next log. I.e., after uploading
1310 // the log, there should be three (delayed) tasks: one for querying
1311 // independent logs from metrics providers, one for uploading an unsent log,
1312 // and one for creating the next log.
1313 log_store->StoreLog("dummy log", MetricsLog::LogType::ONGOING_LOG,
1314 LogMetadata(),
1315 MetricsLogsEventManager::CreateReason::kUnknown);
1316 EXPECT_EQ(2U, log_store->ongoing_log_count());
1317 client.uploader()->CompleteUpload(200);
1318 EXPECT_FALSE(client.uploader()->is_uploading());
1319 EXPECT_EQ(1U, log_store->ongoing_log_count());
1320 EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());
1321
1322 // Fast forward the time so that the task to upload a log runs.
1323 task_environment_.FastForwardBy(unsent_log_interval);
1324 EXPECT_TRUE(client.uploader()->is_uploading());
1325 // There should be two (delayed) tasks: one for querying independent logs from
1326 // metrics providers, and one for creating the next log. I.e., the task to
1327 // upload a log should be running, and should not be in the task queue
1328 // anymore.
1329 EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1330
1331 // Simulate completing the upload. Since there is no other log to be uploaded,
1332 // no task should be posted. I.e., there should only be two (delayed) tasks:
1333 // one for querying independent logs from metrics providers, and one.
1334 client.uploader()->CompleteUpload(200);
1335 EXPECT_FALSE(client.uploader()->is_uploading());
1336 EXPECT_FALSE(log_store->has_unsent_logs());
1337 EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1338
1339 // Fast forward the time so that the task to create another log is run. It
1340 // should return early due to being idle (i.e., not create a log), and it
1341 // should not post another task to create another log. I.e., there should only
1342 // be one (delayed) task: one for querying independent logs from metrics
1343 // providers.
1344 // Note: The log is only created after |rotation_scheduler_interval| seconds,
1345 // and since we already fast forwarded by |unsent_log_interval| twice, we only
1346 // need to fast forward by
1347 // |rotation_scheduler_interval| - 2 * |unsent_log_interval|.
1348 task_environment_.FastForwardBy(rotation_scheduler_interval -
1349 2 * unsent_log_interval);
1350 EXPECT_FALSE(log_store->has_unsent_logs());
1351 EXPECT_EQ(1U, task_environment_.GetPendingMainThreadTaskCount());
1352 }
1353
// Verifies that StartUpdatingLastLiveTimestamp() schedules periodic updates of
// the "browser last live" timestamp pref rather than writing it immediately.
TEST_P(MetricsServiceTestWithFeatures, LastLiveTimestamp) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  base::Time initial_last_live_time =
      GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp);

  service.InitializeMetricsRecordingState();
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log.
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| once, we only need to fast forward by
  // N - |initialization_delay|.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      initialization_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  size_t num_pending_tasks = task_environment_.GetPendingMainThreadTaskCount();

  service.StartUpdatingLastLiveTimestamp();

  // Starting the update sequence should not write anything, but should
  // set up for a later write.
  EXPECT_EQ(
      initial_last_live_time,
      GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp));
  EXPECT_EQ(num_pending_tasks + 1,
            task_environment_.GetPendingMainThreadTaskCount());

  // Fast forward the time so that the task to update the "last alive timestamp"
  // runs.
  task_environment_.FastForwardBy(service.GetUpdateLastAliveTimestampDelay());

  // Verify that the time has updated in local state.
  base::Time updated_last_live_time =
      GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp);
  EXPECT_LT(initial_last_live_time, updated_last_live_time);

  // Double check that an update was scheduled again.
  task_environment_.FastForwardBy(service.GetUpdateLastAliveTimestampDelay());
  EXPECT_LT(
      updated_last_live_time,
      GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp));
}
1411
// Verifies that enablement observers are notified with `true` when the
// service starts and with `false` when it stops.
TEST_P(MetricsServiceTestWithFeatures, EnablementObserverNotification) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());
  service.InitializeMetricsRecordingState();

  // Captures the most recent enablement notification; empty until notified.
  absl::optional<bool> enabled;
  auto observer = [&enabled](bool notification) { enabled = notification; };

  auto subscription =
      service.AddEnablementObserver(base::BindLambdaForTesting(observer));

  service.Start();
  ASSERT_TRUE(enabled.has_value());
  EXPECT_TRUE(enabled.value());

  enabled.reset();

  service.Stop();
  ASSERT_TRUE(enabled.has_value());
  EXPECT_FALSE(enabled.value());
}
1435
1436 // Verifies that when a cloned install is detected, logs are purged.
TEST_P(MetricsServiceTestWithFeatures,PurgeLogsOnClonedInstallDetected)1437 TEST_P(MetricsServiceTestWithFeatures, PurgeLogsOnClonedInstallDetected) {
1438 EnableMetricsReporting();
1439 TestMetricsServiceClient client;
1440 TestMetricsService service(GetMetricsStateManager(), &client,
1441 GetLocalState());
1442 service.InitializeMetricsRecordingState();
1443
1444 // Store various logs.
1445 MetricsLogStore* test_log_store = service.LogStoreForTest();
1446 test_log_store->StoreLog("dummy log data", MetricsLog::ONGOING_LOG,
1447 LogMetadata(),
1448 MetricsLogsEventManager::CreateReason::kUnknown);
1449 test_log_store->StageNextLog();
1450 test_log_store->StoreLog("more dummy log data", MetricsLog::ONGOING_LOG,
1451 LogMetadata(),
1452 MetricsLogsEventManager::CreateReason::kUnknown);
1453 test_log_store->StoreLog("dummy stability log",
1454 MetricsLog::INITIAL_STABILITY_LOG, LogMetadata(),
1455 MetricsLogsEventManager::CreateReason::kUnknown);
1456 test_log_store->SetAlternateOngoingLogStore(InitializeTestLogStoreAndGet());
1457 test_log_store->StoreLog("dummy log for alternate ongoing log store",
1458 MetricsLog::ONGOING_LOG, LogMetadata(),
1459 MetricsLogsEventManager::CreateReason::kUnknown);
1460 EXPECT_TRUE(test_log_store->has_staged_log());
1461 EXPECT_TRUE(test_log_store->has_unsent_logs());
1462
1463 ClonedInstallDetector* cloned_install_detector =
1464 GetMetricsStateManager()->cloned_install_detector_for_testing();
1465
1466 static constexpr char kTestRawId[] = "test";
1467 // Hashed machine id for |kTestRawId|.
1468 static constexpr int kTestHashedId = 2216819;
1469
1470 // Save a machine id that will not cause a clone to be detected.
1471 GetLocalState()->SetInteger(prefs::kMetricsMachineId, kTestHashedId);
1472 cloned_install_detector->SaveMachineId(GetLocalState(), kTestRawId);
1473 // Verify that the logs are still present.
1474 EXPECT_TRUE(test_log_store->has_staged_log());
1475 EXPECT_TRUE(test_log_store->has_unsent_logs());
1476
1477 // Save a machine id that will cause a clone to be detected.
1478 GetLocalState()->SetInteger(prefs::kMetricsMachineId, kTestHashedId + 1);
1479 cloned_install_detector->SaveMachineId(GetLocalState(), kTestRawId);
1480 // Verify that the logs were purged.
1481 EXPECT_FALSE(test_log_store->has_staged_log());
1482 EXPECT_FALSE(test_log_store->has_unsent_logs());
1483 }
1484
1485 #if BUILDFLAG(IS_CHROMEOS_LACROS)
1486 // ResetClientId is only enabled on certain targets.
// Verifies that when an external client id has been supplied, resetting the
// client id adopts that external id instead of generating a fresh one.
TEST_P(MetricsServiceTestWithFeatures, SetClientIdToExternalId) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  const std::string external_id = "d92ad666-a420-4c73-8718-94311ae2ff5f";

  EXPECT_NE(service.GetClientId(), external_id);

  service.SetExternalClientId(external_id);
  // ResetClientId() normally regenerates the id. With an external client id
  // provided, the service should defer to it rather than creating its own.
  service.ResetClientId();

  EXPECT_EQ(service.GetClientId(), external_id);
}
1504 #endif // BUILDFLAG(IS_CHROMEOS_LACROS)
1505
1506 #if BUILDFLAG(IS_CHROMEOS_ASH)
// Verifies that mounting a user log store before the initial log has been
// collected does not flush the in-progress ongoing log: the first ongoing log
// should land in the alternate (user) log store, not the main one.
TEST_P(MetricsServiceTestWithFeatures,
       OngoingLogNotFlushedBeforeInitialLogWhenUserLogStoreSet) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  MetricsLogStore* test_log_store = service.LogStoreForTest();
  std::unique_ptr<TestUnsentLogStore> alternate_ongoing_log_store =
      InitializeTestLogStoreAndGet();
  // Keep a raw pointer so the store can still be inspected after ownership is
  // transferred to the service via SetUserLogStore() below.
  TestUnsentLogStore* alternate_ongoing_log_store_ptr =
      alternate_ongoing_log_store.get();

  // Baseline: nothing has been collected into the main store yet.
  ASSERT_EQ(0u, test_log_store->initial_log_count());
  ASSERT_EQ(0u, test_log_store->ongoing_log_count());

  service.SetUserLogStore(std::move(alternate_ongoing_log_store));

  // Initial logs should not have been collected so the ongoing log being
  // recorded should not be flushed when a user log store is mounted.
  ASSERT_EQ(0u, test_log_store->initial_log_count());
  ASSERT_EQ(0u, test_log_store->ongoing_log_count());

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log.
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| once, we only need to fast forward by
  // N - |initialization_delay|.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      initialization_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  // When the init task is complete, the first ongoing log should be created
  // in the alternate ongoing log store.
  EXPECT_EQ(0u, test_log_store->initial_log_count());
  EXPECT_EQ(0u, test_log_store->ongoing_log_count());
  EXPECT_EQ(1u, alternate_ongoing_log_store_ptr->size());
}
1557
// Verifies that mounting a user log store after the first ongoing log has
// already been completed flushes the current in-progress log to the main log
// store before switching over.
TEST_P(MetricsServiceTestWithFeatures,
       OngoingLogFlushedAfterInitialLogWhenUserLogStoreSet) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  MetricsLogStore* test_log_store = service.LogStoreForTest();
  std::unique_ptr<TestUnsentLogStore> alternate_ongoing_log_store =
      InitializeTestLogStoreAndGet();

  // Init state.
  ASSERT_EQ(0u, test_log_store->initial_log_count());
  ASSERT_EQ(0u, test_log_store->ongoing_log_count());

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log.
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| once, we only need to fast forward by
  // N - |initialization_delay|.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      initialization_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  // The first ongoing log was closed into the main store.
  ASSERT_EQ(0u, test_log_store->initial_log_count());
  ASSERT_EQ(1u, test_log_store->ongoing_log_count());

  // User log store set post-init.
  service.SetUserLogStore(std::move(alternate_ongoing_log_store));

  // Another log should have been flushed from setting the user log store.
  ASSERT_EQ(0u, test_log_store->initial_log_count());
  ASSERT_EQ(2u, test_log_store->ongoing_log_count());
}
1604
// Verifies that unsetting the user log store before log sending has begun
// discards the current in-progress log instead of flushing it.
TEST_P(MetricsServiceTestWithFeatures,
       OngoingLogDiscardedAfterEarlyUnsetUserLogStore) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();
  // Start() creates the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  MetricsLogStore* main_log_store = service.LogStoreForTest();
  std::unique_ptr<TestUnsentLogStore> user_log_store =
      InitializeTestLogStoreAndGet();

  ASSERT_EQ(0u, main_log_store->initial_log_count());
  ASSERT_EQ(0u, main_log_store->ongoing_log_count());

  service.SetUserLogStore(std::move(user_log_store));

  // Unset the user log store before any logs were sent. Record one histogram
  // sample on each side of the unset so we can tell which deltas were
  // snapshotted into the discarded log.
  base::UmaHistogramBoolean("Test.Before.Histogram", true);
  service.UnsetUserLogStore();
  base::UmaHistogramBoolean("Test.After.Histogram", true);

  // The in-progress log should have been thrown away.
  EXPECT_FALSE(service.GetCurrentLogForTest());

  // Samples emitted before the unset were flushed (their delta is consumed),
  // while the sample emitted after still has an unrecorded delta.
  EXPECT_EQ(0, GetHistogramDeltaTotalCount("Test.Before.Histogram"));
  EXPECT_EQ(1, GetHistogramDeltaTotalCount("Test.After.Histogram"));

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting("Test.Before.Histogram");
  base::StatisticsRecorder::ForgetHistogramForTesting("Test.After.Histogram");
}
#endif  // BUILDFLAG(IS_CHROMEOS_ASH)
1644
1645 } // namespace metrics
1646