1 // Copyright 2014 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "components/metrics/metrics_service.h"
6
7 #include <stdint.h>
8
9 #include <algorithm>
10 #include <memory>
11 #include <string>
12
13 #include "base/containers/contains.h"
14 #include "base/files/file_path.h"
15 #include "base/files/file_util.h"
16 #include "base/files/scoped_temp_dir.h"
17 #include "base/functional/bind.h"
18 #include "base/memory/raw_ptr.h"
19 #include "base/metrics/field_trial.h"
20 #include "base/metrics/histogram_functions.h"
21 #include "base/metrics/histogram_snapshot_manager.h"
22 #include "base/metrics/metrics_hashes.h"
23 #include "base/metrics/statistics_recorder.h"
24 #include "base/metrics/user_metrics.h"
25 #include "base/task/single_thread_task_runner.h"
26 #include "base/test/bind.h"
27 #include "base/test/metrics/histogram_tester.h"
28 #include "base/test/scoped_feature_list.h"
29 #include "base/test/task_environment.h"
30 #include "base/threading/platform_thread.h"
31 #include "build/build_config.h"
32 #include "components/metrics/clean_exit_beacon.h"
33 #include "components/metrics/client_info.h"
34 #include "components/metrics/cloned_install_detector.h"
35 #include "components/metrics/environment_recorder.h"
36 #include "components/metrics/log_decoder.h"
37 #include "components/metrics/metrics_features.h"
38 #include "components/metrics/metrics_log.h"
39 #include "components/metrics/metrics_pref_names.h"
40 #include "components/metrics/metrics_scheduler.h"
41 #include "components/metrics/metrics_state_manager.h"
42 #include "components/metrics/metrics_upload_scheduler.h"
43 #include "components/metrics/stability_metrics_helper.h"
44 #include "components/metrics/test/test_enabled_state_provider.h"
45 #include "components/metrics/test/test_metrics_provider.h"
46 #include "components/metrics/test/test_metrics_service_client.h"
47 #include "components/metrics/unsent_log_store_metrics_impl.h"
48 #include "components/prefs/testing_pref_service.h"
49 #include "components/variations/active_field_trials.h"
50 #include "testing/gtest/include/gtest/gtest.h"
51 #include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
52 #include "third_party/metrics_proto/system_profile.pb.h"
53 #include "third_party/zlib/google/compression_utils.h"
54
55 namespace metrics {
56 namespace {
57
// Name of the list pref that backs TestUnsentLogStore's persisted logs.
const char kTestPrefName[] = "TestPref";
59
// An UnsentLogStore backed by |kTestPrefName| with fixed test-friendly limits
// (min_log_count=3, min_log_bytes=1, max_log_size=0, no signing key, no
// logs-event manager).
class TestUnsentLogStore : public UnsentLogStore {
 public:
  explicit TestUnsentLogStore(PrefService* service)
      : UnsentLogStore(std::make_unique<UnsentLogStoreMetricsImpl>(),
                       service,
                       kTestPrefName,
                       nullptr,
                       /*min_log_count=*/3,
                       /*min_log_bytes=*/1,
                       /*max_log_size=*/0,
                       /*signing_key=*/std::string(),
                       /*logs_event_manager=*/nullptr) {}
  ~TestUnsentLogStore() override = default;

  TestUnsentLogStore(const TestUnsentLogStore&) = delete;
  TestUnsentLogStore& operator=(const TestUnsentLogStore&) = delete;

  // Registers the list pref this store reads and writes.
  static void RegisterPrefs(PrefRegistrySimple* registry) {
    registry->RegisterListPref(kTestPrefName);
  }
};
81
82 // Returns true if |id| is present in |proto|'s collection of FieldTrials.
IsFieldTrialPresent(const SystemProfileProto & proto,const std::string & trial_name,const std::string & group_name)83 bool IsFieldTrialPresent(const SystemProfileProto& proto,
84 const std::string& trial_name,
85 const std::string& group_name) {
86 const variations::ActiveGroupId id =
87 variations::MakeActiveGroupId(trial_name, group_name);
88
89 for (const auto& trial : proto.field_trial()) {
90 if (trial.name_id() == id.name && trial.group_id() == id.group)
91 return true;
92 }
93 return false;
94 }
95
// MetricsService subclass for tests: exposes protected state constants and
// helpers, and records (instead of persisting) the system profile handed to
// SetPersistentSystemProfile().
class TestMetricsService : public MetricsService {
 public:
  TestMetricsService(MetricsStateManager* state_manager,
                     MetricsServiceClient* client,
                     PrefService* local_state)
      : MetricsService(state_manager, client, local_state) {}

  TestMetricsService(const TestMetricsService&) = delete;
  TestMetricsService& operator=(const TestMetricsService&) = delete;

  ~TestMetricsService() override = default;

  // Expose protected members of MetricsService for test assertions.
  using MetricsService::INIT_TASK_DONE;
  using MetricsService::INIT_TASK_SCHEDULED;
  using MetricsService::RecordCurrentEnvironmentHelper;
  using MetricsService::SENDING_LOGS;
  using MetricsService::state;

  // MetricsService:
  // Only records that a profile was provided and whether it was complete;
  // nothing is actually persisted.
  void SetPersistentSystemProfile(const std::string& serialized_proto,
                                  bool complete) override {
    persistent_system_profile_provided_ = true;
    persistent_system_profile_complete_ = complete;
  }

  // True once SetPersistentSystemProfile() has been called.
  bool persistent_system_profile_provided() const {
    return persistent_system_profile_provided_;
  }
  // |complete| from the most recent SetPersistentSystemProfile() call.
  bool persistent_system_profile_complete() const {
    return persistent_system_profile_complete_;
  }

 private:
  bool persistent_system_profile_provided_ = false;
  bool persistent_system_profile_complete_ = false;
};
132
// Convenience MetricsLog whose log type is always MetricsLog::ONGOING_LOG.
class TestMetricsLog : public MetricsLog {
 public:
  TestMetricsLog(const std::string& client_id,
                 int session_id,
                 MetricsServiceClient* client)
      : MetricsLog(client_id, session_id, MetricsLog::ONGOING_LOG, client) {}

  TestMetricsLog(const TestMetricsLog&) = delete;
  TestMetricsLog& operator=(const TestMetricsLog&) = delete;

  ~TestMetricsLog() override = default;
};
145
// Histogram emitted once per OnDidCreateMetricsLog() call by the provider
// below, letting tests count how many logs were created.
const char kOnDidCreateMetricsLogHistogramName[] = "Test.OnDidCreateMetricsLog";

// Provider that records a sample to |kOnDidCreateMetricsLogHistogramName|
// every time a new metrics log is created.
class TestMetricsProviderForOnDidCreateMetricsLog : public TestMetricsProvider {
 public:
  TestMetricsProviderForOnDidCreateMetricsLog() = default;
  ~TestMetricsProviderForOnDidCreateMetricsLog() override = default;

  void OnDidCreateMetricsLog() override {
    base::UmaHistogramBoolean(kOnDidCreateMetricsLogHistogramName, true);
  }
};
157
// Histogram emitted once per ProvideHistograms() call by the provider below,
// letting tests count how many times histograms were provided.
const char kProvideHistogramsHistogramName[] = "Test.ProvideHistograms";

// Provider that records a sample to |kProvideHistogramsHistogramName| from
// ProvideHistograms() and always reports success.
class TestMetricsProviderForProvideHistograms : public TestMetricsProvider {
 public:
  TestMetricsProviderForProvideHistograms() = default;
  ~TestMetricsProviderForProvideHistograms() override = default;

  bool ProvideHistograms() override {
    base::UmaHistogramBoolean(kProvideHistogramsHistogramName, true);
    return true;
  }

  // Deliberately skips TestMetricsProvider's override and calls the root
  // MetricsProvider implementation directly.
  void ProvideCurrentSessionData(
      ChromeUserMetricsExtension* uma_proto) override {
    MetricsProvider::ProvideCurrentSessionData(uma_proto);
  }
};
175
// Variant of TestMetricsProviderForProvideHistograms with a no-op
// OnDidCreateMetricsLog(); used by the ProvideHistogramsEarlyReturn test to
// exercise the path where no histogram is emitted at log-creation time.
class TestMetricsProviderForProvideHistogramsEarlyReturn
    : public TestMetricsProviderForProvideHistograms {
 public:
  TestMetricsProviderForProvideHistogramsEarlyReturn() = default;
  ~TestMetricsProviderForProvideHistogramsEarlyReturn() override = default;

  void OnDidCreateMetricsLog() override {}
};
184
185 class TestIndependentMetricsProvider : public MetricsProvider {
186 public:
187 TestIndependentMetricsProvider() = default;
188 ~TestIndependentMetricsProvider() override = default;
189
190 // MetricsProvider:
HasIndependentMetrics()191 bool HasIndependentMetrics() override {
192 // Only return true the first time this is called (i.e., we only have one
193 // independent log to provide).
194 if (!has_independent_metrics_called_) {
195 has_independent_metrics_called_ = true;
196 return true;
197 }
198 return false;
199 }
ProvideIndependentMetrics(base::OnceCallback<void (bool)> done_callback,ChromeUserMetricsExtension * uma_proto,base::HistogramSnapshotManager * snapshot_manager)200 void ProvideIndependentMetrics(
201 base::OnceCallback<void(bool)> done_callback,
202 ChromeUserMetricsExtension* uma_proto,
203 base::HistogramSnapshotManager* snapshot_manager) override {
204 provide_independent_metrics_called_ = true;
205 uma_proto->set_client_id(123);
206 std::move(done_callback).Run(true);
207 }
208
has_independent_metrics_called() const209 bool has_independent_metrics_called() const {
210 return has_independent_metrics_called_;
211 }
212
provide_independent_metrics_called() const213 bool provide_independent_metrics_called() const {
214 return provide_independent_metrics_called_;
215 }
216
217 private:
218 bool has_independent_metrics_called_ = false;
219 bool provide_independent_metrics_called_ = false;
220 };
221
// Base fixture for MetricsService tests: owns a TestingPrefServiceSimple for
// local state, a mock-time task environment, a temp user-data dir, and a
// lazily created MetricsStateManager. Also provides helpers for decoding and
// inspecting serialized logs.
class MetricsServiceTest : public testing::Test {
 public:
  MetricsServiceTest()
      : enabled_state_provider_(new TestEnabledStateProvider(false, false)) {
    base::SetRecordActionTaskRunner(
        task_environment_.GetMainThreadTaskRunner());
    MetricsService::RegisterPrefs(testing_local_state_.registry());
  }

  MetricsServiceTest(const MetricsServiceTest&) = delete;
  MetricsServiceTest& operator=(const MetricsServiceTest&) = delete;

  ~MetricsServiceTest() override = default;

  void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); }

  // Returns the fixture's MetricsStateManager, creating it on first use.
  // |user_data_dir| and |startup_visibility| only take effect on that first
  // call; later calls return the already-created instance.
  MetricsStateManager* GetMetricsStateManager(
      const base::FilePath& user_data_dir = base::FilePath(),
      StartupVisibility startup_visibility = StartupVisibility::kUnknown) {
    // Lazy-initialize the metrics_state_manager so that it correctly reads the
    // stability state from prefs after tests have a chance to initialize it.
    if (!metrics_state_manager_) {
      metrics_state_manager_ = MetricsStateManager::Create(
          GetLocalState(), enabled_state_provider_.get(), std::wstring(),
          user_data_dir, startup_visibility);
      metrics_state_manager_->InstantiateFieldTrialList();
    }
    return metrics_state_manager_.get();
  }

  // Registers TestUnsentLogStore's pref and returns a fresh store backed by
  // the fixture's local state.
  std::unique_ptr<TestUnsentLogStore> InitializeTestLogStoreAndGet() {
    TestUnsentLogStore::RegisterPrefs(testing_local_state_.registry());
    return std::make_unique<TestUnsentLogStore>(GetLocalState());
  }

  PrefService* GetLocalState() { return &testing_local_state_; }

  // Sets metrics reporting as enabled for testing.
  void EnableMetricsReporting() { SetMetricsReporting(true); }

  // Sets metrics reporting for testing.
  void SetMetricsReporting(bool enabled) {
    enabled_state_provider_->set_consent(enabled);
    enabled_state_provider_->set_enabled(enabled);
  }

  // Finds a histogram with the specified |name_hash| in |histograms|.
  // Returns nullptr when no registered histogram hashes to |name_hash|.
  const base::HistogramBase* FindHistogram(
      const base::StatisticsRecorder::Histograms& histograms,
      uint64_t name_hash) {
    for (const base::HistogramBase* histogram : histograms) {
      if (name_hash == base::HashMetricName(histogram->histogram_name()))
        return histogram;
    }
    return nullptr;
  }

  // Checks whether |uma_log| contains any histograms that are not flagged
  // with kUmaStabilityHistogramFlag. Stability logs should only contain such
  // histograms.
  void CheckForNonStabilityHistograms(
      const ChromeUserMetricsExtension& uma_log) {
    const int kStabilityFlags = base::HistogramBase::kUmaStabilityHistogramFlag;
    const base::StatisticsRecorder::Histograms histograms =
        base::StatisticsRecorder::GetHistograms();
    for (int i = 0; i < uma_log.histogram_event_size(); ++i) {
      const uint64_t hash = uma_log.histogram_event(i).name_hash();

      // Every histogram in the log must exist in the StatisticsRecorder...
      const base::HistogramBase* histogram = FindHistogram(histograms, hash);
      EXPECT_TRUE(histogram) << hash;

      // ...and must carry the stability flag.
      EXPECT_TRUE(histogram->HasFlags(kStabilityFlags)) << hash;
    }
  }

  // Returns the number of samples logged to the specified histogram or 0 if
  // the histogram was not found.
  int GetHistogramSampleCount(const ChromeUserMetricsExtension& uma_log,
                              base::StringPiece histogram_name) {
    const auto histogram_name_hash = base::HashMetricName(histogram_name);
    int samples = 0;
    for (int i = 0; i < uma_log.histogram_event_size(); ++i) {
      const auto& histogram = uma_log.histogram_event(i);
      if (histogram.name_hash() == histogram_name_hash) {
        for (int j = 0; j < histogram.bucket_size(); ++j) {
          const auto& bucket = histogram.bucket(j);
          // Per proto comments, count field not being set means 1 sample.
          samples += (!bucket.has_count() ? 1 : bucket.count());
        }
      }
    }
    return samples;
  }

  // Returns the sampled count of the |kOnDidCreateMetricsLogHistogramName|
  // histogram in the currently staged log in |test_log_store|.
  int GetSampleCountOfOnDidCreateLogHistogram(MetricsLogStore* test_log_store) {
    ChromeUserMetricsExtension log;
    EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &log));
    return GetHistogramSampleCount(log, kOnDidCreateMetricsLogHistogramName);
  }

  // Returns the number of user-action events in the currently staged log in
  // |test_log_store|.
  int GetNumberOfUserActions(MetricsLogStore* test_log_store) {
    ChromeUserMetricsExtension log;
    EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &log));
    return log.user_action_event_size();
  }

  // Path of the per-test temporary user-data directory.
  const base::FilePath user_data_dir_path() { return temp_dir_.GetPath(); }

 protected:
  base::test::TaskEnvironment task_environment_{
      base::test::TaskEnvironment::TimeSource::MOCK_TIME};
  // NOTE(review): the derived fixtures below declare their own private
  // |feature_list_| members that shadow this one — confirm the shadowing is
  // intentional.
  base::test::ScopedFeatureList feature_list_;

 private:
  std::unique_ptr<TestEnabledStateProvider> enabled_state_provider_;
  TestingPrefServiceSimple testing_local_state_;
  std::unique_ptr<MetricsStateManager> metrics_state_manager_;
  base::ScopedTempDir temp_dir_;
};
343
344 class MetricsServiceTestWithFeatures
345 : public MetricsServiceTest,
346 public ::testing::WithParamInterface<bool> {
347 public:
348 MetricsServiceTestWithFeatures() = default;
349 ~MetricsServiceTestWithFeatures() override = default;
350
ShouldClearLogsOnClonedInstall()351 bool ShouldClearLogsOnClonedInstall() { return GetParam(); }
352
SetUp()353 void SetUp() override {
354 MetricsServiceTest::SetUp();
355 std::vector<base::test::FeatureRefAndParams> enabled_features;
356 std::vector<base::test::FeatureRef> disabled_features;
357
358 if (ShouldClearLogsOnClonedInstall()) {
359 enabled_features.emplace_back(
360 features::kMetricsClearLogsOnClonedInstall,
361 /*params=*/std::map<std::string, std::string>());
362 } else {
363 disabled_features.emplace_back(
364 features::kMetricsClearLogsOnClonedInstall);
365 }
366
367 feature_list_.InitWithFeaturesAndParameters(enabled_features,
368 disabled_features);
369 }
370
371 private:
372 base::test::ScopedFeatureList feature_list_;
373 };
374
// Params for MetricsServiceTestWithStartupVisibility: the startup visibility
// to simulate and the clean-exit beacon value expected after startup.
struct StartupVisibilityTestParams {
  metrics::StartupVisibility startup_visibility;
  bool expected_beacon_value;
};
379
380 class MetricsServiceTestWithStartupVisibility
381 : public MetricsServiceTest,
382 public ::testing::WithParamInterface<
383 std::tuple<StartupVisibilityTestParams, bool>> {
384 public:
385 MetricsServiceTestWithStartupVisibility() = default;
386 ~MetricsServiceTestWithStartupVisibility() override = default;
387
ShouldClearLogsOnClonedInstall()388 bool ShouldClearLogsOnClonedInstall() { return std::get<1>(GetParam()); }
389
SetUp()390 void SetUp() override {
391 MetricsServiceTest::SetUp();
392 std::vector<base::test::FeatureRefAndParams> enabled_features;
393 std::vector<base::test::FeatureRef> disabled_features;
394
395 if (ShouldClearLogsOnClonedInstall()) {
396 enabled_features.emplace_back(
397 features::kMetricsClearLogsOnClonedInstall,
398 /*params=*/std::map<std::string, std::string>());
399 } else {
400 disabled_features.emplace_back(
401 features::kMetricsClearLogsOnClonedInstall);
402 }
403
404 feature_list_.InitWithFeaturesAndParameters(enabled_features,
405 disabled_features);
406 }
407
408 private:
409 base::test::ScopedFeatureList feature_list_;
410 };
411
// Provider that activates one field trial from ProvideSystemProfileMetrics()
// and another from ProvideCurrentSessionData(), so tests can verify which
// trials end up recorded in a log's system profile.
class ExperimentTestMetricsProvider : public TestMetricsProvider {
 public:
  explicit ExperimentTestMetricsProvider(
      base::FieldTrial* profile_metrics_trial,
      base::FieldTrial* session_data_trial)
      : profile_metrics_trial_(profile_metrics_trial),
        session_data_trial_(session_data_trial) {}

  ~ExperimentTestMetricsProvider() override = default;

  // Activates |profile_metrics_trial_| while the system profile is recorded.
  void ProvideSystemProfileMetrics(
      SystemProfileProto* system_profile_proto) override {
    TestMetricsProvider::ProvideSystemProfileMetrics(system_profile_proto);
    profile_metrics_trial_->Activate();
  }

  // Activates |session_data_trial_| while session data is recorded.
  void ProvideCurrentSessionData(
      ChromeUserMetricsExtension* uma_proto) override {
    TestMetricsProvider::ProvideCurrentSessionData(uma_proto);
    session_data_trial_->Activate();
  }

 private:
  // Non-owning; the trials must outlive this provider.
  raw_ptr<base::FieldTrial> profile_metrics_trial_;
  raw_ptr<base::FieldTrial> session_data_trial_;
};
438
HistogramExists(base::StringPiece name)439 bool HistogramExists(base::StringPiece name) {
440 return base::StatisticsRecorder::FindHistogram(name) != nullptr;
441 }
442
GetHistogramDeltaTotalCount(base::StringPiece name)443 base::HistogramBase::Count GetHistogramDeltaTotalCount(base::StringPiece name) {
444 return base::StatisticsRecorder::FindHistogram(name)
445 ->SnapshotDelta()
446 ->TotalCount();
447 }
448
449 } // namespace
450
// Run every MetricsServiceTestWithFeatures test with the clear-logs-on-cloned-
// install behavior both enabled and disabled.
INSTANTIATE_TEST_SUITE_P(All, MetricsServiceTestWithFeatures, testing::Bool());
452
// Verifies that successively created logs receive sequential record ids,
// continuing from the persisted kMetricsLogRecordId pref, regardless of log
// type.
TEST_P(MetricsServiceTestWithFeatures, RecordId) {
  EnableMetricsReporting();
  GetMetricsStateManager(user_data_dir_path())->ForceClientIdCreation();

  // Seed the record-id counter so the ids below are predictable.
  GetLocalState()->SetInteger(prefs::kMetricsLogRecordId, 1000);

  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(user_data_dir_path()),
                             &client, GetLocalState());

  auto ongoing_log = service.CreateLogForTesting(MetricsLog::ONGOING_LOG);
  auto stability_log =
      service.CreateLogForTesting(MetricsLog::INITIAL_STABILITY_LOG);
  auto independent_log =
      service.CreateLogForTesting(MetricsLog::INDEPENDENT_LOG);

  // Ids increment by one per created log.
  EXPECT_EQ(1001, ongoing_log->uma_proto()->record_id());
  EXPECT_EQ(1002, stability_log->uma_proto()->record_id());
  EXPECT_EQ(1003, independent_log->uma_proto()->record_id());
}
472
// Verifies that no initial stability log is generated when the previous
// session exited cleanly and no provider requests one.
TEST_P(MetricsServiceTestWithFeatures, InitialStabilityLogAfterCleanShutDown) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();
  // Write a beacon file indicating that Chrome exited cleanly. Note that the
  // crash streak value is arbitrary.
  const base::FilePath beacon_file_path =
      user_data_dir_path().Append(kCleanExitBeaconFilename);
  ASSERT_TRUE(base::WriteFile(
      beacon_file_path, CleanExitBeacon::CreateBeaconFileContentsForTesting(
                            /*exited_cleanly=*/true, /*crash_streak=*/1)));

  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(user_data_dir_path()),
                             &client, GetLocalState());

  // Ownership passes to |service| below; the raw pointer is kept only for
  // assertions.
  TestMetricsProvider* test_provider = new TestMetricsProvider();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();

  // No initial stability log should be generated.
  EXPECT_FALSE(service.has_unsent_logs());

  // Ensure that HasPreviousSessionData() is always called on providers,
  // for consistency, even if other conditions already indicate their presence.
  EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());

  // The test provider should not have been called upon to provide initial
  // stability nor regular stability metrics.
  EXPECT_FALSE(test_provider->provide_initial_stability_metrics_called());
  EXPECT_FALSE(test_provider->provide_stability_metrics_called());

  // As there wasn't an unclean shutdown, no browser crash samples should have
  // been emitted.
  histogram_tester.ExpectBucketCount("Stability.Counts2",
                                     StabilityEventType::kBrowserCrash, 0);
}
511
// Verifies that an initial stability log is generated and persisted as an
// unsent log when a provider reports it has initial stability metrics, even
// though the previous session exited cleanly.
TEST_P(MetricsServiceTestWithFeatures, InitialStabilityLogAtProviderRequest) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();

  // Save an existing system profile to prefs, to correspond to what would be
  // saved from a previous session.
  TestMetricsServiceClient client;
  TestMetricsLog log("0a94430b-18e5-43c8-a657-580f7e855ce1", 1, &client);
  DelegatingProvider delegating_provider;
  TestMetricsService::RecordCurrentEnvironmentHelper(&log, GetLocalState(),
                                                     &delegating_provider);

  // Record stability build time and version from previous session, so that
  // stability metrics (including exited cleanly flag) won't be cleared.
  EnvironmentRecorder(GetLocalState())
      .SetBuildtimeAndVersion(MetricsLog::GetBuildTime(),
                              client.GetVersionString());

  // Write a beacon file indicating that Chrome exited cleanly. Note that the
  // crash streak value is arbitrary.
  const base::FilePath beacon_file_path =
      user_data_dir_path().Append(kCleanExitBeaconFilename);
  ASSERT_TRUE(base::WriteFile(
      beacon_file_path, CleanExitBeacon::CreateBeaconFileContentsForTesting(
                            /*exited_cleanly=*/true, /*crash_streak=*/1)));

  TestMetricsService service(GetMetricsStateManager(user_data_dir_path()),
                             &client, GetLocalState());
  // Add a metrics provider that requests a stability log.
  TestMetricsProvider* test_provider = new TestMetricsProvider();
  test_provider->set_has_initial_stability_metrics(true);
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();

  // The initial stability log should be generated and persisted in unsent logs.
  MetricsLogStore* test_log_store = service.LogStoreForTest();
  EXPECT_TRUE(test_log_store->has_unsent_logs());
  EXPECT_FALSE(test_log_store->has_staged_log());

  // Ensure that HasPreviousSessionData() is always called on providers,
  // for consistency, even if other conditions already indicate their presence.
  EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());

  // The test provider should have been called upon to provide initial
  // stability and regular stability metrics.
  EXPECT_TRUE(test_provider->provide_initial_stability_metrics_called());
  EXPECT_TRUE(test_provider->provide_stability_metrics_called());

  // Stage the log and retrieve it.
  test_log_store->StageNextLog();
  EXPECT_TRUE(test_log_store->has_staged_log());

  ChromeUserMetricsExtension uma_log;
  EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));

  // A stability log carries ids and the system profile, but no user actions,
  // omnibox events, or non-stability histograms.
  EXPECT_TRUE(uma_log.has_client_id());
  EXPECT_TRUE(uma_log.has_session_id());
  EXPECT_TRUE(uma_log.has_system_profile());
  EXPECT_EQ(0, uma_log.user_action_event_size());
  EXPECT_EQ(0, uma_log.omnibox_event_size());
  CheckForNonStabilityHistograms(uma_log);

  // As there wasn't an unclean shutdown, no browser crash samples should have
  // been emitted.
  histogram_tester.ExpectBucketCount("Stability.Counts2",
                                     StabilityEventType::kBrowserCrash, 0);
}
581
// Verifies that a provider's independent log is collected after init, staged
// ahead of the first ongoing log, and does not absorb histograms emitted
// outside it.
TEST_P(MetricsServiceTestWithFeatures, IndependentLogAtProviderRequest) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Create a provider that will have one independent log to provide.
  auto* test_provider = new TestIndependentMetricsProvider();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  // Verify that the independent log provider has not yet been called, and emit
  // a histogram. This histogram should not be put into the independent log.
  EXPECT_FALSE(test_provider->has_independent_metrics_called());
  EXPECT_FALSE(test_provider->provide_independent_metrics_called());
  const std::string test_histogram = "Test.Histogram";
  base::UmaHistogramBoolean(test_histogram, true);

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time by another |initialization_delay|, which is when
  // metrics providers are called to provide independent logs.
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_TRUE(test_provider->has_independent_metrics_called());
  EXPECT_TRUE(test_provider->provide_independent_metrics_called());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log.
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| twice, we only need to fast forward by
  // N - 2 * |initialization_delay|.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      2 * initialization_delay);
  EXPECT_EQ(TestMetricsService::SENDING_LOGS, service.state());

  MetricsLogStore* test_log_store = service.LogStoreForTest();

  // The currently staged log should be the independent log created by the
  // independent log provider. The log should have a client id of 123. It should
  // also not contain |test_histogram|.
  ASSERT_TRUE(test_log_store->has_staged_log());
  ChromeUserMetricsExtension uma_log;
  EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
  EXPECT_EQ(uma_log.client_id(), 123UL);
  EXPECT_EQ(GetHistogramSampleCount(uma_log, test_histogram), 0);

  // Discard the staged log and stage the next one. It should be the first
  // ongoing log.
  test_log_store->DiscardStagedLog();
  ASSERT_TRUE(test_log_store->has_unsent_logs());
  test_log_store->StageNextLog();
  ASSERT_TRUE(test_log_store->has_staged_log());

  // Verify that the first ongoing log contains |test_histogram| (it should not
  // have been put into the independent log).
  EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
  EXPECT_EQ(GetHistogramSampleCount(uma_log, test_histogram), 1);
}
651
// Verifies that OnDidCreateMetricsLog() runs once when the first ongoing log
// is created and once more when the service stops.
TEST_P(MetricsServiceTestWithFeatures, OnDidCreateMetricsLogAtShutdown) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();
  TestMetricsServiceClient client;

  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Register a provider that logs to |kOnDidCreateMetricsLogHistogramName|
  // from OnDidCreateMetricsLog().
  service.RegisterMetricsProvider(
      std::make_unique<TestMetricsProviderForOnDidCreateMetricsLog>());

  service.InitializeMetricsRecordingState();

  // Start() creates the first ongoing log, which triggers one
  // OnDidCreateMetricsLog() call.
  service.Start();
  histogram_tester.ExpectBucketCount(kOnDidCreateMetricsLogHistogramName, true,
                                     1);

  // Stopping the service triggers another OnDidCreateMetricsLog() call to emit
  // histograms.
  service.Stop();
  histogram_tester.ExpectBucketCount(kOnDidCreateMetricsLogHistogramName, true,
                                     2);

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting(
      kOnDidCreateMetricsLogHistogramName);
}
684
// Verifies that ProvideHistograms() is invoked both when a log is created and
// when the current log is staged.
TEST_P(MetricsServiceTestWithFeatures, ProvideHistograms) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();
  TestMetricsServiceClient client;

  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Create a provider that will log to |kProvideHistogramsHistogramName|
  // in ProvideHistograms().
  auto* test_provider = new TestMetricsProviderForProvideHistograms();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();

  // ProvideHistograms() is called in OnDidCreateMetricsLog().
  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 1);

  // Staging the current log triggers a second ProvideHistograms() call.
  service.StageCurrentLogForTest();

  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 2);

  service.Stop();

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting(
      kProvideHistogramsHistogramName);
}
716
// Verifies that when a provider's OnDidCreateMetricsLog() does not emit
// histograms, ProvideHistograms() is instead called when session data is
// collected, and that this fallback resets between logs.
TEST_P(MetricsServiceTestWithFeatures, ProvideHistogramsEarlyReturn) {
  base::HistogramTester histogram_tester;
  EnableMetricsReporting();
  TestMetricsServiceClient client;

  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Create a provider whose OnDidCreateMetricsLog() is a no-op, so nothing is
  // emitted at log creation.
  auto* test_provider =
      new TestMetricsProviderForProvideHistogramsEarlyReturn();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));

  service.InitializeMetricsRecordingState();
  // Start() will create the first ongoing log.
  service.Start();

  // Make sure no histogram is emitted when having an early return.
  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 0);

  service.StageCurrentLogForTest();
  // ProvideHistograms() should be called in ProvideCurrentSessionData() if
  // histograms haven't been emitted.
  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 1);

  // Try another log to make sure emission status is reset between logs.
  service.LogStoreForTest()->DiscardStagedLog();
  service.StageCurrentLogForTest();
  histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 2);

  service.Stop();

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting(
      kProvideHistogramsHistogramName);
}
755
// Run MetricsServiceTestWithStartupVisibility for each startup visibility
// (unknown/background/foreground, with the beacon value expected for each)
// crossed with the clear-logs-on-cloned-install feature state.
INSTANTIATE_TEST_SUITE_P(
    All,
    MetricsServiceTestWithStartupVisibility,
    ::testing::Combine(
        ::testing::Values(
            StartupVisibilityTestParams{
                .startup_visibility = StartupVisibility::kUnknown,
                .expected_beacon_value = true},
            StartupVisibilityTestParams{
                .startup_visibility = StartupVisibility::kBackground,
                .expected_beacon_value = true},
            StartupVisibilityTestParams{
                .startup_visibility = StartupVisibility::kForeground,
                .expected_beacon_value = false}),
        ::testing::Bool()));
771
// Verifies that when the previous session exited uncleanly (simulated via the
// clean-exit beacon file and saved prefs), an initial stability log is
// generated and persisted, the stability-related provider hooks are invoked,
// and the log attributes data to the crashed session's app version.
TEST_P(MetricsServiceTestWithStartupVisibility, InitialStabilityLogAfterCrash) {
  base::HistogramTester histogram_tester;
  PrefService* local_state = GetLocalState();
  EnableMetricsReporting();

  // Write a beacon file indicating that Chrome exited uncleanly. Note that the
  // crash streak value is arbitrary.
  const base::FilePath beacon_file_path =
      user_data_dir_path().Append(kCleanExitBeaconFilename);
  ASSERT_TRUE(base::WriteFile(
      beacon_file_path, CleanExitBeacon::CreateBeaconFileContentsForTesting(
                            /*exited_cleanly=*/false, /*crash_streak=*/1)));

  // Set up prefs to simulate restarting after a crash.

  // Save an existing system profile to prefs, to correspond to what would be
  // saved from a previous session.
  TestMetricsServiceClient client;
  const std::string kCrashedVersion = "4.0.321.0-64-devel";
  client.set_version_string(kCrashedVersion);
  // The GUID here is an arbitrary client id for the simulated prior session.
  TestMetricsLog log("0a94430b-18e5-43c8-a657-580f7e855ce1", 1, &client);
  DelegatingProvider delegating_provider;
  TestMetricsService::RecordCurrentEnvironmentHelper(&log, local_state,
                                                     &delegating_provider);

  // Record stability build time and version from previous session, so that
  // stability metrics (including exited cleanly flag) won't be cleared.
  EnvironmentRecorder(local_state)
      .SetBuildtimeAndVersion(MetricsLog::GetBuildTime(),
                              client.GetVersionString());

  // Switch to the "current" session's version so the test can later check
  // which version each log field is attributed to.
  const std::string kCurrentVersion = "5.0.322.0-64-devel";
  client.set_version_string(kCurrentVersion);

  StartupVisibilityTestParams params = std::get<0>(GetParam());
  TestMetricsService service(
      GetMetricsStateManager(user_data_dir_path(), params.startup_visibility),
      &client, local_state);
  // Add a provider.
  TestMetricsProvider* test_provider = new TestMetricsProvider();
  service.RegisterMetricsProvider(
      std::unique_ptr<MetricsProvider>(test_provider));
  service.InitializeMetricsRecordingState();

  // Verify that Chrome is (or is not) watching for crashes by checking the
  // beacon value.
  std::string beacon_file_contents;
  ASSERT_TRUE(base::ReadFileToString(beacon_file_path, &beacon_file_contents));
  std::string partial_expected_contents;
#if BUILDFLAG(IS_ANDROID)
  // Whether Chrome is watching for crashes after
  // InitializeMetricsRecordingState() depends on the type of Android Chrome
  // session. See the comments in MetricsService::InitializeMetricsState() for
  // more details.
  const std::string beacon_value =
      params.expected_beacon_value ? "true" : "false";
  partial_expected_contents = "exited_cleanly\":" + beacon_value;
#else
  partial_expected_contents = "exited_cleanly\":false";
#endif  // BUILDFLAG(IS_ANDROID)
  EXPECT_TRUE(base::Contains(beacon_file_contents, partial_expected_contents));

  // The initial stability log should be generated and persisted in unsent logs.
  MetricsLogStore* test_log_store = service.LogStoreForTest();
  EXPECT_TRUE(test_log_store->has_unsent_logs());
  EXPECT_FALSE(test_log_store->has_staged_log());

  // Ensure that HasPreviousSessionData() is always called on providers,
  // for consistency, even if other conditions already indicate their presence.
  EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());

  // The test provider should have been called upon to provide initial
  // stability and regular stability metrics.
  EXPECT_TRUE(test_provider->provide_initial_stability_metrics_called());
  EXPECT_TRUE(test_provider->provide_stability_metrics_called());

  // The test provider should have been called when the initial stability log
  // was closed.
  EXPECT_TRUE(test_provider->record_initial_histogram_snapshots_called());

  // Stage the log and retrieve it.
  test_log_store->StageNextLog();
  EXPECT_TRUE(test_log_store->has_staged_log());

  ChromeUserMetricsExtension uma_log;
  EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));

  // A stability log should carry identifiers and a system profile, but no
  // user actions, omnibox events, or non-stability histograms.
  EXPECT_TRUE(uma_log.has_client_id());
  EXPECT_TRUE(uma_log.has_session_id());
  EXPECT_TRUE(uma_log.has_system_profile());
  EXPECT_EQ(0, uma_log.user_action_event_size());
  EXPECT_EQ(0, uma_log.omnibox_event_size());
  CheckForNonStabilityHistograms(uma_log);

  // Verify that the histograms emitted by the test provider made it into the
  // log.
  EXPECT_EQ(GetHistogramSampleCount(uma_log, "TestMetricsProvider.Initial"), 1);
  EXPECT_EQ(GetHistogramSampleCount(uma_log, "TestMetricsProvider.Regular"), 1);

  // The log's contents are attributed to the crashed version, while the log
  // itself was written by the current version.
  EXPECT_EQ(kCrashedVersion, uma_log.system_profile().app_version());
  EXPECT_EQ(kCurrentVersion,
            uma_log.system_profile().log_written_by_app_version());

  histogram_tester.ExpectBucketCount("Stability.Counts2",
                                     StabilityEventType::kBrowserCrash, 1);
}
878
TEST_P(MetricsServiceTestWithFeatures,
       InitialLogsHaveOnDidCreateMetricsLogHistograms) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Register a provider that emits to |kOnDidCreateMetricsLogHistogramName|
  // from OnDidCreateMetricsLog().
  auto provider = std::make_unique<TestMetricsProviderForOnDidCreateMetricsLog>();
  auto* provider_ptr = provider.get();
  service.RegisterMetricsProvider(std::move(provider));

  service.InitializeMetricsRecordingState();
  // The first ongoing log is created by Start().
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  // Advance the clock by |init_delay|, at which point the pending init tasks
  // run.
  const base::TimeDelta init_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(init_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Advance until the MetricsRotationScheduler fires for the first time,
  // which closes the first ongoing log; the provider must have been called
  // while closing it. The first log is only created
  // GetInitialIntervalSeconds() after start, and |init_delay| of that has
  // already elapsed, so only the difference remains.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      init_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  EXPECT_TRUE(provider_ptr->record_histogram_snapshots_called());

  MetricsLogStore* log_store = service.LogStoreForTest();

  // The next staged log is the first ongoing log; it must hold exactly one
  // sample in |kOnDidCreateMetricsLogHistogramName|.
  log_store->StageNextLog();
  EXPECT_EQ(1, GetSampleCountOfOnDidCreateLogHistogram(log_store));

  // Drop the staged log, then close and stage the second "ongoing log".
  // It must again hold exactly one sample, and the provider must have been
  // invoked while closing the new log.
  provider_ptr->set_record_histogram_snapshots_called(false);
  log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(1, GetSampleCountOfOnDidCreateLogHistogram(log_store));
  EXPECT_TRUE(provider_ptr->record_histogram_snapshots_called());

  // Repeat once more for good measure.
  provider_ptr->set_record_histogram_snapshots_called(false);
  log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(1, GetSampleCountOfOnDidCreateLogHistogram(log_store));
  EXPECT_TRUE(provider_ptr->record_histogram_snapshots_called());

  service.Stop();

  // Remove the test histogram from the global recorder.
  base::StatisticsRecorder::ForgetHistogramForTesting(
      kOnDidCreateMetricsLogHistogramName);
}
946
TEST_P(MetricsServiceTestWithFeatures, MarkCurrentHistogramsAsReported) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  static constexpr char kBeforeHistogram[] = "Test.Before.Histogram";
  static constexpr char kAfterHistogram[] = "Test.After.Histogram";

  // Record a sample to |kBeforeHistogram| prior to marking.
  ASSERT_FALSE(HistogramExists(kBeforeHistogram));
  base::UmaHistogramBoolean(kBeforeHistogram, true);
  ASSERT_TRUE(HistogramExists(kBeforeHistogram));

  // Flag all histogram data collected so far -- in particular the
  // |kBeforeHistogram| sample -- as already reported.
  service.MarkCurrentHistogramsAsReported();

  // Record a sample to |kAfterHistogram| after marking.
  ASSERT_FALSE(HistogramExists(kAfterHistogram));
  base::UmaHistogramBoolean(kAfterHistogram, true);
  ASSERT_TRUE(HistogramExists(kAfterHistogram));

  // The pre-marking sample must be absent from the next snapshot...
  EXPECT_EQ(0, GetHistogramDeltaTotalCount(kBeforeHistogram));
  // ...whereas the post-marking sample must still be included.
  EXPECT_EQ(1, GetHistogramDeltaTotalCount(kAfterHistogram));

  // Remove the test histograms from the global recorder.
  base::StatisticsRecorder::ForgetHistogramForTesting(kBeforeHistogram);
  base::StatisticsRecorder::ForgetHistogramForTesting(kAfterHistogram);
}
978
TEST_P(MetricsServiceTestWithFeatures, LogHasUserActions) {
  // Verifies that user actions land in UMA logs. The very first log is
  // checked explicitly -- a behavior that was buggy in the past -- followed by
  // subsequent logs carrying one, zero, and two actions.
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();

  // An initial log is created by Start().
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  base::RecordAction(base::UserMetricsAction("TestAction"));
  base::RecordAction(base::UserMetricsAction("TestAction"));
  base::RecordAction(base::UserMetricsAction("DifferentAction"));

  // Advance the clock by |init_delay|, at which point the pending init tasks
  // run.
  const base::TimeDelta init_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(init_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Advance until the MetricsRotationScheduler fires for the first time,
  // completing the first ongoing log. The first log is only created
  // GetInitialIntervalSeconds() after start, and |init_delay| of that has
  // already elapsed, so only the difference remains.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      init_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());

  MetricsLogStore* log_store = service.LogStoreForTest();

  // The next staged log is the initial metrics log; it must carry the three
  // actions recorded above.
  log_store->StageNextLog();
  EXPECT_EQ(3, GetNumberOfUserActions(log_store));

  // A subsequent log with a single action.
  base::RecordAction(base::UserMetricsAction("TestAction"));
  log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(1, GetNumberOfUserActions(log_store));

  // A log carrying no actions at all.
  log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(0, GetNumberOfUserActions(log_store));

  // And finally a log carrying a couple of actions.
  base::RecordAction(base::UserMetricsAction("TestAction"));
  base::RecordAction(base::UserMetricsAction("TestAction"));
  log_store->DiscardStagedLog();
  service.StageCurrentLogForTest();
  EXPECT_EQ(2, GetNumberOfUserActions(log_store));
}
1040
TEST_P(MetricsServiceTestWithFeatures, FirstLogCreatedBeforeUnsentLogsSent) {
  // Checks that the first ongoing log is created and serialized before unsent
  // logs from a past session start being sent. The past session is simulated
  // by injecting a fake ongoing log into the MetricsLogStore.
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();
  // The first ongoing log is created by Start().
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  MetricsLogStore* log_store = service.LogStoreForTest();

  // Seed the store with a fake log entry. Its string content is never
  // deserialized to proto, so arbitrary bytes suffice.
  ASSERT_EQ(0u, log_store->initial_log_count());
  ASSERT_EQ(0u, log_store->ongoing_log_count());
  log_store->StoreLog("blah_blah", MetricsLog::ONGOING_LOG, LogMetadata(),
                      MetricsLogsEventManager::CreateReason::kUnknown);
  // |initial_log_count()| counts initial *stability* logs, so the injected
  // log registers as an ongoing log (matching its type).
  ASSERT_EQ(0u, log_store->initial_log_count());
  ASSERT_EQ(1u, log_store->ongoing_log_count());

  // Advance the clock by |init_delay|, at which point the pending init tasks
  // run.
  const base::TimeDelta init_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(init_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Advance until the MetricsRotationScheduler fires for the first time,
  // completing the first ongoing log. The first log is only created
  // GetInitialIntervalSeconds() after start, and |init_delay| of that has
  // already elapsed, so only the difference remains.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      init_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  // Once the init task completes, the first ongoing log should have been
  // created and appended to the ongoing logs alongside the injected one.
  EXPECT_EQ(0u, log_store->initial_log_count());
  EXPECT_EQ(2u, log_store->ongoing_log_count());
}
1089
TEST_P(MetricsServiceTestWithFeatures,
       MetricsProviderOnRecordingDisabledCalledOnInitialStop) {
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  auto provider = std::make_unique<TestMetricsProvider>();
  auto* provider_ptr = provider.get();
  service.RegisterMetricsProvider(std::move(provider));

  service.InitializeMetricsRecordingState();
  service.Stop();

  // Even when the service is stopped without ever starting, providers must be
  // told that recording is disabled.
  EXPECT_TRUE(provider_ptr->on_recording_disabled_called());
}
1105
TEST_P(MetricsServiceTestWithFeatures, MetricsProvidersInitialized) {
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  auto provider = std::make_unique<TestMetricsProvider>();
  auto* provider_ptr = provider.get();
  service.RegisterMetricsProvider(std::move(provider));

  service.InitializeMetricsRecordingState();

  // Initializing the recording state must call Init() on registered
  // providers.
  EXPECT_TRUE(provider_ptr->init_called());
}
1119
1120 // Verify that FieldTrials activated by a MetricsProvider are reported by the
1121 // FieldTrialsProvider.
TEST_P(MetricsServiceTestWithFeatures, ActiveFieldTrialsReported) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  // Create two FieldTrials for the test provider to activate.
  const std::string coffee_trial_name = "CoffeeExperiment";
  const std::string coffee_group_name = "Free";
  base::FieldTrial* coffee_trial = base::FieldTrialList::CreateFieldTrial(
      coffee_trial_name, coffee_group_name);

  const std::string donut_trial_name = "DonutExperiment";
  const std::string donut_group_name = "MapleBacon";
  base::FieldTrial* donut_trial = base::FieldTrialList::CreateFieldTrial(
      donut_trial_name, donut_group_name);

  service.RegisterMetricsProvider(
      std::make_unique<ExperimentTestMetricsProvider>(coffee_trial,
                                                      donut_trial));

  service.InitializeMetricsRecordingState();
  service.Start();
  service.StageCurrentLogForTest();

  MetricsLogStore* log_store = service.LogStoreForTest();
  ChromeUserMetricsExtension uma_log;
  EXPECT_TRUE(DecodeLogDataToProto(log_store->staged_log(), &uma_log));

  // Both trials activated by the provider must appear in the reported system
  // profile.
  EXPECT_TRUE(IsFieldTrialPresent(uma_log.system_profile(), coffee_trial_name,
                                  coffee_group_name));
  EXPECT_TRUE(IsFieldTrialPresent(uma_log.system_profile(), donut_trial_name,
                                  donut_group_name));
}
1157
TEST_P(MetricsServiceTestWithFeatures,
       SystemProfileDataProvidedOnEnableRecording) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  auto provider = std::make_unique<TestMetricsProvider>();
  auto* provider_ptr = provider.get();
  service.RegisterMetricsProvider(std::move(provider));

  service.InitializeMetricsRecordingState();

  // Neither the provider hook nor the persistent system profile should be
  // touched before the service starts.
  EXPECT_FALSE(provider_ptr->provide_system_profile_metrics_called());
  EXPECT_FALSE(service.persistent_system_profile_provided());

  service.Start();

  // Start() must invoke ProvideSystemProfileMetrics() and record a
  // persistent system profile that is not yet marked complete.
  EXPECT_TRUE(provider_ptr->provide_system_profile_metrics_called());
  EXPECT_TRUE(service.persistent_system_profile_provided());
  EXPECT_FALSE(service.persistent_system_profile_complete());
}
1182
1183 // Verify that the two separate MetricsSchedulers (MetricsRotationScheduler and
1184 // MetricsUploadScheduler) function together properly.
TEST_P(MetricsServiceTestWithFeatures, SplitRotation) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());
  service.InitializeMetricsRecordingState();
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log. The independent-metrics upload job
  // will be started and always be a task. This should also mark the rotation
  // scheduler as idle, so that the next time we attempt to create a log, we
  // return early (and don't create a log).
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| once, we only need to fast forward by
  // N - |initialization_delay|.
  MetricsLogStore* log_store = service.LogStoreForTest();
  EXPECT_FALSE(log_store->has_unsent_logs());
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      initialization_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  EXPECT_TRUE(log_store->has_unsent_logs());
  EXPECT_EQ(1U, log_store->ongoing_log_count());

  // There should be three (delayed) tasks: one for querying independent logs
  // from metrics providers, one for uploading the unsent log, and one for
  // creating the next log.
  EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());

  // Fast forward the time so that the upload loop starts uploading logs.
  base::TimeDelta unsent_log_interval =
      MetricsUploadScheduler::GetUnsentLogsInterval();
  task_environment_.FastForwardBy(unsent_log_interval);
  EXPECT_TRUE(client.uploader()->is_uploading());
  // There should be two (delayed) tasks: one for querying independent logs from
  // metrics providers, and one for creating the next log. I.e., the task to
  // upload a log should be running, and should not be in the task queue
  // anymore. The uploading of this log will only be completed later on in order
  // to simulate an edge case here.
  EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());

  // Fast forward the time so that the task to create another log is run. This
  // time, however, it should return early due to being idle (i.e., not create a
  // log), and it should not post another task to create another log. I.e.,
  // there should only be one (delayed) task: one for querying independent logs
  // from metrics providers.
  // Note: The log is only created after |rotation_scheduler_interval| seconds,
  // and since we already fast forwarded by |unsent_log_interval| once, we only
  // need to fast forward by
  // |rotation_scheduler_interval| - |unsent_log_interval|.
  base::TimeDelta rotation_scheduler_interval = client.GetUploadInterval();
  task_environment_.FastForwardBy(rotation_scheduler_interval -
                                  unsent_log_interval);
  EXPECT_EQ(1U, log_store->ongoing_log_count());
  EXPECT_EQ(1U, task_environment_.GetPendingMainThreadTaskCount());

  // Simulate completing the upload. Since there is no other log to be uploaded,
  // no task should be re-posted. I.e., there should only be one (delayed)
  // task: one for querying independent logs from metrics providers.
  client.uploader()->CompleteUpload(200);
  EXPECT_FALSE(client.uploader()->is_uploading());
  EXPECT_FALSE(log_store->has_unsent_logs());
  EXPECT_EQ(1U, task_environment_.GetPendingMainThreadTaskCount());

  // Simulate interacting with the browser, which should 1) set the rotation
  // scheduler to not idle, 2) queue a task to upload the next log (if there is
  // one), and 3) queue a task to create the next log. I.e., there should be
  // three (delayed) tasks: one for querying independent logs from metrics
  // providers, one for uploading an unsent log, and one for creating the next
  // log.
  service.OnApplicationNotIdle();
  EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());

  // We now simulate a more common scenario.

  // Fast forward the time so that the task to upload a log runs. Since there
  // should be no logs, it should return early, and not re-post a task. I.e.,
  // there should be two tasks: one for querying independent logs from metrics
  // providers, and one for creating the next log.
  task_environment_.FastForwardBy(unsent_log_interval);
  EXPECT_FALSE(client.uploader()->is_uploading());
  EXPECT_FALSE(log_store->has_unsent_logs());
  EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());

  // Fast forward the time so that the next log is created. It should re-post
  // a task to create a new log, and should also re-start the upload scheduler.
  // I.e., there should be three (delayed) tasks: one for querying independent
  // logs from metrics providers, one for uploading an unsent log, and one for
  // creating the next log.
  // Note: The log is only created after |rotation_scheduler_interval| seconds,
  // and since we already fast forwarded by |unsent_log_interval| once, we only
  // need to fast forward by
  // |rotation_scheduler_interval| - |unsent_log_interval|.
  task_environment_.FastForwardBy(rotation_scheduler_interval -
                                  unsent_log_interval);
  EXPECT_TRUE(log_store->has_unsent_logs());
  EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());

  // Fast forward the time so that the task to upload a log runs.
  task_environment_.FastForwardBy(unsent_log_interval);
  EXPECT_TRUE(client.uploader()->is_uploading());
  // There should be two (delayed) tasks: one for querying independent logs from
  // metrics providers, and one for creating the next log. I.e., the task to
  // upload a log should be running, and should not be in the task queue
  // anymore.
  EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());

  // Simulate completing the upload. However, before doing so, add a dummy log
  // in order to test that when the upload task completes, if it detects another
  // log, it will re-post a task to upload the next log. I.e., after uploading
  // the log, there should be three (delayed) tasks: one for querying
  // independent logs from metrics providers, one for uploading an unsent log,
  // and one for creating the next log.
  log_store->StoreLog("dummy log", MetricsLog::LogType::ONGOING_LOG,
                      LogMetadata(),
                      MetricsLogsEventManager::CreateReason::kUnknown);
  EXPECT_EQ(2U, log_store->ongoing_log_count());
  client.uploader()->CompleteUpload(200);
  EXPECT_FALSE(client.uploader()->is_uploading());
  EXPECT_EQ(1U, log_store->ongoing_log_count());
  EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());

  // Fast forward the time so that the task to upload a log runs.
  task_environment_.FastForwardBy(unsent_log_interval);
  EXPECT_TRUE(client.uploader()->is_uploading());
  // There should be two (delayed) tasks: one for querying independent logs from
  // metrics providers, and one for creating the next log. I.e., the task to
  // upload a log should be running, and should not be in the task queue
  // anymore.
  EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());

  // Simulate completing the upload. Since there is no other log to be uploaded,
  // no task should be posted. I.e., there should only be two (delayed) tasks:
  // one for querying independent logs from metrics providers, and one for
  // creating the next log.
  client.uploader()->CompleteUpload(200);
  EXPECT_FALSE(client.uploader()->is_uploading());
  EXPECT_FALSE(log_store->has_unsent_logs());
  EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());

  // Fast forward the time so that the task to create another log is run. It
  // should return early due to being idle (i.e., not create a log), and it
  // should not post another task to create another log. I.e., there should only
  // be one (delayed) task: one for querying independent logs from metrics
  // providers.
  // Note: The log is only created after |rotation_scheduler_interval| seconds,
  // and since we already fast forwarded by |unsent_log_interval| twice, we only
  // need to fast forward by
  // |rotation_scheduler_interval| - 2 * |unsent_log_interval|.
  task_environment_.FastForwardBy(rotation_scheduler_interval -
                                  2 * unsent_log_interval);
  EXPECT_FALSE(log_store->has_unsent_logs());
  EXPECT_EQ(1U, task_environment_.GetPendingMainThreadTaskCount());
}
1347
// Verifies that the "browser last live" timestamp in local state is written
// periodically once StartUpdatingLastLiveTimestamp() is called, and that each
// update schedules the next one.
TEST_P(MetricsServiceTestWithFeatures, LastLiveTimestamp) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  base::Time initial_last_live_time =
      GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp);

  service.InitializeMetricsRecordingState();
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  // Fast forward the time by |initialization_delay|, which is when the pending
  // init tasks will run.
  base::TimeDelta initialization_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(initialization_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Fast forward the time until the MetricsRotationScheduler first runs, which
  // should complete the first ongoing log.
  // Note: The first log is only created after N = GetInitialIntervalSeconds()
  // seconds since the start, and since we already fast forwarded by
  // |initialization_delay| once, we only need to fast forward by
  // N - |initialization_delay|.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      initialization_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  // Snapshot the task count so the assertion below can detect exactly one
  // newly scheduled update task.
  size_t num_pending_tasks = task_environment_.GetPendingMainThreadTaskCount();

  service.StartUpdatingLastLiveTimestamp();

  // Starting the update sequence should not write anything, but should
  // set up for a later write.
  EXPECT_EQ(
      initial_last_live_time,
      GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp));
  EXPECT_EQ(num_pending_tasks + 1,
            task_environment_.GetPendingMainThreadTaskCount());

  // Fast forward the time so that the task to update the "last alive timestamp"
  // runs.
  task_environment_.FastForwardBy(service.GetUpdateLastAliveTimestampDelay());

  // Verify that the time has updated in local state.
  base::Time updated_last_live_time =
      GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp);
  EXPECT_LT(initial_last_live_time, updated_last_live_time);

  // Double check that an update was scheduled again.
  task_environment_.FastForwardBy(service.GetUpdateLastAliveTimestampDelay());
  EXPECT_LT(
      updated_last_live_time,
      GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp));
}
1405
TEST_P(MetricsServiceTestWithFeatures, EnablementObserverNotification) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());
  service.InitializeMetricsRecordingState();

  // Captures the most recent enablement notification, if any.
  absl::optional<bool> last_notification;
  auto subscription = service.AddEnablementObserver(base::BindLambdaForTesting(
      [&last_notification](bool enabled) { last_notification = enabled; }));

  // Starting the service must notify the observer that reporting is enabled.
  service.Start();
  ASSERT_TRUE(last_notification.has_value());
  EXPECT_TRUE(last_notification.value());

  last_notification.reset();

  // Stopping it must notify the observer that reporting is disabled.
  service.Stop();
  ASSERT_TRUE(last_notification.has_value());
  EXPECT_FALSE(last_notification.value());
}
1429
1430 // Verifies that when a cloned install is detected, logs are purged.
TEST_P(MetricsServiceTestWithFeatures, PurgeLogsOnClonedInstallDetected) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());
  service.InitializeMetricsRecordingState();

  // Seed the log store with a staged log, an unstaged ongoing log, a
  // stability log, and a log in an alternate ongoing log store, so that every
  // flavor of stored log can be checked after clone detection.
  MetricsLogStore* log_store = service.LogStoreForTest();
  log_store->StoreLog("dummy log data", MetricsLog::ONGOING_LOG, LogMetadata(),
                      MetricsLogsEventManager::CreateReason::kUnknown);
  log_store->StageNextLog();
  log_store->StoreLog("more dummy log data", MetricsLog::ONGOING_LOG,
                      LogMetadata(),
                      MetricsLogsEventManager::CreateReason::kUnknown);
  log_store->StoreLog("dummy stability log", MetricsLog::INITIAL_STABILITY_LOG,
                      LogMetadata(),
                      MetricsLogsEventManager::CreateReason::kUnknown);
  log_store->SetAlternateOngoingLogStore(InitializeTestLogStoreAndGet());
  log_store->StoreLog("dummy log for alternate ongoing log store",
                      MetricsLog::ONGOING_LOG, LogMetadata(),
                      MetricsLogsEventManager::CreateReason::kUnknown);
  EXPECT_TRUE(log_store->has_staged_log());
  EXPECT_TRUE(log_store->has_unsent_logs());

  ClonedInstallDetector* detector =
      GetMetricsStateManager()->cloned_install_detector_for_testing();

  static constexpr char kTestRawId[] = "test";
  // Hashed machine id for |kTestRawId|.
  static constexpr int kTestHashedId = 2216819;

  // First store the matching hashed id, so no clone is detected.
  GetLocalState()->SetInteger(prefs::kMetricsMachineId, kTestHashedId);
  detector->SaveMachineId(GetLocalState(), kTestRawId);
  // All of the logs must still be present.
  EXPECT_TRUE(log_store->has_staged_log());
  EXPECT_TRUE(log_store->has_unsent_logs());

  // Now store a mismatched hashed id, which signals a cloned install.
  GetLocalState()->SetInteger(prefs::kMetricsMachineId, kTestHashedId + 1);
  detector->SaveMachineId(GetLocalState(), kTestRawId);
  // The logs should have been purged iff the
  // |kMetricsClearLogsOnClonedInstall| feature is enabled.
  const bool logs_kept = !ShouldClearLogsOnClonedInstall();
  EXPECT_EQ(logs_kept, log_store->has_staged_log());
  EXPECT_EQ(logs_kept, log_store->has_unsent_logs());
}
1484
1485 #if BUILDFLAG(IS_CHROMEOS_LACROS)
1486 // ResetClientId is only enabled on certain targets.
TEST_P(MetricsServiceTestWithFeatures, SetClientIdToExternalId) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  const std::string external_id = "d92ad666-a420-4c73-8718-94311ae2ff5f";

  // The service should not already be using the external id.
  EXPECT_NE(service.GetClientId(), external_id);

  service.SetExternalClientId(external_id);
  // Resetting normally regenerates the client id; when an external client id
  // has been provided, the service should adopt that id instead of minting a
  // fresh one.
  service.ResetClientId();

  EXPECT_EQ(service.GetClientId(), external_id);
}
1504 #endif // BUILDFLAG(IS_CHROMEOS_LACROS)
1505
1506 #if BUILDFLAG(IS_CHROMEOS_ASH)
TEST_P(MetricsServiceTestWithFeatures,
       OngoingLogNotFlushedBeforeInitialLogWhenUserLogStoreSet) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();
  // Start() opens the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  MetricsLogStore* log_store = service.LogStoreForTest();
  std::unique_ptr<TestUnsentLogStore> user_log_store =
      InitializeTestLogStoreAndGet();
  // Keep a raw handle so the store can be inspected after ownership moves.
  TestUnsentLogStore* user_log_store_ptr = user_log_store.get();

  ASSERT_EQ(0u, log_store->initial_log_count());
  ASSERT_EQ(0u, log_store->ongoing_log_count());

  service.SetUserLogStore(std::move(user_log_store));

  // No initial log has been collected yet, so mounting the user log store
  // must not flush the ongoing log that is currently being recorded.
  ASSERT_EQ(0u, log_store->initial_log_count());
  ASSERT_EQ(0u, log_store->ongoing_log_count());

  // Advance the clock past |init_delay|, at which point the pending init
  // tasks run.
  const base::TimeDelta init_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(init_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Advance until the MetricsRotationScheduler fires for the first time,
  // which closes the first ongoing log. That happens
  // GetInitialIntervalSeconds() after start; since |init_delay| has already
  // elapsed, only the remainder is needed.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      init_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  // With init complete, the first ongoing log should land in the alternate
  // (user) log store rather than in the default store.
  EXPECT_EQ(0u, log_store->initial_log_count());
  EXPECT_EQ(0u, log_store->ongoing_log_count());
  EXPECT_EQ(1u, user_log_store_ptr->size());
}
1557
TEST_P(MetricsServiceTestWithFeatures,
       OngoingLogFlushedAfterInitialLogWhenUserLogStoreSet) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();
  // Start() opens the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  MetricsLogStore* log_store = service.LogStoreForTest();
  std::unique_ptr<TestUnsentLogStore> user_log_store =
      InitializeTestLogStoreAndGet();

  // Nothing has been closed into the store yet.
  ASSERT_EQ(0u, log_store->initial_log_count());
  ASSERT_EQ(0u, log_store->ongoing_log_count());

  // Advance the clock past |init_delay|, at which point the pending init
  // tasks run.
  const base::TimeDelta init_delay = service.GetInitializationDelay();
  task_environment_.FastForwardBy(init_delay);
  EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());

  // Advance until the MetricsRotationScheduler fires for the first time,
  // which closes the first ongoing log. That happens
  // GetInitialIntervalSeconds() after start; since |init_delay| has already
  // elapsed, only the remainder is needed.
  task_environment_.FastForwardBy(
      base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
      init_delay);
  ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
  ASSERT_EQ(0u, log_store->initial_log_count());
  ASSERT_EQ(1u, log_store->ongoing_log_count());

  // Mount the user log store only after initialization has finished.
  service.SetUserLogStore(std::move(user_log_store));

  // Mounting the store should have flushed another ongoing log.
  ASSERT_EQ(0u, log_store->initial_log_count());
  ASSERT_EQ(2u, log_store->ongoing_log_count());
}
1604
TEST_P(MetricsServiceTestWithFeatures,
       OngoingLogDiscardedAfterEarlyUnsetUserLogStore) {
  EnableMetricsReporting();
  TestMetricsServiceClient client;
  TestMetricsService service(GetMetricsStateManager(), &client,
                             GetLocalState());

  service.InitializeMetricsRecordingState();
  // Start() opens the first ongoing log.
  service.Start();
  ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());

  MetricsLogStore* log_store = service.LogStoreForTest();
  std::unique_ptr<TestUnsentLogStore> user_log_store =
      InitializeTestLogStoreAndGet();

  ASSERT_EQ(0u, log_store->initial_log_count());
  ASSERT_EQ(0u, log_store->ongoing_log_count());

  service.SetUserLogStore(std::move(user_log_store));

  // Unset the user log store before any logs have been sent. Record one
  // histogram sample on each side of the unset so we can tell which samples
  // were flushed along with it.
  base::UmaHistogramBoolean("Test.Before.Histogram", true);
  service.UnsetUserLogStore();
  base::UmaHistogramBoolean("Test.After.Histogram", true);

  // The in-progress log should have been thrown away.
  EXPECT_FALSE(service.GetCurrentLogForTest());

  // Samples recorded before the unset were flushed (their delta has been
  // consumed), while samples recorded after it are still pending.
  EXPECT_EQ(0, GetHistogramDeltaTotalCount("Test.Before.Histogram"));
  EXPECT_EQ(1, GetHistogramDeltaTotalCount("Test.After.Histogram"));

  // Clean up histograms.
  base::StatisticsRecorder::ForgetHistogramForTesting("Test.Before.Histogram");
  base::StatisticsRecorder::ForgetHistogramForTesting("Test.After.Histogram");
}
#endif  // BUILDFLAG(IS_CHROMEOS_ASH)
1644
1645 } // namespace metrics
1646