// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <stdint.h>

#include <memory>
#include <vector>

#include "base/bind_helpers.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/test_io_thread.h"
#include "base/test/trace_event_analyzer.h"
#include "base/threading/platform_thread.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_config_memory_test_util.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using testing::_;
using testing::AnyNumber;
using testing::AtMost;
using testing::Between;
using testing::Invoke;
using testing::Return;

namespace base {
namespace trace_event {

// GTest matchers for MemoryDumpRequestArgs arguments.
MATCHER(IsDetailedDump, "") {
  return arg.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
}

MATCHER(IsLightDump, "") {
  return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
}

MATCHER(IsBackgroundDump, "") {
  return arg.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND;
}

namespace {

const char* kMDPName = "TestDumpProvider";
const char* kWhitelistedMDPName = "WhitelistedTestDumpProvider";
const char* const kTestMDPWhitelist[] = {kWhitelistedMDPName, nullptr};

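// Helpers that register a provider with the global MemoryDumpManager. They
// temporarily un-ignore registrations, so the call takes effect even though
// the test fixture ignores registrations by default.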
void RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options,
    const char* name = kMDPName) {
  MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
  mdm->set_dumper_registrations_ignored_for_testing(false);
  mdm->RegisterDumpProvider(mdp, name, std::move(task_runner), options);
  mdm->set_dumper_registrations_ignored_for_testing(true);
}

void RegisterDumpProvider(MemoryDumpProvider* mdp) {
  RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
}

void RegisterDumpProviderWithSequencedTaskRunner(
    MemoryDumpProvider* mdp,
    scoped_refptr<base::SequencedTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options) {
  MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
  mdm->set_dumper_registrations_ignored_for_testing(false);
  mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
                                                   options);
  mdm->set_dumper_registrations_ignored_for_testing(true);
}

void OnTraceDataCollected(Closure quit_closure,
                          trace_event::TraceResultBuffer* buffer,
                          const scoped_refptr<RefCountedString>& json,
                          bool has_more_events) {
  buffer->AddFragment(json->data());
  if (!has_more_events)
    quit_closure.Run();
}

}  // namespace

// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
// requests locally to the MemoryDumpManager instead of performing IPC dances.
class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
 public:
  MemoryDumpManagerDelegateForTesting() {
    ON_CALL(*this, RequestGlobalMemoryDump(_, _))
        .WillByDefault(Invoke(
            this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
  }

  MOCK_METHOD2(RequestGlobalMemoryDump,
               void(const MemoryDumpRequestArgs& args,
                    const MemoryDumpCallback& callback));

  uint64_t GetTracingProcessId() const override {
    NOTREACHED();
    return MemoryDumpManager::kInvalidTracingProcessId;
  }

  // Promote the CreateProcessDump to public so it can be used by test fixtures.
  using MemoryDumpManagerDelegate::CreateProcessDump;
};

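// Mock dump provider used throughout the tests below. Its default
// OnMemoryDump() action verifies that a non-null |session_state| is attached
// to the ProcessMemoryDump. The destructor can optionally be mocked (see
// |enable_mock_destructor|) to check deletion in the
// UnregisterAndDeleteDumpProviderSoon tests.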
class MockMemoryDumpProvider : public MemoryDumpProvider {
 public:
  MOCK_METHOD0(Destructor, void());
  MOCK_METHOD2(OnMemoryDump,
               bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));

  MockMemoryDumpProvider() : enable_mock_destructor(false) {
    ON_CALL(*this, OnMemoryDump(_, _))
        .WillByDefault(Invoke([](const MemoryDumpArgs&,
                                 ProcessMemoryDump* pmd) -> bool {
          // |session_state| should not be null under any circumstances when
          // invoking a memory dump. The problem might arise in race conditions
          // like crbug.com/600570 .
          EXPECT_TRUE(pmd->session_state().get() != nullptr);
          return true;
        }));
  }
  ~MockMemoryDumpProvider() override {
    if (enable_mock_destructor)
      Destructor();
  }

  bool enable_mock_destructor;
};

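// SequencedTaskRunner that counts posted tasks and can be disabled via
// set_enabled(false), making PostDelayedTask() fail to simulate a task runner
// that no longer accepts tasks.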
class TestSequencedTaskRunner : public SequencedTaskRunner {
 public:
  TestSequencedTaskRunner()
      : worker_pool_(
            new SequencedWorkerPool(2 /* max_threads */, "Test Task Runner")),
        enabled_(true),
        num_of_post_tasks_(0) {}

  void set_enabled(bool value) { enabled_ = value; }
  unsigned no_of_post_tasks() const { return num_of_post_tasks_; }

  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
                                  const Closure& task,
                                  TimeDelta delay) override {
    NOTREACHED();
    return false;
  }

  bool PostDelayedTask(const tracked_objects::Location& from_here,
                       const Closure& task,
                       TimeDelta delay) override {
    num_of_post_tasks_++;
    if (enabled_)
      return worker_pool_->PostSequencedWorkerTask(token_, from_here, task);
    return false;
  }

  bool RunsTasksOnCurrentThread() const override {
    return worker_pool_->IsRunningSequenceOnCurrentThread(token_);
  }

 private:
  ~TestSequencedTaskRunner() override {}

  scoped_refptr<SequencedWorkerPool> worker_pool_;
  const SequencedWorkerPool::SequenceToken token_;
  bool enabled_;
  unsigned num_of_post_tasks_;
};

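// Test fixture that installs a fresh MemoryDumpManager (plus a mock delegate)
// as the process-global instance for each test and tears it down afterwards.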
class MemoryDumpManagerTest : public testing::Test {
 public:
  MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}

  void SetUp() override {
    last_callback_success_ = false;
    message_loop_.reset(new MessageLoop());
    mdm_.reset(new MemoryDumpManager());
    MemoryDumpManager::SetInstanceForTesting(mdm_.get());
    ASSERT_EQ(mdm_.get(), MemoryDumpManager::GetInstance());
    delegate_.reset(new MemoryDumpManagerDelegateForTesting);
  }

  void TearDown() override {
    MemoryDumpManager::SetInstanceForTesting(nullptr);
    mdm_.reset();
    delegate_.reset();
    message_loop_.reset();
    TraceLog::DeleteForTesting();
  }

  // Turns a Closure into a MemoryDumpCallback, keeping track of the callback
  // result and taking care of posting the closure on the correct task runner.
  void DumpCallbackAdapter(scoped_refptr<SingleThreadTaskRunner> task_runner,
                           Closure closure,
                           uint64_t dump_guid,
                           bool success) {
    last_callback_success_ = success;
    task_runner->PostTask(FROM_HERE, closure);
  }

 protected:
  void InitializeMemoryDumpManager(bool is_coordinator) {
    mdm_->set_dumper_registrations_ignored_for_testing(true);
    mdm_->Initialize(delegate_.get(), is_coordinator);
  }

  void RequestGlobalDumpAndWait(MemoryDumpType dump_type,
                                MemoryDumpLevelOfDetail level_of_detail) {
    RunLoop run_loop;
    MemoryDumpCallback callback =
        Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
             ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
    mdm_->RequestGlobalDump(dump_type, level_of_detail, callback);
    run_loop.Run();
  }

  void EnableTracingWithLegacyCategories(const char* category) {
    TraceLog::GetInstance()->SetEnabled(TraceConfig(category, ""),
                                        TraceLog::RECORDING_MODE);
  }

  void EnableTracingWithTraceConfig(const std::string& trace_config) {
    TraceLog::GetInstance()->SetEnabled(TraceConfig(trace_config),
                                        TraceLog::RECORDING_MODE);
  }

  void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }

  bool IsPeriodicDumpingEnabled() const {
    return mdm_->periodic_dump_timer_.IsRunning();
  }

  int GetMaxConsecutiveFailuresCount() const {
    return MemoryDumpManager::kMaxConsecutiveFailuresCount;
  }

  const MemoryDumpProvider::Options kDefaultOptions;
  std::unique_ptr<MemoryDumpManager> mdm_;
  std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
  bool last_callback_success_;

 private:
  std::unique_ptr<MessageLoop> message_loop_;

  // We want our singleton torn down after each test.
  ShadowingAtExitManager at_exit_manager_;
};

// Basic sanity checks. Registers a memory dump provider and checks that it is
// called, but only when memory-infra is enabled.
TEST_F(MemoryDumpManagerTest, SingleDumper) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;
  RegisterDumpProvider(&mdp);

  // Check that the dumper is not called if the memory category is not enabled.
  EnableTracingWithLegacyCategories("foobar-but-not-memory");
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Now repeat enabling the memory category and check that the dumper is
  // invoked this time.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(3).WillRepeatedly(Return(true));
  for (int i = 0; i < 3; ++i)
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  mdm_->UnregisterDumpProvider(&mdp);

  // Finally check the unregister logic: the delegate will be invoked but not
  // the dump provider, as it has been unregistered.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);

  for (int i = 0; i < 3; ++i) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }
  DisableTracing();
}

// Checks that requesting dumps with a high level of detail actually propagates
// the level of detail properly to the OnMemoryDump() call on dump providers.
TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;

  RegisterDumpProvider(&mdp);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();
  mdm_->UnregisterDumpProvider(&mdp);

  // Check that requesting dumps with a low level of detail also propagates to
  // the OnMemoryDump() call on dump providers.
  RegisterDumpProvider(&mdp);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::LIGHT);
  DisableTracing();
  mdm_->UnregisterDumpProvider(&mdp);
}

// Checks that the SharedSessionState object is actually shared over time.
TEST_F(MemoryDumpManagerTest, SharedSessionState) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;
  RegisterDumpProvider(&mdp1);
  RegisterDumpProvider(&mdp2);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  const MemoryDumpSessionState* session_state =
      mdm_->session_state_for_testing().get();
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(2)
      .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
                                             ProcessMemoryDump* pmd) -> bool {
        EXPECT_EQ(session_state, pmd->session_state().get());
        return true;
      }));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .Times(2)
      .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
                                             ProcessMemoryDump* pmd) -> bool {
        EXPECT_EQ(session_state, pmd->session_state().get());
        return true;
      }));

  for (int i = 0; i < 2; ++i) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}

// Checks that the (Un)RegisterDumpProvider logic behaves sanely.
TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  // Enable only mdp1.
  RegisterDumpProvider(&mdp1);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Invert: disable mdp1 and enable mdp2.
  mdm_->UnregisterDumpProvider(&mdp1);
  RegisterDumpProvider(&mdp2);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Enable both mdp1 and mdp2.
  RegisterDumpProvider(&mdp1);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();
}

// Checks that the dump provider invocations depend only on the current
// registration state and not on previous registrations and dumps.
TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;

  RegisterDumpProvider(&mdp);

  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }

  mdm_->UnregisterDumpProvider(&mdp);

  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }

  RegisterDumpProvider(&mdp);
  mdm_->UnregisterDumpProvider(&mdp);

  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }

  RegisterDumpProvider(&mdp);
  mdm_->UnregisterDumpProvider(&mdp);
  RegisterDumpProvider(&mdp);

  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }
}

// Checks that the MemoryDumpManager respects the thread affinity when a
// MemoryDumpProvider specifies a task_runner(). The test starts by creating 8
// threads and registering a MemoryDumpProvider on each of them. At each
// iteration, one thread is removed, to check the live unregistration logic.
TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  const uint32_t kNumInitialThreads = 8;

  std::vector<std::unique_ptr<Thread>> threads;
  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;

  // Create the threads and set up the expectations. Given that at each
  // iteration we will pop out one thread/MemoryDumpProvider, each MDP is
  // supposed to be invoked a number of times equal to its index.
  for (uint32_t i = kNumInitialThreads; i > 0; --i) {
    threads.push_back(WrapUnique(new Thread("test thread")));
    auto* thread = threads.back().get();
    thread->Start();
    scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
    auto* mdp = mdps.back().get();
    RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
        .Times(i)
        .WillRepeatedly(Invoke(
            [task_runner](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
              EXPECT_TRUE(task_runner->RunsTasksOnCurrentThread());
              return true;
            }));
  }
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  while (!threads.empty()) {
    last_callback_success_ = false;
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    EXPECT_TRUE(last_callback_success_);

    // Unregister an MDP and destroy one thread at each iteration to check the
    // live unregistration logic. The unregistration needs to happen on the
    // same thread the MDP belongs to.
    {
      RunLoop run_loop;
      Closure unregistration =
          Bind(&MemoryDumpManager::UnregisterDumpProvider,
               Unretained(mdm_.get()), Unretained(mdps.back().get()));
      threads.back()->task_runner()->PostTaskAndReply(FROM_HERE, unregistration,
                                                      run_loop.QuitClosure());
      run_loop.Run();
    }
    mdps.pop_back();
    threads.back()->Stop();
    threads.pop_back();
  }

  DisableTracing();
}

// Checks that memory dump calls are always posted on the provider's task
// runner in the SequencedTaskRunner case, and that the dump provider gets
// disabled when PostTask fails, but the dump still succeeds.
TEST_F(MemoryDumpManagerTest, PostTaskForSequencedTaskRunner) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::vector<MockMemoryDumpProvider> mdps(3);
  scoped_refptr<TestSequencedTaskRunner> task_runner1(
      make_scoped_refptr(new TestSequencedTaskRunner()));
  scoped_refptr<TestSequencedTaskRunner> task_runner2(
      make_scoped_refptr(new TestSequencedTaskRunner()));
  RegisterDumpProviderWithSequencedTaskRunner(&mdps[0], task_runner1,
                                              kDefaultOptions);
  RegisterDumpProviderWithSequencedTaskRunner(&mdps[1], task_runner2,
                                              kDefaultOptions);
  RegisterDumpProviderWithSequencedTaskRunner(&mdps[2], task_runner2,
                                              kDefaultOptions);
  // |mdps[0]| should be disabled permanently after the first dump.
  EXPECT_CALL(mdps[0], OnMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdps[1], OnMemoryDump(_, _)).Times(2);
  EXPECT_CALL(mdps[2], OnMemoryDump(_, _)).Times(2);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  task_runner1->set_enabled(false);
  last_callback_success_ = false;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  // Tasks should be individually posted even if |mdps[1]| and |mdps[2]| belong
  // to the same task runner.
  EXPECT_EQ(1u, task_runner1->no_of_post_tasks());
  EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
  EXPECT_TRUE(last_callback_success_);

  task_runner1->set_enabled(true);
  last_callback_success_ = false;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  EXPECT_EQ(2u, task_runner1->no_of_post_tasks());
  EXPECT_EQ(4u, task_runner2->no_of_post_tasks());
  EXPECT_TRUE(last_callback_success_);
  DisableTracing();
}

// Checks that providers get disabled after 3 consecutive failures, but not
// otherwise (e.g., if interleaved).
TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  RegisterDumpProvider(&mdp1);
  RegisterDumpProvider(&mdp2);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(kNumDumps);

  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(GetMaxConsecutiveFailuresCount())
      .WillRepeatedly(Return(false));

  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .WillOnce(Return(false))
      .WillOnce(Return(true))
      .WillOnce(Return(false))
      .WillOnce(Return(false))
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  for (int i = 0; i < kNumDumps; i++) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}

// Sneakily registers an extra memory dump provider while an existing one is
// dumping and expects it to take part in the already active tracing session.
TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  RegisterDumpProvider(&mdp1);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);

  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(4)
      .WillOnce(Return(true))
      .WillOnce(
          Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
            RegisterDumpProvider(&mdp2);
            return true;
          }))
      .WillRepeatedly(Return(true));

  // Depending on the insertion order (before or after mdp1), mdp2 might also
  // be called immediately after it gets registered.
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .Times(Between(2, 3))
      .WillRepeatedly(Return(true));

  for (int i = 0; i < 4; i++) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}

// Like RegisterDumperWhileDumping, but unregisters the dump provider instead.
TEST_F(MemoryDumpManagerTest, UnregisterDumperWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
  RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);

  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(4)
      .WillOnce(Return(true))
      .WillOnce(
          Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
            MemoryDumpManager::GetInstance()->UnregisterDumpProvider(&mdp2);
            return true;
          }))
      .WillRepeatedly(Return(true));

  // Depending on the insertion order (before or after mdp1), mdp2 might
  // already have been called when UnregisterDumpProvider() happens.
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .Times(Between(1, 2))
      .WillRepeatedly(Return(true));

  for (int i = 0; i < 4; i++) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}

// Checks that the dump does not abort when unregistering a provider while
// dumping from a different thread than the dumping thread.
TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::vector<std::unique_ptr<TestIOThread>> threads;
  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;

  for (int i = 0; i < 2; i++) {
    threads.push_back(
        WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
    RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                         kDefaultOptions);
  }

  int on_memory_dump_call_count = 0;

  // When OnMemoryDump is called on either of the dump providers, it will
  // unregister the other one.
  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
    int other_idx = (mdps.front() == mdp);
    TestIOThread* other_thread = threads[other_idx].get();
    MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
    auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count](
        const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
      other_thread->PostTaskAndWait(
          FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
                                base::Unretained(&*mdm_), other_mdp));
      on_memory_dump_call_count++;
      return true;
    };

    // OnMemoryDump is called once for the provider that dumps first, and zero
    // times for the other provider.
    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
        .Times(AtMost(1))
        .WillOnce(Invoke(on_dump));
  }

  last_callback_success_ = false;
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  ASSERT_EQ(1, on_memory_dump_call_count);
  ASSERT_TRUE(last_callback_success_);

  DisableTracing();
}

// If a thread (with a dump provider living on it) is torn down during a dump,
// its dump provider should be skipped but the dump itself should succeed.
TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::vector<std::unique_ptr<TestIOThread>> threads;
  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;

  for (int i = 0; i < 2; i++) {
    threads.push_back(
        WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
    RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                         kDefaultOptions);
  }

  int on_memory_dump_call_count = 0;

  // When OnMemoryDump is called on either of the dump providers, it will
  // tear down the thread of the other one.
  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
    int other_idx = (mdps.front() == mdp);
    TestIOThread* other_thread = threads[other_idx].get();
    auto on_dump = [other_thread, &on_memory_dump_call_count](
        const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
      other_thread->Stop();
      on_memory_dump_call_count++;
      return true;
    };

    // OnMemoryDump is called once for the provider that dumps first, and zero
    // times for the other provider.
    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
        .Times(AtMost(1))
        .WillOnce(Invoke(on_dump));
  }

  last_callback_success_ = false;
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  ASSERT_EQ(1, on_memory_dump_call_count);
  ASSERT_TRUE(last_callback_success_);

  DisableTracing();
}

// Checks that a NACK callback is invoked if RequestGlobalDump() is called when
// tracing is not enabled.
TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  RegisterDumpProvider(&mdp1);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);

  last_callback_success_ = true;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  EXPECT_FALSE(last_callback_success_);
}

// Checks that if the MemoryDumpManager is initialized after tracing has
// already begun, it will still late-join the party (real use case: startup
// tracing).
TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
  MockMemoryDumpProvider mdp;
  RegisterDumpProvider(&mdp);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  // First check that a RequestGlobalDump() issued before the MemoryDumpManager
  // initialization gets NACK-ed cleanly.
  {
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    EXPECT_FALSE(last_callback_success_);
  }

  // Now late-initialize the MemoryDumpManager and check that the
  // RequestGlobalDump completes successfully.
  {
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    InitializeMemoryDumpManager(false /* is_coordinator */);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    EXPECT_TRUE(last_callback_success_);
  }
  DisableTracing();
}

// This test (and TraceConfigExpectationsWhenIsCoordinator below) crystallizes
// the expectations of the chrome://tracing UI and chrome telemetry w.r.t.
// periodic dumps in memory-infra, handling gracefully the transition between
// the legacy and the new-style (JSON-based) TraceConfig.
TEST_F(MemoryDumpManagerTest, TraceConfigExpectations) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MemoryDumpManagerDelegateForTesting& delegate = *delegate_;

  // Don't trigger the default behavior of the mock delegate in this test,
  // which would short-circuit the dump request to the actual
  // CreateProcessDump().
  // We don't want to create any dump in this test, only check whether the
  // dumps are requested or not.
  ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());

  // Enabling memory-infra in a non-coordinator process should not trigger any
  // periodic dumps.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_FALSE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a non-coordinator
  // process with a fully defined trigger config should NOT enable any periodic
  // dumps.
  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(1, 5));
  EXPECT_FALSE(IsPeriodicDumpingEnabled());
  DisableTracing();
}

TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
  InitializeMemoryDumpManager(true /* is_coordinator */);
  MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
  ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());

  // Enabling memory-infra with the legacy TraceConfig (category filter) in
  // a coordinator process should enable periodic dumps.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_TRUE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
  // process without specifying any "memory_dump_config" section should enable
  // periodic dumps. This is to preserve the behavior of the chrome://tracing
  // UI, that is: ticking memory-infra should dump periodically with the
  // default config.
  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_NoTriggers());
  EXPECT_TRUE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
  // process with an empty "memory_dump_config" should NOT enable periodic
  // dumps. This is the way telemetry is supposed to use memory-infra with
  // only explicitly triggered dumps.
  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
  EXPECT_FALSE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
  // process with a fully defined trigger config should cause periodic dumps to
  // be performed in the correct order.
  RunLoop run_loop;
  auto quit_closure = run_loop.QuitClosure();

  const int kHeavyDumpRate = 5;
  const int kLightDumpPeriodMs = 1;
  const int kHeavyDumpPeriodMs = kHeavyDumpRate * kLightDumpPeriodMs;
  // The expected sequence with light=1ms, heavy=5ms is H,L,L,L,L,H,...
  testing::InSequence sequence;
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
      .Times(kHeavyDumpRate - 1);
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
      .Times(kHeavyDumpRate - 2);
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
      .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
                                      const MemoryDumpCallback& callback) {
        ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
      }));

  // Swallow all the final spurious calls until tracing gets disabled.
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());

  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(
          kLightDumpPeriodMs, kHeavyDumpPeriodMs));
  run_loop.Run();
  DisableTracing();
}

// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
  base::WaitableEvent tracing_disabled_event(
      WaitableEvent::ResetPolicy::AUTOMATIC,
      WaitableEvent::InitialState::NOT_SIGNALED);
  InitializeMemoryDumpManager(false /* is_coordinator */);

  // Register a bound dump provider.
  std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
  mdp_thread->Start();
  MockMemoryDumpProvider mdp_with_affinity;
  RegisterDumpProvider(&mdp_with_affinity, mdp_thread->task_runner(),
                       kDefaultOptions);

  // Also register an unbound dump provider. Unbound dump providers are always
  // invoked after bound ones.
  MockMemoryDumpProvider unbound_mdp;
  RegisterDumpProvider(&unbound_mdp, nullptr, kDefaultOptions);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp_with_affinity, OnMemoryDump(_, _))
      .Times(1)
      .WillOnce(
          Invoke([&tracing_disabled_event](const MemoryDumpArgs&,
                                           ProcessMemoryDump* pmd) -> bool {
            tracing_disabled_event.Wait();

            // At this point tracing has been disabled and the
            // MemoryDumpManager.dump_thread_ has been shut down.
            return true;
          }));

  // |unbound_mdp| should never be invoked because the thread for unbound dump
  // providers has been shut down in the meantime.
  EXPECT_CALL(unbound_mdp, OnMemoryDump(_, _)).Times(0);

  last_callback_success_ = true;
  RunLoop run_loop;
  MemoryDumpCallback callback =
      Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
           ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
  mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
                          MemoryDumpLevelOfDetail::DETAILED, callback);
  DisableTracing();
  tracing_disabled_event.Signal();
  run_loop.Run();

  EXPECT_FALSE(last_callback_success_);
}

// Tests against race conditions that can happen if tracing is disabled before
// the CreateProcessDump() call. Real-world regression: crbug.com/580295 .
TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
  base::WaitableEvent tracing_disabled_event(
      WaitableEvent::ResetPolicy::AUTOMATIC,
      WaitableEvent::InitialState::NOT_SIGNALED);
  InitializeMemoryDumpManager(false /* is_coordinator */);

  std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
  mdp_thread->Start();

  // Create both a same-thread MDP and another MDP with a dedicated thread.
  MockMemoryDumpProvider mdp1;
  RegisterDumpProvider(&mdp1);
  MockMemoryDumpProvider mdp2;
  RegisterDumpProvider(&mdp2, mdp_thread->task_runner(), kDefaultOptions);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
      .WillOnce(Invoke([this](const MemoryDumpRequestArgs& args,
                              const MemoryDumpCallback& callback) {
        DisableTracing();
        delegate_->CreateProcessDump(args, callback);
      }));

  // If tracing is disabled for the current session, CreateProcessDump() should
  // NOT request dumps from providers. Real-world regression: crbug.com/600570 .
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);

  last_callback_success_ = true;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  EXPECT_FALSE(last_callback_success_);
}

TEST_F(MemoryDumpManagerTest, DumpOnBehalfOfOtherProcess) {
  using trace_analyzer::Query;

  InitializeMemoryDumpManager(false /* is_coordinator */);

  // Standard provider with default options (create dump for current process).
  MemoryDumpProvider::Options options;
  MockMemoryDumpProvider mdp1;
  RegisterDumpProvider(&mdp1, nullptr, options);

  // Provider with out-of-process dumping.
  MockMemoryDumpProvider mdp2;
  options.target_pid = 123;
  RegisterDumpProvider(&mdp2, nullptr, options);

  // Another provider with out-of-process dumping.
  MockMemoryDumpProvider mdp3;
  options.target_pid = 456;
  RegisterDumpProvider(&mdp3, nullptr, options);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
  EXPECT_CALL(mdp3, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Flush the trace into JSON.
  trace_event::TraceResultBuffer buffer;
  TraceResultBuffer::SimpleOutput trace_output;
  buffer.SetOutputCallback(trace_output.GetCallback());
  RunLoop run_loop;
  buffer.Start();
  trace_event::TraceLog::GetInstance()->Flush(
      Bind(&OnTraceDataCollected, run_loop.QuitClosure(), Unretained(&buffer)));
  run_loop.Run();
  buffer.Finish();

  // Analyze the JSON.
  std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer = WrapUnique(
      trace_analyzer::TraceAnalyzer::Create(trace_output.json_output));
  trace_analyzer::TraceEventVector events;
  analyzer->FindEvents(Query::EventPhaseIs(TRACE_EVENT_PHASE_MEMORY_DUMP),
                       &events);

  ASSERT_EQ(3u, events.size());
  ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(123)));
  ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(456)));
  ASSERT_EQ(1u, trace_analyzer::CountMatches(
                    events, Query::EventPidIs(GetCurrentProcId())));
  ASSERT_EQ(events[0]->id, events[1]->id);
  ASSERT_EQ(events[0]->id, events[2]->id);
}

// Tests the basics of UnregisterAndDeleteDumpProviderSoon(): the
// unregistration should actually delete the providers and not leak them.
TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoon) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  static const int kNumProviders = 3;
  int dtor_count = 0;
  std::vector<std::unique_ptr<MemoryDumpProvider>> mdps;
  for (int i = 0; i < kNumProviders; ++i) {
    std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
    mdp->enable_mock_destructor = true;
    EXPECT_CALL(*mdp, Destructor())
        .WillOnce(Invoke([&dtor_count]() { dtor_count++; }));
    RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);
    mdps.push_back(std::move(mdp));
  }

  while (!mdps.empty()) {
    mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdps.back()));
    mdps.pop_back();
  }

  ASSERT_EQ(kNumProviders, dtor_count);
}

// This test checks against races when unregistering an unbound dump provider
// from another thread while dumping. It registers one MDP and, when
// OnMemoryDump() is called, it invokes UnregisterAndDeleteDumpProviderSoon()
// from another thread. The OnMemoryDump() and the dtor call are expected to
// happen on the same thread (the MemoryDumpManager utility thread).
TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
  mdp->enable_mock_destructor = true;
  RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);

  base::PlatformThreadRef thread_ref;
  auto self_unregister_from_another_thread = [&mdp, &thread_ref](
      const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
    thread_ref = PlatformThread::CurrentRef();
    TestIOThread thread_for_unregistration(TestIOThread::kAutoStart);
    thread_for_unregistration.PostTaskAndWait(
        FROM_HERE,
        base::Bind(
            &MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
            base::Unretained(MemoryDumpManager::GetInstance()),
            base::Passed(std::unique_ptr<MemoryDumpProvider>(std::move(mdp)))));
    thread_for_unregistration.Stop();
    return true;
  };
  EXPECT_CALL(*mdp, OnMemoryDump(_, _))
      .Times(1)
      .WillOnce(Invoke(self_unregister_from_another_thread));
  EXPECT_CALL(*mdp, Destructor())
      .Times(1)
      .WillOnce(Invoke([&thread_ref]() {
        EXPECT_EQ(thread_ref, PlatformThread::CurrentRef());
      }));

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
  for (int i = 0; i < 2; ++i) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }
  DisableTracing();
}

TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
  std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
  RegisterDumpProvider(mdp1.get());
  std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
  RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
                       kWhitelistedMDPName);

  EXPECT_CALL(*mdp1, OnMemoryDump(_, _)).Times(0);
  EXPECT_CALL(*mdp2, OnMemoryDump(_, _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_FALSE(IsPeriodicDumpingEnabled());
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::BACKGROUND);
  DisableTracing();
}

TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
  InitializeMemoryDumpManager(true /* is_coordinator */);

  RunLoop run_loop;
  auto quit_closure = run_loop.QuitClosure();

  testing::InSequence sequence;
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
      .Times(5);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
      .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
                                      const MemoryDumpCallback& callback) {
        ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
      }));
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());

  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
          1 /* period_ms */));

  // Only background mode dumps should be allowed with the trace config.
  last_callback_success_ = false;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::LIGHT);
  EXPECT_FALSE(last_callback_success_);
  last_callback_success_ = false;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  EXPECT_FALSE(last_callback_success_);

  ASSERT_TRUE(IsPeriodicDumpingEnabled());
  run_loop.Run();
  DisableTracing();
}

}  // namespace trace_event
}  // namespace base