1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/trace_event/memory_dump_manager.h"
6
7 #include <stdint.h>
8
9 #include <memory>
10 #include <utility>
11 #include <vector>
12
13 #include "base/bind_helpers.h"
14 #include "base/callback.h"
15 #include "base/memory/ptr_util.h"
16 #include "base/memory/ref_counted_memory.h"
17 #include "base/message_loop/message_loop.h"
18 #include "base/run_loop.h"
19 #include "base/strings/stringprintf.h"
20 #include "base/synchronization/waitable_event.h"
21 #include "base/test/sequenced_worker_pool_owner.h"
22 #include "base/test/test_io_thread.h"
23 #include "base/test/trace_event_analyzer.h"
24 #include "base/threading/platform_thread.h"
25 #include "base/threading/sequenced_task_runner_handle.h"
26 #include "base/threading/sequenced_worker_pool.h"
27 #include "base/threading/thread.h"
28 #include "base/threading/thread_task_runner_handle.h"
29 #include "base/trace_event/memory_dump_provider.h"
30 #include "base/trace_event/memory_dump_scheduler.h"
31 #include "base/trace_event/memory_infra_background_whitelist.h"
32 #include "base/trace_event/process_memory_dump.h"
33 #include "base/trace_event/trace_buffer.h"
34 #include "base/trace_event/trace_config_memory_test_util.h"
35 #include "build/build_config.h"
36 #include "testing/gmock/include/gmock/gmock.h"
37 #include "testing/gtest/include/gtest/gtest.h"
38
39 using testing::_;
40 using testing::AnyNumber;
41 using testing::AtMost;
42 using testing::Between;
43 using testing::Invoke;
44 using testing::Return;
45
46 namespace base {
47 namespace trace_event {
48
// GTest matchers for MemoryDumpRequestArgs arguments.

// Matches a dump request with DETAILED level of detail.
MATCHER(IsDetailedDump, "") {
  return arg.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
}

// Matches a dump request with LIGHT level of detail.
MATCHER(IsLightDump, "") {
  return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
}

// Matches a dump request with BACKGROUND level of detail.
MATCHER(IsBackgroundDump, "") {
  return arg.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND;
}
61
62 namespace {
63
// Default name used when registering test dump providers.
const char* kMDPName = "TestDumpProvider";
// Name present in |kTestMDPWhitelist|, usable by background-mode tests.
const char* kWhitelistedMDPName = "WhitelistedTestDumpProvider";
// Null-terminated whitelist handed to the background-mode machinery.
const char* const kTestMDPWhitelist[] = {kWhitelistedMDPName, nullptr};
67
RegisterDumpProvider(MemoryDumpProvider * mdp,scoped_refptr<base::SingleThreadTaskRunner> task_runner,const MemoryDumpProvider::Options & options,const char * name=kMDPName)68 void RegisterDumpProvider(
69 MemoryDumpProvider* mdp,
70 scoped_refptr<base::SingleThreadTaskRunner> task_runner,
71 const MemoryDumpProvider::Options& options,
72 const char* name = kMDPName) {
73 MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
74 mdm->set_dumper_registrations_ignored_for_testing(false);
75 mdm->RegisterDumpProvider(mdp, name, std::move(task_runner), options);
76 mdm->set_dumper_registrations_ignored_for_testing(true);
77 }
78
RegisterDumpProvider(MemoryDumpProvider * mdp,scoped_refptr<base::SingleThreadTaskRunner> task_runner)79 void RegisterDumpProvider(
80 MemoryDumpProvider* mdp,
81 scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
82 RegisterDumpProvider(mdp, task_runner, MemoryDumpProvider::Options());
83 }
84
RegisterDumpProviderWithSequencedTaskRunner(MemoryDumpProvider * mdp,scoped_refptr<base::SequencedTaskRunner> task_runner,const MemoryDumpProvider::Options & options)85 void RegisterDumpProviderWithSequencedTaskRunner(
86 MemoryDumpProvider* mdp,
87 scoped_refptr<base::SequencedTaskRunner> task_runner,
88 const MemoryDumpProvider::Options& options) {
89 MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
90 mdm->set_dumper_registrations_ignored_for_testing(false);
91 mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
92 options);
93 mdm->set_dumper_registrations_ignored_for_testing(true);
94 }
95
OnTraceDataCollected(Closure quit_closure,trace_event::TraceResultBuffer * buffer,const scoped_refptr<RefCountedString> & json,bool has_more_events)96 void OnTraceDataCollected(Closure quit_closure,
97 trace_event::TraceResultBuffer* buffer,
98 const scoped_refptr<RefCountedString>& json,
99 bool has_more_events) {
100 buffer->AddFragment(json->data());
101 if (!has_more_events)
102 quit_closure.Run();
103 }
104
105 // Posts |task| to |task_runner| and blocks until it is executed.
PostTaskAndWait(const tracked_objects::Location & from_here,SequencedTaskRunner * task_runner,base::OnceClosure task)106 void PostTaskAndWait(const tracked_objects::Location& from_here,
107 SequencedTaskRunner* task_runner,
108 base::OnceClosure task) {
109 base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
110 WaitableEvent::InitialState::NOT_SIGNALED);
111 task_runner->PostTask(from_here, std::move(task));
112 task_runner->PostTask(
113 FROM_HERE, base::Bind(&WaitableEvent::Signal, base::Unretained(&event)));
114 // The SequencedTaskRunner guarantees that |event| will only be signaled after
115 // |task| is executed.
116 event.Wait();
117 }
118
119 // Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
120 // requests locally to the MemoryDumpManager instead of performing IPC dances.
121 class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
122 public:
MemoryDumpManagerDelegateForTesting(bool is_coordinator)123 MemoryDumpManagerDelegateForTesting(bool is_coordinator)
124 : is_coordinator_(is_coordinator) {
125 ON_CALL(*this, RequestGlobalMemoryDump(_, _))
126 .WillByDefault(Invoke(
127 this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
128 }
129
130 MOCK_METHOD2(RequestGlobalMemoryDump,
131 void(const MemoryDumpRequestArgs& args,
132 const MemoryDumpCallback& callback));
133
IsCoordinator() const134 bool IsCoordinator() const override { return is_coordinator_; }
135
136 // Promote the CreateProcessDump to public so it can be used by test fixtures.
137 using MemoryDumpManagerDelegate::CreateProcessDump;
138
139 private:
140 bool is_coordinator_;
141 };
142
// Mock MemoryDumpProvider with sensible defaults: OnMemoryDump succeeds (and
// sanity-checks the session state), polling is NOTREACHED unless a test
// explicitly overrides it.
class MockMemoryDumpProvider : public MemoryDumpProvider {
 public:
  // Mocked so tests can observe destruction (see |enable_mock_destructor|).
  MOCK_METHOD0(Destructor, void());
  MOCK_METHOD2(OnMemoryDump,
               bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
  MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t* memory_total));
  MOCK_METHOD0(SuspendFastMemoryPolling, void());

  MockMemoryDumpProvider() : enable_mock_destructor(false) {
    ON_CALL(*this, OnMemoryDump(_, _))
        .WillByDefault(Invoke([](const MemoryDumpArgs&,
                                 ProcessMemoryDump* pmd) -> bool {
          // |session_state| should not be null under any circumstances when
          // invoking a memory dump. The problem might arise in race conditions
          // like crbug.com/600570 .
          EXPECT_TRUE(pmd->session_state().get() != nullptr);
          return true;
        }));

    // Polling is opt-in: a test that exercises it must override this action.
    ON_CALL(*this, PollFastMemoryTotal(_))
        .WillByDefault(
            Invoke([](uint64_t* memory_total) -> void { NOTREACHED(); }));
  }
  ~MockMemoryDumpProvider() override {
    if (enable_mock_destructor)
      Destructor();
  }

  // When true, the destructor fires the Destructor() mock so tests can assert
  // that UnregisterAndDeleteDumpProviderSoon() really deletes the provider.
  bool enable_mock_destructor;
};
173
174 class TestSequencedTaskRunner : public SequencedTaskRunner {
175 public:
TestSequencedTaskRunner()176 TestSequencedTaskRunner()
177 : worker_pool_(2 /* max_threads */, "Test Task Runner"),
178 enabled_(true),
179 num_of_post_tasks_(0) {}
180
set_enabled(bool value)181 void set_enabled(bool value) { enabled_ = value; }
no_of_post_tasks() const182 unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
183
PostNonNestableDelayedTask(const tracked_objects::Location & from_here,OnceClosure task,TimeDelta delay)184 bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
185 OnceClosure task,
186 TimeDelta delay) override {
187 NOTREACHED();
188 return false;
189 }
190
PostDelayedTask(const tracked_objects::Location & from_here,OnceClosure task,TimeDelta delay)191 bool PostDelayedTask(const tracked_objects::Location& from_here,
192 OnceClosure task,
193 TimeDelta delay) override {
194 num_of_post_tasks_++;
195 if (enabled_) {
196 return worker_pool_.pool()->PostSequencedWorkerTask(token_, from_here,
197 std::move(task));
198 }
199 return false;
200 }
201
RunsTasksOnCurrentThread() const202 bool RunsTasksOnCurrentThread() const override {
203 return worker_pool_.pool()->RunsTasksOnCurrentThread();
204 }
205
206 private:
~TestSequencedTaskRunner()207 ~TestSequencedTaskRunner() override {}
208
209 SequencedWorkerPoolOwner worker_pool_;
210 const SequencedWorkerPool::SequenceToken token_;
211 bool enabled_;
212 unsigned num_of_post_tasks_;
213 };
214
215 } // namespace
216
// Test fixture: owns a fresh MemoryDumpManager per test (installed as the
// process singleton) and provides helpers to enable tracing and request
// global dumps synchronously.
class MemoryDumpManagerTest : public testing::Test {
 public:
  MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}

  void SetUp() override {
    last_callback_success_ = false;
    message_loop_.reset(new MessageLoop());
    mdm_.reset(new MemoryDumpManager());
    // Install the fresh manager as the process-wide singleton so code under
    // test that calls MemoryDumpManager::GetInstance() sees this instance.
    MemoryDumpManager::SetInstanceForTesting(mdm_.get());
    ASSERT_EQ(mdm_.get(), MemoryDumpManager::GetInstance());
  }

  void TearDown() override {
    MemoryDumpManager::SetInstanceForTesting(nullptr);
    delegate_ = nullptr;
    mdm_.reset();
    message_loop_.reset();
    TraceLog::DeleteForTesting();
  }

  // Turns a Closure into a MemoryDumpCallback, keeping track of the callback
  // result and taking care of posting the closure on the correct task runner.
  void DumpCallbackAdapter(scoped_refptr<SingleThreadTaskRunner> task_runner,
                           Closure closure,
                           uint64_t dump_guid,
                           bool success) {
    last_callback_success_ = success;
    task_runner->PostTask(FROM_HERE, closure);
  }

  // Forwards to the manager under test.
  void PollFastMemoryTotal(uint64_t* memory_total) {
    mdm_->PollFastMemoryTotal(memory_total);
  }

 protected:
  // Installs a MemoryDumpManagerDelegateForTesting on |mdm_|. After this call
  // |delegate_| is owned by |mdm_| (kept here as a raw observation pointer).
  void InitializeMemoryDumpManager(bool is_coordinator) {
    mdm_->set_dumper_registrations_ignored_for_testing(true);
    delegate_ = new MemoryDumpManagerDelegateForTesting(is_coordinator);
    mdm_->Initialize(base::WrapUnique(delegate_));
  }

  // Requests a global dump and spins a RunLoop until its callback fires,
  // recording the result in |last_callback_success_|.
  void RequestGlobalDumpAndWait(MemoryDumpType dump_type,
                                MemoryDumpLevelOfDetail level_of_detail) {
    RunLoop run_loop;
    MemoryDumpCallback callback =
        Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
             ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
    mdm_->RequestGlobalDump(dump_type, level_of_detail, callback);
    run_loop.Run();
  }

  void EnableTracingWithLegacyCategories(const char* category) {
    TraceLog::GetInstance()->SetEnabled(TraceConfig(category, ""),
                                        TraceLog::RECORDING_MODE);
  }

  void EnableTracingWithTraceConfig(const std::string& trace_config) {
    TraceLog::GetInstance()->SetEnabled(TraceConfig(trace_config),
                                        TraceLog::RECORDING_MODE);
  }

  void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }

  bool IsPeriodicDumpingEnabled() const {
    return MemoryDumpScheduler::GetInstance()
        ->IsPeriodicTimerRunningForTesting();
  }

  int GetMaxConsecutiveFailuresCount() const {
    return MemoryDumpManager::kMaxConsecutiveFailuresCount;
  }

  const MemoryDumpProvider::Options kDefaultOptions;
  std::unique_ptr<MemoryDumpManager> mdm_;
  // Not owned; points into |mdm_|'s delegate. Null until
  // InitializeMemoryDumpManager() is called and after TearDown().
  MemoryDumpManagerDelegateForTesting* delegate_;
  // Result reported by the most recent dump callback.
  bool last_callback_success_;

 private:
  std::unique_ptr<MessageLoop> message_loop_;

  // We want our singleton torn down after each test.
  ShadowingAtExitManager at_exit_manager_;
};
300
// Basic sanity checks. Registers a memory dump provider and checks that it is
// called, but only when memory-infra is enabled.
TEST_F(MemoryDumpManagerTest, SingleDumper) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;
  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());

  // Check that the dumper is not called if the memory category is not enabled.
  EnableTracingWithLegacyCategories("foobar-but-not-memory");
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Now repeat enabling the memory category and check that the dumper is
  // invoked this time.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(3).WillRepeatedly(Return(true));
  for (int i = 0; i < 3; ++i)
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  mdm_->UnregisterDumpProvider(&mdp);

  // Finally check the unregister logic: the delegate will be invoked but not
  // the dump provider, as it has been unregistered.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);

  for (int i = 0; i < 3; ++i) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }
  DisableTracing();
}
340
// Checks that requesting dumps with high level of detail actually propagates
// the level of the detail properly to OnMemoryDump() call on dump providers.
TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;

  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  // The IsDetailedDump matcher verifies the propagated level of detail.
  EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();
  mdm_->UnregisterDumpProvider(&mdp);

  // Check that requesting dumps with low level of detail actually propagates to
  // OnMemoryDump() call on dump providers.
  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::LIGHT);
  DisableTracing();
  mdm_->UnregisterDumpProvider(&mdp);
}
367
// Checks that the SharedSessionState object is actually shared over time.
TEST_F(MemoryDumpManagerTest, SharedSessionState) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;
  RegisterDumpProvider(&mdp1, nullptr);
  RegisterDumpProvider(&mdp2, nullptr);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  // Capture the session state created when tracing was enabled; every
  // subsequent dump in this session must carry the same object.
  const MemoryDumpSessionState* session_state =
      mdm_->session_state_for_testing().get();
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(2)
      .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
                                             ProcessMemoryDump* pmd) -> bool {
        EXPECT_EQ(session_state, pmd->session_state().get());
        return true;
      }));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .Times(2)
      .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
                                             ProcessMemoryDump* pmd) -> bool {
        EXPECT_EQ(session_state, pmd->session_state().get());
        return true;
      }));

  for (int i = 0; i < 2; ++i) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}
402
// Checks that the (Un)RegisterDumpProvider logic behaves sanely.
TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  // Enable only mdp1.
  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get());
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Invert: disable mdp1 and enable mdp2.
  mdm_->UnregisterDumpProvider(&mdp1);
  RegisterDumpProvider(&mdp2, nullptr);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Enable both mdp1 and mdp2.
  RegisterDumpProvider(&mdp1, nullptr);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();
}
440
// Checks that the dump provider invocations depend only on the current
// registration state and not on previous registrations and dumps.
// Flaky on iOS, see crbug.com/706874
#if defined(OS_IOS)
#define MAYBE_RegistrationConsistency DISABLED_RegistrationConsistency
#else
#define MAYBE_RegistrationConsistency RegistrationConsistency
#endif
TEST_F(MemoryDumpManagerTest, MAYBE_RegistrationConsistency) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;

  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());

  // Registered: the provider must be invoked.
  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }

  mdm_->UnregisterDumpProvider(&mdp);

  // Unregistered: the provider must not be invoked.
  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }

  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
  mdm_->UnregisterDumpProvider(&mdp);

  // Register-then-unregister before a dump: still not invoked.
  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }

  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
  mdm_->UnregisterDumpProvider(&mdp);
  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());

  // Ends re-registered: invoked again.
  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }
}
500
// Checks that the MemoryDumpManager respects the thread affinity when a
// MemoryDumpProvider specifies a task_runner(). The test starts creating 8
// threads and registering a MemoryDumpProvider on each of them. At each
// iteration, one thread is removed, to check the live unregistration logic.
TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  const uint32_t kNumInitialThreads = 8;

  std::vector<std::unique_ptr<Thread>> threads;
  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;

  // Create the threads and setup the expectations. Given that at each iteration
  // we will pop out one thread/MemoryDumpProvider, each MDP is supposed to be
  // invoked a number of times equal to its index.
  for (uint32_t i = kNumInitialThreads; i > 0; --i) {
    threads.push_back(WrapUnique(new Thread("test thread")));
    auto* thread = threads.back().get();
    thread->Start();
    scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
    auto* mdp = mdps.back().get();
    RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
    // The OnMemoryDump action asserts the thread-affinity contract itself.
    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
        .Times(i)
        .WillRepeatedly(Invoke(
            [task_runner](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
              EXPECT_TRUE(task_runner->RunsTasksOnCurrentThread());
              return true;
            }));
  }
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  while (!threads.empty()) {
    last_callback_success_ = false;
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    EXPECT_TRUE(last_callback_success_);

    // Unregister a MDP and destroy one thread at each iteration to check the
    // live unregistration logic. The unregistration needs to happen on the same
    // thread the MDP belongs to.
    {
      RunLoop run_loop;
      Closure unregistration =
          Bind(&MemoryDumpManager::UnregisterDumpProvider,
               Unretained(mdm_.get()), Unretained(mdps.back().get()));
      threads.back()->task_runner()->PostTaskAndReply(FROM_HERE, unregistration,
                                                      run_loop.QuitClosure());
      run_loop.Run();
    }
    mdps.pop_back();
    threads.back()->Stop();
    threads.pop_back();
  }

  DisableTracing();
}
559
// Check that the memory dump calls are always posted on task runner for
// SequencedTaskRunner case and that the dump provider gets disabled when
// PostTask fails, but the dump still succeeds.
TEST_F(MemoryDumpManagerTest, PostTaskForSequencedTaskRunner) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::vector<MockMemoryDumpProvider> mdps(3);
  scoped_refptr<TestSequencedTaskRunner> task_runner1(
      make_scoped_refptr(new TestSequencedTaskRunner()));
  scoped_refptr<TestSequencedTaskRunner> task_runner2(
      make_scoped_refptr(new TestSequencedTaskRunner()));
  RegisterDumpProviderWithSequencedTaskRunner(&mdps[0], task_runner1,
                                              kDefaultOptions);
  RegisterDumpProviderWithSequencedTaskRunner(&mdps[1], task_runner2,
                                              kDefaultOptions);
  RegisterDumpProviderWithSequencedTaskRunner(&mdps[2], task_runner2,
                                              kDefaultOptions);
  // |mdps[0]| should be disabled permanently after first dump, because its
  // task runner is disabled for the first dump and the post fails.
  EXPECT_CALL(mdps[0], OnMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdps[1], OnMemoryDump(_, _)).Times(2);
  EXPECT_CALL(mdps[2], OnMemoryDump(_, _)).Times(2);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  task_runner1->set_enabled(false);
  last_callback_success_ = false;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  // Tasks should be individually posted even if |mdps[1]| and |mdps[2]| belong
  // to same task runner.
  EXPECT_EQ(1u, task_runner1->no_of_post_tasks());
  EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
  EXPECT_TRUE(last_callback_success_);

  task_runner1->set_enabled(true);
  last_callback_success_ = false;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  // |mdps[0]| stays disabled, but the post attempt is still counted.
  EXPECT_EQ(2u, task_runner1->no_of_post_tasks());
  EXPECT_EQ(4u, task_runner2->no_of_post_tasks());
  EXPECT_TRUE(last_callback_success_);
  DisableTracing();
}
603
// Checks that providers get disabled after 3 consecutive failures, but not
// otherwise (e.g., if interleaved).
TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  RegisterDumpProvider(&mdp1, nullptr);
  RegisterDumpProvider(&mdp2, nullptr);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(kNumDumps);

  // |mdp1| fails kMaxConsecutiveFailuresCount times in a row: it must be
  // disabled and not called for the remaining dumps.
  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(GetMaxConsecutiveFailuresCount())
      .WillRepeatedly(Return(false));

  // |mdp2| interleaves failures with successes, never reaching the
  // consecutive-failures threshold, so it is called for every dump.
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .WillOnce(Return(false))
      .WillOnce(Return(true))
      .WillOnce(Return(false))
      .WillOnce(Return(false))
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  for (int i = 0; i < kNumDumps; i++) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}
637
// Sneakily registers an extra memory dump provider while an existing one is
// dumping and expect it to take part in the already active tracing session.
TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  RegisterDumpProvider(&mdp1, nullptr);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);

  // On the second dump, mdp1's OnMemoryDump registers mdp2 mid-dump.
  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(4)
      .WillOnce(Return(true))
      .WillOnce(
          Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
            RegisterDumpProvider(&mdp2, nullptr);
            return true;
          }))
      .WillRepeatedly(Return(true));

  // Depending on the insertion order (before or after mdp1), mdp2 might be
  // called also immediately after it gets registered.
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .Times(Between(2, 3))
      .WillRepeatedly(Return(true));

  for (int i = 0; i < 4; i++) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}
673
// Like RegisterDumperWhileDumping, but unregister the dump provider instead.
TEST_F(MemoryDumpManagerTest, UnregisterDumperWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
  RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);

  // On the second dump, mdp1's OnMemoryDump unregisters mdp2 mid-dump.
  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(4)
      .WillOnce(Return(true))
      .WillOnce(
          Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
            MemoryDumpManager::GetInstance()->UnregisterDumpProvider(&mdp2);
            return true;
          }))
      .WillRepeatedly(Return(true));

  // Depending on the insertion order (before or after mdp1), mdp2 might have
  // been already called when UnregisterDumpProvider happens.
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .Times(Between(1, 2))
      .WillRepeatedly(Return(true));

  for (int i = 0; i < 4; i++) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}
709
// Checks that the dump does not abort when unregistering a provider while
// dumping from a different thread than the dumping thread.
TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::vector<std::unique_ptr<TestIOThread>> threads;
  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;

  for (int i = 0; i < 2; i++) {
    threads.push_back(
        WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
    RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                         kDefaultOptions);
  }

  int on_memory_dump_call_count = 0;

  // When OnMemoryDump is called on either of the dump providers, it will
  // unregister the other one.
  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
    // Index of the *other* provider: 1 for mdps[0], 0 for mdps[1].
    int other_idx = (mdps.front() == mdp);
    // TestIOThread's task runner must be obtained from the main thread but can
    // then be used from other threads.
    scoped_refptr<SingleThreadTaskRunner> other_runner =
        threads[other_idx]->task_runner();
    MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
    // Blocks on the other thread so the unregistration completes while this
    // provider's dump is still in flight.
    auto on_dump = [this, other_runner, other_mdp, &on_memory_dump_call_count](
                       const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
      PostTaskAndWait(FROM_HERE, other_runner.get(),
                      base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
                                 base::Unretained(&*mdm_), other_mdp));
      on_memory_dump_call_count++;
      return true;
    };

    // OnMemoryDump is called once for the provider that dumps first, and zero
    // times for the other provider.
    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
        .Times(AtMost(1))
        .WillOnce(Invoke(on_dump));
  }

  last_callback_success_ = false;
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  ASSERT_EQ(1, on_memory_dump_call_count);
  ASSERT_TRUE(last_callback_success_);

  DisableTracing();
}
762
// Checks the fast-polling path: a provider registered with
// |is_fast_polling_supported| must have PollFastMemoryTotal() invoked
// repeatedly once a peak-detection trigger is in the trace config, and
// registration / UnregisterAndDeleteDumpProviderSoon() must be safe while
// polling is in progress. SuspendFastMemoryPolling() must be issued for each
// polled provider when tracing is disabled.
TEST_F(MemoryDumpManagerTest, TestPollingOnDumpThread) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider());
  std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider());
  mdp1->enable_mock_destructor = true;
  mdp2->enable_mock_destructor = true;

  // Both providers end up deleted via UnregisterAndDeleteDumpProviderSoon(),
  // so both destructors and both polling-suspension calls must be observed.
  EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1);
  EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(1);
  EXPECT_CALL(*mdp1, Destructor());
  EXPECT_CALL(*mdp2, Destructor());

  MemoryDumpProvider::Options options;
  options.is_fast_polling_supported = true;
  RegisterDumpProvider(mdp1.get(), nullptr, options);

  RunLoop run_loop;
  scoped_refptr<SingleThreadTaskRunner> test_task_runner =
      ThreadTaskRunnerHandle::Get();
  auto quit_closure = run_loop.QuitClosure();

  const int kPollsToQuit = 10;
  int call_count = 0;
  MemoryDumpManager* mdm = mdm_.get();
  // On specific polls this also exercises registration (1st call), deferred
  // deletion (4th call) and quitting the test (10th call), to prove those
  // operations are safe while polling is active.
  const auto poll_function1 = [&call_count, &test_task_runner, quit_closure,
                               &mdp2, mdm, &options, kPollsToQuit,
                               this](uint64_t* total) -> void {
    ++call_count;
    if (call_count == 1)
      RegisterDumpProvider(mdp2.get(), nullptr, options, kMDPName);
    else if (call_count == 4)
      mdm->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2));
    else if (call_count == kPollsToQuit)
      test_task_runner->PostTask(FROM_HERE, quit_closure);

    // Record increase of 1 GiB of memory at each call.
    *total = static_cast<uint64_t>(call_count) * 1024 * 1024 * 1024;
  };
  EXPECT_CALL(*mdp1, PollFastMemoryTotal(_))
      .Times(testing::AtLeast(kPollsToQuit))
      .WillRepeatedly(Invoke(poll_function1));

  // Depending on the order of PostTask calls the mdp2 might be registered after
  // all polls or in between polls.
  EXPECT_CALL(*mdp2, PollFastMemoryTotal(_))
      .Times(Between(0, kPollsToQuit - 1))
      .WillRepeatedly(Return());

  // A 1ms polling interval keeps the test fast; the peak-detection trigger
  // turns the reported 1 GiB increases into dump requests.
  MemoryDumpScheduler::SetPollingIntervalForTesting(1);
  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(3));

  int last_poll_to_request_dump = -2;
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
      .Times(testing::AtLeast(2))
      .WillRepeatedly(Invoke([&last_poll_to_request_dump, &call_count](
                                 const MemoryDumpRequestArgs& args,
                                 const MemoryDumpCallback& callback) -> void {
        // Minimum number of polls between dumps must be 3 (polling interval is
        // 1ms).
        EXPECT_GE(call_count - last_poll_to_request_dump, 3);
        last_poll_to_request_dump = call_count;
      }));

  run_loop.Run();
  DisableTracing();
  mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp1));
}
831
// If a thread (with a dump provider living on it) is torn down during a dump
// its dump provider should be skipped but the dump itself should succeed.
TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::vector<std::unique_ptr<TestIOThread>> threads;
  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;

  // Two threads, each hosting one bound dump provider.
  for (int i = 0; i < 2; i++) {
    threads.push_back(WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
    RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                         kDefaultOptions);
  }

  int on_memory_dump_call_count = 0;

  // When OnMemoryDump is called on either of the dump providers, it will
  // tear down the thread of the other one.
  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
    // |other_idx| selects the *other* provider: 1 for the first, 0 for the
    // second.
    int other_idx = (mdps.front() == mdp);
    TestIOThread* other_thread = threads[other_idx].get();
    // TestIOThread isn't thread-safe and must be stopped on the |main_runner|.
    scoped_refptr<SequencedTaskRunner> main_runner =
        SequencedTaskRunnerHandle::Get();
    auto on_dump = [other_thread, main_runner, &on_memory_dump_call_count](
                       const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
      // Block until the other thread is fully stopped, so its provider is
      // guaranteed unreachable for the rest of this dump.
      PostTaskAndWait(
          FROM_HERE, main_runner.get(),
          base::Bind(&TestIOThread::Stop, base::Unretained(other_thread)));
      on_memory_dump_call_count++;
      return true;
    };

    // OnMemoryDump is called once for the provider that dumps first, and zero
    // times for the other provider.
    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
        .Times(AtMost(1))
        .WillOnce(Invoke(on_dump));
  }

  last_callback_success_ = false;
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  // Exactly one provider must have dumped, and the overall dump must still
  // report success despite the skipped provider.
  ASSERT_EQ(1, on_memory_dump_call_count);
  ASSERT_TRUE(last_callback_success_);

  DisableTracing();
}
883
884 // Checks that a NACK callback is invoked if RequestGlobalDump() is called when
885 // tracing is not enabled.
TEST_F(MemoryDumpManagerTest,CallbackCalledOnFailure)886 TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
887 InitializeMemoryDumpManager(false /* is_coordinator */);
888 MockMemoryDumpProvider mdp1;
889 RegisterDumpProvider(&mdp1, nullptr);
890
891 EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
892 EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
893
894 last_callback_success_ = true;
895 RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
896 MemoryDumpLevelOfDetail::DETAILED);
897 EXPECT_FALSE(last_callback_success_);
898 }
899
900 // Checks that is the MemoryDumpManager is initialized after tracing already
901 // began, it will still late-join the party (real use case: startup tracing).
TEST_F(MemoryDumpManagerTest,InitializedAfterStartOfTracing)902 TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
903 MockMemoryDumpProvider mdp;
904 RegisterDumpProvider(&mdp, nullptr);
905 EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
906
907 // First check that a RequestGlobalDump() issued before the MemoryDumpManager
908 // initialization gets NACK-ed cleanly.
909 {
910 EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
911 RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
912 MemoryDumpLevelOfDetail::DETAILED);
913 EXPECT_FALSE(last_callback_success_);
914 }
915
916 // Now late-initialize the MemoryDumpManager and check that the
917 // RequestGlobalDump completes successfully.
918 {
919 InitializeMemoryDumpManager(false /* is_coordinator */);
920 EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
921 EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
922 RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
923 MemoryDumpLevelOfDetail::DETAILED);
924 EXPECT_TRUE(last_callback_success_);
925 }
926 DisableTracing();
927 }
928
929 // This test (and the MemoryDumpManagerTestCoordinator below) crystallizes the
930 // expectations of the chrome://tracing UI and chrome telemetry w.r.t. periodic
931 // dumps in memory-infra, handling gracefully the transition between the legacy
932 // and the new-style (JSON-based) TraceConfig.
TEST_F(MemoryDumpManagerTest,TraceConfigExpectations)933 TEST_F(MemoryDumpManagerTest, TraceConfigExpectations) {
934 InitializeMemoryDumpManager(false /* is_coordinator */);
935 MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
936
937 // Don't trigger the default behavior of the mock delegate in this test,
938 // which would short-circuit the dump request to the actual
939 // CreateProcessDump().
940 // We don't want to create any dump in this test, only check whether the dumps
941 // are requested or not.
942 ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());
943
944 // Enabling memory-infra in a non-coordinator process should not trigger any
945 // periodic dumps.
946 EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
947 EXPECT_FALSE(IsPeriodicDumpingEnabled());
948 DisableTracing();
949
950 // Enabling memory-infra with the new (JSON) TraceConfig in a non-coordinator
951 // process with a fully defined trigger config should NOT enable any periodic
952 // dumps.
953 EnableTracingWithTraceConfig(
954 TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(1, 5));
955 EXPECT_FALSE(IsPeriodicDumpingEnabled());
956 DisableTracing();
957 }
958
// Counterpart of TraceConfigExpectations for a coordinator process: here
// periodic dumping is expected, with the exact H/L cadence checked below.
TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
  InitializeMemoryDumpManager(true /* is_coordinator */);
  MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
  // Only observe whether dumps get requested; never create an actual dump.
  ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());

  // Enabling memory-infra with the legacy TraceConfig (category filter) in
  // a coordinator process should enable periodic dumps.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_TRUE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
  // process without specifying any "memory_dump_config" section should enable
  // periodic dumps. This is to preserve the behavior chrome://tracing UI, that
  // is: ticking memory-infra should dump periodically with the default config.
  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_NoTriggers());
  EXPECT_TRUE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
  // process with an empty "memory_dump_config" should NOT enable periodic
  // dumps. This is the way telemetry is supposed to use memory-infra with
  // only explicitly triggered dumps.
  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
  EXPECT_FALSE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
  // process with a fully defined trigger config should cause periodic dumps to
  // be performed in the correct order.
  RunLoop run_loop;
  auto quit_closure = run_loop.QuitClosure();

  const int kHeavyDumpRate = 5;
  const int kLightDumpPeriodMs = 1;
  const int kHeavyDumpPeriodMs = kHeavyDumpRate * kLightDumpPeriodMs;
  // The expected sequence with light=1ms, heavy=5ms is H,L,L,L,L,H,...
  // InSequence makes the EXPECT_CALLs below order-sensitive.
  testing::InSequence sequence;
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
      .Times(kHeavyDumpRate - 1);
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
      .Times(kHeavyDumpRate - 2);
  // The last expected light dump unblocks the test by quitting the RunLoop.
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
      .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
                                      const MemoryDumpCallback& callback) {
        ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
      }));

  // Swallow all the final spurious calls until tracing gets disabled.
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());

  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(
          kLightDumpPeriodMs, kHeavyDumpPeriodMs));
  run_loop.Run();
  DisableTracing();
}
1020
// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
// Flaky on iOS, see crbug.com/706961
#if defined(OS_IOS)
#define MAYBE_DisableTracingWhileDumping DISABLED_DisableTracingWhileDumping
#else
#define MAYBE_DisableTracingWhileDumping DisableTracingWhileDumping
#endif
TEST_F(MemoryDumpManagerTest, MAYBE_DisableTracingWhileDumping) {
  // Signaled by this test only after DisableTracing() has run, to keep the
  // bound provider's OnMemoryDump() blocked until tracing is off.
  base::WaitableEvent tracing_disabled_event(
      WaitableEvent::ResetPolicy::AUTOMATIC,
      WaitableEvent::InitialState::NOT_SIGNALED);
  InitializeMemoryDumpManager(false /* is_coordinator */);

  // Register a bound dump provider.
  std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
  mdp_thread->Start();
  MockMemoryDumpProvider mdp_with_affinity;
  RegisterDumpProvider(&mdp_with_affinity, mdp_thread->task_runner(),
                       kDefaultOptions);

  // Register also an unbound dump provider. Unbound dump providers are always
  // invoked after bound ones.
  MockMemoryDumpProvider unbound_mdp;
  RegisterDumpProvider(&unbound_mdp, nullptr, kDefaultOptions);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp_with_affinity, OnMemoryDump(_, _))
      .Times(1)
      .WillOnce(
          Invoke([&tracing_disabled_event](const MemoryDumpArgs&,
                                           ProcessMemoryDump* pmd) -> bool {
            tracing_disabled_event.Wait();

            // At this point tracing has been disabled and the
            // MemoryDumpManager.dump_thread_ has been shut down.
            return true;
          }));

  // |unbound_mdp| should never be invoked because the thread for unbound dump
  // providers has been shutdown in the meanwhile.
  EXPECT_CALL(unbound_mdp, OnMemoryDump(_, _)).Times(0);

  // Use the raw RequestGlobalDump() (not RequestGlobalDumpAndWait()) so that
  // DisableTracing() can be issued while the dump is still in flight.
  last_callback_success_ = true;
  RunLoop run_loop;
  MemoryDumpCallback callback =
      Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
           ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
  mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
                          MemoryDumpLevelOfDetail::DETAILED, callback);
  DisableTracing();
  tracing_disabled_event.Signal();
  run_loop.Run();

  // The dump callback must still run, reporting failure.
  EXPECT_FALSE(last_callback_success_);
}
1078
1079 // Tests against race conditions that can happen if tracing is disabled before
1080 // the CreateProcessDump() call. Real-world regression: crbug.com/580295 .
TEST_F(MemoryDumpManagerTest,DisableTracingRightBeforeStartOfDump)1081 TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
1082 base::WaitableEvent tracing_disabled_event(
1083 WaitableEvent::ResetPolicy::AUTOMATIC,
1084 WaitableEvent::InitialState::NOT_SIGNALED);
1085 InitializeMemoryDumpManager(false /* is_coordinator */);
1086
1087 std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
1088 mdp_thread->Start();
1089
1090 // Create both same-thread MDP and another MDP with dedicated thread
1091 MockMemoryDumpProvider mdp1;
1092 RegisterDumpProvider(&mdp1, nullptr);
1093 MockMemoryDumpProvider mdp2;
1094 RegisterDumpProvider(&mdp2, mdp_thread->task_runner(), kDefaultOptions);
1095 EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
1096
1097 EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
1098 .WillOnce(Invoke([this](const MemoryDumpRequestArgs& args,
1099 const MemoryDumpCallback& callback) {
1100 DisableTracing();
1101 delegate_->CreateProcessDump(args, callback);
1102 }));
1103
1104 // If tracing is disabled for current session CreateProcessDump() should NOT
1105 // request dumps from providers. Real-world regression: crbug.com/600570 .
1106 EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
1107 EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
1108
1109 last_callback_success_ = true;
1110 RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
1111 MemoryDumpLevelOfDetail::DETAILED);
1112 EXPECT_FALSE(last_callback_success_);
1113 }
1114
// Checks that providers registered with a |target_pid| produce dump events
// attributed to that process in the resulting trace, while all events of one
// global dump share the same dump id.
TEST_F(MemoryDumpManagerTest, DumpOnBehalfOfOtherProcess) {
  using trace_analyzer::Query;

  InitializeMemoryDumpManager(false /* is_coordinator */);

  // Standard provider with default options (create dump for current process).
  MemoryDumpProvider::Options options;
  MockMemoryDumpProvider mdp1;
  RegisterDumpProvider(&mdp1, nullptr, options);

  // Provider with out-of-process dumping.
  MockMemoryDumpProvider mdp2;
  options.target_pid = 123;
  RegisterDumpProvider(&mdp2, nullptr, options);

  // Another provider with out-of-process dumping.
  MockMemoryDumpProvider mdp3;
  options.target_pid = 456;
  RegisterDumpProvider(&mdp3, nullptr, options);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
  EXPECT_CALL(mdp3, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Flush the trace into JSON.
  trace_event::TraceResultBuffer buffer;
  TraceResultBuffer::SimpleOutput trace_output;
  buffer.SetOutputCallback(trace_output.GetCallback());
  RunLoop run_loop;
  buffer.Start();
  trace_event::TraceLog::GetInstance()->Flush(
      Bind(&OnTraceDataCollected, run_loop.QuitClosure(), Unretained(&buffer)));
  run_loop.Run();
  buffer.Finish();

  // Analyze the JSON.
  std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer = WrapUnique(
      trace_analyzer::TraceAnalyzer::Create(trace_output.json_output));
  trace_analyzer::TraceEventVector events;
  analyzer->FindEvents(Query::EventPhaseIs(TRACE_EVENT_PHASE_MEMORY_DUMP),
                       &events);

  // One memory-dump event per provider, each attributed to the pid given in
  // its registration options (the current process for the default options).
  ASSERT_EQ(3u, events.size());
  ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(123)));
  ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(456)));
  ASSERT_EQ(1u, trace_analyzer::CountMatches(
                    events, Query::EventPidIs(GetCurrentProcId())));
  // All three events belong to the same global dump.
  ASSERT_EQ(events[0]->id, events[1]->id);
  ASSERT_EQ(events[0]->id, events[2]->id);
}
1170
1171 // Tests the basics of the UnregisterAndDeleteDumpProviderSoon(): the
1172 // unregistration should actually delete the providers and not leak them.
TEST_F(MemoryDumpManagerTest,UnregisterAndDeleteDumpProviderSoon)1173 TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoon) {
1174 InitializeMemoryDumpManager(false /* is_coordinator */);
1175 static const int kNumProviders = 3;
1176 int dtor_count = 0;
1177 std::vector<std::unique_ptr<MemoryDumpProvider>> mdps;
1178 for (int i = 0; i < kNumProviders; ++i) {
1179 std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
1180 mdp->enable_mock_destructor = true;
1181 EXPECT_CALL(*mdp, Destructor())
1182 .WillOnce(Invoke([&dtor_count]() { dtor_count++; }));
1183 RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);
1184 mdps.push_back(std::move(mdp));
1185 }
1186
1187 while (!mdps.empty()) {
1188 mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdps.back()));
1189 mdps.pop_back();
1190 }
1191
1192 ASSERT_EQ(kNumProviders, dtor_count);
1193 }
1194
// This test checks against races when unregistering an unbound dump provider
// from another thread while dumping. It registers one MDP and, when
// OnMemoryDump() is called, it invokes UnregisterAndDeleteDumpProviderSoon()
// from another thread. The OnMemoryDump() and the dtor call are expected to
// happen on the same thread (the MemoryDumpManager utility thread).
TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
  mdp->enable_mock_destructor = true;
  RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);

  // Records the thread OnMemoryDump() runs on, so the Destructor()
  // expectation below can verify the deletion happens on that same thread.
  base::PlatformThreadRef thread_ref;
  auto self_unregister_from_another_thread = [&mdp, &thread_ref](
                                                 const MemoryDumpArgs&,
                                                 ProcessMemoryDump*) -> bool {
    thread_ref = PlatformThread::CurrentRef();
    TestIOThread thread_for_unregistration(TestIOThread::kAutoStart);
    // Block until the cross-thread unregistration call has been issued. The
    // deletion itself is deferred ("Soon"), hence the thread check in the
    // Destructor() expectation.
    PostTaskAndWait(
        FROM_HERE, thread_for_unregistration.task_runner().get(),
        base::Bind(
            &MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
            base::Unretained(MemoryDumpManager::GetInstance()),
            base::Passed(std::unique_ptr<MemoryDumpProvider>(std::move(mdp)))));
    thread_for_unregistration.Stop();
    return true;
  };
  EXPECT_CALL(*mdp, OnMemoryDump(_, _))
      .Times(1)
      .WillOnce(Invoke(self_unregister_from_another_thread));
  EXPECT_CALL(*mdp, Destructor())
      .Times(1)
      .WillOnce(Invoke([&thread_ref]() {
        EXPECT_EQ(thread_ref, PlatformThread::CurrentRef());
      }));

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  // Two requests: the first triggers the self-unregistration; the second
  // verifies the deleted provider is no longer dumped (OnMemoryDump is
  // expected exactly once overall).
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
  for (int i = 0; i < 2; ++i) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }
  DisableTracing();
}
1237
// Only providers registered under a whitelisted name may contribute to a
// BACKGROUND-level dump.
TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);

  // One provider registered under a non-whitelisted (default) name...
  std::unique_ptr<MockMemoryDumpProvider> unlisted_mdp(
      new MockMemoryDumpProvider);
  RegisterDumpProvider(unlisted_mdp.get(), nullptr);
  // ...and one registered under a whitelisted name.
  std::unique_ptr<MockMemoryDumpProvider> whitelisted_mdp(
      new MockMemoryDumpProvider);
  RegisterDumpProvider(whitelisted_mdp.get(), nullptr, kDefaultOptions,
                       kWhitelistedMDPName);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(*unlisted_mdp, OnMemoryDump(_, _)).Times(0);
  EXPECT_CALL(*whitelisted_mdp, OnMemoryDump(_, _))
      .Times(1)
      .WillOnce(Return(true));

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_FALSE(IsPeriodicDumpingEnabled());
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::BACKGROUND);
  DisableTracing();
}
1257
// With a background-trigger trace config, periodic BACKGROUND dumps must be
// scheduled while explicit LIGHT/DETAILED dump requests get NACK-ed.
TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
  InitializeMemoryDumpManager(true /* is_coordinator */);

  RunLoop run_loop;
  auto quit_closure = run_loop.QuitClosure();

  // Expect at least 6 periodic background dumps; the 6th quits the run loop.
  testing::InSequence sequence;
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
      .Times(5);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
      .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
                                      const MemoryDumpCallback& callback) {
        ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
      }));
  // Swallow any further periodic dumps until tracing gets disabled.
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());

  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
          1 /* period_ms */));

  // Only background mode dumps should be allowed with the trace config.
  last_callback_success_ = false;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::LIGHT);
  EXPECT_FALSE(last_callback_success_);
  last_callback_success_ = false;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  EXPECT_FALSE(last_callback_success_);

  ASSERT_TRUE(IsPeriodicDumpingEnabled());
  run_loop.Run();
  DisableTracing();
}
1292
// Providers on the unregistration blacklist may be unregistered in normally
// unsafe ways (wrong API, wrong thread) without crashing.
TEST_F(MemoryDumpManagerTest, TestBlacklistedUnsafeUnregistration) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;
  RegisterDumpProvider(&mdp, nullptr, kDefaultOptions,
                       "BlacklistTestDumpProvider");
  // Plain unregistration (i.e. not UnregisterAndDeleteDumpProviderSoon())
  // should not crash for a blacklisted provider.
  mdm_->UnregisterDumpProvider(&mdp);

  Thread mdp_thread("test thread");
  mdp_thread.Start();
  RegisterDumpProvider(&mdp, mdp_thread.task_runner(), kDefaultOptions,
                       "BlacklistTestDumpProvider");
  // Nor should unregistering from a thread other than the provider's.
  mdm_->UnregisterDumpProvider(&mdp);
  mdp_thread.Stop();
}
1309
1310 } // namespace trace_event
1311 } // namespace base
1312