// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <algorithm>
#include <utility>

#include "base/atomic_sequence_num.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/debugging_flags.h"
#include "base/debug/stack_trace.h"
#include "base/memory/ptr_util.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif

#if defined(OS_WIN)
#include "base/trace_event/winheap_dump_provider_win.h"
#endif

namespace base {
namespace trace_event {

namespace {

const int kTraceEventNumArgs = 1;
const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};

StaticAtomicSequenceNumber g_next_guid;
MemoryDumpManager* g_instance_for_testing = nullptr;

// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
                      uint64_t dump_guid,
                      bool success) {
  TRACE_EVENT_NESTABLE_ASYNC_END1(
      MemoryDumpManager::kTraceCategory, "GlobalMemoryDump",
      TRACE_ID_MANGLE(dump_guid), "success", success);

  if (!wrapped_callback.is_null()) {
    wrapped_callback.Run(dump_guid, success);
    wrapped_callback.Reset();
  }
}

// Proxy class which wraps a ConvertableToTraceFormat owned by the
// |session_state| into a proxy object that can be added to the trace event log.
// This is to solve the problem that the MemoryDumpSessionState is refcounted
// but the tracing subsystem wants a std::unique_ptr<ConvertableToTraceFormat>.
template <typename T>
struct SessionStateConvertableProxy : public ConvertableToTraceFormat {
  using GetterFunctPtr = T* (MemoryDumpSessionState::*)() const;

  SessionStateConvertableProxy(
      scoped_refptr<MemoryDumpSessionState> session_state,
      GetterFunctPtr getter_function)
      : session_state(session_state), getter_function(getter_function) {}

  void AppendAsTraceFormat(std::string* out) const override {
    return (session_state.get()->*getter_function)()->AppendAsTraceFormat(out);
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    return (session_state.get()->*getter_function)()
        ->EstimateTraceMemoryOverhead(overhead);
  }

  scoped_refptr<MemoryDumpSessionState> session_state;
  GetterFunctPtr const getter_function;
};

}  // namespace

// static
const char* const MemoryDumpManager::kTraceCategory =
    TRACE_DISABLED_BY_DEFAULT("memory-infra");

// static
const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump";

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
    MallocDumpProvider::kAllocatedObjects;
#elif defined(OS_WIN)
    WinHeapDumpProvider::kAllocatedObjects;
#else
    nullptr;
#endif

// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_instance_for_testing)
    return g_instance_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

// static
void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
  g_instance_for_testing = instance;
}

MemoryDumpManager::MemoryDumpManager()
    : delegate_(nullptr),
      is_coordinator_(false),
      memory_tracing_enabled_(0),
      tracing_process_id_(kInvalidTracingProcessId),
      dumper_registrations_ignored_for_testing_(false),
      heap_profiling_enabled_(false) {
  g_next_guid.GetNext();  // Make sure that first guid is not zero.

  // At this point the command line may not be initialized but we try to
  // enable the heap profiler to capture allocations as soon as possible.
  EnableHeapProfilingIfNeeded();
}

MemoryDumpManager::~MemoryDumpManager() {
  TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}

void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
  if (heap_profiling_enabled_)
    return;

  if (!CommandLine::InitializedForCurrentProcess() ||
      !CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableHeapProfiling))
    return;

  std::string profiling_mode = CommandLine::ForCurrentProcess()
      ->GetSwitchValueASCII(switches::kEnableHeapProfiling);
  if (profiling_mode == "") {
    AllocationContextTracker::SetCaptureMode(
        AllocationContextTracker::CaptureMode::PSEUDO_STACK);
  } else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
#if HAVE_TRACE_STACK_FRAME_POINTERS && \
    (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
    // We need frame pointers for native tracing to work, and they are
    // enabled in profiling and debug builds.
    AllocationContextTracker::SetCaptureMode(
        AllocationContextTracker::CaptureMode::NATIVE_STACK);
#else
    CHECK(false) << "'" << profiling_mode << "' mode for "
                 << switches::kEnableHeapProfiling << " flag is not supported "
                 << "for this platform / build type.";
#endif
  } else {
    CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
                 << switches::kEnableHeapProfiling << " flag.";
  }

  for (auto mdp : dump_providers_)
    mdp->dump_provider->OnHeapProfilingEnabled(true);
  heap_profiling_enabled_ = true;
}

void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
                                   bool is_coordinator) {
  {
    AutoLock lock(lock_);
    DCHECK(delegate);
    DCHECK(!delegate_);
    delegate_ = delegate;
    is_coordinator_ = is_coordinator;
    EnableHeapProfilingIfNeeded();
  }

// Enable the core dump providers.
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
  RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif

#if defined(OS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                       nullptr);
#endif

#if defined(OS_WIN)
  RegisterDumpProvider(WinHeapDumpProvider::GetInstance(), "WinHeap", nullptr);
#endif

  // If tracing was enabled before initializing MemoryDumpManager, we missed the
  // OnTraceLogEnabled() event. Synthesize it so we can late-join the party.
  bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
  TRACE_EVENT0(kTraceCategory, "init");  // Add to trace-viewer category list.
  TraceLog::GetInstance()->AddEnabledStateObserver(this);
  if (is_tracing_already_enabled)
    OnTraceLogEnabled();
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  // Set |dumps_on_single_thread_task_runner| to true because all providers
  // without a task runner are run on the dump thread.
  MemoryDumpProvider::Options options;
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  DCHECK(task_runner);
  options.dumps_on_single_thread_task_runner = false;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
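// Illustrative sketch (not part of the original file): a typical provider
// registers itself against the task runner it wants OnMemoryDump() to be
// invoked on. |FooDumpProvider| is hypothetical and only shows the call shape
// of the registration API above:
//
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//       FooDumpProvider::GetInstance(), "Foo",
//       ThreadTaskRunnerHandle::Get());
//
// Passing a null task runner instead makes the provider "unbound", and its
// OnMemoryDump() is then invoked on the dedicated dump thread.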

void MemoryDumpManager::RegisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options) {
  if (dumper_registrations_ignored_for_testing_)
    return;

  bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);
  scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
      new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
                                 whitelisted_for_background_mode);

  {
    AutoLock lock(lock_);
    bool already_registered = !dump_providers_.insert(mdpinfo).second;
    // This actually happens in some tests which don't have a clean tear-down
    // path for RenderThreadImpl::Init().
    if (already_registered)
      return;
  }

  if (heap_profiling_enabled_)
    mdp->OnHeapProfilingEnabled(true);
}

void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}

void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
    std::unique_ptr<MemoryDumpProvider> mdp) {
  UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
}

void MemoryDumpManager::UnregisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    bool take_mdp_ownership_and_delete_async) {
  std::unique_ptr<MemoryDumpProvider> owned_mdp;
  if (take_mdp_ownership_and_delete_async)
    owned_mdp.reset(mdp);

  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if ((*mdp_iter)->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;  // Not registered / already unregistered.

  if (take_mdp_ownership_and_delete_async) {
    // The MDP will be deleted when its MDPInfo struct is, which happens either:
    // - At the end of this function, if no dump is in progress.
    // - In SetupNextMemoryDump() or InvokeOnMemoryDump(), when the MDPInfo is
    //   removed from |pending_dump_providers|.
    DCHECK(!(*mdp_iter)->owned_dump_provider);
    (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
  } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
    // If you hit this DCHECK, your dump provider has a bug.
    // Unregistration of a MemoryDumpProvider is safe only if:
    // - The MDP has specified a sequenced task runner affinity AND the
    //   unregistration happens on the same task runner, so that the MDP cannot
    //   unregister and be in the middle of an OnMemoryDump() at the same time.
    // - The MDP has NOT specified a task runner affinity and its ownership is
    //   transferred via UnregisterAndDeleteDumpProviderSoon().
    // In all the other cases, it is not possible to guarantee that the
    // unregistration will not race with OnMemoryDump() calls.
    // (An illustrative sketch of these two patterns follows this function.)
    DCHECK((*mdp_iter)->task_runner &&
           (*mdp_iter)->task_runner->RunsTasksOnCurrentThread())
        << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
        << "unregister itself in a racy way. Please file a crbug.";
  }

  // The MDPInfo instance can still be referenced by the
  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
  // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
  // to just skip it, without actually invoking the |mdp|, which might be
  // destroyed by the caller soon after this method returns.
  (*mdp_iter)->disabled = true;
  dump_providers_.erase(mdp_iter);
}
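// Illustrative sketch of the two unregistration patterns that the DCHECK in
// UnregisterDumpProviderInternal() above considers safe. |foo_mdp| and
// |owned_foo_mdp| are hypothetical and only show the call shape:
//
//   // (1) Bound provider: unregister on the same task runner it registered
//   //     with, so no OnMemoryDump() can be running concurrently.
//   MemoryDumpManager::GetInstance()->UnregisterDumpProvider(foo_mdp);
//
//   // (2) Unbound provider: hand over ownership and let the manager delete
//   //     it once any in-flight dump no longer references it.
//   MemoryDumpManager::GetInstance()->UnregisterAndDeleteDumpProviderSoon(
//       std::move(owned_foo_mdp));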

void MemoryDumpManager::RequestGlobalDump(
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail,
    const MemoryDumpCallback& callback) {
  // Bail out immediately if tracing is not enabled at all or if the dump mode
  // is not allowed.
  if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
      !IsDumpModeAllowed(level_of_detail)) {
    VLOG(1) << kLogPrefix << " failed because " << kTraceCategory
            << " tracing category is not enabled or the requested dump mode is "
               "not allowed by trace config.";
    if (!callback.is_null())
      callback.Run(0u /* guid */, false /* success */);
    return;
  }

  const uint64_t guid =
      TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());

  // Create an async event to keep track of the global dump evolution.
  // The |wrapped_callback| will generate the ASYNC_END event and then invoke
  // the real |callback| provided by the caller.
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "GlobalMemoryDump",
                                    TRACE_ID_MANGLE(guid));
  MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);

  // Technically there is no need to grab the |lock_| here as the delegate is
  // long-lived and can only be set by Initialize(), which is locked and
  // necessarily happens before memory_tracing_enabled_ == true.
  // Not taking the |lock_|, though, is likely to make TSan barf and, at this
  // point (memory-infra is enabled) we're not in the fast path anymore.
  MemoryDumpManagerDelegate* delegate;
  {
    AutoLock lock(lock_);
    delegate = delegate_;
  }

  // The delegate will coordinate the IPC broadcast and at some point invoke
  // CreateProcessDump() to get a dump for the current process.
  MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
  delegate->RequestGlobalMemoryDump(args, wrapped_callback);
}

void MemoryDumpManager::RequestGlobalDump(
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail) {
  RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
}
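// Illustrative sketch (not part of the original file): how a client might use
// the RequestGlobalDump() overloads above to request an explicit detailed dump
// and observe its completion. |OnDumpDone| is a hypothetical callback matching
// the MemoryDumpCallback signature (dump guid + success flag):
//
//   // void OnDumpDone(uint64_t dump_guid, bool success);
//   MemoryDumpManager::GetInstance()->RequestGlobalDump(
//       MemoryDumpType::EXPLICITLY_TRIGGERED,
//       MemoryDumpLevelOfDetail::DETAILED,
//       Bind(&OnDumpDone));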

void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          const MemoryDumpCallback& callback) {
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
                                    TRACE_ID_MANGLE(args.dump_guid));

  // If the argument filter is enabled then only background mode dumps should
  // be allowed. If the trace config passed for a background tracing session
  // missed the allowed-modes argument, crash here instead of creating
  // unexpected dumps.
  if (TraceLog::GetInstance()
          ->GetCurrentTraceConfig()
          .IsArgumentFilterEnabled()) {
    CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
  }

  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);

    // |dump_thread_| can be nullptr if tracing was disabled before reaching
    // here. SetupNextMemoryDump() is robust enough to tolerate it and will
    // NACK the dump.
    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
        args, dump_providers_, session_state_, callback,
        dump_thread_ ? dump_thread_->task_runner() : nullptr));

    // Safety check to prevent reaching here without calling
    // RequestGlobalDump(), or with disallowed modes. If |session_state_| is
    // null then tracing is disabled.
    CHECK(!session_state_ ||
          session_state_->memory_dump_config().allowed_dump_modes.count(
              args.level_of_detail));
  }

  TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
                         TRACE_ID_MANGLE(args.dump_guid),
                         TRACE_EVENT_FLAG_FLOW_OUT);

  // Start the process dump. This involves task runner hops as specified by the
  // MemoryDumpProvider(s) in RegisterDumpProvider().
  SetupNextMemoryDump(std::move(pmd_async_state));
}

// PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner.
// A PostTask is always required for a generic SequencedTaskRunner to ensure
// that no other task is running on it concurrently. SetupNextMemoryDump() and
// InvokeOnMemoryDump() are called alternately, which linearizes the dump
// provider's OnMemoryDump() invocations.
// At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be
// active at any time for a given PMD, regardless of the status of the |lock_|.
// |lock_| is used in these functions purely to ensure consistency w.r.t.
// (un)registrations of |dump_providers_|.
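//
// Roughly, the resulting control flow for a dump with two providers (one bound
// to its own task runner, one unbound) looks like this:
//
//   CreateProcessDump()
//     -> SetupNextMemoryDump()         [hops to the provider's task runner]
//       -> InvokeOnMemoryDump()        [calls provider A's OnMemoryDump()]
//         -> SetupNextMemoryDump()     [hops to |dump_thread_|]
//           -> InvokeOnMemoryDump()    [calls provider B's OnMemoryDump()]
//             -> SetupNextMemoryDump()
//               -> FinalizeDumpAndAddToTrace()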
void MemoryDumpManager::SetupNextMemoryDump(
    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // |dump_thread_| might be destroyed before getting to this point. That means
  // tracing was disabled right before starting this dump. Either way (tracing
  // is stopped or this was the last hop), create a trace event, add it to the
  // trace and finalize the process dump by invoking the callback.
  if (!pmd_async_state->dump_thread_task_runner.get()) {
    if (pmd_async_state->pending_dump_providers.empty()) {
      VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
              << " before finalizing the dump";
    } else {
      VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
              << " before dumping "
              << pmd_async_state->pending_dump_providers.back().get()->name;
    }
    pmd_async_state->dump_successful = false;
    pmd_async_state->pending_dump_providers.clear();
  }
  if (pmd_async_state->pending_dump_providers.empty())
    return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));

  // Read MemoryDumpProviderInfo thread safety considerations in
  // memory_dump_manager.h when accessing |mdpinfo| fields.
  MemoryDumpProviderInfo* mdpinfo =
      pmd_async_state->pending_dump_providers.back().get();

  // If we are in background tracing, we should invoke only the whitelisted
  // providers. Ignore other providers and continue.
  if (pmd_async_state->req_args.level_of_detail ==
          MemoryDumpLevelOfDetail::BACKGROUND &&
      !mdpinfo->whitelisted_for_background_mode) {
    pmd_async_state->pending_dump_providers.pop_back();
    return SetupNextMemoryDump(std::move(pmd_async_state));
  }

  // If the dump provider did not specify a task runner affinity, dump on
  // |dump_thread_|, which was already checked above for presence.
  SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
  if (!task_runner) {
    DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
    task_runner = pmd_async_state->dump_thread_task_runner.get();
    DCHECK(task_runner);
  }

  if (mdpinfo->options.dumps_on_single_thread_task_runner &&
      task_runner->RunsTasksOnCurrentThread()) {
    // If |dumps_on_single_thread_task_runner| is true then no PostTask is
    // required if we are on the right thread.
    return InvokeOnMemoryDump(pmd_async_state.release());
  }

  bool did_post_task = task_runner->PostTask(
      FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this),
                      Unretained(pmd_async_state.get())));

  if (did_post_task) {
    // Ownership is transferred to InvokeOnMemoryDump().
    ignore_result(pmd_async_state.release());
    return;
  }

  // PostTask usually fails only if the process or thread is being shut down,
  // so the dump provider is disabled here. Unbound dump providers, however,
  // are not disabled: the utility thread is normally shut down when the trace
  // is disabled, and getting here in that case is expected.
  if (mdpinfo->task_runner) {
    LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
               << "\". Failed to post task on the task runner provided.";

    // A locked access is required to R/W |disabled| (for the
    // UnregisterAndDeleteDumpProviderSoon() case).
    AutoLock lock(lock_);
    mdpinfo->disabled = true;
  }

  // PostTask failed. Ignore the dump provider and continue.
  pmd_async_state->pending_dump_providers.pop_back();
  SetupNextMemoryDump(std::move(pmd_async_state));
}

// This function is called on the right task runner for the current MDP. It is
// either the task runner specified by the MDP or |dump_thread_task_runner| if
// the MDP did not specify a task runner. It invokes the dump provider's
// OnMemoryDump() (unless disabled).
void MemoryDumpManager::InvokeOnMemoryDump(
    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
  // it isn't is the corner-case logic around |did_post_task| above, which
  // needs to take back ownership of |pmd_async_state| when the PostTask()
  // fails. Unfortunately, PostTask() destroys its scoped_ptr arguments upon
  // failure to prevent accidental leaks. Using a scoped_ptr would prevent us
  // from skipping the hop and moving on. Hence the manual naked -> scoped ptr
  // juggling.
  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
  owned_pmd_async_state = nullptr;

  // Read MemoryDumpProviderInfo thread safety considerations in
  // memory_dump_manager.h when accessing |mdpinfo| fields.
  MemoryDumpProviderInfo* mdpinfo =
      pmd_async_state->pending_dump_providers.back().get();

  DCHECK(!mdpinfo->task_runner ||
         mdpinfo->task_runner->RunsTasksOnCurrentThread());

  bool should_dump;
  {
    // A locked access is required to R/W |disabled| (for the
    // UnregisterAndDeleteDumpProviderSoon() case).
    AutoLock lock(lock_);

    // Unregister the dump provider if it failed too many times consecutively.
    if (!mdpinfo->disabled &&
        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
      mdpinfo->disabled = true;
      LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
                 << "\". Dump failed multiple times consecutively.";
    }
    should_dump = !mdpinfo->disabled;
  }  // AutoLock lock(lock_);

  if (should_dump) {
    // Invoke the dump provider.
    TRACE_EVENT_WITH_FLOW1(kTraceCategory,
                           "MemoryDumpManager::InvokeOnMemoryDump",
                           TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
                           TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
                           "dump_provider.name", mdpinfo->name);

    // Pid of the target process being dumped. Often kNullProcessId (= current
    // process), non-zero when the coordinator process creates dumps on behalf
    // of child processes (see crbug.com/461788).
    ProcessId target_pid = mdpinfo->options.target_pid;
    MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
    ProcessMemoryDump* pmd =
        pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid,
                                                                  args);
    bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
    mdpinfo->consecutive_failures =
        dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
  }

  pmd_async_state->pending_dump_providers.pop_back();
  SetupNextMemoryDump(std::move(pmd_async_state));
}

// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(pmd_async_state->pending_dump_providers.empty());
  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
  if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
        pmd_async_state->callback_task_runner;
    callback_task_runner->PostTask(
        FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
                        Passed(&pmd_async_state)));
    return;
  }

  TRACE_EVENT_WITH_FLOW0(kTraceCategory,
                         "MemoryDumpManager::FinalizeDumpAndAddToTrace",
                         TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN);

  for (const auto& kv : pmd_async_state->process_dumps) {
    ProcessId pid = kv.first;  // kNullProcessId for the current process.
    ProcessMemoryDump* process_memory_dump = kv.second.get();
    std::unique_ptr<TracedValue> traced_value(new TracedValue);
    process_memory_dump->AsValueInto(traced_value.get());
    traced_value->SetString("level_of_detail",
                            MemoryDumpLevelOfDetailToString(
                                pmd_async_state->req_args.level_of_detail));
    const char* const event_name =
        MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);

    std::unique_ptr<ConvertableToTraceFormat> event_value(
        std::move(traced_value));
    TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
        TRACE_EVENT_PHASE_MEMORY_DUMP,
        TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
        trace_event_internal::kGlobalScope, dump_guid, pid,
        kTraceEventNumArgs, kTraceEventArgNames,
        kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
        TRACE_EVENT_FLAG_HAS_ID);
  }

  bool tracing_still_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled);
  if (!tracing_still_enabled) {
    pmd_async_state->dump_successful = false;
    VLOG(1) << kLogPrefix << " failed because tracing was disabled before"
            << " the dump was completed";
  }

  if (!pmd_async_state->callback.is_null()) {
    pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
    pmd_async_state->callback.Reset();
  }

  TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                  TRACE_ID_MANGLE(dump_guid));
}

void MemoryDumpManager::OnTraceLogEnabled() {
  bool enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
  if (!enabled)
    return;

  // Initialize the TraceLog for the current thread. This is to avoid the
  // TraceLog memory dump provider being registered lazily in the PostTask()
  // below while the |lock_| is held.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // Spin up the thread used to invoke unbound dump providers.
  std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
  if (!dump_thread->Start()) {
    LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
    return;
  }

  const TraceConfig trace_config =
      TraceLog::GetInstance()->GetCurrentTraceConfig();
  scoped_refptr<MemoryDumpSessionState> session_state =
      new MemoryDumpSessionState;
  session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
  if (heap_profiling_enabled_) {
    // If heap profiling is enabled, the stack frame deduplicator and type name
    // deduplicator will be in use. Add metadata events to write the frames
    // and type IDs.
    session_state->SetStackFrameDeduplicator(
        WrapUnique(new StackFrameDeduplicator));

    session_state->SetTypeNameDeduplicator(
        WrapUnique(new TypeNameDeduplicator));

    TRACE_EVENT_API_ADD_METADATA_EVENT(
        TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
        "stackFrames",
        WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
            session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));

    TRACE_EVENT_API_ADD_METADATA_EVENT(
        TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
        "typeNames",
        WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
            session_state, &MemoryDumpSessionState::type_name_deduplicator)));
  }

  {
    AutoLock lock(lock_);

    DCHECK(delegate_);  // At this point we must have a delegate.
    session_state_ = session_state;

    DCHECK(!dump_thread_);
    dump_thread_ = std::move(dump_thread);

    subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);

    // TODO(primiano): This is a temporary hack to disable periodic memory dumps
    // when running memory benchmarks until telemetry uses TraceConfig to
    // enable/disable periodic dumps. See crbug.com/529184 .
    if (!is_coordinator_ ||
        CommandLine::ForCurrentProcess()->HasSwitch(
            "enable-memory-benchmarking")) {
      return;
    }
  }

  // Enable periodic dumps if necessary.
  periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
}

void MemoryDumpManager::OnTraceLogDisabled() {
  // There might be a memory dump in progress while this happens. Therefore,
  // ensure that the MDM state which depends on the tracing enabled / disabled
  // state is always accessed by the dumping methods holding the |lock_|.
  subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
  std::unique_ptr<Thread> dump_thread;
  {
    AutoLock lock(lock_);
    dump_thread = std::move(dump_thread_);
    session_state_ = nullptr;
  }

  // Thread stops are blocking and must be performed outside of the |lock_|,
  // or they will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
  periodic_dump_timer_.Stop();
  if (dump_thread)
    dump_thread->Stop();
}

bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
  AutoLock lock(lock_);
  if (!session_state_)
    return false;
  return session_state_->memory_dump_config().allowed_dump_modes.count(
             dump_mode) != 0;
}

uint64_t MemoryDumpManager::GetTracingProcessId() const {
  return delegate_->GetTracingProcessId();
}

MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
    MemoryDumpProvider* dump_provider,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options,
    bool whitelisted_for_background_mode)
    : dump_provider(dump_provider),
      name(name),
      task_runner(std::move(task_runner)),
      options(options),
      consecutive_failures(0),
      disabled(false),
      whitelisted_for_background_mode(whitelisted_for_background_mode) {}

MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}

bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
    const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
    const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
  if (!a || !b)
    return a.get() < b.get();
  // Ensure that unbound providers (task_runner == nullptr) always run last.
  // Rationale: some unbound dump providers are known to be slow; keep them
  // last to avoid skewing the timings of the other dump providers.
  return std::tie(a->task_runner, a->dump_provider) >
         std::tie(b->task_runner, b->dump_provider);
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
    scoped_refptr<MemoryDumpSessionState> session_state,
    MemoryDumpCallback callback,
    scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner)
    : req_args(req_args),
      session_state(std::move(session_state)),
      callback(callback),
      dump_successful(true),
      callback_task_runner(ThreadTaskRunnerHandle::Get()),
      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
  pending_dump_providers.reserve(dump_providers.size());
  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
}

ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
    GetOrCreateMemoryDumpContainerForProcess(ProcessId pid,
                                             const MemoryDumpArgs& dump_args) {
  auto iter = process_dumps.find(pid);
  if (iter == process_dumps.end()) {
    std::unique_ptr<ProcessMemoryDump> new_pmd(
        new ProcessMemoryDump(session_state, dump_args));
    iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
  }
  return iter->second.get();
}

MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}

MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
  Stop();
}

void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
    const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
  if (triggers_list.empty())
    return;

  // At the moment the periodic support is limited to at most one periodic
  // trigger per dump mode. All intervals should be an integer multiple of the
  // smallest interval specified.
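  //
  // Worked example (illustrative values, not a default configuration): with a
  // LIGHT trigger every 250 ms and a DETAILED trigger every 2000 ms, the timer
  // below fires every 250 ms (the smallest interval), light_dump_rate_ becomes
  // 1 and heavy_dump_rate_ becomes 8, so every 8th tick is upgraded to a
  // DETAILED dump while the others stay LIGHT.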
  periodic_dumps_count_ = 0;
  uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
  uint32_t light_dump_period_ms = 0;
  uint32_t heavy_dump_period_ms = 0;
  DCHECK_LE(triggers_list.size(), 3u);
  auto* mdm = MemoryDumpManager::GetInstance();
  for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
    DCHECK_NE(0u, config.periodic_interval_ms);
    switch (config.level_of_detail) {
      case MemoryDumpLevelOfDetail::BACKGROUND:
        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
        break;
      case MemoryDumpLevelOfDetail::LIGHT:
        DCHECK_EQ(0u, light_dump_period_ms);
        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
        light_dump_period_ms = config.periodic_interval_ms;
        break;
      case MemoryDumpLevelOfDetail::DETAILED:
        DCHECK_EQ(0u, heavy_dump_period_ms);
        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
        heavy_dump_period_ms = config.periodic_interval_ms;
        break;
    }
    min_timer_period_ms =
        std::min(min_timer_period_ms, config.periodic_interval_ms);
  }

  DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
  light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
  DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
  heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;

  timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
               base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
                          base::Unretained(this)));
}

void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
  if (IsRunning()) {
    timer_.Stop();
  }
}

bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
  return timer_.IsRunning();
}

void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
  if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
  if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
  ++periodic_dumps_count_;

  MemoryDumpManager::GetInstance()->RequestGlobalDump(
      MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
}

}  // namespace trace_event
}  // namespace base