// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <inttypes.h>
#include <stdio.h>

#include <algorithm>
#include <memory>
#include <utility>

#include "base/allocator/buildflags.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
#include "base/debug/thread_heap_usage_tracker.h"
#include "base/memory/ptr_util.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/string_util.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"

#if BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif

#endif  // defined(OS_ANDROID)

namespace base {
namespace trace_event {

namespace {

MemoryDumpManager* g_memory_dump_manager_for_testing = nullptr;
// Temporary trampoline function (until the scheduler is moved out of this
// file) to adapt the |request_dump_function| passed to Initialize() to the
// callback signature expected by MemoryDumpScheduler.
// TODO(primiano): remove this.
void DoGlobalDumpWithoutCallback(
    MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail) {
  global_dump_fn.Run(dump_type, level_of_detail);
}

}  // namespace

// static
const char* const MemoryDumpManager::kTraceCategory =
    TRACE_DISABLED_BY_DEFAULT("memory-infra");

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
    MallocDumpProvider::kAllocatedObjects;
#else
    nullptr;
#endif

// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_memory_dump_manager_for_testing)
    return g_memory_dump_manager_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}
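
// Illustrative only (not part of the original file): callers obtain the
// leaky singleton via GetInstance() and interact with it directly, e.g.
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(...);
// See the fuller provider sketch after
// RegisterDumpProviderWithSequencedTaskRunner() below.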

// static
std::unique_ptr<MemoryDumpManager>
MemoryDumpManager::CreateInstanceForTesting() {
  DCHECK(!g_memory_dump_manager_for_testing);
  std::unique_ptr<MemoryDumpManager> instance(new MemoryDumpManager());
  g_memory_dump_manager_for_testing = instance.get();
  return instance;
}

MemoryDumpManager::MemoryDumpManager()
    : is_coordinator_(false),
      tracing_process_id_(kInvalidTracingProcessId),
      dumper_registrations_ignored_for_testing_(false) {}

MemoryDumpManager::~MemoryDumpManager() {
  Thread* dump_thread = nullptr;
  {
    AutoLock lock(lock_);
    if (dump_thread_) {
      dump_thread = dump_thread_.get();
    }
  }
  if (dump_thread) {
    dump_thread->Stop();
  }
  AutoLock lock(lock_);
  dump_thread_.reset();
  g_memory_dump_manager_for_testing = nullptr;
}

void MemoryDumpManager::Initialize(
    RequestGlobalDumpFunction request_dump_function,
    bool is_coordinator) {
  {
    AutoLock lock(lock_);
    DCHECK(!request_dump_function.is_null());
    DCHECK(!can_request_global_dumps());
    request_dump_function_ = request_dump_function;
    is_coordinator_ = is_coordinator;
  }

// Enable the core dump providers.
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
  RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif

#if defined(OS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                       nullptr);
#endif

  TRACE_EVENT_WARMUP_CATEGORY(kTraceCategory);
}
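
// Illustrative only: each process calls Initialize() once at startup with a
// function that forwards global-dump requests to the embedder's coordination
// layer. The forwarding function named below is hypothetical.
//
//   MemoryDumpManager::GetInstance()->Initialize(
//       BindRepeating(&ForwardGlobalDumpRequestToCoordinator),
//       /* is_coordinator */ true);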

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  // Set |dumps_on_single_thread_task_runner| to true because all providers
  // without a task runner are run on the dump thread.
  MemoryDumpProvider::Options options;
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  DCHECK(task_runner);
  options.dumps_on_single_thread_task_runner = false;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
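
// A minimal sketch (not part of the original file) of what a registered
// provider looks like. FooDumpProvider and the "foo/main" dump name are
// hypothetical; the MemoryDumpProvider interface and the ProcessMemoryDump
// calls below match their declarations in the headers included above.
//
//   class FooDumpProvider : public MemoryDumpProvider {
//    public:
//     bool OnMemoryDump(const MemoryDumpArgs& args,
//                       ProcessMemoryDump* pmd) override {
//       MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("foo/main");
//       dump->AddScalar(MemoryAllocatorDump::kNameSize,
//                       MemoryAllocatorDump::kUnitsBytes, cache_size_bytes_);
//       return true;  // Returning false counts as a consecutive failure.
//     }
//   };
//
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//       &foo_provider, "FooDumpProvider", ThreadTaskRunnerHandle::Get());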

void MemoryDumpManager::RegisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options) {
  if (dumper_registrations_ignored_for_testing_)
    return;

  // Only a handful of MDPs are required to compute the memory metrics. These
  // have small enough performance overhead that it is reasonable to run them
  // in the background while the user is doing other things. Those MDPs are
  // 'whitelisted for background mode'.
  bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);

  scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
      new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
                                 whitelisted_for_background_mode);

  {
    AutoLock lock(lock_);
    bool already_registered = !dump_providers_.insert(mdpinfo).second;
    // This actually happens in some tests which don't have a clean tear-down
    // path for RenderThreadImpl::Init().
    if (already_registered)
      return;
  }
}

void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}

void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
    std::unique_ptr<MemoryDumpProvider> mdp) {
  UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
}

void MemoryDumpManager::UnregisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    bool take_mdp_ownership_and_delete_async) {
  std::unique_ptr<MemoryDumpProvider> owned_mdp;
  if (take_mdp_ownership_and_delete_async)
    owned_mdp.reset(mdp);

  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if ((*mdp_iter)->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;  // Not registered / already unregistered.

  if (take_mdp_ownership_and_delete_async) {
    // The MDP will be deleted when the MDPInfo struct is, that is either:
    // - At the end of this function, if no dump is in progress.
    // - In ContinueAsyncProcessDump() when MDPInfo is removed from
    //   |pending_dump_providers|.
    DCHECK(!(*mdp_iter)->owned_dump_provider);
    (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
  } else {
    // If you hit this DCHECK, your dump provider has a bug.
    // Unregistration of a MemoryDumpProvider is safe only if:
    // - The MDP has specified a sequenced task runner affinity AND the
    //   unregistration happens on the same task runner, so that the MDP cannot
    //   unregister while it is in the middle of an OnMemoryDump() call.
    // - The MDP has NOT specified a task runner affinity and its ownership is
    //   transferred via UnregisterAndDeleteDumpProviderSoon().
    // In all other cases, it is not possible to guarantee that the
    // unregistration will not race with OnMemoryDump() calls.
    DCHECK((*mdp_iter)->task_runner &&
           (*mdp_iter)->task_runner->RunsTasksInCurrentSequence())
        << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
        << "unregister itself in a racy way. Please file a crbug.";
  }

  // The MDPInfo instance can still be referenced by the
  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
  // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
  // to just skip it, without actually invoking the |mdp|, which might be
  // destroyed by the caller soon after this method returns.
  (*mdp_iter)->disabled = true;
  dump_providers_.erase(mdp_iter);
}
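
// Illustrative only (not in the original file): the two unregistration
// patterns that satisfy the DCHECK above. Names are hypothetical.
//
//   // (a) Unregister on the same sequence the provider was bound to:
//   bound_task_runner->PostTask(
//       FROM_HERE, BindOnce([](MemoryDumpProvider* mdp) {
//         MemoryDumpManager::GetInstance()->UnregisterDumpProvider(mdp);
//       }, provider));
//
//   // (b) Hand ownership to the MDM for an unbound provider:
//   MemoryDumpManager::GetInstance()->UnregisterAndDeleteDumpProviderSoon(
//       std::move(owned_provider));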

bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
    MemoryDumpProvider* provider) {
  AutoLock lock(lock_);

  for (const auto& info : dump_providers_) {
    if (info->dump_provider == provider)
      return true;
  }
  return false;
}

scoped_refptr<base::SequencedTaskRunner>
MemoryDumpManager::GetOrCreateBgTaskRunnerLocked() {
  lock_.AssertAcquired();

  if (dump_thread_)
    return dump_thread_->task_runner();

  dump_thread_ = std::make_unique<Thread>("MemoryInfra");
  bool started = dump_thread_->Start();
  CHECK(started);

  return dump_thread_->task_runner();
}

void MemoryDumpManager::CreateProcessDump(
    const MemoryDumpRequestArgs& args,
    const ProcessMemoryDumpCallback& callback) {
  char guid_str[20];
  sprintf(guid_str, "0x%" PRIx64, args.dump_guid);
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
                                    TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
                                    TRACE_STR_COPY(guid_str));

  // If the argument filter is enabled, only background-mode dumps should be
  // allowed. If the trace config passed for a background tracing session is
  // missing the allowed-modes argument, crash here instead of creating
  // unexpected dumps.
  if (TraceLog::GetInstance()
          ->GetCurrentTraceConfig()
          .IsArgumentFilterEnabled()) {
    CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
  }

  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);

    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
        args, dump_providers_, callback, GetOrCreateBgTaskRunnerLocked()));
  }

  // Start the process dump. This involves task runner hops as specified by the
  // MemoryDumpProvider(s) in RegisterDumpProvider().
  ContinueAsyncProcessDump(pmd_async_state.release());
}

// Invokes OnMemoryDump() on all MDPs that are next in the pending list and run
// on the current sequenced task runner. If the next MDP does not run on the
// current sequenced task runner, switches to that task runner and continues
// there. All OnMemoryDump() invocations are linearized. |lock_| is used in
// these functions purely to ensure consistency w.r.t. (un)registrations of
// |dump_providers_|.
void MemoryDumpManager::ContinueAsyncProcessDump(
    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // In theory |owned_pmd_async_state| should be a unique_ptr. The only reason
  // why it isn't is the corner-case logic around |did_post_task| below, which
  // needs to take back ownership of the |pmd_async_state| when the PostTask()
  // fails. Unfortunately, PostTask() destroys its unique_ptr arguments upon
  // failure to prevent accidental leaks, so a unique_ptr would prevent us from
  // skipping the hop and moving on. Hence the manual naked -> unique ptr
  // juggling.
  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
  owned_pmd_async_state = nullptr;

  while (!pmd_async_state->pending_dump_providers.empty()) {
    // Read MemoryDumpProviderInfo thread safety considerations in
    // memory_dump_manager.h when accessing |mdpinfo| fields.
    MemoryDumpProviderInfo* mdpinfo =
        pmd_async_state->pending_dump_providers.back().get();

    // If we are in background mode, we should invoke only the whitelisted
    // providers. Ignore other providers and continue.
    if (pmd_async_state->req_args.level_of_detail ==
            MemoryDumpLevelOfDetail::BACKGROUND &&
        !mdpinfo->whitelisted_for_background_mode) {
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    // If the dump provider did not specify a task runner affinity, dump on
    // |dump_thread_|.
    scoped_refptr<SequencedTaskRunner> task_runner = mdpinfo->task_runner;
    if (!task_runner) {
      DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
      task_runner = pmd_async_state->dump_thread_task_runner;
      DCHECK(task_runner);
    }

    // If |RunsTasksInCurrentSequence()| is true then no PostTask is
    // required since we are on the right SequencedTaskRunner.
    if (task_runner->RunsTasksInCurrentSequence()) {
      InvokeOnMemoryDump(mdpinfo, pmd_async_state->process_memory_dump.get());
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    bool did_post_task = task_runner->PostTask(
        FROM_HERE,
        BindOnce(&MemoryDumpManager::ContinueAsyncProcessDump, Unretained(this),
                 Unretained(pmd_async_state.get())));

    if (did_post_task) {
      // Ownership is transferred to the posted task.
      ignore_result(pmd_async_state.release());
      return;
    }

    // PostTask usually fails only if the process or thread is shutting down,
    // so the dump provider is disabled here. However, don't disable unbound
    // dump providers, since the |dump_thread_| is controlled by the MDM.
    if (mdpinfo->task_runner) {
      // A locked access is required to R/W |disabled| (for the
      // UnregisterAndDeleteDumpProviderSoon() case).
      AutoLock lock(lock_);
      mdpinfo->disabled = true;
    }

    // PostTask failed. Ignore the dump provider and continue.
    pmd_async_state->pending_dump_providers.pop_back();
  }

  FinishAsyncProcessDump(std::move(pmd_async_state));
}
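
// Illustrative walk-through (not in the original file) of the loop above,
// assuming three pending providers (reverse-ordered, popped from the back):
//
//   pending = [MDP_c (unbound), MDP_b (bound to TR2), MDP_a (bound to TR1)]
//   On TR1: MDP_a runs in the current sequence -> dump inline, pop.
//           MDP_b needs TR2 -> PostTask(TR2, ContinueAsyncProcessDump),
//           return (ownership moves to the posted task).
//   On TR2: MDP_b -> dump inline, pop.
//           MDP_c is unbound -> hop to |dump_thread_|.
//   On the dump thread: MDP_c -> dump inline, pop; pending is empty ->
//           FinishAsyncProcessDump().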

// This function is called on the right task runner for the current MDP. It is
// either the task runner specified by the MDP or |dump_thread_task_runner| if
// the MDP did not specify a task runner. Invokes the dump provider's
// OnMemoryDump() (unless disabled).
void MemoryDumpManager::InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
                                           ProcessMemoryDump* pmd) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(!mdpinfo->task_runner ||
         mdpinfo->task_runner->RunsTasksInCurrentSequence());

  TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
               "dump_provider.name", mdpinfo->name);

  // Do not add any other TRACE_EVENT macro (or function that might have them)
  // below this point. Under some rare circumstances, they can re-initialize
  // and invalidate the current ThreadLocalEventBuffer MDP, making the
  // |disabled| check below susceptible to TOCTTOU bugs
  // (https://crbug.com/763365).

  bool is_thread_bound;
  {
    // A locked access is required to R/W |disabled| (for the
    // UnregisterAndDeleteDumpProviderSoon() case).
    AutoLock lock(lock_);

    // Unregister the dump provider if it failed too many times consecutively.
    if (!mdpinfo->disabled &&
        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
      mdpinfo->disabled = true;
      DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
                  << "\". Dump failed multiple times consecutively.";
    }
    if (mdpinfo->disabled)
      return;

    is_thread_bound = mdpinfo->task_runner != nullptr;
  }  // AutoLock lock(lock_);
443   // Invoke the dump provider.
444 
445   // A stack allocated string with dump provider name is useful to debug
446   // crashes while invoking dump after a |dump_provider| is not unregistered
447   // in safe way.
448   char provider_name_for_debugging[16];
449   strncpy(provider_name_for_debugging, mdpinfo->name,
450           sizeof(provider_name_for_debugging) - 1);
451   provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
452   base::debug::Alias(provider_name_for_debugging);
453 
454   ANNOTATE_BENIGN_RACE(&mdpinfo->disabled, "best-effort race detection");
455   CHECK(!is_thread_bound ||
456         !*(static_cast<volatile bool*>(&mdpinfo->disabled)));
457   bool dump_successful =
458       mdpinfo->dump_provider->OnMemoryDump(pmd->dump_args(), pmd);
459   mdpinfo->consecutive_failures =
460       dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
461 }

void MemoryDumpManager::FinishAsyncProcessDump(
    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(pmd_async_state->pending_dump_providers.empty());
  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
  if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
        pmd_async_state->callback_task_runner;
    callback_task_runner->PostTask(
        FROM_HERE, BindOnce(&MemoryDumpManager::FinishAsyncProcessDump,
                            Unretained(this), std::move(pmd_async_state)));
    return;
  }

  TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinishAsyncProcessDump");

  if (!pmd_async_state->callback.is_null()) {
    pmd_async_state->callback.Run(
        true /* success */, dump_guid,
        std::move(pmd_async_state->process_memory_dump));
    pmd_async_state->callback.Reset();
  }

  TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                  TRACE_ID_LOCAL(dump_guid));
}

void MemoryDumpManager::SetupForTracing(
    const TraceConfig::MemoryDumpConfig& memory_dump_config) {
  AutoLock lock(lock_);

  // At this point we must have the ability to request global dumps.
  DCHECK(can_request_global_dumps());

  MemoryDumpScheduler::Config periodic_config;
  for (const auto& trigger : memory_dump_config.triggers) {
    if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
      if (periodic_config.triggers.empty()) {
        periodic_config.callback =
            BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
                          MemoryDumpType::PERIODIC_INTERVAL);
      }
      periodic_config.triggers.push_back(
          {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
    }
  }

  // Only the coordinator process triggers periodic memory dumps.
  if (is_coordinator_ && !periodic_config.triggers.empty()) {
    MemoryDumpScheduler::GetInstance()->Start(periodic_config,
                                              GetOrCreateBgTaskRunnerLocked());
  }
}
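
// For reference (illustrative, not part of the original file): periodic
// triggers typically originate from a trace config fragment such as
//
//   "memory_dump_config": {
//     "triggers": [
//       { "mode": "light", "periodic_interval_ms": 50 },
//       { "mode": "detailed", "periodic_interval_ms": 2000 }
//     ]
//   }
//
// which TraceConfig parses into the |memory_dump_config.triggers| consumed
// above.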

void MemoryDumpManager::TeardownForTracing() {
  // There might be a memory dump in progress while this happens. Therefore,
  // ensure that the MDM state which depends on the tracing enabled / disabled
  // state is always accessed by the dumping methods holding the |lock_|.
  AutoLock lock(lock_);

  MemoryDumpScheduler::GetInstance()->Stop();
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
    ProcessMemoryDumpCallback callback,
    scoped_refptr<SequencedTaskRunner> dump_thread_task_runner)
    : req_args(req_args),
      callback(callback),
      callback_task_runner(ThreadTaskRunnerHandle::Get()),
      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
  pending_dump_providers.reserve(dump_providers.size());
  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
  MemoryDumpArgs args = {req_args.level_of_detail, req_args.dump_guid};
  process_memory_dump = std::make_unique<ProcessMemoryDump>(args);
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() =
    default;

}  // namespace trace_event
}  // namespace base