// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <inttypes.h>
#include <stdio.h>

#include <algorithm>
#include <array>
#include <memory>
#include <tuple>
#include <utility>

#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_util.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"
#include "partition_alloc/buildflags.h"
#include "third_party/abseil-cpp/absl/base/dynamic_annotations.h"

#if BUILDFLAG(IS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"

#if BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif

#endif  // BUILDFLAG(IS_ANDROID)

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/trace_event/address_space_dump_provider.h"
#endif

namespace base {
namespace trace_event {

namespace {

MemoryDumpManager* g_memory_dump_manager_for_testing = nullptr;

// Temporary trampoline (until the scheduler is moved out of this file) that
// adapts the |request_dump_function| passed to Initialize() to the callback
// signature expected by MemoryDumpScheduler.
// TODO(primiano): remove this.
void DoGlobalDumpWithoutCallback(
    MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail) {
  global_dump_fn.Run(dump_type, level_of_detail);
}

}  // namespace

// static
constexpr const char* MemoryDumpManager::kTraceCategory;

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
    MallocDumpProvider::kAllocatedObjects;
#else
    nullptr;
#endif

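// Returns the testing instance if CreateInstanceForTesting() has been called,
// otherwise the lazily-created, leaky process-wide singleton.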
// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_memory_dump_manager_for_testing)
    return g_memory_dump_manager_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

// static
std::unique_ptr<MemoryDumpManager>
MemoryDumpManager::CreateInstanceForTesting() {
  DCHECK(!g_memory_dump_manager_for_testing);
  std::unique_ptr<MemoryDumpManager> instance(new MemoryDumpManager());
  g_memory_dump_manager_for_testing = instance.get();
  return instance;
}

MemoryDumpManager::MemoryDumpManager() = default;

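// Note: the dump thread is stopped (joined) outside of |lock_|; joining while
// holding the lock could deadlock with in-flight dump tasks that themselves
// need to acquire |lock_|.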
MemoryDumpManager::~MemoryDumpManager() {
  Thread* dump_thread = nullptr;
  {
    AutoLock lock(lock_);
    if (dump_thread_) {
      dump_thread = dump_thread_.get();
    }
  }
  if (dump_thread) {
    dump_thread->Stop();
  }
  AutoLock lock(lock_);
  dump_thread_.reset();
  g_memory_dump_manager_for_testing = nullptr;
}

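// One-time setup: stores the function used to request global (all-process)
// dumps and registers the dump providers that are built into every process.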
void MemoryDumpManager::Initialize(
    RequestGlobalDumpFunction request_dump_function,
    bool is_coordinator) {
  {
    AutoLock lock(lock_);
    DCHECK(!request_dump_function.is_null());
    DCHECK(!can_request_global_dumps());
    request_dump_function_ = request_dump_function;
    is_coordinator_ = is_coordinator;
  }

  // Enable the core dump providers.
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
  RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  RegisterDumpProvider(AddressSpaceDumpProvider::GetInstance(),
                       "PartitionAlloc.AddressSpace", nullptr);
#endif

#if BUILDFLAG(IS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                       nullptr);
#endif
}

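// Registers |mdp| so that it is invoked on |task_runner| (or on the internal
// dump thread when |task_runner| is null) during each process dump. A minimal
// usage sketch, with a hypothetical FooDumpProvider standing in for a real,
// long-lived provider:
//
//   class FooDumpProvider : public MemoryDumpProvider {
//     bool OnMemoryDump(const MemoryDumpArgs& args,
//                       ProcessMemoryDump* pmd) override {
//       pmd->CreateAllocatorDump("foo")->AddScalar(
//           MemoryAllocatorDump::kNameSize,
//           MemoryAllocatorDump::kUnitsBytes, 42);
//       return true;
//     }
//   };
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//       &foo_provider, "Foo", nullptr);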
void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  // Set |dumps_on_single_thread_task_runner| to true because all providers
  // without a task runner are run on the dump thread.
  MemoryDumpProvider::Options options;
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  DCHECK(task_runner);
  options.dumps_on_single_thread_task_runner = false;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options) {
  if (dumper_registrations_ignored_for_testing_)
    return;

  // Only a handful of MDPs are required to compute the memory metrics. These
  // have small enough performance overhead that it is reasonable to run them
  // in the background while the user is doing other things. Those MDPs are
  // 'allowed in background mode'.
  bool allowed_in_background_mode = IsMemoryDumpProviderInAllowlist(name);

  scoped_refptr<MemoryDumpProviderInfo> mdpinfo = new MemoryDumpProviderInfo(
      mdp, name, std::move(task_runner), options, allowed_in_background_mode);

  {
    AutoLock lock(lock_);
    bool already_registered = !dump_providers_.insert(mdpinfo).second;
    // This actually happens in some tests which don't have a clean tear-down
    // path for RenderThreadImpl::Init().
    if (already_registered)
      return;
  }
}

void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}

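// Takes ownership of |mdp|; deletion is deferred until no in-flight dump is
// referencing the provider any more (see the MDPInfo ownership notes in
// UnregisterDumpProviderInternal()).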
void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
    std::unique_ptr<MemoryDumpProvider> mdp) {
  UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
}

void MemoryDumpManager::UnregisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    bool take_mdp_ownership_and_delete_async) {
  std::unique_ptr<MemoryDumpProvider> owned_mdp;
  if (take_mdp_ownership_and_delete_async)
    owned_mdp.reset(mdp);

  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if ((*mdp_iter)->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;  // Not registered / already unregistered.

  if (take_mdp_ownership_and_delete_async) {
    // The MDP will be deleted together with its MDPInfo struct, that is
    // either:
    // - At the end of this function, if no dump is in progress.
    // - In ContinueAsyncProcessDump() when the MDPInfo is removed from
    //   |pending_dump_providers|.
    DCHECK(!(*mdp_iter)->owned_dump_provider);
    (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
  } else {
    // If you hit this DCHECK, your dump provider has a bug.
    // Unregistration of a MemoryDumpProvider is safe only if:
    // - The MDP has specified a sequenced task runner affinity AND the
    //   unregistration happens on the same task runner, so that the MDP cannot
    //   unregister while it is in the middle of an OnMemoryDump() call.
    // - The MDP has NOT specified a task runner affinity and its ownership is
    //   transferred via UnregisterAndDeleteDumpProviderSoon().
    // In all other cases, it is not possible to guarantee that the
    // unregistration will not race with OnMemoryDump() calls.
    DCHECK((*mdp_iter)->task_runner &&
           (*mdp_iter)->task_runner->RunsTasksInCurrentSequence())
        << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
        << "unregister itself in a racy way. Please file a crbug.";
  }

  // The MDPInfo instance can still be referenced by
  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason the
  // MDPInfo is flagged as disabled: this causes InvokeOnMemoryDump() to just
  // skip it, without actually invoking the |mdp|, which might be destroyed by
  // the caller soon after this method returns.
  (*mdp_iter)->disabled = true;
  dump_providers_.erase(mdp_iter);
}


bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
    MemoryDumpProvider* provider) {
  AutoLock lock(lock_);

  for (const auto& info : dump_providers_) {
    if (info->dump_provider == provider)
      return true;
  }
  return false;
}

void MemoryDumpManager::ResetForTesting() {
  AutoLock lock(lock_);
  request_dump_function_.Reset();
  dump_providers_.clear();
}

scoped_refptr<SequencedTaskRunner>
MemoryDumpManager::GetDumpThreadTaskRunner() {
  base::AutoLock lock(lock_);
  return GetOrCreateBgTaskRunnerLocked();
}

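// Lazily creates and starts the "MemoryInfra" dump thread. |lock_| must be
// held by the caller, as the "Locked" suffix implies.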
scoped_refptr<base::SequencedTaskRunner>
MemoryDumpManager::GetOrCreateBgTaskRunnerLocked() {
  if (dump_thread_)
    return dump_thread_->task_runner();

  dump_thread_ = std::make_unique<Thread>("MemoryInfra");
  bool started = dump_thread_->Start();
  CHECK(started);

  return dump_thread_->task_runner();
}

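// Entry point for dumping this process: snapshots the registered providers
// under |lock_| into a ProcessMemoryDumpAsyncState, then walks them via
// ContinueAsyncProcessDump(), hopping between task runners as needed.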
void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          ProcessMemoryDumpCallback callback) {
  char guid_str[20];
  snprintf(guid_str, std::size(guid_str), "0x%" PRIx64, args.dump_guid);
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
                                    TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
                                    TRACE_STR_COPY(guid_str));

  // If the argument filter is enabled, only background-mode dumps should be
  // allowed. If the trace config passed for a background tracing session
  // missed the allowed-modes argument, crash here instead of creating
  // unexpected dumps.
  if (TraceLog::GetInstance()
          ->GetCurrentTraceConfig()
          .IsArgumentFilterEnabled()) {
    CHECK_EQ(MemoryDumpLevelOfDetail::kBackground, args.level_of_detail);
  }

  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);

    pmd_async_state = std::make_unique<ProcessMemoryDumpAsyncState>(
        args, dump_providers_, std::move(callback),
        GetOrCreateBgTaskRunnerLocked());
  }

  // Start the process dump. This involves task runner hops as specified by the
  // MemoryDumpProvider(s) in RegisterDumpProvider().
  ContinueAsyncProcessDump(pmd_async_state.release());
}

// Invokes OnMemoryDump() on all MDPs that are next in the pending list and run
// on the current sequenced task runner. If the next MDP does not run on the
// current sequenced task runner, this function posts itself to that task
// runner and continues there. All OnMemoryDump() invocations are linearized.
// |lock_| is used in these functions purely to ensure consistency w.r.t.
// (un)registrations of |dump_providers_|.
void MemoryDumpManager::ContinueAsyncProcessDump(
    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // In theory |owned_pmd_async_state| should be a unique_ptr. The only reason
  // it isn't is the corner-case logic around |did_post_task| below, which
  // needs to take back ownership of |pmd_async_state| when the PostTask()
  // fails. Unfortunately, PostTask() destroys its unique_ptr arguments upon
  // failure to prevent accidental leaks, so using a unique_ptr would prevent
  // us from skipping the hop and moving on. Hence the manual naked ->
  // unique_ptr juggling.
  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
  owned_pmd_async_state = nullptr;

  while (!pmd_async_state->pending_dump_providers.empty()) {
    // Read MemoryDumpProviderInfo thread safety considerations in
    // memory_dump_manager.h when accessing |mdpinfo| fields.
    MemoryDumpProviderInfo* mdpinfo =
        pmd_async_state->pending_dump_providers.back().get();

    // If we are in background mode, we should invoke only the allowed
    // providers. Ignore other providers and continue.
    if (pmd_async_state->req_args.level_of_detail ==
            MemoryDumpLevelOfDetail::kBackground &&
        !mdpinfo->allowed_in_background_mode) {
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    // If the dump provider did not specify a task runner affinity, dump on
    // |dump_thread_|.
    scoped_refptr<SequencedTaskRunner> task_runner = mdpinfo->task_runner;
    if (!task_runner) {
      DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
      task_runner = pmd_async_state->dump_thread_task_runner;
      DCHECK(task_runner);
    }

    // If |RunsTasksInCurrentSequence()| is true then no PostTask is
    // required since we are on the right SequencedTaskRunner.
    if (task_runner->RunsTasksInCurrentSequence()) {
      InvokeOnMemoryDump(mdpinfo, pmd_async_state->process_memory_dump.get());
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    bool did_post_task = task_runner->PostTask(
        FROM_HERE,
        BindOnce(&MemoryDumpManager::ContinueAsyncProcessDump, Unretained(this),
                 Unretained(pmd_async_state.get())));

    if (did_post_task) {
      // Ownership is transferred to the posted task.
      std::ignore = pmd_async_state.release();
      return;
    }

    // PostTask usually fails only if the process or the target thread is
    // shutting down, so the dump provider is disabled here. But don't disable
    // unbound dump providers, since |dump_thread_| is controlled by the MDM
    // itself.
    if (mdpinfo->task_runner) {
      // A locked access is required to R/W |disabled| (for the
      // UnregisterAndDeleteDumpProviderSoon() case).
      AutoLock lock(lock_);
      mdpinfo->disabled = true;
    }

    // PostTask failed. Ignore the dump provider and continue.
    pmd_async_state->pending_dump_providers.pop_back();
  }

  FinishAsyncProcessDump(std::move(pmd_async_state));
}

// This function is called on the right task runner for the current MDP: either
// the task runner specified by the MDP or |dump_thread_task_runner| if the MDP
// did not specify one. Invokes the dump provider's OnMemoryDump() (unless
// disabled).
void MemoryDumpManager::InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
                                           ProcessMemoryDump* pmd) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(!mdpinfo->task_runner ||
         mdpinfo->task_runner->RunsTasksInCurrentSequence());

  TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
               "dump_provider.name", mdpinfo->name);

  // Do not add any other TRACE_EVENT macro (or function that might have them)
  // below this point. Under some rare circumstances, they can re-initialize
  // and invalidate the current ThreadLocalEventBuffer MDP, making the
  // |mdpinfo->disabled| check below susceptible to TOCTTOU bugs
  // (https://crbug.com/763365).

  bool is_thread_bound;
  {
    // A locked access is required to R/W |disabled| (for the
    // UnregisterAndDeleteDumpProviderSoon() case).
    AutoLock lock(lock_);

    // Unregister the dump provider if it failed too many times consecutively.
    if (!mdpinfo->disabled &&
        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
      mdpinfo->disabled = true;
      DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
                  << "\". Dump failed multiple times consecutively.";
    }
    if (mdpinfo->disabled)
      return;

    is_thread_bound = mdpinfo->task_runner != nullptr;
  }  // AutoLock lock(lock_);

  // Invoke the dump provider.

  // A stack-allocated copy of the dump provider's name is useful for debugging
  // crashes that occur while invoking a dump after a |dump_provider| was
  // unregistered in an unsafe way.
  DEBUG_ALIAS_FOR_CSTR(provider_name_for_debugging, mdpinfo->name, 16);

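  // Best-effort sanity check (see the benign-race annotation below): a
  // thread-bound provider flips |disabled| only from its own sequence, so
  // observing it disabled here would indicate a racy unregistration. Crash
  // deliberately rather than invoke a provider that may already be destroyed.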
  ABSL_ANNOTATE_BENIGN_RACE(&mdpinfo->disabled, "best-effort race detection");
  CHECK(!is_thread_bound ||
        !*(static_cast<volatile bool*>(&mdpinfo->disabled)));
  bool dump_successful =
      mdpinfo->dump_provider->OnMemoryDump(pmd->dump_args(), pmd);
  mdpinfo->consecutive_failures =
      dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
}

void MemoryDumpManager::FinishAsyncProcessDump(
    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(pmd_async_state->pending_dump_providers.empty());
  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
  if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
        pmd_async_state->callback_task_runner;
    callback_task_runner->PostTask(
        FROM_HERE, BindOnce(&MemoryDumpManager::FinishAsyncProcessDump,
                            Unretained(this), std::move(pmd_async_state)));
    return;
  }

  TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinishAsyncProcessDump");

  if (!pmd_async_state->callback.is_null()) {
    std::move(pmd_async_state->callback)
        .Run(true /* success */, dump_guid,
             std::move(pmd_async_state->process_memory_dump));
  }

  TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                  TRACE_ID_LOCAL(dump_guid));
}

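// Translates the periodic-interval triggers from the trace config into a
// MemoryDumpScheduler config and, in the coordinator process only, starts the
// scheduler on the background dump thread.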
void MemoryDumpManager::SetupForTracing(
    const TraceConfig::MemoryDumpConfig& memory_dump_config) {
  AutoLock lock(lock_);

  // At this point we must have the ability to request global dumps.
  DCHECK(can_request_global_dumps());

  MemoryDumpScheduler::Config periodic_config;
  for (const auto& trigger : memory_dump_config.triggers) {
    if (trigger.trigger_type == MemoryDumpType::kPeriodicInterval) {
      if (periodic_config.triggers.empty()) {
        periodic_config.callback =
            BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
                          MemoryDumpType::kPeriodicInterval);
      }
      periodic_config.triggers.push_back(
          {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
    }
  }

  // Only the coordinator process triggers periodic memory dumps.
  if (is_coordinator_ && !periodic_config.triggers.empty()) {
    MemoryDumpScheduler::GetInstance()->Start(periodic_config,
                                              GetOrCreateBgTaskRunnerLocked());
  }
}

void MemoryDumpManager::TeardownForTracing() {
  // There might be a memory dump in progress while this happens. Therefore,
  // ensure that the MDM state which depends on the tracing enabled / disabled
  // state is always accessed by the dumping methods holding the |lock_|.
  AutoLock lock(lock_);

  MemoryDumpScheduler::GetInstance()->Stop();
}

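// Snapshots the registered providers in reverse order, so that the pop_back()
// calls in ContinueAsyncProcessDump() visit them in their original
// (ordered-set) order, and remembers the task runner on which |callback| must
// eventually be invoked.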
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
    ProcessMemoryDumpCallback callback,
    scoped_refptr<SequencedTaskRunner> dump_thread_task_runner)
    : req_args(req_args),
      callback(std::move(callback)),
      callback_task_runner(SingleThreadTaskRunner::GetCurrentDefault()),
      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
  pending_dump_providers.reserve(dump_providers.size());
  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
  MemoryDumpArgs args = {req_args.level_of_detail, req_args.determinism,
                         req_args.dump_guid};
  process_memory_dump = std::make_unique<ProcessMemoryDump>(args);
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() =
    default;

}  // namespace trace_event
}  // namespace base