// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <inttypes.h>
#include <stdio.h>

#include <algorithm>
#include <utility>

#include "base/allocator/features.h"
#include "base/atomic_sequence_num.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "base/debug/debugging_flags.h"
#include "base/debug/stack_trace.h"
#include "base/debug/thread_heap_usage_tracker.h"
#include "base/memory/ptr_util.h"
#include "base/strings/pattern.h"
#include "base/strings/string_piece.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif

namespace base {
namespace trace_event {

namespace {

const int kTraceEventNumArgs = 1;
const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};

StaticAtomicSequenceNumber g_next_guid;
MemoryDumpManager* g_instance_for_testing = nullptr;

// The list of names of dump providers that are blacklisted from the strict
// thread affinity check on unregistration. These providers could potentially
// cause crashes on build bots if they do not unregister on the right thread.
// TODO(ssid): Fix all the dump providers to unregister if needed and clear the
// blacklist, crbug.com/643438.
const char* const kStrictThreadCheckBlacklist[] = {
    "ClientDiscardableSharedMemoryManager",
    "ContextProviderCommandBuffer",
    "DiscardableSharedMemoryManager",
    "FontCaches",
    "GpuMemoryBufferVideoFramePool",
    "IndexedDBBackingStore",
    "Sql",
    "ThreadLocalEventBuffer",
    "TraceLog",
    "URLRequestContext",
    "VpxVideoDecoder",
    "cc::SoftwareImageDecodeCache",
    "cc::StagingBufferPool",
    "gpu::BufferManager",
    "gpu::MappedMemoryManager",
    "gpu::RenderbufferManager",
    "BlacklistTestDumpProvider"  // for testing
};

// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
                      uint64_t dump_guid,
                      bool success) {
  char guid_str[20];
  sprintf(guid_str, "0x%" PRIx64, dump_guid);
  TRACE_EVENT_NESTABLE_ASYNC_END2(MemoryDumpManager::kTraceCategory,
                                  "GlobalMemoryDump", TRACE_ID_LOCAL(dump_guid),
                                  "dump_guid", TRACE_STR_COPY(guid_str),
                                  "success", success);

  if (!wrapped_callback.is_null()) {
    wrapped_callback.Run(dump_guid, success);
    wrapped_callback.Reset();
  }
}

// Proxy class which wraps a ConvertableToTraceFormat owned by the
// |session_state| into a proxy object that can be added to the trace event log.
// This is to solve the problem that the MemoryDumpSessionState is refcounted
// but the tracing subsystem wants a std::unique_ptr<ConvertableToTraceFormat>.
template <typename T>
struct SessionStateConvertableProxy : public ConvertableToTraceFormat {
  using GetterFunctPtr = T* (MemoryDumpSessionState::*)() const;

  SessionStateConvertableProxy(
      scoped_refptr<MemoryDumpSessionState> session_state,
      GetterFunctPtr getter_function)
      : session_state(session_state), getter_function(getter_function) {}

  void AppendAsTraceFormat(std::string* out) const override {
    return (session_state.get()->*getter_function)()->AppendAsTraceFormat(out);
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    return (session_state.get()->*getter_function)()
        ->EstimateTraceMemoryOverhead(overhead);
  }

  scoped_refptr<MemoryDumpSessionState> session_state;
  GetterFunctPtr const getter_function;
};

}  // namespace

// static
const char* const MemoryDumpManager::kTraceCategory =
    TRACE_DISABLED_BY_DEFAULT("memory-infra");

// static
const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump";

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
    MallocDumpProvider::kAllocatedObjects;
#else
    nullptr;
#endif

// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_instance_for_testing)
    return g_instance_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

// static
void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
  g_instance_for_testing = instance;
}

MemoryDumpManager::MemoryDumpManager()
    : memory_tracing_enabled_(0),
      tracing_process_id_(kInvalidTracingProcessId),
      dumper_registrations_ignored_for_testing_(false),
      heap_profiling_enabled_(false) {
  g_next_guid.GetNext();  // Make sure that first guid is not zero.

  // At this point the command line may not be initialized but we try to
  // enable the heap profiler to capture allocations as soon as possible.
  EnableHeapProfilingIfNeeded();

  strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist),
                                        std::end(kStrictThreadCheckBlacklist));
}

MemoryDumpManager::~MemoryDumpManager() {
  TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}

void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
  if (heap_profiling_enabled_)
    return;

  if (!CommandLine::InitializedForCurrentProcess() ||
      !CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableHeapProfiling))
    return;

  std::string profiling_mode = CommandLine::ForCurrentProcess()
      ->GetSwitchValueASCII(switches::kEnableHeapProfiling);
  if (profiling_mode == "") {
    AllocationContextTracker::SetCaptureMode(
        AllocationContextTracker::CaptureMode::PSEUDO_STACK);
#if HAVE_TRACE_STACK_FRAME_POINTERS && \
    (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
  } else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
    // We need frame pointers for native tracing to work, and they are
    // enabled in profiling and debug builds.
    AllocationContextTracker::SetCaptureMode(
        AllocationContextTracker::CaptureMode::NATIVE_STACK);
#endif
#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
  } else if (profiling_mode == switches::kEnableHeapProfilingTaskProfiler) {
    // Enable heap tracking, which in turn enables capture of heap usage
    // tracking in tracked_objects.cc.
    if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
      base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
#endif
  } else {
    CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
                 << switches::kEnableHeapProfiling << " flag.";
  }

  for (auto mdp : dump_providers_)
    mdp->dump_provider->OnHeapProfilingEnabled(true);
  heap_profiling_enabled_ = true;
}
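
// For reference, a minimal sketch of how the switch handled above is usually
// passed on the command line. The literal switch and value strings live in
// base_switches.cc and are assumed here rather than restated by this file:
//   --enable-heap-profiling                 -> PSEUDO_STACK capture mode
//   --enable-heap-profiling=native          -> NATIVE_STACK capture mode
//   --enable-heap-profiling=task-profiler   -> per-thread heap usage tracking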

void MemoryDumpManager::Initialize(
    std::unique_ptr<MemoryDumpManagerDelegate> delegate) {
  {
    AutoLock lock(lock_);
    DCHECK(delegate);
    DCHECK(!delegate_);
    delegate_ = std::move(delegate);
    EnableHeapProfilingIfNeeded();
  }

  // Enable the core dump providers.
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
  RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif

#if defined(OS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                       nullptr);
#endif

  TRACE_EVENT_WARMUP_CATEGORY(kTraceCategory);

  // TODO(ssid): This should be done in EnableHeapProfiling so that we capture
  // more allocations (crbug.com/625170).
  if (AllocationContextTracker::capture_mode() ==
          AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
      !(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
    // Create trace config with heap profiling filter.
    std::string filter_string = "*";
    const char* const kFilteredCategories[] = {
        TRACE_DISABLED_BY_DEFAULT("net"), TRACE_DISABLED_BY_DEFAULT("cc"),
        MemoryDumpManager::kTraceCategory};
    for (const char* cat : kFilteredCategories)
      filter_string = filter_string + "," + cat;
    TraceConfigCategoryFilter category_filter;
    category_filter.InitializeFromString(filter_string);

    TraceConfig::EventFilterConfig heap_profiler_filter_config(
        HeapProfilerEventFilter::kName);
    heap_profiler_filter_config.SetCategoryFilter(category_filter);

    TraceConfig::EventFilters filters;
    filters.push_back(heap_profiler_filter_config);
    TraceConfig filtering_trace_config;
    filtering_trace_config.SetEventFilters(filters);

    TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
                                        TraceLog::FILTERING_MODE);
  }

  // If tracing was enabled before initializing MemoryDumpManager, we missed
  // the OnTraceLogEnabled() event. Synthesize it so we can late-join the
  // party. IsEnabled is called before adding the observer to avoid calling
  // OnTraceLogEnabled twice.
  bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
  TraceLog::GetInstance()->AddEnabledStateObserver(this);
  if (is_tracing_already_enabled)
    OnTraceLogEnabled();
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  // Set |dumps_on_single_thread_task_runner| to true because all providers
  // without a task runner are run on the dump thread.
  MemoryDumpProvider::Options options;
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
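
// Illustrative usage only (not part of this translation unit): a class that
// implements MemoryDumpProvider typically registers itself against the task
// runner it wants to dump on, e.g. (hypothetical provider name):
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//       this, "MyProvider", ThreadTaskRunnerHandle::Get());
// Passing a null task runner instead makes the provider dump on the
// memory-infra dump thread (see |dump_thread_| handling below).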

void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  DCHECK(task_runner);
  options.dumps_on_single_thread_task_runner = false;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options) {
  if (dumper_registrations_ignored_for_testing_)
    return;

  bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);
  scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
      new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
                                 whitelisted_for_background_mode);

  if (options.is_fast_polling_supported) {
    DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
                                     "polling must NOT be thread bound.";
  }

  {
    AutoLock lock(lock_);
    bool already_registered = !dump_providers_.insert(mdpinfo).second;
    // This actually happens in some tests which don't have a clean tear-down
    // path for RenderThreadImpl::Init().
    if (already_registered)
      return;

    // The list of polling MDPs is populated in OnTraceLogEnabled(). This code
    // deals with the case of an MDP capable of fast polling that is registered
    // after OnTraceLogEnabled() has already been called.
    if (options.is_fast_polling_supported && dump_thread_) {
      dump_thread_->task_runner()->PostTask(
          FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread,
                          Unretained(this), mdpinfo));
    }
  }

  if (heap_profiling_enabled_)
    mdp->OnHeapProfilingEnabled(true);
}

void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}

void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
    std::unique_ptr<MemoryDumpProvider> mdp) {
  UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
}
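
// Illustrative sketch (not part of this file): a caller that cannot guarantee
// it unregisters on the provider's task runner should hand ownership over
// instead of calling UnregisterDumpProvider(), e.g. with a hypothetical
// MyDumpProvider type:
//   std::unique_ptr<MyDumpProvider> provider = ...;
//   MemoryDumpManager::GetInstance()->UnregisterAndDeleteDumpProviderSoon(
//       std::move(provider));
// The manager then destroys the provider once no in-flight dump can still
// reference it (see the ownership notes in the function below).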

void MemoryDumpManager::UnregisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    bool take_mdp_ownership_and_delete_async) {
  std::unique_ptr<MemoryDumpProvider> owned_mdp;
  if (take_mdp_ownership_and_delete_async)
    owned_mdp.reset(mdp);

  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if ((*mdp_iter)->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;  // Not registered / already unregistered.

  if (take_mdp_ownership_and_delete_async) {
    // The MDP will be deleted whenever the MDPInfo struct is, that is either:
    // - At the end of this function, if no dump is in progress.
    // - In SetupNextMemoryDump() or InvokeOnMemoryDump(), when the MDPInfo is
    //   removed from |pending_dump_providers|.
    // - When the provider is removed from |dump_providers_for_polling_|.
    DCHECK(!(*mdp_iter)->owned_dump_provider);
    (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
  } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 ||
             subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
    // If the dump provider's name is on |strict_thread_check_blacklist_|, the
    // DCHECK below fires only when tracing is enabled. Otherwise the DCHECK
    // fires even when tracing is not enabled (stricter).
    // TODO(ssid): Remove this condition after removing all the dump providers
    // in the blacklist and the buildbots are no longer flakily hitting the
    // DCHECK, crbug.com/643438.

    // If you hit this DCHECK, your dump provider has a bug.
    // Unregistration of a MemoryDumpProvider is safe only if:
    // - The MDP has specified a sequenced task runner affinity AND the
    //   unregistration happens on the same task runner, so that the MDP cannot
    //   unregister while it is in the middle of an OnMemoryDump() call.
    // - The MDP has NOT specified a task runner affinity and its ownership is
    //   transferred via UnregisterAndDeleteDumpProviderSoon().
    // In all the other cases, it is not possible to guarantee that the
    // unregistration will not race with OnMemoryDump() calls.
    DCHECK((*mdp_iter)->task_runner &&
           (*mdp_iter)->task_runner->RunsTasksOnCurrentThread())
        << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
        << "unregister itself in a racy way. Please file a crbug.";
  }

  if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) {
    DCHECK(take_mdp_ownership_and_delete_async);
    dump_thread_->task_runner()->PostTask(
        FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread,
                        Unretained(this), *mdp_iter));
  }

  // The MDPInfo instance can still be referenced by the
  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
  // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
  // to just skip it, without actually invoking the |mdp|, which might be
  // destroyed by the caller soon after this method returns.
  (*mdp_iter)->disabled = true;
  dump_providers_.erase(mdp_iter);
}

void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
    scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
  AutoLock lock(lock_);
  dump_providers_for_polling_.insert(mdpinfo);

  // Notify that polling is ready when the first polling-capable provider is
  // registered. This handles the case where OnTraceLogEnabled() did not notify
  // readiness because no polling-capable MDP had been registered yet.
  if (dump_providers_for_polling_.size() == 1)
    MemoryDumpScheduler::GetInstance()->EnablePollingIfNeeded();
}

void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
    scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
  mdpinfo->dump_provider->SuspendFastMemoryPolling();

  AutoLock lock(lock_);
  dump_providers_for_polling_.erase(mdpinfo);
  DCHECK(!dump_providers_for_polling_.empty())
      << "All polling MDPs cannot be unregistered.";
}

void MemoryDumpManager::RequestGlobalDump(
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail,
    const MemoryDumpCallback& callback) {
  // Bail out immediately if tracing is not enabled at all or if the dump mode
  // is not allowed.
  if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
      !IsDumpModeAllowed(level_of_detail)) {
    VLOG(1) << kLogPrefix << " failed because " << kTraceCategory
            << " tracing category is not enabled or the requested dump mode is "
               "not allowed by trace config.";
    if (!callback.is_null())
      callback.Run(0u /* guid */, false /* success */);
    return;
  }

  const uint64_t guid =
      TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());

  // Creates an async event to keep track of the global dump evolution.
  // The |wrapped_callback| will generate the ASYNC_END event and then invoke
  // the real |callback| provided by the caller.
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(
      kTraceCategory, "GlobalMemoryDump", TRACE_ID_LOCAL(guid), "dump_type",
      MemoryDumpTypeToString(dump_type), "level_of_detail",
      MemoryDumpLevelOfDetailToString(level_of_detail));
  MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);

  // The delegate will coordinate the IPC broadcast and at some point invoke
  // CreateProcessDump() to get a dump for the current process.
  MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
  delegate_->RequestGlobalMemoryDump(args, wrapped_callback);
}

void MemoryDumpManager::RequestGlobalDump(
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail) {
  RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
}
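
// Illustrative call site (a sketch, not part of this file): any process can
// ask for a coordinated dump of all processes, e.g.
//   MemoryDumpManager::GetInstance()->RequestGlobalDump(
//       MemoryDumpType::EXPLICITLY_TRIGGERED,
//       MemoryDumpLevelOfDetail::DETAILED);
// The dump type value shown is assumed from memory_dump_request_args.h. The
// delegate then broadcasts the request over IPC and CreateProcessDump() below
// is eventually invoked in each process.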

bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
    MemoryDumpProvider* provider) {
  AutoLock lock(lock_);

  for (const auto& info : dump_providers_) {
    if (info->dump_provider == provider)
      return true;
  }
  return false;
}

void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          const MemoryDumpCallback& callback) {
  char guid_str[20];
  sprintf(guid_str, "0x%" PRIx64, args.dump_guid);
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
                                    TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
                                    TRACE_STR_COPY(guid_str));

  // If the argument filter is enabled then only background mode dumps should
  // be allowed. If the trace config passed for the background tracing session
  // missed the allowed modes argument, crash here instead of creating
  // unexpected dumps.
  if (TraceLog::GetInstance()
          ->GetCurrentTraceConfig()
          .IsArgumentFilterEnabled()) {
    CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
  }

  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);

    // |dump_thread_| can be nullptr if tracing was disabled before reaching
    // here. SetupNextMemoryDump() is robust enough to tolerate it and will
    // NACK the dump.
    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
        args, dump_providers_, session_state_, callback,
        dump_thread_ ? dump_thread_->task_runner() : nullptr));

    // Safety check to prevent reaching here without calling RequestGlobalDump,
    // or with disallowed modes. If |session_state_| is null then tracing is
    // disabled.
    CHECK(!session_state_ ||
          session_state_->IsDumpModeAllowed(args.level_of_detail));

    MemoryDumpScheduler::GetInstance()->NotifyDumpTriggered();
  }

  // Start the process dump. This involves task runner hops as specified by the
  // MemoryDumpProvider(s) in RegisterDumpProvider().
  SetupNextMemoryDump(std::move(pmd_async_state));
}

// PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner.
// A PostTask is always required for a generic SequencedTaskRunner to ensure
// that no other task is running on it concurrently. SetupNextMemoryDump() and
// InvokeOnMemoryDump() are called alternately, which linearizes the dump
// provider's OnMemoryDump invocations.
// At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be
// active at any time for a given PMD, regardless of status of the |lock_|.
// |lock_| is used in these functions purely to ensure consistency w.r.t.
// (un)registrations of |dump_providers_|.
void MemoryDumpManager::SetupNextMemoryDump(
    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // |dump_thread_| might be destroyed before getting to this point, which
  // means that tracing was disabled right before starting this dump. Either
  // way, if tracing is stopped or this was the last hop, create a trace
  // event, add it to the trace and finalize the process dump, invoking the
  // callback.
  if (!pmd_async_state->dump_thread_task_runner.get()) {
    if (pmd_async_state->pending_dump_providers.empty()) {
      VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
              << " before finalizing the dump";
    } else {
      VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
              << " before dumping "
              << pmd_async_state->pending_dump_providers.back().get()->name;
    }
    pmd_async_state->dump_successful = false;
    pmd_async_state->pending_dump_providers.clear();
  }
  if (pmd_async_state->pending_dump_providers.empty())
    return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));

  // Read MemoryDumpProviderInfo thread safety considerations in
  // memory_dump_manager.h when accessing |mdpinfo| fields.
  MemoryDumpProviderInfo* mdpinfo =
      pmd_async_state->pending_dump_providers.back().get();

  // If we are in background tracing, we should invoke only the whitelisted
  // providers. Ignore other providers and continue.
  if (pmd_async_state->req_args.level_of_detail ==
          MemoryDumpLevelOfDetail::BACKGROUND &&
      !mdpinfo->whitelisted_for_background_mode) {
    pmd_async_state->pending_dump_providers.pop_back();
    return SetupNextMemoryDump(std::move(pmd_async_state));
  }

  // If the dump provider did not specify a task runner affinity, dump on
  // |dump_thread_| which is already checked above for presence.
  SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
  if (!task_runner) {
    DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
    task_runner = pmd_async_state->dump_thread_task_runner.get();
    DCHECK(task_runner);
  }

  if (mdpinfo->options.dumps_on_single_thread_task_runner &&
      task_runner->RunsTasksOnCurrentThread()) {
    // If |dumps_on_single_thread_task_runner| is true then no PostTask is
    // required if we are on the right thread.
    return InvokeOnMemoryDump(pmd_async_state.release());
  }

  bool did_post_task = task_runner->PostTask(
      FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this),
                      Unretained(pmd_async_state.get())));

  if (did_post_task) {
    // Ownership is transferred to InvokeOnMemoryDump().
    ignore_result(pmd_async_state.release());
    return;
  }

  // PostTask usually fails only if the process or thread is being shut down,
  // so the dump provider is disabled here. But don't disable unbound dump
  // providers: the utility thread is normally shut down when disabling the
  // trace, and getting here in that case is expected.
  if (mdpinfo->task_runner) {
    LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
               << "\". Failed to post task on the task runner provided.";

    // A locked access is required to R/W |disabled| (for the
    // UnregisterAndDeleteDumpProviderSoon() case).
    AutoLock lock(lock_);
    mdpinfo->disabled = true;
  }

  // PostTask failed. Ignore the dump provider and continue.
  pmd_async_state->pending_dump_providers.pop_back();
  SetupNextMemoryDump(std::move(pmd_async_state));
}

// This function is called on the right task runner for the current MDP. It is
// either the task runner specified by the MDP or |dump_thread_task_runner| if
// the MDP did not specify a task runner. Invokes the dump provider's
// OnMemoryDump() (unless disabled).
void MemoryDumpManager::InvokeOnMemoryDump(
    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
  // why it isn't is the corner-case logic of |did_post_task| above, which
  // needs to take back ownership of |pmd_async_state| when the PostTask()
  // fails. Unfortunately, PostTask() destroys the scoped_ptr arguments upon
  // failure to prevent accidental leaks. Using a scoped_ptr would prevent us
  // from skipping the hop and moving on. Hence the manual naked -> scoped ptr
  // juggling.
  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
  owned_pmd_async_state = nullptr;

  // Read MemoryDumpProviderInfo thread safety considerations in
  // memory_dump_manager.h when accessing |mdpinfo| fields.
  MemoryDumpProviderInfo* mdpinfo =
      pmd_async_state->pending_dump_providers.back().get();

  DCHECK(!mdpinfo->task_runner ||
         mdpinfo->task_runner->RunsTasksOnCurrentThread());

  bool should_dump;
  {
    // A locked access is required to R/W |disabled| (for the
    // UnregisterAndDeleteDumpProviderSoon() case).
    AutoLock lock(lock_);

    // Unregister the dump provider if it failed too many times consecutively.
    if (!mdpinfo->disabled &&
        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
      mdpinfo->disabled = true;
      LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
                 << "\". Dump failed multiple times consecutively.";
    }
    should_dump = !mdpinfo->disabled;
  }  // AutoLock lock(lock_);

  if (should_dump) {
    // Invoke the dump provider.
    TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
                 "dump_provider.name", mdpinfo->name);

    // A stack-allocated string with the dump provider name is useful for
    // debugging crashes that happen while invoking a dump after a
    // |dump_provider| was not unregistered in a safe way.
    // TODO(ssid): Remove this after fixing crbug.com/643438.
    char provider_name_for_debugging[16];
    strncpy(provider_name_for_debugging, mdpinfo->name,
            sizeof(provider_name_for_debugging) - 1);
    provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
    base::debug::Alias(provider_name_for_debugging);

    // Pid of the target process being dumped. Often kNullProcessId (= current
    // process), non-zero when the coordinator process creates dumps on behalf
    // of child processes (see crbug.com/461788).
    ProcessId target_pid = mdpinfo->options.target_pid;
    MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
    ProcessMemoryDump* pmd =
        pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid,
                                                                   args);
    bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
    mdpinfo->consecutive_failures =
        dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
  }

  pmd_async_state->pending_dump_providers.pop_back();
  SetupNextMemoryDump(std::move(pmd_async_state));
}

bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
#if DCHECK_IS_ON()
  {
    AutoLock lock(lock_);
    if (dump_thread_)
      DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
  }
#endif
  if (dump_providers_for_polling_.empty())
    return false;

  *memory_total = 0;
  // Note that we call PollFastMemoryTotal() even if the dump provider is
  // disabled (unregistered). This is to avoid taking the lock while polling.
  for (const auto& mdpinfo : dump_providers_for_polling_) {
    uint64_t value = 0;
    mdpinfo->dump_provider->PollFastMemoryTotal(&value);
    *memory_total += value;
  }
  return true;
}

// static
uint32_t MemoryDumpManager::GetDumpsSumKb(const std::string& pattern,
                                          const ProcessMemoryDump* pmd) {
  uint64_t sum = 0;
  for (const auto& kv : pmd->allocator_dumps()) {
    auto name = StringPiece(kv.first);
    if (MatchPattern(name, pattern))
      sum += kv.second->GetSize();
  }
  return sum / 1024;
}
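
// Note on the helper above (illustrative): a call such as
// GetDumpsSumKb("v8/*", pmd) walks every allocator dump in |pmd| whose full
// name matches the "v8/*" glob, sums their sizes in bytes, and returns the
// total in KiB. This is how the summary fields in FinalizeDumpAndAddToTrace()
// below are filled.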

// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(pmd_async_state->pending_dump_providers.empty());
  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
  if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
        pmd_async_state->callback_task_runner;
    callback_task_runner->PostTask(
        FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
                        Passed(&pmd_async_state)));
    return;
  }

  TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace");

  // The results struct to fill.
  // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
  MemoryDumpCallbackResult result;

  for (const auto& kv : pmd_async_state->process_dumps) {
    ProcessId pid = kv.first;  // kNullProcessId for the current process.
    ProcessMemoryDump* process_memory_dump = kv.second.get();
    std::unique_ptr<TracedValue> traced_value(new TracedValue);
    process_memory_dump->AsValueInto(traced_value.get());
    traced_value->SetString("level_of_detail",
                            MemoryDumpLevelOfDetailToString(
                                pmd_async_state->req_args.level_of_detail));
    const char* const event_name =
        MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);

    std::unique_ptr<ConvertableToTraceFormat> event_value(
        std::move(traced_value));
    TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
        TRACE_EVENT_PHASE_MEMORY_DUMP,
        TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
        trace_event_internal::kGlobalScope, dump_guid, pid,
        kTraceEventNumArgs, kTraceEventArgNames,
        kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
        TRACE_EVENT_FLAG_HAS_ID);

    // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
    // Don't try to fill the struct in detailed mode since it is hard to avoid
    // double counting.
    if (pmd_async_state->req_args.level_of_detail ==
        MemoryDumpLevelOfDetail::DETAILED)
      continue;

    // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
    if (pid == kNullProcessId) {
      result.chrome_dump.malloc_total_kb =
          GetDumpsSumKb("malloc", process_memory_dump);
      result.chrome_dump.v8_total_kb =
          GetDumpsSumKb("v8/*", process_memory_dump);

      // partition_alloc reports sizes for both allocated_objects and
      // partitions. The memory allocated_objects uses is a subset of
      // the partitions memory so to avoid double counting we only
      // count partitions memory.
      result.chrome_dump.partition_alloc_total_kb =
          GetDumpsSumKb("partition_alloc/partitions/*", process_memory_dump);
      result.chrome_dump.blink_gc_total_kb =
          GetDumpsSumKb("blink_gc", process_memory_dump);
    }
  }

  bool tracing_still_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled);
  if (!tracing_still_enabled) {
    pmd_async_state->dump_successful = false;
    VLOG(1) << kLogPrefix << " failed because tracing was disabled before"
            << " the dump was completed";
  }

  if (!pmd_async_state->callback.is_null()) {
    pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
    pmd_async_state->callback.Reset();
  }

  TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                  TRACE_ID_LOCAL(dump_guid));
}

void MemoryDumpManager::OnTraceLogEnabled() {
  bool enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
  if (!enabled)
    return;

  // Initialize the TraceLog for the current thread. This avoids the TraceLog
  // memory dump provider being registered lazily in the PostTask() below
  // while the |lock_| is taken.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // Spin up the thread used to invoke unbound dump providers.
  std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
  if (!dump_thread->Start()) {
    LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
    return;
  }

  const TraceConfig& trace_config =
      TraceLog::GetInstance()->GetCurrentTraceConfig();
  const TraceConfig::MemoryDumpConfig& memory_dump_config =
      trace_config.memory_dump_config();
  scoped_refptr<MemoryDumpSessionState> session_state =
      new MemoryDumpSessionState;
  session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
  session_state->set_heap_profiler_breakdown_threshold_bytes(
      memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
  if (heap_profiling_enabled_) {
    // If heap profiling is enabled, the stack frame deduplicator and type name
    // deduplicator will be in use. Add metadata events to write the frames
    // and type IDs.
    session_state->SetStackFrameDeduplicator(
        WrapUnique(new StackFrameDeduplicator));

    session_state->SetTypeNameDeduplicator(
        WrapUnique(new TypeNameDeduplicator));

    TRACE_EVENT_API_ADD_METADATA_EVENT(
        TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
        "stackFrames",
        MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
            session_state, &MemoryDumpSessionState::stack_frame_deduplicator));

    TRACE_EVENT_API_ADD_METADATA_EVENT(
        TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
        "typeNames",
        MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
            session_state, &MemoryDumpSessionState::type_name_deduplicator));
  }

  {
    AutoLock lock(lock_);

    DCHECK(delegate_);  // At this point we must have a delegate.
    session_state_ = session_state;

    DCHECK(!dump_thread_);
    dump_thread_ = std::move(dump_thread);

    subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);

    dump_providers_for_polling_.clear();
    for (const auto& mdpinfo : dump_providers_) {
      if (mdpinfo->options.is_fast_polling_supported)
        dump_providers_for_polling_.insert(mdpinfo);
    }

    MemoryDumpScheduler* dump_scheduler = MemoryDumpScheduler::GetInstance();
    dump_scheduler->Setup(this, dump_thread_->task_runner());
    DCHECK_LE(memory_dump_config.triggers.size(), 3u);
    for (const auto& trigger : memory_dump_config.triggers) {
      if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) {
        NOTREACHED();
        continue;
      }
      dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
                                 trigger.min_time_between_dumps_ms);
    }

    // Notify that polling is supported only if some polling-capable provider
    // was registered; otherwise RegisterPollingMDPOnDumpThread() will notify
    // when the first polling MDP registers.
    if (!dump_providers_for_polling_.empty())
      dump_scheduler->EnablePollingIfNeeded();

    // Only the coordinator process triggers periodic global memory dumps.
    if (delegate_->IsCoordinator())
      dump_scheduler->EnablePeriodicTriggerIfNeeded();
  }
}

void MemoryDumpManager::OnTraceLogDisabled() {
  // There might be a memory dump in progress while this happens. Therefore,
  // ensure that the MDM state which depends on the tracing enabled / disabled
  // state is always accessed by the dumping methods holding the |lock_|.
  if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
    return;
  subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
  std::unique_ptr<Thread> dump_thread;
  {
    AutoLock lock(lock_);
    dump_thread = std::move(dump_thread_);
    session_state_ = nullptr;
    MemoryDumpScheduler::GetInstance()->DisableAllTriggers();
  }

  // Thread stops are blocking and must be performed outside of the |lock_|
  // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
  if (dump_thread)
    dump_thread->Stop();

  // |dump_providers_for_polling_| must be cleared only after the dump thread
  // is stopped (polling tasks are done).
  {
    AutoLock lock(lock_);
    for (const auto& mdpinfo : dump_providers_for_polling_)
      mdpinfo->dump_provider->SuspendFastMemoryPolling();
    dump_providers_for_polling_.clear();
  }
}

bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
  AutoLock lock(lock_);
  if (!session_state_)
    return false;
  return session_state_->IsDumpModeAllowed(dump_mode);
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
    scoped_refptr<MemoryDumpSessionState> session_state,
    MemoryDumpCallback callback,
    scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner)
    : req_args(req_args),
      session_state(std::move(session_state)),
      callback(callback),
      dump_successful(true),
      callback_task_runner(ThreadTaskRunnerHandle::Get()),
      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
  pending_dump_providers.reserve(dump_providers.size());
  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
}

ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
    GetOrCreateMemoryDumpContainerForProcess(ProcessId pid,
                                             const MemoryDumpArgs& dump_args) {
  auto iter = process_dumps.find(pid);
  if (iter == process_dumps.end()) {
    std::unique_ptr<ProcessMemoryDump> new_pmd(
        new ProcessMemoryDump(session_state, dump_args));
    iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
  }
  return iter->second.get();
}

}  // namespace trace_event
}  // namespace base