// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_context_tracker.h"

#include <stdio.h>
#include <string.h>

#include <algorithm>
#include <iterator>

#include "base/atomicops.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context.h"

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include <sys/prctl.h>
#endif

namespace base {
namespace trace_event {

subtle::Atomic32 AllocationContextTracker::capture_mode_ =
    static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);

namespace {

const size_t kMaxStackDepth = 128u;
const size_t kMaxTaskDepth = 16u;
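
// Sentinel stored in the TLS slot while a tracker for the current thread is
// being constructed, so that allocations made during that construction make
// GetInstanceForCurrentThread() return nullptr (the re-entrancy case handled
// below) instead of recursing.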
AllocationContextTracker* const kInitializingSentinel =
    reinterpret_cast<AllocationContextTracker*>(-1);

ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;

// This function is added to the TLS slot to clean up the instance when the
// thread exits.
void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
  delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
}

// Do not call ThreadIdNameManager::GetName because it acquires a lock; if the
// current allocation happened while ThreadIdNameManager already held that
// lock, calling it here would deadlock. Instead, get the thread name from the
// kernel if available, or otherwise return a string containing the thread id.
// This function intentionally leaks the allocated strings since they are used
// to tag allocations even after the thread dies.
const char* GetAndLeakThreadName() {
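  // PR_GET_NAME requires a buffer of at least 16 bytes (the kernel's
  // TASK_COMM_LEN), which also comfortably fits the fallback thread id.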
  char name[16];
#if defined(OS_LINUX) || defined(OS_ANDROID)
  // If the thread name is not set, try to get it from prctl. Thread name might
  // not be set in cases where the thread started before heap profiling was
  // enabled.
  int err = prctl(PR_GET_NAME, name);
  if (!err) {
    return strdup(name);
  }
#endif  // defined(OS_LINUX) || defined(OS_ANDROID)

  // Use the tid if we don't have a thread name.
  snprintf(name, sizeof(name), "%lu",
           static_cast<unsigned long>(PlatformThread::CurrentId()));
  return strdup(name);
}

}  // namespace

// static
AllocationContextTracker*
AllocationContextTracker::GetInstanceForCurrentThread() {
  AllocationContextTracker* tracker =
      static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get());
  if (tracker == kInitializingSentinel)
    return nullptr;  // Re-entrancy case.

  if (!tracker) {
    g_tls_alloc_ctx_tracker.Set(kInitializingSentinel);
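    // Constructing the tracker below allocates, which re-enters this
    // function; the sentinel set above makes that nested call return nullptr
    // instead of recursing.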
    tracker = new AllocationContextTracker();
    g_tls_alloc_ctx_tracker.Set(tracker);
  }

  return tracker;
}

AllocationContextTracker::AllocationContextTracker()
    : thread_name_(nullptr), ignore_scope_depth_(0) {
  pseudo_stack_.reserve(kMaxStackDepth);
  task_contexts_.reserve(kMaxTaskDepth);
}

AllocationContextTracker::~AllocationContextTracker() {}

// static
void AllocationContextTracker::SetCurrentThreadName(const char* name) {
  if (name && capture_mode() != CaptureMode::DISABLED) {
    GetInstanceForCurrentThread()->thread_name_ = name;
  }
}

// static
void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
  // When enabling capturing, also initialize the TLS slot. This does not
  // create a TLS instance yet.
  if (mode != CaptureMode::DISABLED && !g_tls_alloc_ctx_tracker.initialized())
    g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);

  // Release ordering ensures that when a thread observes a non-DISABLED
  // |capture_mode_| through an acquire load, the TLS slot has been
  // initialized.
  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
}
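
// Note: presumably the matching acquire load lives in the capture_mode()
// accessor declared in the header, so a thread that observes capturing as
// enabled is guaranteed to see the initialized TLS slot.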

void AllocationContextTracker::PushPseudoStackFrame(
    AllocationContextTracker::PseudoStackFrame stack_frame) {
  // Impose a limit on the height to verify that every push is popped, because
  // in practice the pseudo stack never grows higher than ~20 frames.
  if (pseudo_stack_.size() < kMaxStackDepth)
    pseudo_stack_.push_back(stack_frame);
  else
    NOTREACHED();
}

void AllocationContextTracker::PopPseudoStackFrame(
    AllocationContextTracker::PseudoStackFrame stack_frame) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the frame was never pushed, so it is possible that pop is called
  // on an empty stack.
  if (pseudo_stack_.empty())
    return;

  // Assert that pushes and pops are nested correctly. This DCHECK can be
  // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
  // without a corresponding TRACE_EVENT_BEGIN).
  DCHECK(stack_frame == pseudo_stack_.back())
      << "Encountered an unmatched TRACE_EVENT_END: "
      << stack_frame.trace_event_name
      << " vs event in stack: " << pseudo_stack_.back().trace_event_name;

  pseudo_stack_.pop_back();
}
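
// Illustrative sketch (not part of the original file): pushes and pops are
// normally balanced by scoped trace macros rather than called directly, e.g.:
//
//   {
//     TRACE_EVENT0("category", "DoWork");  // pushes a pseudo stack frame
//     DoWork();                            // allocations tagged "DoWork"
//   }                                      // scope exit pops the frame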

void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
  DCHECK(context);
  if (task_contexts_.size() < kMaxTaskDepth)
    task_contexts_.push_back(context);
  else
    NOTREACHED();
}

void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the context was never pushed, so it is possible that pop is called
  // on an empty stack.
  if (task_contexts_.empty())
    return;

  DCHECK_EQ(context, task_contexts_.back())
      << "Encountered an unmatched context end";
  task_contexts_.pop_back();
}
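
// Illustrative sketch (not part of the original file): task contexts are
// typically pushed and popped via the scoped heap profiler macro, assuming it
// is available in base/trace_event/heap_profiler.h:
//
//   TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION scoped_context(
//       task.posted_from.file_name());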

bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
  if (ignore_scope_depth_)
    return false;

  CaptureMode mode = static_cast<CaptureMode>(
      subtle::NoBarrier_Load(&capture_mode_));
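  // A NoBarrier load is presumably sufficient here: this tracker instance was
  // already obtained from the initialized TLS slot, and observing a slightly
  // stale mode only affects this single snapshot.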

  auto* backtrace = std::begin(ctx->backtrace.frames);
  auto* backtrace_end = std::end(ctx->backtrace.frames);

  if (!thread_name_) {
    // Ignore the string allocation made by GetAndLeakThreadName to avoid
    // reentrancy.
    ignore_scope_depth_++;
    thread_name_ = GetAndLeakThreadName();
    ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
    DCHECK(thread_name_);
    ignore_scope_depth_--;
  }

  // Add the thread name as the first entry in the pseudo stack.
  if (thread_name_) {
    *backtrace++ = StackFrame::FromThreadName(thread_name_);
  }

  switch (mode) {
    case CaptureMode::DISABLED:
      {
        break;
      }
    case CaptureMode::PSEUDO_STACK:
      {
        for (const PseudoStackFrame& stack_frame : pseudo_stack_) {
          if (backtrace == backtrace_end) {
            break;
          }
          *backtrace++ =
              StackFrame::FromTraceEventName(stack_frame.trace_event_name);
        }
        break;
      }
    case CaptureMode::NATIVE_STACK:
      {
        // Backtrace contract requires us to return bottom frames, i.e.
        // from main() and up. Stack unwinding produces top frames, i.e.
        // from this point and up until main(). We request many frames to
        // make sure we reach main(), and then copy the bottom portion of
        // them.
        const void* frames[128];
        static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
                      "not requesting enough frames to fill Backtrace");
#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL)
        size_t frame_count = debug::TraceStackFramePointers(
            frames,
            arraysize(frames),
            1 /* exclude this function from the trace */);
#else
        size_t frame_count = 0;
        NOTREACHED();
#endif

        // Copy frames backwards.
        size_t backtrace_capacity = backtrace_end - backtrace;
        int32_t top_frame_index = (backtrace_capacity >= frame_count)
                                      ? 0
                                      : frame_count - backtrace_capacity;
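        // Worked example: with frame_count = 10 unwound frames and room for
        // backtrace_capacity = 4, top_frame_index becomes 6, so the loop
        // below copies frames[9..6], i.e. the four outermost frames (closest
        // to main()), in bottom-to-top order.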
        for (int32_t i = frame_count - 1; i >= top_frame_index; --i) {
          const void* frame = frames[i];
          *backtrace++ = StackFrame::FromProgramCounter(frame);
        }
        break;
      }
  }

  ctx->backtrace.frame_count = backtrace - std::begin(ctx->backtrace.frames);

  // TODO(ssid): Fix crbug.com/594803 to add the file name as the third
  // dimension (component name) in the heap profiler instead of piggybacking
  // on the type name.
  if (!task_contexts_.empty()) {
    ctx->type_name = task_contexts_.back();
  } else if (!pseudo_stack_.empty()) {
    // If no task context was available, the category name of the topmost
    // trace event is used instead.
    ctx->type_name = pseudo_stack_.back().trace_event_category;
  } else {
    ctx->type_name = nullptr;
  }

  return true;
}
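
// Illustrative sketch (not part of the original file): an allocation hook
// could use the tracker roughly like this, assuming a hypothetical recording
// function |RecordAlloc|:
//
//   AllocationContext ctx;
//   AllocationContextTracker* tracker =
//       AllocationContextTracker::GetInstanceForCurrentThread();
//   if (tracker && tracker->GetContextSnapshot(&ctx))
//     RecordAlloc(address, size, ctx);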

}  // namespace trace_event
}  // namespace base