// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_context_tracker.h"

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>
#include <iterator>

#include "base/atomicops.h"
#include "base/debug/debugging_buildflags.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/no_destructor.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "build/build_config.h"

#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include <sys/prctl.h>
#endif

namespace base {
namespace trace_event {

subtle::Atomic32 AllocationContextTracker::capture_mode_ =
    static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);

namespace {

const size_t kMaxStackDepth = 128u;
const size_t kMaxTaskDepth = 16u;
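// Sentinel stored in the TLS slot while a thread's tracker is being
// constructed, so that re-entrant calls (constructing the tracker itself
// allocates) can detect this and bail out.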
AllocationContextTracker* const kInitializingSentinel =
    reinterpret_cast<AllocationContextTracker*>(-1);

// This function is added to the TLS slot to clean up the instance when the
// thread exits.
void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
  delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
}

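// Returns the TLS slot holding the per-thread tracker. NoDestructor avoids
// registering an exit-time destructor for the slot.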
ThreadLocalStorage::Slot& AllocationContextTrackerTLS() {
  static NoDestructor<ThreadLocalStorage::Slot> tls_alloc_ctx_tracker(
      &DestructAllocationContextTracker);
  return *tls_alloc_ctx_tracker;
}

// Cannot call ThreadIdNameManager::GetName because it acquires a lock; if an
// allocation happens while ThreadIdNameManager already holds that lock, the
// profiler would deadlock. Instead, gets the thread name from the kernel if
// available, or falls back to a string containing the thread id. The returned
// strings are intentionally leaked, since they are used to tag allocations
// even after the thread dies.
const char* GetAndLeakThreadName() {
  char name[16];
#if defined(OS_LINUX) || defined(OS_ANDROID)
  // If the thread name is not set, try to get it from prctl. Thread name might
  // not be set in cases where the thread started before heap profiling was
  // enabled.
  int err = prctl(PR_GET_NAME, name);
  if (!err) {
    return strdup(name);
  }
#endif  // defined(OS_LINUX) || defined(OS_ANDROID)

  // Use tid if we don't have a thread name.
  snprintf(name, sizeof(name), "%lu",
           static_cast<unsigned long>(PlatformThread::CurrentId()));
  return strdup(name);
}

}  // namespace

// static
AllocationContextTracker*
AllocationContextTracker::GetInstanceForCurrentThread() {
  AllocationContextTracker* tracker = static_cast<AllocationContextTracker*>(
      AllocationContextTrackerTLS().Get());
  if (tracker == kInitializingSentinel)
    return nullptr;  // Re-entrancy case.

  if (!tracker) {
    AllocationContextTrackerTLS().Set(kInitializingSentinel);
    tracker = new AllocationContextTracker();
    AllocationContextTrackerTLS().Set(tracker);
  }

  return tracker;
}

AllocationContextTracker::AllocationContextTracker()
    : thread_name_(nullptr), ignore_scope_depth_(0) {
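  // Reserving full capacity up front guarantees that pushes below the caps
  // never reallocate the vectors.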
  tracked_stack_.reserve(kMaxStackDepth);
  task_contexts_.reserve(kMaxTaskDepth);
}

AllocationContextTracker::~AllocationContextTracker() = default;

// static
void AllocationContextTracker::SetCurrentThreadName(const char* name) {
  if (name && capture_mode() != CaptureMode::DISABLED) {
    GetInstanceForCurrentThread()->thread_name_ = name;
  }
}

// static
void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
  // Release ordering ensures that when a thread observes |capture_mode_| to
  // be enabled through an acquire load, the TLS slot has been initialized.
  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
}
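
// Illustrative usage (a sketch, not code from this file): the heap profiler
// enables capture once, and per-thread code then snapshots the context, e.g.
// from an allocation hook:
//
//   AllocationContextTracker::SetCaptureMode(CaptureMode::PSEUDO_STACK);
//   ...
//   AllocationContext ctx;
//   auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
//   if (tracker && tracker->GetContextSnapshot(&ctx)) {
//     // Attribute the current allocation to |ctx|.
//   }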

void AllocationContextTracker::PushPseudoStackFrame(
    AllocationContextTracker::PseudoStackFrame stack_frame) {
  // Impose a limit on the height to verify that every push is popped: in
  // practice the pseudo stack never grows higher than ~20 frames, so hitting
  // this limit means pushes are not being matched by pops.
  if (tracked_stack_.size() < kMaxStackDepth) {
    tracked_stack_.push_back(
        StackFrame::FromTraceEventName(stack_frame.trace_event_name));
  } else {
    NOTREACHED();
  }
}

void AllocationContextTracker::PopPseudoStackFrame(
    AllocationContextTracker::PseudoStackFrame stack_frame) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the frame was never pushed, so it is possible that pop is called
  // on an empty stack.
  if (tracked_stack_.empty())
    return;

  tracked_stack_.pop_back();
}

void AllocationContextTracker::PushNativeStackFrame(const void* pc) {
  if (tracked_stack_.size() < kMaxStackDepth)
    tracked_stack_.push_back(StackFrame::FromProgramCounter(pc));
  else
    NOTREACHED();
}

void AllocationContextTracker::PopNativeStackFrame(const void* pc) {
  if (tracked_stack_.empty())
    return;

  DCHECK_EQ(pc, tracked_stack_.back().value);
  tracked_stack_.pop_back();
}

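// Task contexts tag allocations with the context of the task currently
// running on this thread (e.g. its posted-from location); the top of
// |task_contexts_| is reported as the type name by GetContextSnapshot().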
void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
  DCHECK(context);
  if (task_contexts_.size() < kMaxTaskDepth)
    task_contexts_.push_back(context);
  else
    NOTREACHED();
}

void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the context was never pushed, so it is possible that pop is called
  // on an empty stack.
  if (task_contexts_.empty())
    return;

  DCHECK_EQ(context, task_contexts_.back())
      << "Encountered an unmatched context end";
  task_contexts_.pop_back();
}

bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
  if (ignore_scope_depth_)
    return false;

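  // NoBarrier_Load is sufficient here: a thread that reads a stale value
  // simply attributes this allocation under the previous capture mode.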
  CaptureMode mode = static_cast<CaptureMode>(
      subtle::NoBarrier_Load(&capture_mode_));

  auto* backtrace = std::begin(ctx->backtrace.frames);
  auto* backtrace_end = std::end(ctx->backtrace.frames);

  if (!thread_name_) {
    // Ignore the string allocation made by GetAndLeakThreadName to avoid
    // reentrancy.
    ignore_scope_depth_++;
    thread_name_ = GetAndLeakThreadName();
    ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
    DCHECK(thread_name_);
    ignore_scope_depth_--;
  }

  // Add the thread name as the first entry in the pseudo stack.
  if (thread_name_) {
    *backtrace++ = StackFrame::FromThreadName(thread_name_);
  }

  switch (mode) {
    case CaptureMode::DISABLED: {
      break;
    }
    case CaptureMode::PSEUDO_STACK:
    case CaptureMode::MIXED_STACK: {
      for (const StackFrame& stack_frame : tracked_stack_) {
        if (backtrace == backtrace_end)
          break;
        *backtrace++ = stack_frame;
      }
      break;
    }
    case CaptureMode::NATIVE_STACK: {
      // The Backtrace contract requires us to return bottom frames, i.e.
      // from main() and up. Stack unwinding produces top frames, i.e.
      // from this point and up until main(). We intentionally request
      // kMaxFrameCount + 1 frames, so that we know if there are more frames
      // than our backtrace capacity.
#if !defined(OS_NACL)  // We don't build base/debug/stack_trace.cc for NaCl.
#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
      const void* frames[Backtrace::kMaxFrameCount + 1];
      static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
                    "not requesting enough frames to fill Backtrace");
      size_t frame_count =
          CFIBacktraceAndroid::GetInitializedInstance()->Unwind(
              frames, arraysize(frames));
#elif BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
      const void* frames[Backtrace::kMaxFrameCount + 1];
      static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
                    "not requesting enough frames to fill Backtrace");
      size_t frame_count = debug::TraceStackFramePointers(
          frames, arraysize(frames),
          1 /* exclude this function from the trace */);
#else
      // Fall back to capturing the stack with base::debug::StackTrace,
      // which is likely slower, but more reliable.
      base::debug::StackTrace stack_trace(Backtrace::kMaxFrameCount + 1);
      size_t frame_count = 0u;
      const void* const* frames = stack_trace.Addresses(&frame_count);
#endif

      // If there are too many frames, keep the ones furthest from main().
      size_t backtrace_capacity = backtrace_end - backtrace;
      int32_t starting_frame_index = frame_count;
      if (frame_count > backtrace_capacity) {
        starting_frame_index = backtrace_capacity - 1;
        *backtrace++ = StackFrame::FromTraceEventName("<truncated>");
      }
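      // |frames| is ordered from the innermost frame towards main(); copy it
      // in reverse so the resulting backtrace is ordered from main() down to
      // the innermost captured frame.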
      for (int32_t i = starting_frame_index - 1; i >= 0; --i) {
        const void* frame = frames[i];
        *backtrace++ = StackFrame::FromProgramCounter(frame);
      }
#endif  // !defined(OS_NACL)
      break;
    }
  }

  ctx->backtrace.frame_count = backtrace - std::begin(ctx->backtrace.frames);

  // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
  // (component name) in the heap profiler and not piggyback on the type name.
  if (!task_contexts_.empty()) {
    ctx->type_name = task_contexts_.back();
  } else {
    ctx->type_name = nullptr;
  }

  return true;
}

}  // namespace trace_event
}  // namespace base