// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_context_tracker.h"

#include <stdio.h>
#include <string.h>

#include <algorithm>
#include <iterator>

#include "base/atomicops.h"
#include "base/debug/debugging_flags.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context.h"

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include <sys/prctl.h>
#endif
namespace base {
namespace trace_event {

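// |capture_mode_| is examined on every hooked allocation, so it is kept in an
// atomic: the hot path reads it without a barrier, and SetCaptureMode()
// publishes changes with a release store (see below).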
subtle::Atomic32 AllocationContextTracker::capture_mode_ =
    static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);

namespace {

const size_t kMaxStackDepth = 128u;
const size_t kMaxTaskDepth = 16u;
AllocationContextTracker* const kInitializingSentinel =
    reinterpret_cast<AllocationContextTracker*>(-1);
const char kTracingOverhead[] = "tracing_overhead";

ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;

// This function is added to the TLS slot to clean up the instance when the
// thread exits.
void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
  delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
}

// Cannot call ThreadIdNameManager::GetName because it holds a lock and causes
// a deadlock when the lock is already held by ThreadIdNameManager before the
// current allocation. Gets the thread name from the kernel if available, or
// returns a string with the thread id. This function intentionally leaks the
// allocated strings, since they are used to tag allocations even after the
// thread dies.
const char* GetAndLeakThreadName() {
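  // prctl(PR_GET_NAME) writes at most 16 bytes, including the terminating
  // NUL (the kernel's TASK_COMM_LEN), which is why |name| is 16 bytes.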
  char name[16];
#if defined(OS_LINUX) || defined(OS_ANDROID)
  // If the thread name is not set, try to get it from prctl. Thread name
  // might not be set in cases where the thread started before heap profiling
  // was enabled.
  int err = prctl(PR_GET_NAME, name);
  if (!err) {
    return strdup(name);
  }
#endif  // defined(OS_LINUX) || defined(OS_ANDROID)

  // Use tid if we don't have a thread name.
  snprintf(name, sizeof(name), "%lu",
           static_cast<unsigned long>(PlatformThread::CurrentId()));
  return strdup(name);
}

}  // namespace

// static
AllocationContextTracker*
AllocationContextTracker::GetInstanceForCurrentThread() {
  AllocationContextTracker* tracker =
      static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get());
  if (tracker == kInitializingSentinel)
    return nullptr;  // Re-entrancy case.

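  // The sentinel marks that construction is in progress: |new| below
  // allocates (and the constructor reserves stack capacity), which re-enters
  // the allocator hooks on this thread; those hooks observe the sentinel via
  // the check above and back off instead of recursing.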
  if (!tracker) {
    g_tls_alloc_ctx_tracker.Set(kInitializingSentinel);
    tracker = new AllocationContextTracker();
    g_tls_alloc_ctx_tracker.Set(tracker);
  }

  return tracker;
}

AllocationContextTracker::AllocationContextTracker()
    : thread_name_(nullptr), ignore_scope_depth_(0) {
  pseudo_stack_.reserve(kMaxStackDepth);
  task_contexts_.reserve(kMaxTaskDepth);
}

AllocationContextTracker::~AllocationContextTracker() {}

// static
void AllocationContextTracker::SetCurrentThreadName(const char* name) {
  if (name && capture_mode() != CaptureMode::DISABLED) {
    GetInstanceForCurrentThread()->thread_name_ = name;
  }
}

// static
void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
  // When enabling capturing, also initialize the TLS slot. This does not
  // create a TLS instance yet.
  if (mode != CaptureMode::DISABLED && !g_tls_alloc_ctx_tracker.initialized())
    g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);

  // Release ordering ensures that when a thread observes |capture_mode_| to
  // be enabled through an acquire load, the TLS slot has been initialized.
  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
}
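
// The matching reader lives in the header's capture_mode() accessor. A
// sketch of the pairing (illustrative; see the header for the real code):
//
//   static CaptureMode capture_mode() {
//     // Fast path: no barrier while profiling is disabled.
//     if (subtle::NoBarrier_Load(&capture_mode_) ==
//         static_cast<int32_t>(CaptureMode::DISABLED))
//       return CaptureMode::DISABLED;
//     // Slow path: acquire pairs with the Release_Store above, so the TLS
//     // slot is guaranteed to be initialized once a mode is observed.
//     return static_cast<CaptureMode>(subtle::Acquire_Load(&capture_mode_));
//   }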

void AllocationContextTracker::PushPseudoStackFrame(
    const char* trace_event_name) {
  // Impose a limit on the height to verify that every push is popped, because
  // in practice the pseudo stack never grows higher than ~20 frames.
  if (pseudo_stack_.size() < kMaxStackDepth)
    pseudo_stack_.push_back(trace_event_name);
  else
    NOTREACHED();
}

void AllocationContextTracker::PopPseudoStackFrame(
    const char* trace_event_name) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the frame was never pushed, so it is possible that pop is called
  // on an empty stack.
  if (pseudo_stack_.empty())
    return;

  // Assert that pushes and pops are nested correctly. This DCHECK can be
  // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
  // without a corresponding TRACE_EVENT_BEGIN).
  DCHECK_EQ(trace_event_name, pseudo_stack_.back())
      << "Encountered an unmatched TRACE_EVENT_END";

  pseudo_stack_.pop_back();
}
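
// Illustration (a sketch, not code from this file): the push/pop pairs above
// are driven by TRACE_EVENT scoping, so a well-formed trace keeps them
// balanced automatically:
//
//   void DoWork() {
//     TRACE_EVENT0("category", "DoWork");  // pushes "DoWork" on entry
//     DoAllocation();  // a snapshot here sees [thread name, "DoWork"]
//   }                  // scope exit pops "DoWork"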

void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
  DCHECK(context);
  if (task_contexts_.size() < kMaxTaskDepth)
    task_contexts_.push_back(context);
  else
    NOTREACHED();
}

void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the context was never pushed, so it is possible that pop is called
  // on an empty stack.
  if (task_contexts_.empty())
    return;

  DCHECK_EQ(context, task_contexts_.back())
      << "Encountered an unmatched context end";
  task_contexts_.pop_back();
}
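
// Illustration (a sketch of intended use, not code from this file): task
// contexts are pushed around task execution so allocations made while a task
// runs are attributed to its posting site, e.g. via the scoped task-execution
// macro in base/trace_event/heap_profiler.h:
//
//   TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION
//       heap_profiler_scope(posted_from.file_name());
//   task.Run();  // Snapshots during Run() report the posting file name.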

AllocationContext AllocationContextTracker::GetContextSnapshot() {
  AllocationContext ctx;

  if (ignore_scope_depth_) {
    ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
    ctx.type_name = kTracingOverhead;
    ctx.backtrace.frame_count = 1;
    return ctx;
  }

  CaptureMode mode = static_cast<CaptureMode>(
      subtle::NoBarrier_Load(&capture_mode_));

  auto* backtrace = std::begin(ctx.backtrace.frames);
  auto* backtrace_end = std::end(ctx.backtrace.frames);

  if (!thread_name_) {
    // Ignore the string allocation made by GetAndLeakThreadName to avoid
    // reentrancy.
    ignore_scope_depth_++;
    thread_name_ = GetAndLeakThreadName();
    ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
    DCHECK(thread_name_);
    ignore_scope_depth_--;
  }

  // Add the thread name as the first entry in the pseudo stack.
  if (thread_name_) {
    *backtrace++ = StackFrame::FromThreadName(thread_name_);
  }

  switch (mode) {
    case CaptureMode::DISABLED:
      {
        break;
      }
    case CaptureMode::PSEUDO_STACK:
      {
        for (const char* event_name : pseudo_stack_) {
          if (backtrace == backtrace_end) {
            break;
          }
          *backtrace++ = StackFrame::FromTraceEventName(event_name);
        }
        break;
      }
    case CaptureMode::NATIVE_STACK:
      {
        // Backtrace contract requires us to return bottom frames, i.e.
        // from main() and up. Stack unwinding produces top frames, i.e.
        // from this point and up until main(). We request many frames to
        // make sure we reach main(), and then copy the bottom portion of
        // them.
        const void* frames[128];
        static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
                      "not requesting enough frames to fill Backtrace");
#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL)
        size_t frame_count = debug::TraceStackFramePointers(
            frames,
            arraysize(frames),
            1 /* exclude this function from the trace */);
#else
        size_t frame_count = 0;
        NOTREACHED();
#endif

        // Copy frames backwards so that the bottom-most frame comes first.
        size_t backtrace_capacity = backtrace_end - backtrace;
        size_t top_frame_index = (backtrace_capacity >= frame_count)
                                     ? 0
                                     : frame_count - backtrace_capacity;
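        // Worked example: with frame_count == 10 and backtrace_capacity == 8,
        // top_frame_index == 2 and the loop below copies frames[9] down to
        // frames[2], i.e. the bottom 8 frames in main()-first order, dropping
        // the two innermost frames.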
        for (size_t i = frame_count; i > top_frame_index;) {
          const void* frame = frames[--i];
          *backtrace++ = StackFrame::FromProgramCounter(frame);
        }
        break;
      }
  }

  ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);

  // TODO(ssid): Fix crbug.com/594803 to add the file name as a third
  // dimension (component name) in the heap profiler instead of piggybacking
  // on the type name.
  ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();

  return ctx;
}
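
// Illustration (a sketch, not code from this file): an allocator hook would
// typically combine the pieces above like this:
//
//   void OnAllocation(size_t size) {
//     AllocationContextTracker* tracker =
//         AllocationContextTracker::GetInstanceForCurrentThread();
//     if (!tracker)
//       return;  // Re-entrant call while the tracker is being constructed.
//     AllocationContext ctx = tracker->GetContextSnapshot();
//     // Record |ctx| together with |size| in the allocation register.
//   }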

}  // namespace trace_event
}  // namespace base