// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_context_tracker.h"

#include <ostream>

#include "base/atomicops.h"
#include "base/check_op.h"
#include "base/no_destructor.h"
#include "base/notreached.h"
#include "base/threading/thread_local_storage.h"

namespace base {
namespace trace_event {

std::atomic<AllocationContextTracker::CaptureMode>
    AllocationContextTracker::capture_mode_{
        AllocationContextTracker::CaptureMode::kDisabled};

namespace {

const size_t kMaxTaskDepth = 16u;
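// Sentinel stored in the TLS slot while a tracker is being constructed, used
// by GetInstanceForCurrentThread() to detect re-entrant lookups.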
AllocationContextTracker* const kInitializingSentinel =
    reinterpret_cast<AllocationContextTracker*>(-1);

// This function is added to the TLS slot to clean up the instance when the
// thread exits.
void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
  delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
}

ThreadLocalStorage::Slot& AllocationContextTrackerTLS() {
  static NoDestructor<ThreadLocalStorage::Slot> tls_alloc_ctx_tracker(
      &DestructAllocationContextTracker);
  return *tls_alloc_ctx_tracker;
}

}  // namespace

// static
AllocationContextTracker*
AllocationContextTracker::GetInstanceForCurrentThread() {
  AllocationContextTracker* tracker = static_cast<AllocationContextTracker*>(
      AllocationContextTrackerTLS().Get());
  if (tracker == kInitializingSentinel)
    return nullptr;  // Re-entrancy case.

  if (!tracker) {
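    // Install the sentinel before constructing the tracker so that any
    // allocation made during construction hits the re-entrancy guard above
    // instead of recursing into this function.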
    AllocationContextTrackerTLS().Set(kInitializingSentinel);
    tracker = new AllocationContextTracker();
    AllocationContextTrackerTLS().Set(tracker);
  }

  return tracker;
}

AllocationContextTracker::AllocationContextTracker() {
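  // Reserve the maximum depth up front so pushes never reallocate, and seed
  // the stack with a catch-all context for allocations made outside any
  // tracked task.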
  task_contexts_.reserve(kMaxTaskDepth);
  task_contexts_.push_back("UntrackedTask");
}
AllocationContextTracker::~AllocationContextTracker() = default;

// static
void AllocationContextTracker::SetCurrentThreadName(const char* name) {
  if (name && capture_mode() != CaptureMode::kDisabled) {
    GetInstanceForCurrentThread()->thread_name_ = name;
  }
}

// static
void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
  // Release ordering ensures that when a thread observes a non-disabled
  // |capture_mode_| through an acquire load, the TLS slot has been
  // initialized.
  capture_mode_.store(mode, std::memory_order_release);
}

void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
  DCHECK(context);
  if (task_contexts_.size() < kMaxTaskDepth)
    task_contexts_.push_back(context);
  else
    NOTREACHED();
}

void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the context was never pushed, so it is possible that pop is called
  // on an empty stack. Note that the stack always contains "UntrackedTask".
  if (task_contexts_.size() == 1)
    return;

  DCHECK_EQ(context, task_contexts_.back())
      << "Encountered an unmatched context end";
  task_contexts_.pop_back();
}

}  // namespace trace_event
}  // namespace base