// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_

#include <atomic>
#include <cstdint>
#include <vector>

#include "base/base_export.h"
14 namespace base {
15 namespace trace_event {
16 
17 // AllocationContextTracker is a thread-local object. Its main purpose is to
18 // keep track of context pointers for memory allocation samples. See
19 // |AllocationContext|.
20 //
21 // A thread-local instance of the context tracker is initialized lazily when it
22 // is first accessed.
23 class BASE_EXPORT AllocationContextTracker {
24  public:
25   enum class CaptureMode : int32_t {
26     kDisabled,     // Don't capture anything
27     kNativeStack,  // Backtrace has full native backtraces from stack unwinding
28   };
29 
30   // Globally sets capturing mode.
31   // TODO(primiano): How to guard against *Stack -> kDisabled -> *Stack?
32   static void SetCaptureMode(CaptureMode mode);
33 
34   // Returns global capturing mode.
capture_mode()35   inline static CaptureMode capture_mode() {
36     // A little lag after heap profiling is enabled or disabled is fine, it is
37     // more important that the check is as cheap as possible when capturing is
38     // not enabled, so do not issue a memory barrier in the fast path.
39     if (capture_mode_.load(std::memory_order_relaxed) ==
40         CaptureMode::kDisabled) {
41       return CaptureMode::kDisabled;
42     }
43 
44     // In the slow path, an acquire load is required to pair with the release
45     // store in |SetCaptureMode|. This is to ensure that the TLS slot for
46     // the thread-local allocation context tracker has been initialized if
47     // |capture_mode| returns something other than kDisabled.
48     return capture_mode_.load(std::memory_order_acquire);
49   }
50 
51   // Returns the thread-local instance, creating one if necessary. Returns
52   // always a valid instance, unless it is called re-entrantly, in which case
53   // returns nullptr in the nested calls.
54   static AllocationContextTracker* GetInstanceForCurrentThread();
55 
56   // Set the thread name in the AllocationContextTracker of the current thread
57   // if capture is enabled.
58   static void SetCurrentThreadName(const char* name);
59 
60   AllocationContextTracker(const AllocationContextTracker&) = delete;
61   AllocationContextTracker& operator=(const AllocationContextTracker&) = delete;
62 
63   // Push and pop current task's context. A stack is used to support nested
64   // tasks and the top of the stack will be used in allocation context.
65   void PushCurrentTaskContext(const char* context);
66   void PopCurrentTaskContext(const char* context);
67 
68   // Returns most recent task context added by ScopedTaskExecutionTracker.
69   // TODO(crbug.com/40875107): Audit callers of TaskContext() to see if
70   // any are useful. If not, remove AllocationContextTracker entirely.
TaskContext()71   const char* TaskContext() const {
72     return task_contexts_.empty() ? nullptr : task_contexts_.back();
73   }
74 
75   ~AllocationContextTracker();
76 
77  private:
78   AllocationContextTracker();
79 
80   static std::atomic<CaptureMode> capture_mode_;
81 
82   // The thread name is used as the first entry in the pseudo stack.
83   const char* thread_name_ = nullptr;
84 
85   // Stack of tasks' contexts.
86   std::vector<const char*> task_contexts_;
87 };
88 
89 }  // namespace trace_event
90 }  // namespace base

#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_