// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_ALLOCATION_TRACE_H_
#define BASE_DEBUG_ALLOCATION_TRACE_H_

#include <algorithm>
#include <array>
#include <atomic>
#include <bit>
#include <cstdint>

#include "base/allocator/dispatcher/notification_data.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/debug/debugging_buildflags.h"
#include "base/debug/stack_trace.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "build/build_config.h"

namespace base::debug::tracer {

// Number of traces that can be stored. This number must be a power of two to
// allow for fast computation of modulo.
constexpr size_t kMaximumNumberOfMemoryOperationTraces = (1 << 15);
// Number of frames stored for each operation. The lowest frames likely belong
// to the memory allocation system itself, so we store a generous number of
// frames to increase the chance of capturing a meaningful trace of the path
// that caused the allocation or free.
constexpr size_t kStackTraceSize = 16;

// The type of an operation stored in the recorder.
enum class OperationType {
  // The state of an operation record before calling any of the initialization
  // functions.
  kNone = 0,
  // The record represents an allocation operation.
  kAllocation,
  // The record represents a free operation.
  kFree,
};

using StackTraceContainer = std::array<const void*, kStackTraceSize>;

// The record for a single operation. A record can represent either type of
// operation, allocation or free, but not both at the same time.
//
// A record protects itself from concurrent initializations. If thread B calls
// any of the Initialize* functions while another thread A is still
// initializing, B's invocation immediately returns |false| without
// interfering with thread A.
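//
// For illustration only, a sketch of this contract (|ptr| and |ptr2| are
// hypothetical addresses, not part of the API):
//
//   OperationRecord record;
//   // Thread A:                             // Thread B (overlapping):
//   record.InitializeAllocation(ptr, 16);    record.InitializeFree(ptr2);
//   // If the calls overlap, only one of them fills the record and returns
//   // true; the other returns false and writes nothing.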
class BASE_EXPORT OperationRecord {
 public:
  constexpr OperationRecord() = default;

  OperationRecord(const OperationRecord&) = delete;
  OperationRecord& operator=(const OperationRecord&) = delete;

  // Is the record currently being taken?
  bool IsRecording() const;

  OperationType GetOperationType() const;
  // The address allocated or freed.
  const void* GetAddress() const;
  // Number of allocated bytes. Returns 0 for free operations.
  size_t GetSize() const;
  // The stack trace as taken by the Initialize* functions.
  const StackTraceContainer& GetStackTrace() const LIFETIME_BOUND;

  // Initialize the record with the data of a new operation. Data from any
  // previous operation will be silently overwritten. These functions are
  // declared ALWAYS_INLINE to minimize pollution of the recorded stack trace.
  //
  // Both functions return false if no record was taken, i.e. if another
  // thread is currently capturing.
  ALWAYS_INLINE bool InitializeFree(const void* freed_address) {
    return InitializeOperationRecord(freed_address, 0, OperationType::kFree);
  }

  ALWAYS_INLINE bool InitializeAllocation(const void* allocated_address,
                                          size_t allocated_size) {
    return InitializeOperationRecord(allocated_address, allocated_size,
                                     OperationType::kAllocation);
  }

 private:
  // Initialize a record with the given data. Return true if the record was
  // initialized successfully, false if no record was taken, i.e. if another
  // thread is capturing.
  ALWAYS_INLINE bool InitializeOperationRecord(const void* address,
                                               size_t size,
                                               OperationType operation_type);
  ALWAYS_INLINE void StoreStackTrace();

  // The stack trace taken in one of the Initialize* functions.
  StackTraceContainer stack_trace_ = {};
  // The number of allocated bytes.
  size_t size_ = 0;
  // The address that was allocated or freed.
  // We use a raw C++ pointer instead of base::raw_ptr for performance
  // reasons.
  // - In the recorder we only store pointers; we never allocate or free on
  //   our own.
  // - Storing is the hot path. base::raw_ptr::operator= may perform sanity
  //   checks which make no sense in our case (if the address were bad, the
  //   allocator itself would already be broken).
  RAW_PTR_EXCLUSION const void* address_ = nullptr;
  // The type of the operation that was performed. In the course of making a
  // record, this value is reset to |OperationType::kNone| and only later set
  // to the operation-specific value, so if the process crashes while writing
  // the record, it is marked as empty. To prevent the compiler from optimizing
  // away the initial reset, this value is marked volatile.
  volatile OperationType operation_type_ = OperationType::kNone;
  // Is the record currently being taken by another thread? Used to prevent
  // concurrent writes to the same record.
  //
  // The value is mutable since, pre-C++20, atomic_flag has no const getter:
  // all ways to read the value involve setting it.
  // TODO(crbug.com/42050406): Remove mutable and make IsRecording() use
  // atomic_flag::test().
  mutable std::atomic_flag is_recording_ = ATOMIC_FLAG_INIT;
};

ALWAYS_INLINE bool OperationRecord::InitializeOperationRecord(
    const void* address,
    size_t size,
    OperationType operation_type) {
  if (is_recording_.test_and_set(std::memory_order_acquire)) {
    return false;
  }

  operation_type_ = operation_type;
  StoreStackTrace();
  address_ = address;
  size_ = size;

  is_recording_.clear(std::memory_order_release);

  return true;
}

ALWAYS_INLINE void OperationRecord::StoreStackTrace() {
  stack_trace_.fill(nullptr);

#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
  // Currently we limit ourselves to using TraceStackFramePointers. We know
  // that TraceStackFramePointers has an acceptable performance impact on
  // Android.
  base::debug::TraceStackFramePointers(stack_trace_, 0);
#elif BUILDFLAG(IS_LINUX)
  // Use base::debug::CollectStackTrace as an alternative for tests on Linux.
  // We still have a check in /base/debug/debug.gni to prevent
  // AllocationStackTraceRecorder from being enabled accidentally on Linux.
  base::debug::CollectStackTrace(stack_trace_);
#else
#error "No supported stack tracer found."
#endif
}

struct BASE_EXPORT AllocationTraceRecorderStatistics {
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
  AllocationTraceRecorderStatistics(size_t total_number_of_allocations,
                                    size_t total_number_of_collisions);
#else
  AllocationTraceRecorderStatistics(size_t total_number_of_allocations);
#endif

  // The total number of allocations that have been recorded.
  size_t total_number_of_allocations;
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
  // The total number of collisions that have been encountered. A collision
  // happens when two threads concurrently try to record using the same slot.
  size_t total_number_of_collisions;
#endif
};

// The recorder which holds entries for past memory operations.
//
// The memory image of the recorder will be copied into the crash handler.
// Therefore, it must not rely on references to external data to function
// properly.
//
// It is important that the recorder itself does not allocate, both to prevent
// recursive calls and to keep the runtime overhead as low as possible.
//
// Therefore, records are stored in a preallocated buffer with a compile-time
// constant maximum size, see |kMaximumNumberOfMemoryOperationTraces|. Once all
// records have been used, the oldest records are overwritten (FIFO-style).
//
// The recorder works in a multithreaded environment without external locking.
// Concurrent writes are prevented by two means:
//  1 - We atomically increment and calculate the effective index of the record
//  to be written.
//  2 - If this entry is still being used (the recording thread didn't finish
//  yet), we go back to step 1 (see the sketch below).
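//
// A sketch of this scheme, for illustration only (the actual implementation
// lives in the corresponding .cc file and may differ, e.g. in collision
// accounting); |address| and |size| stand for the observed operation:
//
//   size_t idx = GetNextIndex();
//   while (!alloc_trace_buffer_[idx].InitializeAllocation(address, size)) {
//     idx = GetNextIndex();
//   }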
// Currently we do not enforce separate cache lines for each entry, which means
// false sharing can occur. On the other hand, with 64-byte cache lines a clean
// separation would introduce some 3*64 - sizeof(OperationRecord) = 40 bytes of
// padding per entry.
//
// Note: As a process might be terminated for whatever reason while stack
// traces are being written, the recorded data may contain some garbage.
//
// TODO(crbug.com/40258550): Evaluate the impact of the shared cache
// lines between entries.
class BASE_EXPORT AllocationTraceRecorder {
 public:
  constexpr AllocationTraceRecorder() = default;

  AllocationTraceRecorder(const AllocationTraceRecorder&) = delete;
  AllocationTraceRecorder& operator=(const AllocationTraceRecorder&) = delete;

  // The allocation event observer interface. See the dispatcher for further
  // details. The handling functions are marked NOINLINE, and all functions
  // they call, except the one taking the call stack, are marked ALWAYS_INLINE.
  // This way we ensure that the number of frames these functions add to the
  // recorded stack trace is fixed.
  inline void OnAllocation(
      const base::allocator::dispatcher::AllocationNotificationData&
          allocation_data);

  // Handle all free events.
  inline void OnFree(
      const base::allocator::dispatcher::FreeNotificationData& free_data);

  // Access functions to retrieve the current content of the recorder.
  // Note: Since the recorder is usually updated upon each allocation or free,
  // you have to prevent updates if you want a consistent view of the entries.

  // Get the current number of entries stored in the recorder. Once the
  // recorder has reached its maximum capacity, it always returns
  // |GetMaximumNumberOfTraces()|.
  size_t size() const;

  // Access the record of an operation by index. The oldest operation is always
  // accessible at index 0, the latest operation at |size() - 1|.
  // Note: Since a process might have crashed while a trace is being written,
  // the most recent records in particular might be corrupted.
  const OperationRecord& operator[](size_t idx) const;

  constexpr size_t GetMaximumNumberOfTraces() const {
    return kMaximumNumberOfMemoryOperationTraces;
  }

  AllocationTraceRecorderStatistics GetRecorderStatistics() const;

 private:
  // Handle all allocation events.
  NOINLINE void OnAllocation(const void* allocated_address,
                             size_t allocated_size);

  // Handle all free events.
  NOINLINE void OnFree(const void* freed_address);

  ALWAYS_INLINE size_t GetNextIndex();

  ALWAYS_INLINE static constexpr size_t WrapIdxIfNeeded(size_t idx);

  // The actual container.
  std::array<OperationRecord, kMaximumNumberOfMemoryOperationTraces>
      alloc_trace_buffer_ = {};
  // The total number of records that have been taken so far. Note that this
  // might be greater than |kMaximumNumberOfMemoryOperationTraces| since we
  // overwrite the oldest items.
  std::atomic<size_t> total_number_of_records_ = 0;
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
  std::atomic<size_t> total_number_of_collisions_ = 0;
#endif
};
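
// For illustration only: a sketch of reading back the recorded operations,
// assuming |recorder| is an AllocationTraceRecorder that has already observed
// some allocations and frees:
//
//   for (size_t i = 0; i < recorder.size(); ++i) {
//     const OperationRecord& record = recorder[i];
//     if (record.GetOperationType() == OperationType::kAllocation) {
//       // Inspect record.GetAddress(), record.GetSize() and the frames in
//       // record.GetStackTrace(); unused trailing frames stay nullptr.
//     }
//   }
//
//   // Summary counters, see AllocationTraceRecorderStatistics above.
//   const AllocationTraceRecorderStatistics stats =
//       recorder.GetRecorderStatistics();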

inline void AllocationTraceRecorder::OnAllocation(
    const base::allocator::dispatcher::AllocationNotificationData&
        allocation_data) {
  OnAllocation(allocation_data.address(), allocation_data.size());
}

// Handle all free events.
inline void AllocationTraceRecorder::OnFree(
    const base::allocator::dispatcher::FreeNotificationData& free_data) {
  OnFree(free_data.address());
}

ALWAYS_INLINE constexpr size_t AllocationTraceRecorder::WrapIdxIfNeeded(
    size_t idx) {
  // Wrap the counter around, e.g. for a buffer size of 256 the counter wraps
  // when reaching 256. To enable the compiler to emit more optimized code, we
  // assert that |kMaximumNumberOfMemoryOperationTraces| is a power of two.
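  //
  // For illustration only: with a power-of-two size, the modulo below can be
  // lowered to a simple bit mask, e.g.
  //   idx % (1 << 15) == (idx & ((1 << 15) - 1))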
  static_assert(
      std::has_single_bit(kMaximumNumberOfMemoryOperationTraces),
      "kMaximumNumberOfMemoryOperationTraces should be a power of 2 to "
      "allow for fast modulo operation.");

  return idx % kMaximumNumberOfMemoryOperationTraces;
}

ALWAYS_INLINE size_t AllocationTraceRecorder::GetNextIndex() {
  const auto raw_idx =
      total_number_of_records_.fetch_add(1, std::memory_order_relaxed);
  return WrapIdxIfNeeded(raw_idx);
}

}  // namespace base::debug::tracer

#endif  // BASE_DEBUG_ALLOCATION_TRACE_H_