// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/thread_heap_usage_tracker.h"

#include <stdint.h>
#include <algorithm>
#include <limits>
#include <new>
#include <type_traits>

#include "base/allocator/allocator_shim.h"
#include "base/allocator/buildflags.h"
#include "base/logging.h"
#include "base/no_destructor.h"
#include "base/threading/thread_local_storage.h"
#include "build/build_config.h"

#if defined(OS_MACOSX) || defined(OS_IOS)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif

namespace base {
namespace debug {

namespace {

using base::allocator::AllocatorDispatch;

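// Sentinel values stored in the TLS slot while the per-thread ThreadHeapUsage
// instance is being created or torn down. Both sentinels have every bit of
// kSentinelMask set, so GetOrCreateThreadUsage() below can detect either one
// with a single mask comparison and bail out rather than re-enter the
// allocator.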
const uintptr_t kSentinelMask = std::numeric_limits<uintptr_t>::max() - 1;
ThreadHeapUsage* const kInitializationSentinel =
    reinterpret_cast<ThreadHeapUsage*>(kSentinelMask);
ThreadHeapUsage* const kTeardownSentinel =
    reinterpret_cast<ThreadHeapUsage*>(kSentinelMask | 1);

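// Returns the TLS slot that holds the current thread's ThreadHeapUsage
// instance (or one of the sentinels above). The slot's destructor function
// deletes that instance on thread exit.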
ThreadLocalStorage::Slot& ThreadAllocationUsage() {
  static NoDestructor<ThreadLocalStorage::Slot> thread_allocator_usage(
      [](void* thread_heap_usage) {
        // This destructor will be called twice. Once to destroy the actual
        // ThreadHeapUsage instance and a second time, immediately after, for
        // the sentinel. Re-setting the TLS slot (below) does re-initialize the
        // TLS slot. The ThreadLocalStorage code is designed to deal with this
        // use case and will re-call the destructor with the kTeardownSentinel
        // as arg.
        if (thread_heap_usage == kTeardownSentinel)
          return;
        DCHECK_NE(thread_heap_usage, kInitializationSentinel);

        // Deleting the ThreadHeapUsage TLS object will re-enter the shim and
        // hit RecordFree() (see below). The sentinel prevents RecordFree() from
        // re-creating another ThreadHeapUsage object.
        ThreadAllocationUsage().Set(kTeardownSentinel);
        delete static_cast<ThreadHeapUsage*>(thread_heap_usage);
      });
  return *thread_allocator_usage;
}

bool g_heap_tracking_enabled = false;

// Forward declared as it needs to delegate memory allocation to the next
// lower shim.
ThreadHeapUsage* GetOrCreateThreadUsage();

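// Returns the underlying allocator's size estimate for |ptr|, or 0 for a null
// pointer.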
size_t GetAllocSizeEstimate(const AllocatorDispatch* next,
                            void* ptr,
                            void* context) {
  if (ptr == nullptr)
    return 0U;

  return next->get_size_estimate_function(next, ptr, context);
}

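// Charges one allocation of |size| bytes to the current thread's usage stats,
// using the allocator's size estimate for |ptr| when it is available.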
void RecordAlloc(const AllocatorDispatch* next,
                 void* ptr,
                 size_t size,
                 void* context) {
  ThreadHeapUsage* usage = GetOrCreateThreadUsage();
  if (usage == nullptr)
    return;

  usage->alloc_ops++;
  size_t estimate = GetAllocSizeEstimate(next, ptr, context);
  if (size && estimate) {
    // Only keep track of the net number of bytes allocated in the scope if the
    // size estimate function returns sane values, i.e. non-zero.
    usage->alloc_bytes += estimate;
    usage->alloc_overhead_bytes += estimate - size;

    // Record the max outstanding number of bytes, but only if the difference
    // is net positive (i.e. more bytes allocated than freed in the scope).
    if (usage->alloc_bytes > usage->free_bytes) {
      uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
      if (allocated_bytes > usage->max_allocated_bytes)
        usage->max_allocated_bytes = allocated_bytes;
    }
  } else {
    usage->alloc_bytes += size;
  }
}

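// Charges one free of |ptr| to the current thread's usage stats, using the
// allocator's size estimate for the freed block.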
void RecordFree(const AllocatorDispatch* next, void* ptr, void* context) {
  ThreadHeapUsage* usage = GetOrCreateThreadUsage();
  if (usage == nullptr)
    return;

  size_t estimate = GetAllocSizeEstimate(next, ptr, context);
  usage->free_ops++;
  usage->free_bytes += estimate;
}

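// The functions below implement the AllocatorDispatch interface. Each one
// forwards the operation to the next dispatch in the shim chain and records
// the result against the current thread's usage.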
void* AllocFn(const AllocatorDispatch* self, size_t size, void* context) {
  void* ret = self->next->alloc_function(self->next, size, context);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size, context);

  return ret;
}

void* AllocZeroInitializedFn(const AllocatorDispatch* self,
                             size_t n,
                             size_t size,
                             void* context) {
  void* ret =
      self->next->alloc_zero_initialized_function(self->next, n, size, context);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size, context);

  return ret;
}

void* AllocAlignedFn(const AllocatorDispatch* self,
                     size_t alignment,
                     size_t size,
                     void* context) {
  void* ret =
      self->next->alloc_aligned_function(self->next, alignment, size, context);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size, context);

  return ret;
}

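// A realloc is recorded as a free of the old block followed by an allocation
// of the new one.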
void* ReallocFn(const AllocatorDispatch* self,
                void* address,
                size_t size,
                void* context) {
  if (address != nullptr)
    RecordFree(self->next, address, context);

  void* ret = self->next->realloc_function(self->next, address, size, context);
  if (ret != nullptr && size != 0)
    RecordAlloc(self->next, ret, size, context);

  return ret;
}

void FreeFn(const AllocatorDispatch* self, void* address, void* context) {
  if (address != nullptr)
    RecordFree(self->next, address, context);
  self->next->free_function(self->next, address, context);
}

size_t GetSizeEstimateFn(const AllocatorDispatch* self,
                         void* address,
                         void* context) {
  return self->next->get_size_estimate_function(self->next, address, context);
}

unsigned BatchMallocFn(const AllocatorDispatch* self,
                       size_t size,
                       void** results,
                       unsigned num_requested,
                       void* context) {
  unsigned count = self->next->batch_malloc_function(self->next, size, results,
                                                     num_requested, context);
  for (unsigned i = 0; i < count; ++i) {
    RecordAlloc(self->next, results[i], size, context);
  }
  return count;
}

void BatchFreeFn(const AllocatorDispatch* self,
                 void** to_be_freed,
                 unsigned num_to_be_freed,
                 void* context) {
  for (unsigned i = 0; i < num_to_be_freed; ++i) {
    if (to_be_freed[i] != nullptr) {
      RecordFree(self->next, to_be_freed[i], context);
    }
  }
  self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
                                  context);
}

void FreeDefiniteSizeFn(const AllocatorDispatch* self,
                        void* ptr,
                        size_t size,
                        void* context) {
  if (ptr != nullptr)
    RecordFree(self->next, ptr, context);
  self->next->free_definite_size_function(self->next, ptr, size, context);
}

// The allocator dispatch used to intercept heap operations.
AllocatorDispatch allocator_dispatch = {&AllocFn,
                                        &AllocZeroInitializedFn,
                                        &AllocAlignedFn,
                                        &ReallocFn,
                                        &FreeFn,
                                        &GetSizeEstimateFn,
                                        &BatchMallocFn,
                                        &BatchFreeFn,
                                        &FreeDefiniteSizeFn,
                                        nullptr};

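// Returns the current thread's ThreadHeapUsage instance, creating it on first
// use. Returns nullptr while the TLS slot holds one of the sentinels, i.e.
// when this is a re-entrant call from the allocation or teardown path.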
ThreadHeapUsage* GetOrCreateThreadUsage() {
  auto tls_ptr = reinterpret_cast<uintptr_t>(ThreadAllocationUsage().Get());
  if ((tls_ptr & kSentinelMask) == kSentinelMask)
    return nullptr;  // Re-entrancy case.

  auto* allocator_usage = reinterpret_cast<ThreadHeapUsage*>(tls_ptr);
  if (allocator_usage == nullptr) {
    // Prevent reentrancy due to the allocation below.
    ThreadAllocationUsage().Set(kInitializationSentinel);

    allocator_usage = new ThreadHeapUsage();
    static_assert(std::is_pod<ThreadHeapUsage>::value,
                  "ThreadHeapUsage must be POD");
    memset(allocator_usage, 0, sizeof(*allocator_usage));
    ThreadAllocationUsage().Set(allocator_usage);
  }

  return allocator_usage;
}

}  // namespace

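// Example usage (illustrative sketch): scope heap-usage measurement around a
// block of work. DoExpensiveWork() is a hypothetical placeholder, and the
// usage() accessor is assumed to be the one declared in
// thread_heap_usage_tracker.h.
//
//   base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
//
//   base::debug::ThreadHeapUsageTracker tracker;
//   tracker.Start();
//   DoExpensiveWork();
//   tracker.Stop(false /* usage_is_exclusive */);
//   uint64_t net_bytes =
//       tracker.usage().alloc_bytes - tracker.usage().free_bytes;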
ThreadHeapUsageTracker::ThreadHeapUsageTracker() : thread_usage_(nullptr) {
  static_assert(std::is_pod<ThreadHeapUsage>::value, "Must be POD.");
}

ThreadHeapUsageTracker::~ThreadHeapUsageTracker() {
  DCHECK(thread_checker_.CalledOnValidThread());

  if (thread_usage_ != nullptr) {
    // If this tracker wasn't stopped, make it inclusive so that the
    // usage isn't lost.
    Stop(false);
  }
}

void ThreadHeapUsageTracker::Start() {
  DCHECK(thread_checker_.CalledOnValidThread());

  thread_usage_ = GetOrCreateThreadUsage();
  usage_ = *thread_usage_;

  // Reset the stats for our current scope.
  // The per-thread usage instance now tracks this scope's usage, while this
  // instance persists the outer scope's usage stats. On destruction, this
  // instance will restore the outer scope's usage stats with this scope's
  // usage added.
  memset(thread_usage_, 0, sizeof(*thread_usage_));
}

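// Stops tracking for this scope. When |usage_is_exclusive| is true, the outer
// scope's saved stats are restored unchanged; otherwise this scope's counters
// are folded into them, including a recomputed max_allocated_bytes. Either
// way, this instance ends up holding the stats accrued within the scope.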
void ThreadHeapUsageTracker::Stop(bool usage_is_exclusive) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_NE(nullptr, thread_usage_);

  ThreadHeapUsage current = *thread_usage_;
  if (usage_is_exclusive) {
    // Restore the outer scope.
    *thread_usage_ = usage_;
  } else {
    // Update the outer scope with the accrued inner usage.
    if (thread_usage_->max_allocated_bytes) {
      uint64_t outer_net_alloc_bytes = usage_.alloc_bytes - usage_.free_bytes;

      thread_usage_->max_allocated_bytes =
          std::max(usage_.max_allocated_bytes,
                   outer_net_alloc_bytes + thread_usage_->max_allocated_bytes);
    }

    thread_usage_->alloc_ops += usage_.alloc_ops;
    thread_usage_->alloc_bytes += usage_.alloc_bytes;
    thread_usage_->alloc_overhead_bytes += usage_.alloc_overhead_bytes;
    thread_usage_->free_ops += usage_.free_ops;
    thread_usage_->free_bytes += usage_.free_bytes;
  }

  thread_usage_ = nullptr;
  usage_ = current;
}

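// Returns a copy of the calling thread's accumulated usage stats without
// resetting them.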
ThreadHeapUsage ThreadHeapUsageTracker::GetUsageSnapshot() {
  ThreadHeapUsage* usage = GetOrCreateThreadUsage();
  DCHECK_NE(nullptr, usage);
  return *usage;
}

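// Installs |allocator_dispatch| into the allocator shim chain. May only be
// called once, and requires the allocator shim to be compiled in
// (USE_ALLOCATOR_SHIM).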
void ThreadHeapUsageTracker::EnableHeapTracking() {
  EnsureTLSInitialized();

  CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling.";
  g_heap_tracking_enabled = true;
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  base::allocator::InsertAllocatorDispatch(&allocator_dispatch);
#else
  CHECK(false) << "Can't enable heap tracking without the shim.";
#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
}

bool ThreadHeapUsageTracker::IsHeapTrackingEnabled() {
  return g_heap_tracking_enabled;
}

void ThreadHeapUsageTracker::DisableHeapTrackingForTesting() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch);
#else
  CHECK(false) << "Can't disable heap tracking without the shim.";
#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
  DCHECK_EQ(true, g_heap_tracking_enabled) << "Heap tracking not enabled.";
  g_heap_tracking_enabled = false;
}

base::allocator::AllocatorDispatch*
ThreadHeapUsageTracker::GetDispatchForTesting() {
  return &allocator_dispatch;
}

void ThreadHeapUsageTracker::EnsureTLSInitialized() {
  ignore_result(ThreadAllocationUsage());
}

}  // namespace debug
}  // namespace base