//===-- asan_stats.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Code related to statistics collected by AddressSanitizer.
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

AsanStats::AsanStats() {
  Clear();
}

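// Zero every counter. REAL(memset) is used so the call goes directly to the
// real libc memset rather than through the ASan interceptor.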
void AsanStats::Clear() {
  CHECK(REAL(memset));
  REAL(memset)(this, 0, sizeof(AsanStats));
}

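// Print the non-zero entries of a per-size-class counter array as
// "<size class>:<count>" pairs on a single line.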
static void PrintMallocStatsArray(const char *prefix,
                                  uptr (&array)[kNumberOfSizeClasses]) {
  Printf("%s", prefix);
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    if (!array[i]) continue;
    Printf("%zu:%zu; ", i, array[i]);
  }
  Printf("\n");
}

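// Dump all counters in human-readable form; byte counters are reported in
// megabytes (>> 20).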
void AsanStats::Print() {
  Printf("Stats: %zuM malloced (%zuM for red zones) by %zu calls\n",
             malloced>>20, malloced_redzones>>20, mallocs);
  Printf("Stats: %zuM realloced by %zu calls\n", realloced>>20, reallocs);
  Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees);
  Printf("Stats: %zuM really freed by %zu calls\n",
             really_freed>>20, real_frees);
  Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
             (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
             mmaps, munmaps);

  PrintMallocStatsArray("  mmaps   by size class: ", mmaped_by_size);
  PrintMallocStatsArray("  mallocs by size class: ", malloced_by_size);
  PrintMallocStatsArray("  frees   by size class: ", freed_by_size);
  PrintMallocStatsArray("  rfrees  by size class: ", really_freed_by_size);
  Printf("Stats: malloc large: %zu small slow: %zu\n",
             malloc_large, malloc_small_slow);
}

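// Element-wise addition of every counter in |stats| into this object.
// AsanStats is treated as a flat array of uptr fields.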
void AsanStats::MergeFrom(const AsanStats *stats) {
  uptr *dst_ptr = reinterpret_cast<uptr*>(this);
  const uptr *src_ptr = reinterpret_cast<const uptr*>(stats);
  uptr num_fields = sizeof(*this) / sizeof(uptr);
  for (uptr i = 0; i < num_fields; i++)
    dst_ptr[i] += src_ptr[i];
}

static BlockingMutex print_lock(LINKER_INITIALIZED);

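// Stats that cannot be attributed to a live AsanThread: unknown_thread_stats
// collects events from threads ASan does not know about, and
// dead_threads_stats (guarded by dead_threads_stats_lock) accumulates the
// totals of threads that have already exited.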
static AsanStats unknown_thread_stats(LINKER_INITIALIZED);
static AsanStats dead_threads_stats(LINKER_INITIALIZED);
static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED);
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread AsanStats.
static uptr max_malloced_memory;

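// ThreadRegistry callback: merge the stats of a single live thread into the
// accumulator passed via |arg|.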
static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) {
  AsanStats *accumulated_stats = reinterpret_cast<AsanStats*>(arg);
  AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
  if (AsanThread *t = tctx->thread)
    accumulated_stats->MergeFrom(&t->stats());
}

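// Merge the stats of all live threads, the unknown-thread bucket and all dead
// threads into *stats, and keep track of the peak amount of malloced memory.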
static void GetAccumulatedStats(AsanStats *stats) {
  stats->Clear();
  {
    ThreadRegistryLock l(&asanThreadRegistry());
    asanThreadRegistry()
        .RunCallbackForEachThreadLocked(MergeThreadStats, stats);
  }
  stats->MergeFrom(&unknown_thread_stats);
  {
    BlockingMutexLock lock(&dead_threads_stats_lock);
    stats->MergeFrom(&dead_threads_stats);
  }
  // This is not very accurate: we may miss allocation peaks that happen
  // between two calls to GetAccumulatedStats(). For more accurate bookkeeping
  // the maximum would have to be updated on every malloc(), which is too
  // expensive.
  if (max_malloced_memory < stats->malloced) {
    max_malloced_memory = stats->malloced;
  }
}

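// Fold *stats into dead_threads_stats and clear it, so the totals of a thread
// that is going away are not lost with its AsanThread object.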
void FlushToDeadThreadStats(AsanStats *stats) {
  BlockingMutexLock lock(&dead_threads_stats_lock);
  dead_threads_stats.MergeFrom(stats);
  stats->Clear();
}

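// Translate the accumulated stats into the AsanMallocStats layout used for
// malloc_zone_statistics() on OS X.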
void FillMallocStatistics(AsanMallocStats *malloc_stats) {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  malloc_stats->blocks_in_use = stats.mallocs;
  malloc_stats->size_in_use = stats.malloced;
  malloc_stats->max_size_in_use = max_malloced_memory;
  malloc_stats->size_allocated = stats.mmaped;
}

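// Stats object of the current thread, or the shared unknown_thread_stats
// bucket if no AsanThread is registered for this thread.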
AsanStats &GetCurrentThreadStats() {
  AsanThread *t = GetCurrentThread();
  return (t) ? t->stats() : unknown_thread_stats;
}

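// Print the merged stats together with StackDepot and internal allocator
// statistics.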
static void PrintAccumulatedStats() {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  // Use a lock to keep reports from mixing up.
  BlockingMutexLock lock(&print_lock);
  stats.Print();
  StackDepotStats *stack_depot_stats = StackDepotGetStats();
  Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
         stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
  PrintInternalAllocatorStats();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

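// Bytes currently in use by the program: total malloced minus total freed.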
uptr __sanitizer_get_current_allocated_bytes() {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  uptr malloced = stats.malloced;
  uptr freed = stats.freed;
  // Return a sane value if malloced < freed due to the racy way we update
  // accumulated stats.
  return (malloced > freed) ? malloced - freed : 1;
}
uptr __asan_get_current_allocated_bytes() {
  return __sanitizer_get_current_allocated_bytes();
}

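// Total memory currently mmaped: mapped minus unmapped bytes.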
uptr __sanitizer_get_heap_size() {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  return stats.mmaped - stats.munmaped;
}
uptr __asan_get_heap_size() {
  return __sanitizer_get_heap_size();
}

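// An estimate of the memory that is mapped but not occupied by live
// allocations or their redzones.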
uptr __sanitizer_get_free_bytes() {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  uptr total_free = stats.mmaped
                  - stats.munmaped
                  + stats.really_freed
                  + stats.really_freed_redzones;
  uptr total_used = stats.malloced
                  + stats.malloced_redzones;
  // Return a sane value if total_free < total_used due to the racy way we
  // update accumulated stats.
  return (total_free > total_used) ? total_free - total_used : 1;
}
uptr __asan_get_free_bytes() {
  return __sanitizer_get_free_bytes();
}

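// ASan reports no memory as unmapped.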
uptr __sanitizer_get_unmapped_bytes() {
  return 0;
}
uptr __asan_get_unmapped_bytes() {
  return __sanitizer_get_unmapped_bytes();
}

void __asan_print_accumulated_stats() {
  PrintAccumulatedStats();
}