• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
//===-- asan_stats.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Code related to statistics collected by AddressSanitizer.
//===----------------------------------------------------------------------===//
14 #include "asan_interceptors.h"
15 #include "asan_internal.h"
16 #include "asan_stats.h"
17 #include "asan_thread.h"
18 #include "sanitizer_common/sanitizer_mutex.h"
19 #include "sanitizer_common/sanitizer_stackdepot.h"
20 
21 namespace __asan {
22 
AsanStats()23 AsanStats::AsanStats() {
24   CHECK(REAL(memset));
25   REAL(memset)(this, 0, sizeof(AsanStats));
26 }
27 
PrintMallocStatsArray(const char * prefix,uptr (& array)[kNumberOfSizeClasses])28 static void PrintMallocStatsArray(const char *prefix,
29                                   uptr (&array)[kNumberOfSizeClasses]) {
30   Printf("%s", prefix);
31   for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
32     if (!array[i]) continue;
33     Printf("%zu:%zu; ", i, array[i]);
34   }
35   Printf("\n");
36 }
37 
Print()38 void AsanStats::Print() {
39   Printf("Stats: %zuM malloced (%zuM for red zones) by %zu calls\n",
40              malloced>>20, malloced_redzones>>20, mallocs);
41   Printf("Stats: %zuM realloced by %zu calls\n", realloced>>20, reallocs);
42   Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees);
43   Printf("Stats: %zuM really freed by %zu calls\n",
44              really_freed>>20, real_frees);
45   Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
46              (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
47              mmaps, munmaps);
48 
49   PrintMallocStatsArray("  mmaps   by size class: ", mmaped_by_size);
50   PrintMallocStatsArray("  mallocs by size class: ", malloced_by_size);
51   PrintMallocStatsArray("  frees   by size class: ", freed_by_size);
52   PrintMallocStatsArray("  rfrees  by size class: ", really_freed_by_size);
53   Printf("Stats: malloc large: %zu small slow: %zu\n",
54              malloc_large, malloc_small_slow);
55 }
56 
57 static BlockingMutex print_lock(LINKER_INITIALIZED);
58 
PrintAccumulatedStats()59 static void PrintAccumulatedStats() {
60   AsanStats stats;
61   GetAccumulatedStats(&stats);
62   // Use lock to keep reports from mixing up.
63   BlockingMutexLock lock(&print_lock);
64   stats.Print();
65   StackDepotStats *stack_depot_stats = StackDepotGetStats();
66   Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
67          stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
68   PrintInternalAllocatorStats();
69 }
70 
71 static AsanStats unknown_thread_stats(LINKER_INITIALIZED);
72 static AsanStats accumulated_stats(LINKER_INITIALIZED);
73 // Required for malloc_zone_statistics() on OS X. This can't be stored in
74 // per-thread AsanStats.
75 static uptr max_malloced_memory;
76 static BlockingMutex acc_stats_lock(LINKER_INITIALIZED);
77 
FlushToAccumulatedStatsUnlocked(AsanStats * stats)78 static void FlushToAccumulatedStatsUnlocked(AsanStats *stats) {
79   acc_stats_lock.CheckLocked();
80   uptr *dst = (uptr*)&accumulated_stats;
81   uptr *src = (uptr*)stats;
82   uptr num_fields = sizeof(*stats) / sizeof(uptr);
83   for (uptr i = 0; i < num_fields; i++) {
84     dst[i] += src[i];
85     src[i] = 0;
86   }
87 }
88 
FlushThreadStats(ThreadContextBase * tctx_base,void * arg)89 static void FlushThreadStats(ThreadContextBase *tctx_base, void *arg) {
90   AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
91   if (AsanThread *t = tctx->thread)
92     FlushToAccumulatedStatsUnlocked(&t->stats());
93 }
94 
UpdateAccumulatedStatsUnlocked()95 static void UpdateAccumulatedStatsUnlocked() {
96   acc_stats_lock.CheckLocked();
97   {
98     ThreadRegistryLock l(&asanThreadRegistry());
99     asanThreadRegistry().RunCallbackForEachThreadLocked(FlushThreadStats, 0);
100   }
101   FlushToAccumulatedStatsUnlocked(&unknown_thread_stats);
102   // This is not very accurate: we may miss allocation peaks that happen
103   // between two updates of accumulated_stats_. For more accurate bookkeeping
104   // the maximum should be updated on every malloc(), which is unacceptable.
105   if (max_malloced_memory < accumulated_stats.malloced) {
106     max_malloced_memory = accumulated_stats.malloced;
107   }
108 }
109 
FlushToAccumulatedStats(AsanStats * stats)110 void FlushToAccumulatedStats(AsanStats *stats) {
111   BlockingMutexLock lock(&acc_stats_lock);
112   FlushToAccumulatedStatsUnlocked(stats);
113 }
114 
GetAccumulatedStats(AsanStats * stats)115 void GetAccumulatedStats(AsanStats *stats) {
116   BlockingMutexLock lock(&acc_stats_lock);
117   UpdateAccumulatedStatsUnlocked();
118   internal_memcpy(stats, &accumulated_stats, sizeof(accumulated_stats));
119 }
120 
FillMallocStatistics(AsanMallocStats * malloc_stats)121 void FillMallocStatistics(AsanMallocStats *malloc_stats) {
122   BlockingMutexLock lock(&acc_stats_lock);
123   UpdateAccumulatedStatsUnlocked();
124   malloc_stats->blocks_in_use = accumulated_stats.mallocs;
125   malloc_stats->size_in_use = accumulated_stats.malloced;
126   malloc_stats->max_size_in_use = max_malloced_memory;
127   malloc_stats->size_allocated = accumulated_stats.mmaped;
128 }
129 
GetCurrentThreadStats()130 AsanStats &GetCurrentThreadStats() {
131   AsanThread *t = GetCurrentThread();
132   return (t) ? t->stats() : unknown_thread_stats;
133 }
134 
135 }  // namespace __asan
136 
137 // ---------------------- Interface ---------------- {{{1
138 using namespace __asan;  // NOLINT
139 
__asan_get_current_allocated_bytes()140 uptr __asan_get_current_allocated_bytes() {
141   BlockingMutexLock lock(&acc_stats_lock);
142   UpdateAccumulatedStatsUnlocked();
143   uptr malloced = accumulated_stats.malloced;
144   uptr freed = accumulated_stats.freed;
145   // Return sane value if malloced < freed due to racy
146   // way we update accumulated stats.
147   return (malloced > freed) ? malloced - freed : 1;
148 }
149 
__asan_get_heap_size()150 uptr __asan_get_heap_size() {
151   BlockingMutexLock lock(&acc_stats_lock);
152   UpdateAccumulatedStatsUnlocked();
153   return accumulated_stats.mmaped - accumulated_stats.munmaped;
154 }
155 
__asan_get_free_bytes()156 uptr __asan_get_free_bytes() {
157   BlockingMutexLock lock(&acc_stats_lock);
158   UpdateAccumulatedStatsUnlocked();
159   uptr total_free = accumulated_stats.mmaped
160                   - accumulated_stats.munmaped
161                   + accumulated_stats.really_freed
162                   + accumulated_stats.really_freed_redzones;
163   uptr total_used = accumulated_stats.malloced
164                   + accumulated_stats.malloced_redzones;
165   // Return sane value if total_free < total_used due to racy
166   // way we update accumulated stats.
167   return (total_free > total_used) ? total_free - total_used : 1;
168 }
169 
__asan_get_unmapped_bytes()170 uptr __asan_get_unmapped_bytes() {
171   return 0;
172 }
173 
__asan_print_accumulated_stats()174 void __asan_print_accumulated_stats() {
175   PrintAccumulatedStats();
176 }
177