• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //===-- memprof_rtl.cpp --------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of MemProfiler, a memory profiler.
10 //
11 // Main file of the MemProf run-time library.
12 //===----------------------------------------------------------------------===//
13 
14 #include "memprof_allocator.h"
15 #include "memprof_interceptors.h"
16 #include "memprof_interface_internal.h"
17 #include "memprof_internal.h"
18 #include "memprof_mapping.h"
19 #include "memprof_stack.h"
20 #include "memprof_stats.h"
21 #include "memprof_thread.h"
22 #include "sanitizer_common/sanitizer_atomic.h"
23 #include "sanitizer_common/sanitizer_flags.h"
24 #include "sanitizer_common/sanitizer_libc.h"
25 #include "sanitizer_common/sanitizer_symbolizer.h"
26 
27 #include <time.h>
28 
// Base address of the dynamically mapped shadow region; presumably filled in
// when shadow memory is set up (InitializeShadowMemory) — exported so
// instrumented code can read it.
uptr __memprof_shadow_memory_dynamic_address; // Global interface symbol.

// Allow the user to specify a profile output file via the binary.
// Weak 1-byte stub: a strong definition linked into the binary overrides it;
// consulted in MemprofInitInternal() when choosing the report path.
SANITIZER_WEAK_ATTRIBUTE char __memprof_profile_filename[1];
33 
34 namespace __memprof {
35 
MemprofDie()36 static void MemprofDie() {
37   static atomic_uint32_t num_calls;
38   if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
39     // Don't die twice - run a busy loop.
40     while (1) {
41     }
42   }
43   if (common_flags()->print_module_map >= 1)
44     DumpProcessMap();
45   if (flags()->unmap_shadow_on_exit) {
46     if (kHighShadowEnd)
47       UnmapOrDie((void *)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
48   }
49 }
50 
MemprofCheckFailed(const char * file,int line,const char * cond,u64 v1,u64 v2)51 static void MemprofCheckFailed(const char *file, int line, const char *cond,
52                                u64 v1, u64 v2) {
53   Report("MemProfiler CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, line,
54          cond, (uptr)v1, (uptr)v2);
55 
56   // Print a stack trace the first time we come here. Otherwise, we probably
57   // failed a CHECK during symbolization.
58   static atomic_uint32_t num_calls;
59   if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
60     PRINT_CURRENT_STACK_CHECK();
61   }
62 
63   Die();
64 }
65 
// -------------------------- Globals --------------------- {{{1
// Set to 1 once MemprofInitInternal() has progressed far enough for malloc
// to work (before thread creation).
int memprof_inited;
// Set to 1 only after MemprofInitInternal() has fully completed.
int memprof_init_done;
// Guards against recursive initialization (CHECKed in MemprofInitInternal).
bool memprof_init_is_running;
// Set once MemprofInitTime() has captured the start timestamp.
int memprof_timestamp_inited;
// Process start time in seconds (CLOCK_REALTIME), captured by
// MemprofInitTime().
long memprof_init_timestamp_s;

// Upper bound of the user address space; set by InitializeHighMemEnd().
uptr kHighMemEnd;
74 
75 // -------------------------- Run-time entry ------------------- {{{1
76 // exported functions
77 
// Body shared by the access callbacks below: record the access at `addr`
// with the profiler.
#define MEMPROF_MEMORY_ACCESS_CALLBACK_BODY() __memprof::RecordAccess(addr);

// Defines an extern "C" interface entry point __memprof_<type>(addr) that
// simply records the access. Instantiated for loads and stores below.
#define MEMPROF_MEMORY_ACCESS_CALLBACK(type)                                   \
  extern "C" NOINLINE INTERFACE_ATTRIBUTE void __memprof_##type(uptr addr) {   \
    MEMPROF_MEMORY_ACCESS_CALLBACK_BODY()                                      \
  }

MEMPROF_MEMORY_ACCESS_CALLBACK(load)
MEMPROF_MEMORY_ACCESS_CALLBACK(store)
87 
88 // Force the linker to keep the symbols for various MemProf interface
89 // functions. We want to keep those in the executable in order to let the
90 // instrumented dynamic libraries access the symbol even if it is not used by
91 // the executable itself. This should help if the build system is removing dead
92 // code at link time.
static NOINLINE void force_interface_symbols() {
  volatile int fake_condition = 0; // prevent dead condition elimination.
  // Reference each interface function behind an opaque (volatile) condition:
  // the calls can never execute, but the compiler/linker must keep the
  // symbols alive. Do not "simplify" this switch.
  // clang-format off
  switch (fake_condition) {
    case 1: __memprof_record_access(nullptr); break;
    case 2: __memprof_record_access_range(nullptr, 0); break;
  }
  // clang-format on
}
102 
// atexit handler, registered in MemprofInitInternal() when flags()->atexit
// is set: prints the accumulated profiling statistics at process exit.
static void memprof_atexit() {
  Printf("MemProfiler exit stats:\n");
  __memprof_print_accumulated_stats();
}
107 
InitializeHighMemEnd()108 static void InitializeHighMemEnd() {
109   kHighMemEnd = GetMaxUserVirtualAddress();
110   // Increase kHighMemEnd to make sure it's properly
111   // aligned together with kHighMemBeg:
112   kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
113 }
114 
// Dumps the process address-space layout (mem/shadow/gap region boundaries),
// the shadow mapping of the low-shadow bounds, and the key shadow constants;
// finally sanity-checks SHADOW_SCALE. Used for debugging/verbose output.
void PrintAddressSpaceLayout() {
  if (kHighMemBeg) {
    Printf("|| `[%p, %p]` || HighMem    ||\n", (void *)kHighMemBeg,
           (void *)kHighMemEnd);
    Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowBeg,
           (void *)kHighShadowEnd);
  }
  Printf("|| `[%p, %p]` || ShadowGap  ||\n", (void *)kShadowGapBeg,
         (void *)kShadowGapEnd);
  if (kLowShadowBeg) {
    Printf("|| `[%p, %p]` || LowShadow  ||\n", (void *)kLowShadowBeg,
           (void *)kLowShadowEnd);
    Printf("|| `[%p, %p]` || LowMem     ||\n", (void *)kLowMemBeg,
           (void *)kLowMemEnd);
  }
  // Where the shadow region itself maps to (shadow-of-shadow).
  Printf("MemToShadow(shadow): %p %p", (void *)MEM_TO_SHADOW(kLowShadowBeg),
         (void *)MEM_TO_SHADOW(kLowShadowEnd));
  if (kHighMemBeg) {
    Printf(" %p %p", (void *)MEM_TO_SHADOW(kHighShadowBeg),
           (void *)MEM_TO_SHADOW(kHighShadowEnd));
  }
  Printf("\n");
  Printf("malloc_context_size=%zu\n",
         (uptr)common_flags()->malloc_context_size);

  Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
  Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
  Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
  // The mapping arithmetic assumes a scale in this range.
  CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
}
145 
// Runs during dynamic initialization of this translation unit (before main):
// starts the optional background thread and registers the soft-RSS-limit
// callback. The bool result is unused; the lambda exists for side effects.
static bool UNUSED __local_memprof_dyninit = [] {
  MaybeStartBackgroudThread();
  SetSoftRssLimitExceededCallback(MemprofSoftRssLimitExceededCallback);

  return false;
}();
152 
// One-shot initialization of the MemProf runtime: flags, report path, shadow
// memory, interceptors, allocator, and the main thread. Returns immediately
// if already initialized; recursive entry is a fatal error. The statement
// order below is load-bearing — see the inline comments.
static void MemprofInitInternal() {
  if (LIKELY(memprof_inited))
    return;
  SanitizerToolName = "MemProfiler";
  CHECK(!memprof_init_is_running && "MemProf init calls itself!");
  memprof_init_is_running = true;

  CacheBinaryName();

  // Initialize flags. This must be done early, because most of the
  // initialization steps look at flags().
  InitializeFlags();

  AvoidCVE_2016_2143();

  SetMallocContextSize(common_flags()->malloc_context_size);

  InitializeHighMemEnd();

  // Make sure we are not statically linked.
  MemprofDoesNotSupportStaticLinkage();

  // Install tool-specific callbacks in sanitizer_common.
  AddDieCallback(MemprofDie);
  SetCheckFailedCallback(MemprofCheckFailed);

  // Use profile name specified via the binary itself if it exists, and hasn't
  // been overridden by a flag at runtime.
  if (__memprof_profile_filename[0] != 0 && !common_flags()->log_path)
    __sanitizer_set_report_path(__memprof_profile_filename);
  else
    __sanitizer_set_report_path(common_flags()->log_path);

  __sanitizer::InitializePlatformEarly();

  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  // Setup internal allocator callback.
  SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);

  InitializeMemprofInterceptors();
  CheckASLR();

  ReplaceSystemMalloc();

  DisableCoreDumperIfNecessary();

  InitializeShadowMemory();

  TSDInit(PlatformTSDDtor);

  InitializeAllocator();

  // On Linux MemprofThread::ThreadStart() calls malloc() that's why
  // memprof_inited should be set to 1 prior to initializing the threads.
  memprof_inited = 1;
  memprof_init_is_running = false;

  if (flags()->atexit)
    Atexit(memprof_atexit);

  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);

  // interceptors
  InitTlsSize();

  // Create main thread.
  MemprofThread *main_thread = CreateMainThread();
  CHECK_EQ(0, main_thread->tid());
  force_interface_symbols(); // no-op.
  SanitizerInitializeUnwinder();

  Symbolizer::LateInitialize();

  VReport(1, "MemProfiler Init done\n");

  memprof_init_done = 1;
}
232 
MemprofInitTime()233 void MemprofInitTime() {
234   if (LIKELY(memprof_timestamp_inited))
235     return;
236   timespec ts;
237   clock_gettime(CLOCK_REALTIME, &ts);
238   memprof_init_timestamp_s = ts.tv_sec;
239   memprof_timestamp_inited = 1;
240 }
241 
// Initialize as requested from some part of MemProf runtime library
// (interceptors, allocator, etc). Thin wrapper so internal callers don't
// reach MemprofInitInternal() directly.
void MemprofInitFromRtl() { MemprofInitInternal(); }
245 
#if MEMPROF_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into uninstrumented executable
// (and thus normal initializers from .preinit_array or modules haven't run).

// Its constructor runs MemprofInitFromRtl() during C++ static initialization
// of the shared runtime.
class MemprofInitializer {
public:
  MemprofInitializer() { MemprofInitFromRtl(); }
};

// Single global whose construction triggers the initialization above.
static MemprofInitializer memprof_initializer;
#endif // MEMPROF_DYNAMIC
257 
258 } // namespace __memprof
259 
260 // ---------------------- Interface ---------------- {{{1
261 using namespace __memprof;
262 
// Initialize as requested from instrumented application code.
void __memprof_init() {
  // Capture the start timestamp first, then run the one-time runtime init.
  MemprofInitTime();
  MemprofInitInternal();
}
268 
__memprof_preinit()269 void __memprof_preinit() { MemprofInitInternal(); }
270 
__memprof_version_mismatch_check_v1()271 void __memprof_version_mismatch_check_v1() {}
272 
// Records a single memory access at `addr` with the profiler. Called from
// instrumented code and from the unaligned load/store hooks in this file.
void __memprof_record_access(void const volatile *addr) {
  __memprof::RecordAccess((uptr)addr);
}
276 
// We only record the access on the first location in the range,
// since we will later accumulate the access counts across the
// full allocation, and we don't want to inflate the hotness from
// a memory intrinsic on a large range of memory.
// TODO: Should we do something else so we can better track utilization?
void __memprof_record_access_range(void const volatile *addr,
                                   UNUSED uptr size) {
  // `size` is deliberately ignored — see the rationale above.
  __memprof::RecordAccess((uptr)addr);
}
286 
287 extern "C" SANITIZER_INTERFACE_ATTRIBUTE u16
__sanitizer_unaligned_load16(const uu16 * p)288 __sanitizer_unaligned_load16(const uu16 *p) {
289   __memprof_record_access(p);
290   return *p;
291 }
292 
293 extern "C" SANITIZER_INTERFACE_ATTRIBUTE u32
__sanitizer_unaligned_load32(const uu32 * p)294 __sanitizer_unaligned_load32(const uu32 *p) {
295   __memprof_record_access(p);
296   return *p;
297 }
298 
299 extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
__sanitizer_unaligned_load64(const uu64 * p)300 __sanitizer_unaligned_load64(const uu64 *p) {
301   __memprof_record_access(p);
302   return *p;
303 }
304 
305 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store16(uu16 * p,u16 x)306 __sanitizer_unaligned_store16(uu16 *p, u16 x) {
307   __memprof_record_access(p);
308   *p = x;
309 }
310 
311 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store32(uu32 * p,u32 x)312 __sanitizer_unaligned_store32(uu32 *p, u32 x) {
313   __memprof_record_access(p);
314   *p = x;
315 }
316 
317 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store64(uu64 * p,u64 x)318 __sanitizer_unaligned_store64(uu64 *p, u64 x) {
319   __memprof_record_access(p);
320   *p = x;
321 }
322