//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject,
// and also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

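// Per-thread nesting counter maintained by __lsan_disable()/__lsan_enable().
// Allocations made while it is positive are tagged as ignored by the
// allocator.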
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

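// Fills in the default flag values, then overrides them from the LSAN_OPTIONS
// environment variable.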
static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->print_suppressions = true;
  f->suppressions = "";
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_root_regions = true;
  f->use_unaligned = false;
  f->use_poisoned = false;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers", "");
    ParseFlag(options, &f->use_globals, "use_globals", "");
    ParseFlag(options, &f->use_stacks, "use_stacks", "");
    ParseFlag(options, &f->use_tls, "use_tls", "");
    ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
    ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
    ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
    ParseFlag(options, &f->report_objects, "report_objects", "");
    ParseFlag(options, &f->resolution, "resolution", "");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks", "");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->log_pointers, "log_pointers", "");
    ParseFlag(options, &f->log_threads, "log_threads", "");
    ParseFlag(options, &f->exitcode, "exitcode", "");
    ParseFlag(options, &f->print_suppressions, "print_suppressions", "");
    ParseFlag(options, &f->suppressions, "suppressions", "");
  }
}

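// Verbose-logging helpers gated by the log_pointers and log_threads flags.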
#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

SuppressionContext *suppression_ctx;

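// Parses the suppressions file named by the "suppressions" flag, followed by
// the optional user-defined __lsan_default_suppressions() list.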
void InitializeSuppressions() {
  CHECK(!suppression_ctx);
  ALIGNED(64) static char placeholder[sizeof(SuppressionContext)];
  suppression_ctx = new(placeholder) SuppressionContext;
  char *suppressions_from_file;
  uptr buffer_size;
  if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
                       &buffer_size, 1 << 26 /* max_len */))
    suppression_ctx->Parse(suppressions_from_file);
  if (flags()->suppressions[0] && !buffer_size) {
    Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
           flags()->suppressions);
    Die();
  }
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

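// A user-registered region that is scanned for live pointers in addition to
// globals, stacks and TLS.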
struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

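// Constructs the global root region list in a static placeholder buffer.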
void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

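// One-time initialization of the leak checking machinery shared by all tools
// that embed LSan.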
void InitCommonLsan() {
  InitializeFlags();
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

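// Supplies ANSI color codes for leak reports when color output is enabled.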
class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

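// Cheap rejection filter for values that cannot possibly be heap pointers.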
static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

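// ForEachExtraStackRange() callback: treats extra stack ranges (such as ASan's
// fake stack frames) as reachable.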
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (registers, stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on an alternate stack). Again, consider the entire
        // stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

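// Scans the intersection of |root_begin|..|root_end| with readable mapped
// memory for heap pointers.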
static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
                        &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

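// Drains |frontier|, scanning each chunk popped from it and pushing any newly
// tagged chunks back, until the transitive closure is tagged with |tag|.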
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If the chunk is marked as ignored, adds its address
// to the frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  LOG_POINTERS("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

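// Fetches a stack trace from the stack depot by id and prints it.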
static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      // Truncate the trace so that allocations differing only deeper in the
      // call stack are coalesced into a single leak.
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      stack_trace_id = StackDepotPut(trace, size);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

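// Prints a table of all suppressions that matched at least one leak.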
static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  suppression_ctx->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

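// Passed to the stop-the-world callback; |success| records whether the
// callback actually ran.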
struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

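// Runs with all other threads suspended: tags every chunk, then aggregates the
// leaked ones into the report.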
static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  param->success = true;
}

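// Top-level entry point. Performs the leak check at most once per process and
// exits with flags()->exitcode if unsuppressed leaks were found.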
void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}

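// Returns the first suppression matching the module, function or source file
// that |addr| symbolizes to, or 0 if there is none.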
static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s;

  // Suppress by module name.
  const char *module_name;
  uptr module_offset;
  if (Symbolizer::Get()->GetModuleNameAndOffsetForPC(addr, &module_name,
                                                     &module_offset) &&
      suppression_ctx->Match(module_name, SuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num = Symbolizer::Get()->SymbolizePC(
      addr, addr_frames.data(), kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s))
      return s;
  }
  return 0;
}

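// Returns a suppression matching any frame of the given stack trace, or 0.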
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  for (uptr i = 0; i < size; i++) {
    Suppression *s =
        GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

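// Folds the chunk into an existing leak with the same stack trace and leak
// kind, or starts a new one. The search is linear, hence the limit above.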
void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

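// Sort order for leaks: direct before indirect, then by decreasing total size.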
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

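// Prints the top unsuppressed leaks, at most |num_leaks_to_report| of them
// (zero means no limit).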
void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

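// Prints the summary line, the allocation stack, and (with report_objects)
// the individual leaked objects for the leak at |index|.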
void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

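// Lists the address and size of each leaked object attributed to the leak at
// |index|.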
void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

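// Prints the overall "N byte(s) leaked in M allocation(s)" summary across all
// unsuppressed leaks.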
void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), summary.size(),
                    "%zu byte(s) leaked in %zu allocation(s).", bytes,
                    allocations);
  ReportErrorSummary(summary.data());
}

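// Marks each leak whose stack matches a suppression, crediting its size and
// count to that suppression.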
void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    }
  }
}

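// Returns the number of leaks not matched by any suppression.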
uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

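///// Interface functions exported to the user. /////
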
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

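// Example (illustrative; |pool| and |pool_size| are a user's own variables):
//   __lsan_register_root_region(pool, pool_size);
//   ... use the pool; pointers stored in it keep heap objects alive ...
//   __lsan_unregister_root_region(pool, pool_size);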
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}

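// These two must be called in matching pairs. Heap allocations made while the
// per-thread disable counter is positive are ignored by the leak checker.
// Illustrative usage:
//   __lsan_disable();
//   intentionally_leaked = malloc(16);  // Not reported as a leak.
//   __lsan_enable();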
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"