//=-- lsan_common.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
BlockingMutex global_mutex(LINKER_INITIALIZED);

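// Per-thread nesting counter, incremented by __lsan_disable() and decremented
// by __lsan_enable(); DisabledInThisThread() is true while it is positive.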
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

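// Sets flag defaults, then overrides them from the LSAN_OPTIONS environment
// variable.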
static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->suppressions = "";
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
    ParseFlag(options, &f->suppressions, "suppressions");
  }
}

SuppressionContext *suppression_ctx;

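// Constructs the global suppression context in static storage and seeds it
// with the suppressions file named by the "suppressions" flag and with
// __lsan_default_suppressions(), if that hook is defined.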
void InitializeSuppressions() {
  CHECK(!suppression_ctx);
  ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
  suppression_ctx = new(placeholder_) SuppressionContext;
  char *suppressions_from_file;
  uptr buffer_size;
  if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
                       &buffer_size, 1 << 26 /* max_len */))
    suppression_ctx->Parse(suppressions_from_file);
  if (flags()->suppressions[0] && !buffer_size) {
    Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
           flags()->suppressions);
    Die();
  }
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

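// Common LSan initialization: flags, suppressions and platform-specific state.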
void InitCommonLsan() {
  InitializeFlags();
  InitializeSuppressions();
  InitializePlatformSpecificModules();
}

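// Fast heuristic check that |p| could plausibly point into the heap: rejects
// low addresses and, on x86-64, non-canonical user-space values.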
static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
             chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

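// Drains |frontier|: scans each chunk popped from it, tagging and pushing any
// newly discovered chunks back, until the frontier is empty.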
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

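// Fetches the stack trace with the given id from the stack depot and prints it
// using the common symbolization settings.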
static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
    } else {
      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

// ForEachChunk callback. Prints addresses of unreachable chunks.
static void PrintLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %zu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), chunk);
  }
}

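// Prints a table of the suppressions that matched at least one leak, with
// their hit counts and suppressed byte totals.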
static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  suppression_ctx->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb, 0 /* arg */);
}

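// Parameters shared between DoLeakCheck() and the stop-the-world callback.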
struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

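// Runs with the world stopped: classifies all chunks, collects leaked chunks
// into the report and optionally prints individual leaked objects.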
static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

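// Entry point for leak checking: suspends all threads via StopTheWorld(),
// classifies the heap and reports leaks. Only the first call does any work.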
void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  uptr have_unsuppressed = param.leak_report.ApplySuppressions();
  if (have_unsuppressed) {
    Printf("\n"
           "================================================================="
           "\n");
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    param.leak_report.PrintLargest(flags()->max_leaks);
  }
  if (have_unsuppressed || (flags()->verbosity >= 1)) {
    PrintMatchedSuppressions();
    param.leak_report.PrintSummary();
  }
  if (have_unsuppressed && flags()->exitcode)
    internal__exit(flags()->exitcode);
}

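// Symbolizes |addr| and returns the first leak suppression that matches the
// function, file or module name of any resulting frame, or 0 if none match.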
static Suppression *GetSuppressionForAddr(uptr addr) {
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames.data(),
                                                    kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    Suppression* s;
    if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
      return s;
  }
  return 0;
}

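// Walks the given stack trace and returns the first suppression that matches
// any of its frames, or 0 if the stack is not suppressed.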
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  for (uptr i = 0; i < size; i++) {
    Suppression *s =
        GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

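// Records one leaked chunk: merges it into an existing entry with the same
// stack trace and leak kind, or starts a new entry if there is room.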
void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked, /* is_suppressed */ false };
  leaks_.push_back(leak);
}

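// Orders direct leaks before indirect ones, and larger leaks before smaller
// ones within each group.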
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

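// Sorts the leaks and prints up to |num_leaks_to_print| unsuppressed entries
// (all of them if the argument is 0), each with its allocation stack.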
void LeakReport::PrintLargest(uptr num_leaks_to_print) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) unsuppressed_count++;
  if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
    Printf("The %zu largest leak(s):\n", num_leaks_to_print);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_printed = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
    leaks_printed++;
    if (leaks_printed == num_leaks_to_print) break;
  }
  if (leaks_printed < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_printed;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

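// Prints the total number of leaked bytes and allocations across all
// unsuppressed leaks.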
void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  const int kMaxSummaryLength = 128;
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), kMaxSummaryLength,
                    "LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).",
                    bytes, allocations);
  __sanitizer_report_error_summary(summary.data());
}

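// Matches each leak against the suppression rules, marks suppressed leaks and
// returns the number of leaks that remain unsuppressed.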
uptr LeakReport::ApplySuppressions() {
  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    } else {
      unsuppressed_count++;
    }
  }
  return unsuppressed_count;
}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"