//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
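
// A minimal sketch of an application-side override (the ShouldIgnoreRace
// helper is hypothetical); returning true suppresses the report:
//
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     return suppressed || ShouldIgnoreRace(rep);
//   }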

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#ifndef SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack can
    // actually happen if we do not instrument some code, so it's only a
    // debug print. However, we must try hard not to miss it due to our fault.
    DPrintf("Bottom stack frame is missing\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
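    // PCs tagged with kExternalPCBit come from the external symbolization
    // interface and are not real code addresses, so they are passed through
    // unchanged.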
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
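    // SymbolizeCode may return a chain of frames if pc1 falls into inlined
    // functions; stamp every frame in the chain with the original pc so the
    // report shows the address that was actually traced.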
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

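// ScopedReport holds both report mutexes for its entire lifetime, so a
// report is built and printed atomically with respect to other reports.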
ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                                   const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#ifndef SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#ifndef SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef SANITIZER_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#ifndef SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset) {
  // This function restores the stack trace and mutex set for the given
  // thread/epoch. It does so by taking the stack trace and mutex set at the
  // beginning of the trace part, and then replaying the trace up to the
  // given epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
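  // The trace is a circular buffer of TraceParts() parts, each holding
  // kTracePartSize events; map the epoch to a position inside that buffer.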
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack(MBlockReportStack);
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
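    // Each event packs its type into the top 3 bits and the PC into the
    // low 61 bits.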
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
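    // Replay the shadow stack: a memory access overwrites the top slot,
    // a function entry pushes and a function exit pops.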
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
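  // If replay left nothing on the stack, the trace could not be restored;
  // leave stk empty.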
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
}

static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
              "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
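        // Two ranges overlap iff the larger of the begins precedes the
        // smaller of the ends.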
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
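  // Scan every stack attached to the report for a matching suppression;
  // the first hit wins.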
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

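// Returns true if the race should be reported even with
// report_atomic_races=0: races between two plain accesses always qualify,
// races involving an atomic access qualify only if the other side is a free.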
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
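  // Compute the union [addr_min, addr_max) of the two accessed ranges.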
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  ObtainCurrentStack(thr, toppc, &traces[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on the stack.
  Vector<u64> mset_buffer(MBlockScopedBuf);
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2);
  }

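  // Attach the racing threads, but only if their context still covers the
  // racy epoch (the tid slot may have been reused since).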
  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef SANITIZER_GO
  {  // NOLINT
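    // If the second access happened before this thread's last sleep,
    // remember the sleep stack; the report prints it as a hint that the
    // race may be hidden by sleep-based synchronization.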
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inline PrintCurrentStackSlow, because LocatePcInTrace assumes
// that __sanitizer_print_stack_trace exists in the actual unwound stack;
// a tail call to PrintCurrentStackSlow would break this assumption because
// __sanitizer_print_stack_trace disappears after the tail call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#ifndef SANITIZER_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
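  // Unwind produces frames innermost-first, but SymbolizeStack expects the
  // outermost frame first (it prepends as it walks), so reverse in place.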
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"