//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
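// For instance, a test could supply a strong definition along these lines
// (a sketch, not part of the runtime; ReportTypeRace is one of the
// ReportType values defined in tsan_report.h):
//
//   bool __tsan::OnReport(const ReportDesc *rep, bool suppressed) {
//     return suppressed || rep->typ != ReportTypeRace;  // keep only races
//   }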
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are null, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

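// Symbolizes a stack trace previously stored in the stack depot; returns
// null for the zero id or when the depot lookup yields no trace.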
ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return nullptr;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return nullptr;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

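// The report mutex is acquired in the constructor and released in the
// destructor, so a live ScopedReportBase serializes report production.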
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
  rep_ = nullptr;
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

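// Converts one racy memory access (decoded from its shadow value) into a
// ReportMop entry, together with the set of mutexes held at the time.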
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       StackTrace stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return nullptr;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReportBase::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

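// Resolves a mutex id captured in the trace back to a live SyncVar if
// possible; if the mutex is gone (or its address was reused by a new mutex),
// it is recorded as a destroyed mutex instead.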
u64 ScopedReportBase::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check the uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReportBase::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = nullptr;
}

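// Classifies the racy address and attaches a location description to the
// report: a file descriptor, a heap block, a thread stack/TLS range, or a
// symbolized global, tried in that order.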
void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = kInvalidTid;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = nullptr;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != nullptr) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

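// Each event in the thread trace packs the event type into the topmost bits
// and the PC (or mutex address) into the low kEventPCBits bits; RestoreStack
// below decodes events in exactly this format.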
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores the stack trace and mutex set for the
  // thread/epoch. It does so by getting the stack trace and mutex set at the
  // beginning of the trace part, and then replaying the trace up to the given
  // epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack;
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

static bool FindRacyStacks(const RacyStacks &hash) {
  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
    if (hash == ctx->racy_stacks[i]) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
      return true;
    }
  }
  return false;
}

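// Deduplicates reports by the pair of racing stacks: first checks under a
// read lock, then re-checks and inserts under a write lock, so the common
// already-seen case does not serialize report generation.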
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  if (!flags()->suppress_equal_stacks)
    return false;
  RacyStacks hash;
  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyStacks(hash))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyStacks(hash))
    return true;
  ctx->racy_stacks.PushBack(hash);
  return false;
}

static bool FindRacyAddress(const RacyAddress &ra0) {
  for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
    RacyAddress ra2 = ctx->racy_addresses[i];
    uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
    uptr minend = min(ra0.addr_max, ra2.addr_max);
    if (maxbeg < minend) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
  if (!flags()->suppress_equal_addresses)
    return false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyAddress(ra0))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyAddress(ra0))
    return true;
  ctx->racy_addresses.PushBack(ra0);
  return false;
}

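// Runs a finished report through suppressions and the user hooks, then
// prints it; returns true iff the report was actually emitted.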
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = nullptr;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

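// Decides whether a race involving atomic accesses should still be reported:
// true if neither access is atomic, or if an atomic access races with a free.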
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

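// Entry point for reporting a data race detected by the shadow state
// machinery: restores both access stacks, applies suppression and
// deduplication, and emits the report.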
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }
  if (HandleRacyAddress(thr, addr_min, addr_max))
    return;

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback and
    // a __tsan_symbolize_external callback and a race during a range memory
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in
    // MemoryAccessRange also triggers a race and we get here and call
    // TraceTopPC to get the current PC, however now it contains some
    // unrelated events from the callback. Most likely, TraceTopPC will now
    // return an EventTypeFuncExit event. Later we subtract 1 from it
    // (in GetPreviousInstructionPc) and the resulting PC has kExternalPCBit
    // set, so we pass it to __tsan_symbolize_external_ex.
    // __tsan_symbolize_external_ex is within its rights to crash since the
    // PC is completely bogus.
    // test/tsan/double_race.cpp contains a test case for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on stack.
  Vector<u64> mset_buffer;
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  OutputReport(thr, rep);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
// tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail-call.
// However, this solution is not reliable enough; please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(pc, bp, nullptr, false);

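  // SymbolizeStack above prepends frames while building the ReportStack,
  // which reverses their order; reverse the unwound trace here first so the
  // final report is printed innermost frame first.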
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
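// Public interface: prints the current thread's stack via the slow unwinder.
// Callers use the declaration from <sanitizer/common_interface_defs.h>, e.g.
// (a sketch; DumpHere is a hypothetical caller):
//
//   extern "C" void __sanitizer_print_stack_trace();
//   void DumpHere() { __sanitizer_print_stack_trace(); }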
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"