//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(const StackTrace& trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow();
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

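// Post-processes a symbolized stack for reporting: strips the
// "__interceptor_" prefix from function names, removes the configured path
// prefix and a leading "./" from file names, and trims the bottom-most frame
// (the one past main(), or our internal thread start routine, or
// __do_global_ctors_aux).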
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame2->next = 0;
  (void)last;
#endif
}

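// Fetches a previously stored stack trace from the stack depot by its id
// and symbolizes it. Returns 0 for an empty id or a failed depot lookup.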
ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  if (stack == 0)
    return 0;
  StackTrace trace;
  trace.Init(stack, ssz);
  return SymbolizeStack(trace);
}

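// Symbolizes every pc in the trace (shifting return addresses back into the
// call instruction), chains the resulting frames into a single ReportStack,
// and strips uninteresting bottom frames via StackStripMain().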
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    const uptr pc = trace.Get(si);
#ifndef TSAN_GO
    // We obtain the return address, that is, the address of the next
    // instruction, so offset it by 1 byte.
    const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
#else
    // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
    uptr pc1 = pc;
    if (si != trace.Size() - 1)
      pc1 -= 1;
#endif
    ReportStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    while (last->next) {
      last->pc = pc;  // restore original pc for report
      last = last->next;
    }
    last->pc = pc;  // restore original pc for report
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}

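// The caller must hold the thread registry lock. The constructor allocates
// the report descriptor and acquires the report mutexes; the destructor
// releases the mutexes and frees the descriptor.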
ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(const StackTrace *stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(*stack);
  (*rs)->suppressable = suppressable;
}

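// Records one of the racing memory accesses in the report: the exact address
// and size recovered from the shadow value, whether it was a write and/or an
// atomic access, its symbolized stack, and the set of mutexes held at the
// time of the access.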
void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(*stack);
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#ifndef TSAN_GO
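// Linear scan over the thread registry looking for the thread with the given
// unique id; requires the registry to be locked by the caller.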
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return 0;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

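// Returns the running thread whose stack or TLS range contains addr
// (or 0 if there is none) and reports via *is_stack whether the address
// belongs to the stack rather than the TLS region.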
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#ifndef TSAN_GO
  AddThread(FindThreadByUidLocked(unique_tid), suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

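// Adds the mutex identified by the (address, uid) pair packed into id.
// If the mutex is still alive and the uid matches, it is added as a live
// mutex; otherwise it is recorded as a destroyed one. Returns the id under
// which the mutex is referenced from the memory access description.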
u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

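// Describes the memory location of the racy address in the report:
// a file descriptor, a heap block (with its allocation stack and owning
// thread), a thread's stack or TLS range, and/or a global variable
// resolved via SymbolizeData().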
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->siz;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    loc->stack = SymbolizeStackId(b->stk);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    AddThread(tctx);
  }
  ReportLocation *loc = SymbolizeData(addr);
  if (loc) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef TSAN_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by taking the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace till the given epoch.
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(kShadowStackSize);
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}

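// Checks whether an equivalent race (same pair of stacks or an overlapping
// address range) has already been reported. If either part matches, the other
// part is recorded as seen and the function returns true so that the caller
// drops the duplicate report.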
static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
                          uptr addr_min, uptr addr_max) {
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

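// Final stage of reporting: matches the report against user suppressions,
// gives the OnReport() hook a chance to veto it (a matched suppression is
// vetoed by default), and otherwise prints it. Returns true iff the report
// was actually printed.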
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr suppress_pc = 0;
  for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, suppress_pc != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed)
      return false;
  }
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    internal__exit(flags()->exitcode);
  return true;
}

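// Returns true if a suppression of the same report type has already fired
// for one of the pcs in the given trace, and bumps its hit count.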
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.Size(); j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.Get(j) == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}

bool FrameIsInternal(const ReportStack *frame) {
  return frame != 0 && frame->file != 0
      && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
          internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
          internal_strstr(frame->file, "tsan_interface_"));
}

// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
#ifndef TSAN_GO
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    if (frame == 0
        || (frame->func == 0 && frame->file == 0 && frame->line == 0
          && frame->module == 0)) {
      return true;
    }
    if (FrameIsInternal(frame)) {
      frame = frame->next;
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
            && frame->module == 0)) {
        if (frame) {
          FiredSuppression supp = {rep->typ, frame->pc, 0};
          ctx->fired_suppressions.push_back(supp);
        }
        return true;
      }
    }
  }
#endif
  return false;
}

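// Returns true if the race should still be reported when report_atomic_races
// is off: either neither access is atomic, or an atomic access races with a
// free (use-after-free).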
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

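// Entry point for race reporting. Reconstructs both racing accesses (the
// current one and the historical one restored from the trace), applies the
// various suppression and deduplication checks, fills in threads, mutexes
// and the memory location, and hands the report to OutputReport().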
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // The symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  PrintStack(SymbolizeStack(trace));
}

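// Unwinds the current stack with the slow unwinder, reverses the frame order,
// and symbolizes and prints it. Used on fatal error paths such as
// TsanCheckFailed().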
void PrintCurrentStackSlow() {
#ifndef TSAN_GO
  __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
      sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
  ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(), 0, 0,
                 0, 0, false);
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace[i];
    ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
    ptrace->trace[ptrace->size - i - 1] = tmp;
  }
  StackTrace trace;
  trace.Init(ptrace->trace, ptrace->size);
  PrintStack(SymbolizeStack(trace));
#endif
}

}  // namespace __tsan