1 //===-- tsan_rtl_report.cc ------------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "sanitizer_common/sanitizer_libc.h"
15 #include "sanitizer_common/sanitizer_placement_new.h"
16 #include "sanitizer_common/sanitizer_stackdepot.h"
17 #include "tsan_platform.h"
18 #include "tsan_rtl.h"
19 #include "tsan_suppressions.h"
20 #include "tsan_symbolize.h"
21 #include "tsan_report.h"
22 #include "tsan_sync.h"
23 #include "tsan_mman.h"
24 #include "tsan_flags.h"
25
26 namespace __sanitizer {
27 using namespace __tsan;
28
CheckFailed(const char * file,int line,const char * cond,u64 v1,u64 v2)29 void CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2) {
30 ScopedInRtl in_rtl;
31 TsanPrintf("FATAL: ThreadSanitizer CHECK failed: "
32 "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
33 file, line, cond, (uptr)v1, (uptr)v2);
34 Die();
35 }
36
37 } // namespace __sanitizer
38
39 namespace __tsan {
40
41 // Can be overriden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
// Default hook: leaves the suppression decision unchanged. Declared WEAK so
// an application/test can link in its own OnReport to inspect or mute
// reports (returning true suppresses the report).
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
50
// Post-processes a symbolized stack before it goes into a report:
//  - strips the "__interceptor_" prefix from interceptor function names,
//  - strips flags()->strip_path_prefix (and a leading "./") from file names,
//  - drops the bottom-most frame when it belongs to startup/runtime code
//    (below main(), the tsan thread start routine, or global ctors init),
//    so reports end at user code.
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;   // bottom-most frame seen so far
  ReportStack *last_frame2 = 0;  // frame immediately above last_frame
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  // Fewer than two frames: nothing can be stripped.
  if (last_frame2 == 0)
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // Go runtime: the bottom-most runtime frame to strip is 'schedunlock'.
  if (last && 0 == internal_strcmp(last, "schedunlock"))
    last_frame2->next = 0;
#endif
}
97
// Symbolizes a raw pc trace into a linked list of ReportStack frames with
// the innermost frame first. Returns 0 for an empty trace.
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
    // The last (topmost) pc is the faulting pc itself, not a return address,
    // so it is not adjusted (is_last => offset of 0).
    bool is_last = (si == trace.Size() - 1);
    ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    // SymbolizeCode() may return a chain of several frames for one pc
    // (e.g. inlining); restore the +1 offset on each frame's pc and find
    // the tail of the chain.
    while (last->next) {
      last->pc += !is_last;
      last = last->next;
    }
    last->pc += !is_last;
    // Prepend the chain, so the final list is innermost-first.
    last->next = stack;
    stack = ent;
  }
  // Strip interceptor prefixes and runtime bottom frames.
  StackStripMain(stack);
  return stack;
}
120
// Allocates the report descriptor and acquires the global report mutex, so
// at most one report is built/printed at a time. The mutex is held for the
// whole lifetime of the ScopedReport and released in the destructor.
ScopedReport::ScopedReport(ReportType typ) {
  ctx_ = CTX();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx_->report_mtx.Lock();
}
128
// Releases the global report mutex (taken in the constructor) and destroys
// the report descriptor allocated there.
ScopedReport::~ScopedReport() {
  ctx_->report_mtx.Unlock();
  rep_->~ReportDesc();
  internal_free(rep_);
}
134
AddStack(const StackTrace * stack)135 void ScopedReport::AddStack(const StackTrace *stack) {
136 ReportStack **rs = rep_->stacks.PushBack();
137 *rs = SymbolizeStack(*stack);
138 }
139
AddMemoryAccess(uptr addr,Shadow s,const StackTrace * stack)140 void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
141 const StackTrace *stack) {
142 void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
143 ReportMop *mop = new(mem) ReportMop;
144 rep_->mops.PushBack(mop);
145 mop->tid = s.tid();
146 mop->addr = addr + s.addr0();
147 mop->size = s.size();
148 mop->write = s.is_write();
149 mop->nmutex = 0;
150 mop->stack = SymbolizeStack(*stack);
151 }
152
AddThread(const ThreadContext * tctx)153 void ScopedReport::AddThread(const ThreadContext *tctx) {
154 for (uptr i = 0; i < rep_->threads.Size(); i++) {
155 if (rep_->threads[i]->id == tctx->tid)
156 return;
157 }
158 void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
159 ReportThread *rt = new(mem) ReportThread();
160 rep_->threads.PushBack(rt);
161 rt->id = tctx->tid;
162 rt->running = (tctx->status == ThreadStatusRunning);
163 rt->stack = SymbolizeStack(tctx->creation_stack);
164 }
165
166 #ifndef TSAN_GO
FindThread(int unique_id)167 static ThreadContext *FindThread(int unique_id) {
168 CTX()->thread_mtx.CheckLocked();
169 for (unsigned i = 0; i < kMaxTid; i++) {
170 ThreadContext *tctx = CTX()->threads[i];
171 if (tctx && tctx->unique_id == unique_id) {
172 return tctx;
173 }
174 }
175 return 0;
176 }
177 #endif
178
// Adds a description of mutex |s| (its creation stack) to the report.
void ScopedReport::AddMutex(const SyncVar *s) {
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  // TODO(review): 42 is clearly a placeholder, not a real identifier derived
  // from |s| -- confirm where the mutex id should come from.
  rm->id = 42;
  rm->stack = SymbolizeStack(s->creation_stack);
}
186
// Attaches a description of the memory range [addr, addr+size) to the
// report: a heap block (with its allocation stack and allocating thread)
// when the address belongs to the tsan allocator, otherwise a global/data
// symbol if the symbolizer can resolve one. Does nothing otherwise.
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  if (allocator()->PointerIsMine((void*)addr)) {
    // Heap block: describe the allocation.
    MBlock *b = user_mblock(0, (void*)addr);
    ThreadContext *tctx = FindThread(b->alloc_tid);
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->size;
    // Prefer the public tid; fall back to the raw alloc_tid if the thread
    // context is gone/reused.
    loc->tid = tctx ? tctx->tid : b->alloc_tid;
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    if (tctx)
      AddThread(tctx);
    return;
  }
#endif
  // Not a heap pointer: try to resolve it as a global/data symbol.
  ReportStack *symb = SymbolizeData(addr);
  if (symb) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationGlobal;
    loc->addr = addr;
    loc->size = size;
    loc->tid = 0;
    // NOTE(review): the func/file strings are kept after symb itself is
    // freed -- presumably SymbolizeData allocates them separately; confirm.
    loc->name = symb->func;
    loc->file = symb->file;
    loc->line = symb->line;
    loc->stack = 0;
    internal_free(symb);
    return;
  }
}
234
235 #ifndef TSAN_GO
AddSleep(u32 stack_id)236 void ScopedReport::AddSleep(u32 stack_id) {
237 uptr ssz = 0;
238 const uptr *stack = StackDepotGet(stack_id, &ssz);
239 if (stack) {
240 StackTrace trace;
241 trace.Init(stack, ssz);
242 rep_->sleep = SymbolizeStack(trace);
243 }
244 }
245 #endif
246
// Returns the report accumulated so far; the descriptor remains owned by
// this ScopedReport and dies with it.
const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}
250
RestoreStack(int tid,const u64 epoch,StackTrace * stk)251 void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
252 ThreadContext *tctx = CTX()->threads[tid];
253 if (tctx == 0)
254 return;
255 Trace* trace = 0;
256 if (tctx->status == ThreadStatusRunning) {
257 CHECK(tctx->thr);
258 trace = &tctx->thr->trace;
259 } else if (tctx->status == ThreadStatusFinished
260 || tctx->status == ThreadStatusDead) {
261 if (tctx->dead_info == 0)
262 return;
263 trace = &tctx->dead_info->trace;
264 } else {
265 return;
266 }
267 Lock l(&trace->mtx);
268 const int partidx = (epoch / (kTraceSize / kTraceParts)) % kTraceParts;
269 TraceHeader* hdr = &trace->headers[partidx];
270 if (epoch < hdr->epoch0)
271 return;
272 const u64 eend = epoch % kTraceSize;
273 const u64 ebegin = eend / kTracePartSize * kTracePartSize;
274 DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
275 tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
276 InternalScopedBuffer<uptr> stack(1024); // FIXME: de-hardcode 1024
277 for (uptr i = 0; i < hdr->stack0.Size(); i++) {
278 stack[i] = hdr->stack0.Get(i);
279 DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
280 }
281 uptr pos = hdr->stack0.Size();
282 for (uptr i = ebegin; i <= eend; i++) {
283 Event ev = trace->events[i];
284 EventType typ = (EventType)(ev >> 61);
285 uptr pc = (uptr)(ev & 0xffffffffffffull);
286 DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
287 if (typ == EventTypeMop) {
288 stack[pos] = pc;
289 } else if (typ == EventTypeFuncEnter) {
290 stack[pos++] = pc;
291 } else if (typ == EventTypeFuncExit) {
292 if (pos > 0)
293 pos--;
294 }
295 for (uptr j = 0; j <= pos; j++)
296 DPrintf2(" #%zu: %zx\n", j, stack[j]);
297 }
298 if (pos == 0 && stack[0] == 0)
299 return;
300 pos++;
301 stk->Init(stack.data(), pos);
302 }
303
HandleRacyStacks(ThreadState * thr,const StackTrace (& traces)[2],uptr addr_min,uptr addr_max)304 static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
305 uptr addr_min, uptr addr_max) {
306 Context *ctx = CTX();
307 bool equal_stack = false;
308 RacyStacks hash = {};
309 if (flags()->suppress_equal_stacks) {
310 hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
311 hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
312 for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
313 if (hash == ctx->racy_stacks[i]) {
314 DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
315 equal_stack = true;
316 break;
317 }
318 }
319 }
320 bool equal_address = false;
321 RacyAddress ra0 = {addr_min, addr_max};
322 if (flags()->suppress_equal_addresses) {
323 for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
324 RacyAddress ra2 = ctx->racy_addresses[i];
325 uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
326 uptr minend = min(ra0.addr_max, ra2.addr_max);
327 if (maxbeg < minend) {
328 DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
329 equal_address = true;
330 break;
331 }
332 }
333 }
334 if (equal_stack || equal_address) {
335 if (!equal_stack)
336 ctx->racy_stacks.PushBack(hash);
337 if (!equal_address)
338 ctx->racy_addresses.PushBack(ra0);
339 return true;
340 }
341 return false;
342 }
343
AddRacyStacks(ThreadState * thr,const StackTrace (& traces)[2],uptr addr_min,uptr addr_max)344 static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
345 uptr addr_min, uptr addr_max) {
346 Context *ctx = CTX();
347 if (flags()->suppress_equal_stacks) {
348 RacyStacks hash;
349 hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
350 hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
351 ctx->racy_stacks.PushBack(hash);
352 }
353 if (flags()->suppress_equal_addresses) {
354 RacyAddress ra0 = {addr_min, addr_max};
355 ctx->racy_addresses.PushBack(ra0);
356 }
357 }
358
OutputReport(const ScopedReport & srep,const ReportStack * suppress_stack)359 bool OutputReport(const ScopedReport &srep, const ReportStack *suppress_stack) {
360 const ReportDesc *rep = srep.GetReport();
361 bool suppressed = IsSuppressed(rep->typ, suppress_stack);
362 suppressed = OnReport(rep, suppressed);
363 if (suppressed)
364 return false;
365 PrintReport(rep);
366 CTX()->nreported++;
367 return true;
368 }
369
// Top-level race reporting routine: reconstructs both racing accesses'
// stacks, applies duplicate/expected-race suppression, builds a
// ScopedReport describing the accesses, involved threads and memory
// location, and emits it.
void ReportRace(ThreadState *thr) {
  ScopedInRtl in_rtl;

  // The freed bit in the second racy shadow value distinguishes a
  // use-after-free from an ordinary data race; it is consumed (reset) here.
  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    // Union of the two accessed byte ranges.
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;  // race annotated as expected (e.g. by a test)
  }

  Context *ctx = CTX();
  // Thread mutex must be taken before the report mutex (acquired by
  // ScopedReport below).
  Lock l0(&ctx->thread_mtx);

  ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
  const uptr kMop = 2;  // a race always involves exactly two accesses
  StackTrace traces[kMop];
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    RestoreStack(s.tid(), s.epoch(), &traces[i]);
  }
  // Failure to restore stack of the current thread
  // was observed on free() interceptor called from pthread.
  // Just get the current shadow stack instead.
  if (traces[0].IsEmpty())
    traces[0].ObtainCurrent(thr, 0);

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;  // duplicate of an already reported race

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i]);
  }

  // Describe the involved threads, but only those whose context still
  // covers the access epoch (otherwise the tid slot was reused).
  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = ctx->threads[s.tid()];
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    // If the other access happened at or before this thread's last sleep,
    // attach the sleep stack to help diagnose the report.
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(rep, rep.GetReport()->mops[0]->stack))
    return;  // suppressed; do not record for duplicate suppression

  AddRacyStacks(thr, traces, addr_min, addr_max);
}
441
PrintCurrentStack(ThreadState * thr,uptr pc)442 void PrintCurrentStack(ThreadState *thr, uptr pc) {
443 StackTrace trace;
444 trace.ObtainCurrent(thr, pc);
445 PrintStack(SymbolizeStack(trace));
446 }
447
448 } // namespace __tsan
449