//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}

static Context *ctx;
Context *CTX() {
  return ctx;
}

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;
  Context *ctx = CTX();
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    uptr n_threads;
    uptr n_running_threads;
    ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
    internal_snprintf(buf.data(), buf.size(), "%d: nthr=%zd nlive=%zd\n",
                      i, n_threads, n_running_threads);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    WriteMemoryProfile(buf.data(), buf.size());
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = OpenFile(filename.data(), true);
  if (fd == kInvalidFd) {
    Printf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

static void MemoryFlushThread(void *arg) {
  ScopedInRtl in_rtl;
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}

static void InitializeMemoryFlush() {
  if (flags()->flush_memory_ms == 0)
    return;
  if (flags()->flush_memory_ms < 100)
    flags()->flush_memory_ms = 100;
  internal_start_thread(&MemoryFlushThread, 0);
}

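// Maps shadow for an application memory range [addr, addr+size).
// Each application byte is backed by kShadowMultiplier shadow bytes
// (presumably kShadowCnt shadow words per kShadowCell-sized cell),
// hence the scaling of the mapping size below.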
void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           GetPid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetPid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
           ctx->nmissed_expected);
  }

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
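// Returns a stack depot id for the current shadow (call) stack.
// If pc is non-zero, it is temporarily pushed onto the shadow stack so that
// the resulting stack id includes the currently executing instruction as the
// top frame.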
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

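// Called when the per-thread trace crosses a part boundary.
// The header of the new trace part records the starting epoch, the current
// call stack and the current mutex set, so that the reporting path can later
// reconstruct the state at any event within this part.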
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

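// Total per-thread trace size in events: 2^(kTracePartSizeBits +
// history_size + 1). The trace is a ring buffer of TraceParts() parts,
// each holding kTracePartSize events.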
uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

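// Stores the new shadow value *s into the slot *sp and zeroes *s.
// MemoryAccessImpl uses *s (store_word) as a flag: once it becomes zero,
// the current access has been recorded in some slot and no further store
// is needed.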
ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

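// Stashes the two conflicting shadow values and the shadow address in the
// thread state and hands off to the reporting path (via the
// __tsan_report_race trampoline in C++ mode, or directly in Go mode).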
static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

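// The previous access (old) happens-before the current access if the current
// thread's vector clock entry for old's thread has already reached old's
// epoch, i.e. the current thread has synchronized with that access.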
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well, which allowed replacing some
  // 'candidates' with 'same' or 'replace', but I don't think
  // it's worth it (performance- and complexity-wise).
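  // Roughly: a 'same' slot already describes this access by this thread and
  // is simply refreshed with the new epoch; a 'replace' slot is subsumed by
  // the current access (same thread or happens-before) and is overwritten;
  // a 'candidate' slot does not conflict and is left alone; a conflicting
  // slot with no happens-before relation is a race and control jumps to the
  // RACE label below.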

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);
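  // cur now fully describes this access: the thread id and epoch (taken from
  // fast_state), the offset of the access within its 8-byte shadow cell, the
  // access size, and whether the access is a write and/or atomic.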

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  if (size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
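    // Unmapping and re-mapping the middle pages (rather than storing to them)
    // lets the kernel hand out zero pages lazily, so resetting a large range
    // does not actually touch and commit all of its shadow.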
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

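// Marks a freed memory range in shadow. The range is first accessed as a
// write, so any outstanding racy accesses are reported right away, and is
// then overwritten with a shadow value carrying the freed bit, so that a
// later access to the range conflicts with it and can be reported as a use
// of freed memory.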
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
                                           newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif