//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
    : initialized()
    , report_mtx(MutexTypeReport, StatMtxReport)
    , nreported()
    , nmissed_expected()
    , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
        CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
    , racy_mtx(MutexTypeRacy, StatMtxRacy)
    , racy_stacks()
    , racy_addresses()
    , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
    , clock_alloc("clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // , ignore_reads_and_writes()
    // , ignore_interceptors()
    , clock(tid, reuse_count)
#if !SANITIZER_GO
    , jmp_bufs()
#endif
    , tid(tid)
    , unique_id(unique_id)
    , stk_addr(stk_addr)
    , stk_size(stk_size)
    , tls_addr(tls_addr)
    , tls_size(tls_size)
#if !SANITIZER_GO
    , last_sleep_clock(tid)
#endif
{
}

#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init();
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
            &filename[0]);
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

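// Releases the shadow memory backing the application range [addr, addr+size)
// back to the OS.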
void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

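// Ensures that the shadow and meta shadow regions covering the application
// range [addr, addr+size) are mapped.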
void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedSuperNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
        addr, size);
    Die();
  }
}

static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    // Skip cases for empty regions (heap definition for architectures that
    // do not use 64-bit allocator).
    if (beg == end)
      continue;
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p, ShadowToMem(s));
        CHECK(IsMetaMem(m));
        if (prev) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadow(prev);
          const uptr prev_m = (uptr)MemToMeta(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ((m - prev_m) / kMetaShadowSize,
                   (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

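// Called at process shutdown. Waits for pending reports, prints summaries,
// and returns the process exit status: non-zero if any issues were reported
// (or if OnFinalize says so), zero otherwise.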
int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1)
    DumpProcessMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#if !SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
  // Ignore memory accesses in the pthread_atfork callbacks.
  // If any of them triggers a data race we will deadlock
  // on the report_mtx.
  // We could ignore interceptors and sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  ThreadIgnoreBegin(thr, pc);
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ThreadIgnoreEnd(thr, pc);  // Begin is in ForkBefore.
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ThreadIgnoreEnd(thr, pc);  // Begin is in ForkBefore.
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

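// Returns a stack depot id for the current shadow stack; if pc is non-zero,
// it is temporarily pushed on top of the stack before the depot lookup.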
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

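// Switches the thread to the next trace part and records the starting epoch,
// call stack and mutex set in the part's header.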
void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

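// Shadow cells are read and written with relaxed atomics: concurrent updates
// from other threads may be lost, which the race detection algorithm tolerates
// (at worst a race goes unreported).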
ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

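// Stores the pending shadow value *s into the slot and clears *s; used by the
// shadow word update code included from tsan_update_shadow_word_inl.h.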
ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

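// The old access happens-before the current thread if the thread's vector
// clock entry for the old access's thread has already reached the access's
// epoch.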
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();
  bool stored = false;

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, which allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 2;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 3;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
#endif

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(stored))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

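// Returns true if one of the shadow slots already records an equal or stronger
// access by the same thread that is newer than the given synchronization
// epoch; such an access can be skipped on the fast path.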
ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31] = access[32:63]
  // addr0[32:63] = access[32:63]
  // addr0[64:95] = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31] = shadow0[32:63]
  // addr_vect[32:63] = shadow0[96:127]
  // addr_vect[64:95] = shadow1[32:63]
  // addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31] = sync_epoch[0:31]
  // epoch[32:63] = sync_epoch[0:31]
  // epoch[64:95] = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31] = shadow0[0:31]
  // epoch_vect[32:63] = shadow0[64:95]
  // epoch_vect[64:95] = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect >= sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (UNLIKELY(fast_state.GetIgnoreBit())) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cpp
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

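// Fills the shadow for the range so that the first slot of each shadow cell
// holds val and the remaining slots are zero; large ranges have their middle
// remapped instead of being written.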
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size) {
  if (thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, addr, size);
  else
    MemoryResetRange(thr, pc, addr, size);
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif