1 //===-- tsan_interceptors.cc ----------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 //
12 // FIXME: move as many interceptors as possible into
13 // sanitizer_common/sanitizer_common_interceptors.inc
14 //===----------------------------------------------------------------------===//
15
16 #include "sanitizer_common/sanitizer_atomic.h"
17 #include "sanitizer_common/sanitizer_libc.h"
18 #include "sanitizer_common/sanitizer_linux.h"
19 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
20 #include "sanitizer_common/sanitizer_placement_new.h"
21 #include "sanitizer_common/sanitizer_stacktrace.h"
22 #include "sanitizer_common/sanitizer_tls_get_addr.h"
23 #include "interception/interception.h"
24 #include "tsan_interceptors.h"
25 #include "tsan_interface.h"
26 #include "tsan_platform.h"
27 #include "tsan_suppressions.h"
28 #include "tsan_rtl.h"
29 #include "tsan_mman.h"
30 #include "tsan_fd.h"
31
32 #if SANITIZER_POSIX
33 #include "sanitizer_common/sanitizer_posix.h"
34 #endif
35
36 using namespace __tsan; // NOLINT
37
38 #if SANITIZER_FREEBSD || SANITIZER_MAC
39 #define __errno_location __error
40 #define stdout __stdoutp
41 #define stderr __stderrp
42 #endif
43
44 #if SANITIZER_ANDROID
45 #define __errno_location __errno
46 #define mallopt(a, b)
47 #endif
48
49 #if SANITIZER_LINUX || SANITIZER_FREEBSD
50 #define PTHREAD_CREATE_DETACHED 1
51 #elif SANITIZER_MAC
52 #define PTHREAD_CREATE_DETACHED 2
53 #endif
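// Values mirror the platform definitions of PTHREAD_CREATE_DETACHED; like the
// other constants below, they are duplicated here rather than pulled in from
// the system headers.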
54
55
56 #ifdef __mips__
57 const int kSigCount = 129;
58 #else
59 const int kSigCount = 65;
60 #endif
61
62 struct my_siginfo_t {
63 // The size is determined by looking at the sizeof of the real siginfo_t on Linux.
64 u64 opaque[128 / sizeof(u64)];
65 };
66
67 #ifdef __mips__
68 struct ucontext_t {
69 u64 opaque[768 / sizeof(u64) + 1];
70 };
71 #else
72 struct ucontext_t {
73 // The size is determined by looking at the sizeof of the real ucontext_t on Linux.
74 u64 opaque[936 / sizeof(u64) + 1];
75 };
76 #endif
77
78 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
79 #define PTHREAD_ABI_BASE "GLIBC_2.3.2"
80 #elif defined(__aarch64__) || SANITIZER_PPC64V2
81 #define PTHREAD_ABI_BASE "GLIBC_2.17"
82 #endif
83
84 extern "C" int pthread_attr_init(void *attr);
85 extern "C" int pthread_attr_destroy(void *attr);
86 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
87 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
88 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
89 extern "C" int pthread_setspecific(unsigned key, const void *v);
90 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
91 extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
92 __sanitizer_sigset_t *oldset);
93 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
94 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
95 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
96 extern "C" void *pthread_self();
97 extern "C" void _exit(int status);
98 extern "C" int *__errno_location();
99 extern "C" int fileno_unlocked(void *stream);
100 extern "C" int dirfd(void *dirp);
101 #if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
102 extern "C" int mallopt(int param, int value);
103 #endif
104 extern __sanitizer_FILE *stdout, *stderr;
105 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
106 const int PTHREAD_MUTEX_RECURSIVE = 1;
107 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
108 #else
109 const int PTHREAD_MUTEX_RECURSIVE = 2;
110 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
111 #endif
112 const int EINVAL = 22;
113 const int EBUSY = 16;
114 const int EOWNERDEAD = 130;
115 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
116 const int EPOLL_CTL_ADD = 1;
117 #endif
118 const int SIGILL = 4;
119 const int SIGABRT = 6;
120 const int SIGFPE = 8;
121 const int SIGSEGV = 11;
122 const int SIGPIPE = 13;
123 const int SIGTERM = 15;
124 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC
125 const int SIGBUS = 10;
126 const int SIGSYS = 12;
127 #else
128 const int SIGBUS = 7;
129 const int SIGSYS = 31;
130 #endif
131 void *const MAP_FAILED = (void*)-1;
132 #if !SANITIZER_MAC
133 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
134 #endif
135 const int MAP_FIXED = 0x10;
136 typedef long long_t; // NOLINT
137
138 // From /usr/include/unistd.h
139 # define F_ULOCK 0 /* Unlock a previously locked region. */
140 # define F_LOCK 1 /* Lock a region for exclusive use. */
141 # define F_TLOCK 2 /* Test and lock a region for exclusive use. */
142 # define F_TEST 3 /* Test a region for other processes locks. */
143
144 #define errno (*__errno_location())
145
146 typedef void (*sighandler_t)(int sig);
147 typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);
148
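// sigaction_t mirrors the layout of the platform's struct sigaction; the field
// order differs between glibc, Android bionic, FreeBSD and OS X, hence the
// #if maze below.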
149 #if SANITIZER_ANDROID
150 struct sigaction_t {
151 u32 sa_flags;
152 union {
153 sighandler_t sa_handler;
154 sigactionhandler_t sa_sigaction;
155 };
156 __sanitizer_sigset_t sa_mask;
157 void (*sa_restorer)();
158 };
159 #else
160 struct sigaction_t {
161 #ifdef __mips__
162 u32 sa_flags;
163 #endif
164 union {
165 sighandler_t sa_handler;
166 sigactionhandler_t sa_sigaction;
167 };
168 #if SANITIZER_FREEBSD
169 int sa_flags;
170 __sanitizer_sigset_t sa_mask;
171 #elif SANITIZER_MAC
172 __sanitizer_sigset_t sa_mask;
173 int sa_flags;
174 #else
175 __sanitizer_sigset_t sa_mask;
176 #ifndef __mips__
177 int sa_flags;
178 #endif
179 void (*sa_restorer)();
180 #endif
181 };
182 #endif
183
184 const sighandler_t SIG_DFL = (sighandler_t)0;
185 const sighandler_t SIG_IGN = (sighandler_t)1;
186 const sighandler_t SIG_ERR = (sighandler_t)-1;
187 #if SANITIZER_FREEBSD || SANITIZER_MAC
188 const int SA_SIGINFO = 0x40;
189 const int SIG_SETMASK = 3;
190 #elif defined(__mips__)
191 const int SA_SIGINFO = 8;
192 const int SIG_SETMASK = 3;
193 #else
194 const int SA_SIGINFO = 4;
195 const int SIG_SETMASK = 2;
196 #endif
197
198 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
199 (!cur_thread()->is_inited)
200
201 static sigaction_t sigactions[kSigCount];
202
203 namespace __tsan {
204 struct SignalDesc {
205 bool armed;
206 bool sigaction;
207 my_siginfo_t siginfo;
208 ucontext_t ctx;
209 };
210
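// Per-thread signal state. Signals that arrive while the thread is inside the
// runtime are stashed in pending_signals and delivered later by
// ProcessPendingSignals(); in_blocking_func marks regions (see BlockingCall)
// where pending signals are instead processed as soon as they arrive.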
211 struct ThreadSignalContext {
212 int int_signal_send;
213 atomic_uintptr_t in_blocking_func;
214 atomic_uintptr_t have_pending_signals;
215 SignalDesc pending_signals[kSigCount];
216 // emptyset and oldset are too big for stack.
217 __sanitizer_sigset_t emptyset;
218 __sanitizer_sigset_t oldset;
219 };
220
221 // The object is 64-byte aligned, because we want hot data to be located in
222 // a single cache line if possible (it's accessed in every interceptor).
223 static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)];
224 static LibIgnore *libignore() {
225 return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]);
226 }
227
228 void InitializeLibIgnore() {
229 const SuppressionContext &supp = *Suppressions();
230 const uptr n = supp.SuppressionCount();
231 for (uptr i = 0; i < n; i++) {
232 const Suppression *s = supp.SuppressionAt(i);
233 if (0 == internal_strcmp(s->type, kSuppressionLib))
234 libignore()->AddIgnoredLibrary(s->templ);
235 }
236 libignore()->OnLibraryLoaded(0);
237 }
238
239 } // namespace __tsan
240
241 static ThreadSignalContext *SigCtx(ThreadState *thr) {
242 ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
243 if (ctx == 0 && !thr->is_dead) {
244 ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
245 MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
246 thr->signal_ctx = ctx;
247 }
248 return ctx;
249 }
250
251 #if !SANITIZER_MAC
252 static unsigned g_thread_finalize_key;
253 #endif
254
255 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
256 uptr pc)
257 : thr_(thr)
258 , pc_(pc)
259 , in_ignored_lib_(false) {
260 Initialize(thr);
261 if (!thr_->is_inited)
262 return;
263 if (!thr_->ignore_interceptors)
264 FuncEntry(thr, pc);
265 DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
266 if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
267 in_ignored_lib_ = true;
268 thr_->in_ignored_lib = true;
269 ThreadIgnoreBegin(thr_, pc_);
270 }
271 if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
272 }
273
274 ScopedInterceptor::~ScopedInterceptor() {
275 if (!thr_->is_inited)
276 return;
277 if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
278 if (in_ignored_lib_) {
279 thr_->in_ignored_lib = false;
280 ThreadIgnoreEnd(thr_, pc_);
281 }
282 if (!thr_->ignore_interceptors) {
283 ProcessPendingSignals(thr_);
284 FuncExit(thr_);
285 CheckNoLocks(thr_);
286 }
287 }
288
289 void ScopedInterceptor::UserCallbackStart() {
290 if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
291 if (in_ignored_lib_) {
292 thr_->in_ignored_lib = false;
293 ThreadIgnoreEnd(thr_, pc_);
294 }
295 }
296
297 void ScopedInterceptor::UserCallbackEnd() {
298 if (in_ignored_lib_) {
299 thr_->in_ignored_lib = true;
300 ThreadIgnoreBegin(thr_, pc_);
301 }
302 if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
303 }
304
305 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
306 #if SANITIZER_FREEBSD
307 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
308 #else
309 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
310 #endif
311
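// With strict_string_checks the entire string (len + 1 bytes, including the
// terminator) is reported as read; otherwise only the n bytes the call
// actually requires.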
312 #define READ_STRING_OF_LEN(thr, pc, s, len, n) \
313 MemoryAccessRange((thr), (pc), (uptr)(s), \
314 common_flags()->strict_string_checks ? (len) + 1 : (n), false)
315
316 #define READ_STRING(thr, pc, s, n) \
317 READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
318
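// BLOCK_REAL(name)(...) calls REAL(name)(...) with a temporary BlockingCall
// alive for the whole call expression, so signals are delivered synchronously
// while the thread is blocked in the real function.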
319 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
320
321 struct BlockingCall {
322 explicit BlockingCall(ThreadState *thr)
323 : thr(thr)
324 , ctx(SigCtx(thr)) {
325 for (;;) {
326 atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
327 if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
328 break;
329 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
330 ProcessPendingSignals(thr);
331 }
332 // When we are in a "blocking call", we process signals asynchronously
333 // (right when they arrive). In this context we do not expect to be
334 // executing any user/runtime code. The known interceptor sequence when
335 // this is not true is: pthread_join -> munmap(stack). It's fine
336 // to ignore munmap in this case -- we handle stack shadow separately.
337 thr->ignore_interceptors++;
338 }
339
340 ~BlockingCall() {
341 thr->ignore_interceptors--;
342 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
343 }
344
345 ThreadState *thr;
346 ThreadSignalContext *ctx;
347 };
348
349 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
350 SCOPED_TSAN_INTERCEPTOR(sleep, sec);
351 unsigned res = BLOCK_REAL(sleep)(sec);
352 AfterSleep(thr, pc);
353 return res;
354 }
355
356 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
357 SCOPED_TSAN_INTERCEPTOR(usleep, usec);
358 int res = BLOCK_REAL(usleep)(usec);
359 AfterSleep(thr, pc);
360 return res;
361 }
362
363 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
364 SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
365 int res = BLOCK_REAL(nanosleep)(req, rem);
366 AfterSleep(thr, pc);
367 return res;
368 }
369
370 // The sole reason tsan wraps atexit callbacks is to establish synchronization
371 // between callback setup and callback execution.
372 struct AtExitCtx {
373 void (*f)();
374 void *arg;
375 };
376
377 static void at_exit_wrapper(void *arg) {
378 ThreadState *thr = cur_thread();
379 uptr pc = 0;
380 Acquire(thr, pc, (uptr)arg);
381 AtExitCtx *ctx = (AtExitCtx*)arg;
382 ((void(*)(void *arg))ctx->f)(ctx->arg);
383 InternalFree(ctx);
384 }
385
386 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
387 void *arg, void *dso);
388
389 #if !SANITIZER_ANDROID
390 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
391 if (cur_thread()->in_symbolizer)
392 return 0;
393 // We want to set up the atexit callback even if we are in an ignored lib
394 // or after fork.
395 SCOPED_INTERCEPTOR_RAW(atexit, f);
396 return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
397 }
398 #endif
399
400 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
401 if (cur_thread()->in_symbolizer)
402 return 0;
403 SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
404 return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
405 }
406
407 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
408 void *arg, void *dso) {
409 AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
410 ctx->f = f;
411 ctx->arg = arg;
412 Release(thr, pc, (uptr)ctx);
413 // Memory allocation in __cxa_atexit will race with free during exit,
414 // because we do not see synchronization around atexit callback list.
415 ThreadIgnoreBegin(thr, pc);
416 int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso);
417 ThreadIgnoreEnd(thr, pc);
418 return res;
419 }
420
421 #if !SANITIZER_MAC
422 static void on_exit_wrapper(int status, void *arg) {
423 ThreadState *thr = cur_thread();
424 uptr pc = 0;
425 Acquire(thr, pc, (uptr)arg);
426 AtExitCtx *ctx = (AtExitCtx*)arg;
427 ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
428 InternalFree(ctx);
429 }
430
431 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
432 if (cur_thread()->in_symbolizer)
433 return 0;
434 SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
435 AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
436 ctx->f = (void(*)())f;
437 ctx->arg = arg;
438 Release(thr, pc, (uptr)ctx);
439 // Memory allocation in __cxa_atexit will race with free during exit,
440 // because we do not see synchronization around atexit callback list.
441 ThreadIgnoreBegin(thr, pc);
442 int res = REAL(on_exit)(on_exit_wrapper, ctx);
443 ThreadIgnoreEnd(thr, pc);
444 return res;
445 }
446 #endif
447
448 // Cleanup old bufs.
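// Records with buf->sp <= sp were made in frames deeper than the current one
// (the stack grows down); those frames have returned, so the bufs are stale.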
449 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
450 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
451 JmpBuf *buf = &thr->jmp_bufs[i];
452 if (buf->sp <= sp) {
453 uptr sz = thr->jmp_bufs.Size();
454 internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
455 thr->jmp_bufs.PopBack();
456 i--;
457 }
458 }
459 }
460
461 static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
462 if (!thr->is_inited) // called from libc guts during bootstrap
463 return;
464 // Cleanup old bufs.
465 JmpBufGarbageCollect(thr, sp);
466 // Remember the buf.
467 JmpBuf *buf = thr->jmp_bufs.PushBack();
468 buf->sp = sp;
469 buf->mangled_sp = mangled_sp;
470 buf->shadow_stack_pos = thr->shadow_stack_pos;
471 ThreadSignalContext *sctx = SigCtx(thr);
472 buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
473 buf->in_blocking_func = sctx ?
474 atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
475 false;
476 buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
477 memory_order_relaxed);
478 }
479
480 static void LongJmp(ThreadState *thr, uptr *env) {
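// The saved (possibly mangled) stack pointer lives at a libc- and
// architecture-specific slot of the jmp_buf.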
481 #ifdef __powerpc__
482 uptr mangled_sp = env[0];
483 #elif SANITIZER_FREEBSD || SANITIZER_MAC
484 uptr mangled_sp = env[2];
485 #elif defined(SANITIZER_LINUX)
486 # ifdef __aarch64__
487 uptr mangled_sp = env[13];
488 # else
489 uptr mangled_sp = env[6];
490 # endif
491 #endif
492 // Find the saved buf by mangled_sp.
493 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
494 JmpBuf *buf = &thr->jmp_bufs[i];
495 if (buf->mangled_sp == mangled_sp) {
496 CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
497 // Unwind the stack.
498 while (thr->shadow_stack_pos > buf->shadow_stack_pos)
499 FuncExit(thr);
500 ThreadSignalContext *sctx = SigCtx(thr);
501 if (sctx) {
502 sctx->int_signal_send = buf->int_signal_send;
503 atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
504 memory_order_relaxed);
505 }
506 atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
507 memory_order_relaxed);
508 JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
509 return;
510 }
511 }
512 Printf("ThreadSanitizer: can't find longjmp buf\n");
513 CHECK(0);
514 }
515
516 // FIXME: put everything below into a common extern "C" block?
517 extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
518 SetJmp(cur_thread(), sp, mangled_sp);
519 }
520
521 #if SANITIZER_MAC
522 TSAN_INTERCEPTOR(int, setjmp, void *env);
523 TSAN_INTERCEPTOR(int, _setjmp, void *env);
524 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
525 #else // SANITIZER_MAC
526 // Not called. Merely to satisfy TSAN_INTERCEPT().
527 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
528 int __interceptor_setjmp(void *env);
529 extern "C" int __interceptor_setjmp(void *env) {
530 CHECK(0);
531 return 0;
532 }
533
534 // FIXME: any reason to have a separate declaration?
535 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
536 int __interceptor__setjmp(void *env);
537 extern "C" int __interceptor__setjmp(void *env) {
538 CHECK(0);
539 return 0;
540 }
541
542 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
543 int __interceptor_sigsetjmp(void *env);
544 extern "C" int __interceptor_sigsetjmp(void *env) {
545 CHECK(0);
546 return 0;
547 }
548
549 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
550 int __interceptor___sigsetjmp(void *env);
551 extern "C" int __interceptor___sigsetjmp(void *env) {
552 CHECK(0);
553 return 0;
554 }
555
556 extern "C" int setjmp(void *env);
557 extern "C" int _setjmp(void *env);
558 extern "C" int sigsetjmp(void *env);
559 extern "C" int __sigsetjmp(void *env);
560 DEFINE_REAL(int, setjmp, void *env)
561 DEFINE_REAL(int, _setjmp, void *env)
562 DEFINE_REAL(int, sigsetjmp, void *env)
563 DEFINE_REAL(int, __sigsetjmp, void *env)
564 #endif // SANITIZER_MAC
565
566 TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
567 // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
568 // bad things will happen. We will jump over ScopedInterceptor dtor and can
569 // leave thr->in_ignored_lib set.
570 {
571 SCOPED_INTERCEPTOR_RAW(longjmp, env, val);
572 }
573 LongJmp(cur_thread(), env);
574 REAL(longjmp)(env, val);
575 }
576
577 TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
578 {
579 SCOPED_INTERCEPTOR_RAW(siglongjmp, env, val);
580 }
581 LongJmp(cur_thread(), env);
582 REAL(siglongjmp)(env, val);
583 }
584
585 #if !SANITIZER_MAC
586 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
587 if (cur_thread()->in_symbolizer)
588 return InternalAlloc(size);
589 void *p = 0;
590 {
591 SCOPED_INTERCEPTOR_RAW(malloc, size);
592 p = user_alloc(thr, pc, size);
593 }
594 invoke_malloc_hook(p, size);
595 return p;
596 }
597
598 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
599 SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
600 return user_alloc(thr, pc, sz, align);
601 }
602
603 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
604 if (cur_thread()->in_symbolizer)
605 return InternalCalloc(size, n);
606 void *p = 0;
607 {
608 SCOPED_INTERCEPTOR_RAW(calloc, size, n);
609 p = user_calloc(thr, pc, size, n);
610 }
611 invoke_malloc_hook(p, n * size);
612 return p;
613 }
614
615 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
616 if (cur_thread()->in_symbolizer)
617 return InternalRealloc(p, size);
618 if (p)
619 invoke_free_hook(p);
620 {
621 SCOPED_INTERCEPTOR_RAW(realloc, p, size);
622 p = user_realloc(thr, pc, p, size);
623 }
624 invoke_malloc_hook(p, size);
625 return p;
626 }
627
628 TSAN_INTERCEPTOR(void, free, void *p) {
629 if (p == 0)
630 return;
631 if (cur_thread()->in_symbolizer)
632 return InternalFree(p);
633 invoke_free_hook(p);
634 SCOPED_INTERCEPTOR_RAW(free, p);
635 user_free(thr, pc, p);
636 }
637
638 TSAN_INTERCEPTOR(void, cfree, void *p) {
639 if (p == 0)
640 return;
641 if (cur_thread()->in_symbolizer)
642 return InternalFree(p);
643 invoke_free_hook(p);
644 SCOPED_INTERCEPTOR_RAW(cfree, p);
645 user_free(thr, pc, p);
646 }
647
648 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
649 SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
650 return user_alloc_usable_size(p);
651 }
652 #endif
653
654 TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT
655 SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT
656 uptr srclen = internal_strlen(src);
657 MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
658 MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
659 return REAL(strcpy)(dst, src); // NOLINT
660 }
661
662 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
663 SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
664 uptr srclen = internal_strnlen(src, n);
665 MemoryAccessRange(thr, pc, (uptr)dst, n, true);
666 MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
667 return REAL(strncpy)(dst, src, n);
668 }
669
670 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
671 SCOPED_TSAN_INTERCEPTOR(strdup, str);
672 // strdup will call malloc, so no instrumentation is required here.
673 return REAL(strdup)(str);
674 }
675
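// If the requested address range is not app memory trackable by tsan, fail
// MAP_FIXED requests with EINVAL and drop the placement hint otherwise.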
676 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
677 if (*addr) {
678 if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
679 if (flags & MAP_FIXED) {
680 errno = EINVAL;
681 return false;
682 } else {
683 *addr = 0;
684 }
685 }
686 }
687 return true;
688 }
689
690 TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
691 int fd, OFF_T off) {
692 SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
693 if (!fix_mmap_addr(&addr, sz, flags))
694 return MAP_FAILED;
695 void *res = REAL(mmap)(addr, sz, prot, flags, fd, off);
696 if (res != MAP_FAILED) {
697 if (fd > 0)
698 FdAccess(thr, pc, fd);
699
700 if (thr->ignore_reads_and_writes == 0)
701 MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
702 else
703 MemoryResetRange(thr, pc, (uptr)res, sz);
704 }
705 return res;
706 }
707
708 #if SANITIZER_LINUX
709 TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
710 int fd, OFF64_T off) {
711 SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
712 if (!fix_mmap_addr(&addr, sz, flags))
713 return MAP_FAILED;
714 void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off);
715 if (res != MAP_FAILED) {
716 if (fd > 0)
717 FdAccess(thr, pc, fd);
718
719 if (thr->ignore_reads_and_writes == 0)
720 MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
721 else
722 MemoryResetRange(thr, pc, (uptr)res, sz);
723 }
724 return res;
725 }
726 #define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
727 #else
728 #define TSAN_MAYBE_INTERCEPT_MMAP64
729 #endif
730
731 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
732 SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
733 if (sz != 0) {
734 // If sz == 0, munmap returns EINVAL and does not unmap any memory.
735 DontNeedShadowFor((uptr)addr, sz);
736 ScopedGlobalProcessor sgp;
737 ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz);
738 }
739 int res = REAL(munmap)(addr, sz);
740 return res;
741 }
742
743 #if SANITIZER_LINUX
744 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
745 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
746 return user_alloc(thr, pc, sz, align);
747 }
748 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
749 #else
750 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
751 #endif
752
753 #if !SANITIZER_MAC
754 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
755 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
756 return user_alloc(thr, pc, sz, align);
757 }
758
759 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
760 SCOPED_INTERCEPTOR_RAW(valloc, sz);
761 return user_alloc(thr, pc, sz, GetPageSizeCached());
762 }
763 #endif
764
765 #if SANITIZER_LINUX
766 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
767 SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
768 sz = RoundUp(sz, GetPageSizeCached());
769 return user_alloc(thr, pc, sz, GetPageSizeCached());
770 }
771 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
772 #else
773 #define TSAN_MAYBE_INTERCEPT_PVALLOC
774 #endif
775
776 #if !SANITIZER_MAC
777 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
778 SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
779 *memptr = user_alloc(thr, pc, sz, align);
780 return 0;
781 }
782 #endif
783
784 // __cxa_guard_acquire and friends need to be intercepted in a special way -
785 // regular interceptors will break statically-linked libstdc++. Linux
786 // interceptors are specially defined as weak functions (so that they don't
787 // cause link errors when the user defines them as well). So they silently
788 // auto-disable themselves when such a symbol is already present in the binary.
789 // If we link libstdc++ statically, it will bring its own __cxa_guard_acquire,
790 // which will silently replace our interceptor. That's why on Linux we simply
791 // export these interceptors with INTERFACE_ATTRIBUTE.
792 // On OS X, we don't support static linking, so we just use a regular
793 // interceptor.
794 #if SANITIZER_MAC
795 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
796 #else
797 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
798 extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
799 #endif
800
801 // Used in thread-safe function static initialization.
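// Guard values: 0 - initialization not started, 1<<16 - initialization in
// progress, 1 - initialization done (see __cxa_guard_release below).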
802 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
803 SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
804 for (;;) {
805 u32 cmp = atomic_load(g, memory_order_acquire);
806 if (cmp == 0) {
807 if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
808 return 1;
809 } else if (cmp == 1) {
810 Acquire(thr, pc, (uptr)g);
811 return 0;
812 } else {
813 internal_sched_yield();
814 }
815 }
816 }
817
818 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
819 SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
820 Release(thr, pc, (uptr)g);
821 atomic_store(g, 1, memory_order_release);
822 }
823
824 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
825 SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
826 atomic_store(g, 0, memory_order_relaxed);
827 }
828
829 namespace __tsan {
830 void DestroyThreadState() {
831 ThreadState *thr = cur_thread();
832 Processor *proc = thr->proc();
833 ThreadFinish(thr);
834 ProcUnwire(proc, thr);
835 ProcDestroy(proc);
836 ThreadSignalContext *sctx = thr->signal_ctx;
837 if (sctx) {
838 thr->signal_ctx = 0;
839 UnmapOrDie(sctx, sizeof(*sctx));
840 }
841 DTLS_Destroy();
842 cur_thread_finalize();
843 }
844 } // namespace __tsan
845
846 #if !SANITIZER_MAC
847 static void thread_finalize(void *v) {
848 uptr iter = (uptr)v;
849 if (iter > 1) {
850 if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
851 Printf("ThreadSanitizer: failed to set thread key\n");
852 Die();
853 }
854 return;
855 }
856 DestroyThreadState();
857 }
858 #endif
859
860
861 struct ThreadParam {
862 void* (*callback)(void *arg);
863 void *param;
864 atomic_uintptr_t tid;
865 };
866
867 extern "C" void *__tsan_thread_start_func(void *arg) {
868 ThreadParam *p = (ThreadParam*)arg;
869 void* (*callback)(void *arg) = p->callback;
870 void *param = p->param;
871 int tid = 0;
872 {
873 ThreadState *thr = cur_thread();
874 // Thread-local state is not initialized yet.
875 ScopedIgnoreInterceptors ignore;
876 #if !SANITIZER_MAC
877 ThreadIgnoreBegin(thr, 0);
878 if (pthread_setspecific(g_thread_finalize_key,
879 (void *)GetPthreadDestructorIterations())) {
880 Printf("ThreadSanitizer: failed to set thread key\n");
881 Die();
882 }
883 ThreadIgnoreEnd(thr, 0);
884 #endif
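// Wait until the parent publishes our tid (see the handshake comment in
// pthread_create below).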
885 while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
886 internal_sched_yield();
887 Processor *proc = ProcCreate();
888 ProcWire(proc, thr);
889 ThreadStart(thr, tid, GetTid());
890 atomic_store(&p->tid, 0, memory_order_release);
891 }
892 void *res = callback(param);
893 // Prevent the callback from being tail called,
894 // it mixes up stack traces.
895 volatile int foo = 42;
896 foo++;
897 return res;
898 }
899
900 TSAN_INTERCEPTOR(int, pthread_create,
901 void *th, void *attr, void *(*callback)(void*), void * param) {
902 SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
903 if (ctx->after_multithreaded_fork) {
904 if (flags()->die_after_fork) {
905 Report("ThreadSanitizer: starting new threads after multi-threaded "
906 "fork is not supported. Dying (set die_after_fork=0 to override)\n");
907 Die();
908 } else {
909 VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
910 "fork is not supported (pid %d). Continuing because of "
911 "die_after_fork=0, but you are on your own\n", internal_getpid());
912 }
913 }
914 __sanitizer_pthread_attr_t myattr;
915 if (attr == 0) {
916 pthread_attr_init(&myattr);
917 attr = &myattr;
918 }
919 int detached = 0;
920 REAL(pthread_attr_getdetachstate)(attr, &detached);
921 AdjustStackSize(attr);
922
923 ThreadParam p;
924 p.callback = callback;
925 p.param = param;
926 atomic_store(&p.tid, 0, memory_order_relaxed);
927 int res = -1;
928 {
929 // Otherwise we see false positives in pthread stack manipulation.
930 ScopedIgnoreInterceptors ignore;
931 ThreadIgnoreBegin(thr, pc);
932 res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
933 ThreadIgnoreEnd(thr, pc);
934 }
935 if (res == 0) {
936 int tid = ThreadCreate(thr, pc, *(uptr*)th,
937 detached == PTHREAD_CREATE_DETACHED);
938 CHECK_NE(tid, 0);
939 // Synchronization on p.tid serves two purposes:
940 // 1. ThreadCreate must finish before the new thread starts.
941 // Otherwise the new thread can call pthread_detach, but the pthread_t
942 // identifier is not yet registered in ThreadRegistry by ThreadCreate.
943 // 2. ThreadStart must finish before this thread continues.
944 // Otherwise, this thread can call pthread_detach and reset thr->sync
945 // before the new thread got a chance to acquire from it in ThreadStart.
946 atomic_store(&p.tid, tid, memory_order_release);
947 while (atomic_load(&p.tid, memory_order_acquire) != 0)
948 internal_sched_yield();
949 }
950 if (attr == &myattr)
951 pthread_attr_destroy(&myattr);
952 return res;
953 }
954
955 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
956 SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
957 int tid = ThreadTid(thr, pc, (uptr)th);
958 ThreadIgnoreBegin(thr, pc);
959 int res = BLOCK_REAL(pthread_join)(th, ret);
960 ThreadIgnoreEnd(thr, pc);
961 if (res == 0) {
962 ThreadJoin(thr, pc, tid);
963 }
964 return res;
965 }
966
967 DEFINE_REAL_PTHREAD_FUNCTIONS
968
969 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
970 SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
971 int tid = ThreadTid(thr, pc, (uptr)th);
972 int res = REAL(pthread_detach)(th);
973 if (res == 0) {
974 ThreadDetach(thr, pc, tid);
975 }
976 return res;
977 }
978
979 // Problem:
980 // NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
981 // pthread_cond_t has different sizes in the two versions.
982 // If we call new REAL functions for an old pthread_cond_t, they will corrupt
983 // memory after the pthread_cond_t (the old cond is smaller).
984 // If we call old REAL functions for a new pthread_cond_t, we will lose some
985 // functionality (e.g. old functions do not support waiting against
986 // CLOCK_REALTIME).
987 // Proper handling would require having 2 versions of interceptors as well.
988 // But this is messy, in particular it requires linker scripts when the
989 // sanitizer runtime is linked into a shared library.
990 // Instead we assume we don't have dynamic libraries built against old
991 // pthread (2.2.5 dates back to 2002) and provide the legacy_pthread_cond flag
992 // that allows working with old libraries (but this mode does not support
993 // some features, e.g. pthread_condattr_getpshared).
994 static void *init_cond(void *c, bool force = false) {
995 // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
996 // So we allocate additional memory on the side large enough to hold
997 // any pthread_cond_t object. Always call new REAL functions, but pass
998 // the aux object to them.
999 // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
1000 // first word of pthread_cond_t to zero.
1001 // It's all relevant only for linux.
1002 if (!common_flags()->legacy_pthread_cond)
1003 return c;
1004 atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1005 uptr cond = atomic_load(p, memory_order_acquire);
1006 if (!force && cond != 0)
1007 return (void*)cond;
1008 void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1009 internal_memset(newcond, 0, pthread_cond_t_sz);
1010 if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1011 memory_order_acq_rel))
1012 return newcond;
1013 WRAP(free)(newcond);
1014 return (void*)cond;
1015 }
1016
1017 struct CondMutexUnlockCtx {
1018 ScopedInterceptor *si;
1019 ThreadState *thr;
1020 uptr pc;
1021 void *m;
1022 };
1023
1024 static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
1025 // pthread_cond_wait interceptor has enabled async signal delivery
1026 // (see BlockingCall below). Disable async signals since we are running
1027 // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
1028 // since the thread is cancelled, so we have to manually execute them
1029 // (the thread still can run some user code due to pthread_cleanup_push).
1030 ThreadSignalContext *ctx = SigCtx(arg->thr);
1031 CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
1032 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
1033 MutexLock(arg->thr, arg->pc, (uptr)arg->m);
1034 // Undo BlockingCall ctor effects.
1035 arg->thr->ignore_interceptors--;
1036 arg->si->~ScopedInterceptor();
1037 }
1038
1039 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1040 void *cond = init_cond(c, true);
1041 SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1042 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1043 return REAL(pthread_cond_init)(cond, a);
1044 }
1045
1046 static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
1047 int (*fn)(void *c, void *m, void *abstime), void *c,
1048 void *m, void *t) {
1049 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1050 MutexUnlock(thr, pc, (uptr)m);
1051 CondMutexUnlockCtx arg = {si, thr, pc, m};
1052 int res = 0;
1053 // This ensures that we handle mutex lock even in case of pthread_cancel.
1054 // See test/tsan/cond_cancel.cc.
1055 {
1056 // Enable signal delivery while the thread is blocked.
1057 BlockingCall bc(thr);
1058 res = call_pthread_cancel_with_cleanup(
1059 fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
1060 }
1061 if (res == EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1062 MutexLock(thr, pc, (uptr)m);
1063 return res;
1064 }
1065
1066 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1067 void *cond = init_cond(c);
1068 SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1069 return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
1070 pthread_cond_wait),
1071 cond, m, 0);
1072 }
1073
1074 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1075 void *cond = init_cond(c);
1076 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1077 return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
1078 abstime);
1079 }
1080
1081 #if SANITIZER_MAC
1082 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1083 void *reltime) {
1084 void *cond = init_cond(c);
1085 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1086 return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
1087 m, reltime);
1088 }
1089 #endif
1090
1091 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1092 void *cond = init_cond(c);
1093 SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1094 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1095 return REAL(pthread_cond_signal)(cond);
1096 }
1097
1098 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1099 void *cond = init_cond(c);
1100 SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1101 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1102 return REAL(pthread_cond_broadcast)(cond);
1103 }
1104
1105 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1106 void *cond = init_cond(c);
1107 SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1108 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1109 int res = REAL(pthread_cond_destroy)(cond);
1110 if (common_flags()->legacy_pthread_cond) {
1111 // Free our aux cond and zero the pointer to not leave dangling pointers.
1112 WRAP(free)(cond);
1113 atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1114 }
1115 return res;
1116 }
1117
1118 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1119 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1120 int res = REAL(pthread_mutex_init)(m, a);
1121 if (res == 0) {
1122 bool recursive = false;
1123 if (a) {
1124 int type = 0;
1125 if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1126 recursive = (type == PTHREAD_MUTEX_RECURSIVE
1127 || type == PTHREAD_MUTEX_RECURSIVE_NP);
1128 }
1129 MutexCreate(thr, pc, (uptr)m, false, recursive, false);
1130 }
1131 return res;
1132 }
1133
1134 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1135 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1136 int res = REAL(pthread_mutex_destroy)(m);
1137 if (res == 0 || res == EBUSY) {
1138 MutexDestroy(thr, pc, (uptr)m);
1139 }
1140 return res;
1141 }
1142
1143 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1144 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1145 int res = REAL(pthread_mutex_trylock)(m);
1146 if (res == EOWNERDEAD)
1147 MutexRepair(thr, pc, (uptr)m);
1148 if (res == 0 || res == EOWNERDEAD)
1149 MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
1150 return res;
1151 }
1152
1153 #if !SANITIZER_MAC
1154 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1155 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1156 int res = REAL(pthread_mutex_timedlock)(m, abstime);
1157 if (res == 0) {
1158 MutexLock(thr, pc, (uptr)m);
1159 }
1160 return res;
1161 }
1162 #endif
1163
1164 #if !SANITIZER_MAC
1165 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1166 SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1167 int res = REAL(pthread_spin_init)(m, pshared);
1168 if (res == 0) {
1169 MutexCreate(thr, pc, (uptr)m, false, false, false);
1170 }
1171 return res;
1172 }
1173
1174 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1175 SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1176 int res = REAL(pthread_spin_destroy)(m);
1177 if (res == 0) {
1178 MutexDestroy(thr, pc, (uptr)m);
1179 }
1180 return res;
1181 }
1182
1183 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1184 SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1185 int res = REAL(pthread_spin_lock)(m);
1186 if (res == 0) {
1187 MutexLock(thr, pc, (uptr)m);
1188 }
1189 return res;
1190 }
1191
1192 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1193 SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1194 int res = REAL(pthread_spin_trylock)(m);
1195 if (res == 0) {
1196 MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
1197 }
1198 return res;
1199 }
1200
1201 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1202 SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1203 MutexUnlock(thr, pc, (uptr)m);
1204 int res = REAL(pthread_spin_unlock)(m);
1205 return res;
1206 }
1207 #endif
1208
1209 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1210 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1211 int res = REAL(pthread_rwlock_init)(m, a);
1212 if (res == 0) {
1213 MutexCreate(thr, pc, (uptr)m, true, false, false);
1214 }
1215 return res;
1216 }
1217
1218 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1219 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1220 int res = REAL(pthread_rwlock_destroy)(m);
1221 if (res == 0) {
1222 MutexDestroy(thr, pc, (uptr)m);
1223 }
1224 return res;
1225 }
1226
1227 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1228 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1229 int res = REAL(pthread_rwlock_rdlock)(m);
1230 if (res == 0) {
1231 MutexReadLock(thr, pc, (uptr)m);
1232 }
1233 return res;
1234 }
1235
1236 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1237 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1238 int res = REAL(pthread_rwlock_tryrdlock)(m);
1239 if (res == 0) {
1240 MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
1241 }
1242 return res;
1243 }
1244
1245 #if !SANITIZER_MAC
1246 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1247 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1248 int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1249 if (res == 0) {
1250 MutexReadLock(thr, pc, (uptr)m);
1251 }
1252 return res;
1253 }
1254 #endif
1255
1256 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1257 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1258 int res = REAL(pthread_rwlock_wrlock)(m);
1259 if (res == 0) {
1260 MutexLock(thr, pc, (uptr)m);
1261 }
1262 return res;
1263 }
1264
1265 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1266 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1267 int res = REAL(pthread_rwlock_trywrlock)(m);
1268 if (res == 0) {
1269 MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
1270 }
1271 return res;
1272 }
1273
1274 #if !SANITIZER_MAC
1275 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1276 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1277 int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1278 if (res == 0) {
1279 MutexLock(thr, pc, (uptr)m);
1280 }
1281 return res;
1282 }
1283 #endif
1284
1285 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1286 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1287 MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1288 int res = REAL(pthread_rwlock_unlock)(m);
1289 return res;
1290 }
1291
1292 #if !SANITIZER_MAC
1293 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1294 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1295 MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1296 int res = REAL(pthread_barrier_init)(b, a, count);
1297 return res;
1298 }
1299
1300 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1301 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1302 MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1303 int res = REAL(pthread_barrier_destroy)(b);
1304 return res;
1305 }
1306
1307 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1308 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1309 Release(thr, pc, (uptr)b);
1310 MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1311 int res = REAL(pthread_barrier_wait)(b);
1312 MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1313 if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1314 Acquire(thr, pc, (uptr)b);
1315 }
1316 return res;
1317 }
1318 #endif
1319
1320 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1321 SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1322 if (o == 0 || f == 0)
1323 return EINVAL;
1324 atomic_uint32_t *a;
1325 if (!SANITIZER_MAC)
1326 a = static_cast<atomic_uint32_t*>(o);
1327 else // On OS X, pthread_once_t has a header with a long-sized signature.
1328 a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
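// State machine over *a: 0 - f() has not started, 1 - f() is running in some
// thread, 2 - f() has completed.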
1329 u32 v = atomic_load(a, memory_order_acquire);
1330 if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
1331 memory_order_relaxed)) {
1332 (*f)();
1333 if (!thr->in_ignored_lib)
1334 Release(thr, pc, (uptr)o);
1335 atomic_store(a, 2, memory_order_release);
1336 } else {
1337 while (v != 2) {
1338 internal_sched_yield();
1339 v = atomic_load(a, memory_order_acquire);
1340 }
1341 if (!thr->in_ignored_lib)
1342 Acquire(thr, pc, (uptr)o);
1343 }
1344 return 0;
1345 }
1346
1347 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1348 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1349 SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1350 if (fd > 0)
1351 FdAccess(thr, pc, fd);
1352 return REAL(__fxstat)(version, fd, buf);
1353 }
1354 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1355 #else
1356 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1357 #endif
1358
1359 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1360 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
1361 SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1362 if (fd > 0)
1363 FdAccess(thr, pc, fd);
1364 return REAL(fstat)(fd, buf);
1365 #else
1366 SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1367 if (fd > 0)
1368 FdAccess(thr, pc, fd);
1369 return REAL(__fxstat)(0, fd, buf);
1370 #endif
1371 }
1372
1373 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1374 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1375 SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1376 if (fd > 0)
1377 FdAccess(thr, pc, fd);
1378 return REAL(__fxstat64)(version, fd, buf);
1379 }
1380 #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1381 #else
1382 #define TSAN_MAYBE_INTERCEPT___FXSTAT64
1383 #endif
1384
1385 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1386 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1387 SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1388 if (fd > 0)
1389 FdAccess(thr, pc, fd);
1390 return REAL(__fxstat64)(0, fd, buf);
1391 }
1392 #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1393 #else
1394 #define TSAN_MAYBE_INTERCEPT_FSTAT64
1395 #endif
1396
1397 TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
1398 SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
1399 READ_STRING(thr, pc, name, 0);
1400 int fd = REAL(open)(name, flags, mode);
1401 if (fd >= 0)
1402 FdFileCreate(thr, pc, fd);
1403 return fd;
1404 }
1405
1406 #if SANITIZER_LINUX
1407 TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
1408 SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
1409 READ_STRING(thr, pc, name, 0);
1410 int fd = REAL(open64)(name, flags, mode);
1411 if (fd >= 0)
1412 FdFileCreate(thr, pc, fd);
1413 return fd;
1414 }
1415 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1416 #else
1417 #define TSAN_MAYBE_INTERCEPT_OPEN64
1418 #endif
1419
1420 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1421 SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1422 READ_STRING(thr, pc, name, 0);
1423 int fd = REAL(creat)(name, mode);
1424 if (fd >= 0)
1425 FdFileCreate(thr, pc, fd);
1426 return fd;
1427 }
1428
1429 #if SANITIZER_LINUX
1430 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1431 SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1432 READ_STRING(thr, pc, name, 0);
1433 int fd = REAL(creat64)(name, mode);
1434 if (fd >= 0)
1435 FdFileCreate(thr, pc, fd);
1436 return fd;
1437 }
1438 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1439 #else
1440 #define TSAN_MAYBE_INTERCEPT_CREAT64
1441 #endif
1442
1443 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1444 SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1445 int newfd = REAL(dup)(oldfd);
1446 if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1447 FdDup(thr, pc, oldfd, newfd, true);
1448 return newfd;
1449 }
1450
1451 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1452 SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1453 int newfd2 = REAL(dup2)(oldfd, newfd);
1454 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1455 FdDup(thr, pc, oldfd, newfd2, false);
1456 return newfd2;
1457 }
1458
1459 #if !SANITIZER_MAC
1460 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1461 SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1462 int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1463 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1464 FdDup(thr, pc, oldfd, newfd2, false);
1465 return newfd2;
1466 }
1467 #endif
1468
1469 #if SANITIZER_LINUX
1470 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1471 SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1472 int fd = REAL(eventfd)(initval, flags);
1473 if (fd >= 0)
1474 FdEventCreate(thr, pc, fd);
1475 return fd;
1476 }
1477 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1478 #else
1479 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1480 #endif
1481
1482 #if SANITIZER_LINUX
1483 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1484 SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
1485 if (fd >= 0)
1486 FdClose(thr, pc, fd);
1487 fd = REAL(signalfd)(fd, mask, flags);
1488 if (fd >= 0)
1489 FdSignalCreate(thr, pc, fd);
1490 return fd;
1491 }
1492 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1493 #else
1494 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1495 #endif
1496
1497 #if SANITIZER_LINUX
1498 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1499 SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1500 int fd = REAL(inotify_init)(fake);
1501 if (fd >= 0)
1502 FdInotifyCreate(thr, pc, fd);
1503 return fd;
1504 }
1505 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1506 #else
1507 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1508 #endif
1509
1510 #if SANITIZER_LINUX
1511 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1512 SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1513 int fd = REAL(inotify_init1)(flags);
1514 if (fd >= 0)
1515 FdInotifyCreate(thr, pc, fd);
1516 return fd;
1517 }
1518 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1519 #else
1520 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1521 #endif
1522
1523 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1524 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1525 int fd = REAL(socket)(domain, type, protocol);
1526 if (fd >= 0)
1527 FdSocketCreate(thr, pc, fd);
1528 return fd;
1529 }
1530
1531 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1532 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1533 int res = REAL(socketpair)(domain, type, protocol, fd);
1534 if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1535 FdPipeCreate(thr, pc, fd[0], fd[1]);
1536 return res;
1537 }
1538
1539 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1540 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1541 FdSocketConnecting(thr, pc, fd);
1542 int res = REAL(connect)(fd, addr, addrlen);
1543 if (res == 0 && fd >= 0)
1544 FdSocketConnect(thr, pc, fd);
1545 return res;
1546 }
1547
1548 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1549 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1550 int res = REAL(bind)(fd, addr, addrlen);
1551 if (fd > 0 && res == 0)
1552 FdAccess(thr, pc, fd);
1553 return res;
1554 }
1555
1556 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1557 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1558 int res = REAL(listen)(fd, backlog);
1559 if (fd > 0 && res == 0)
1560 FdAccess(thr, pc, fd);
1561 return res;
1562 }
1563
1564 TSAN_INTERCEPTOR(int, close, int fd) {
1565 SCOPED_TSAN_INTERCEPTOR(close, fd);
1566 if (fd >= 0)
1567 FdClose(thr, pc, fd);
1568 return REAL(close)(fd);
1569 }
1570
1571 #if SANITIZER_LINUX
1572 TSAN_INTERCEPTOR(int, __close, int fd) {
1573 SCOPED_TSAN_INTERCEPTOR(__close, fd);
1574 if (fd >= 0)
1575 FdClose(thr, pc, fd);
1576 return REAL(__close)(fd);
1577 }
1578 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1579 #else
1580 #define TSAN_MAYBE_INTERCEPT___CLOSE
1581 #endif
1582
1583 // glibc guts
1584 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1585 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1586 SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
1587 int fds[64];
1588 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1589 for (int i = 0; i < cnt; i++) {
1590 if (fds[i] > 0)
1591 FdClose(thr, pc, fds[i]);
1592 }
1593 REAL(__res_iclose)(state, free_addr);
1594 }
1595 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1596 #else
1597 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1598 #endif
1599
1600 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1601 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1602 int res = REAL(pipe)(pipefd);
1603 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1604 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1605 return res;
1606 }
1607
1608 #if !SANITIZER_MAC
1609 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1610 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1611 int res = REAL(pipe2)(pipefd, flags);
1612 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1613 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1614 return res;
1615 }
1616 #endif
1617
1618 TSAN_INTERCEPTOR(int, unlink, char *path) {
1619 SCOPED_TSAN_INTERCEPTOR(unlink, path);
1620 Release(thr, pc, File2addr(path));
1621 int res = REAL(unlink)(path);
1622 return res;
1623 }
1624
1625 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1626 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1627 void *res = REAL(tmpfile)(fake);
1628 if (res) {
1629 int fd = fileno_unlocked(res);
1630 if (fd >= 0)
1631 FdFileCreate(thr, pc, fd);
1632 }
1633 return res;
1634 }
1635
1636 #if SANITIZER_LINUX
1637 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1638 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1639 void *res = REAL(tmpfile64)(fake);
1640 if (res) {
1641 int fd = fileno_unlocked(res);
1642 if (fd >= 0)
1643 FdFileCreate(thr, pc, fd);
1644 }
1645 return res;
1646 }
1647 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1648 #else
1649 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1650 #endif
1651
1652 TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
1653 // libc file streams can call user-supplied functions, see fopencookie.
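// The interceptor scope below is closed before calling into libc so that any
// such user callback runs outside of this interceptor.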
1654 {
1655 SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f);
1656 MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true);
1657 }
1658 return REAL(fread)(ptr, size, nmemb, f);
1659 }
1660
1661 TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
1662 // libc file streams can call user-supplied functions, see fopencookie.
1663 {
1664 SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f);
1665 MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false);
1666 }
1667 return REAL(fwrite)(p, size, nmemb, f);
1668 }
1669
1670 static void FlushStreams() {
1671 // Flushing all the streams here may freeze the process if a child thread is
1672 // performing file stream operations at the same time.
1673 REAL(fflush)(stdout);
1674 REAL(fflush)(stderr);
1675 }
1676
1677 TSAN_INTERCEPTOR(void, abort, int fake) {
1678 SCOPED_TSAN_INTERCEPTOR(abort, fake);
1679 FlushStreams();
1680 REAL(abort)(fake);
1681 }
1682
1683 TSAN_INTERCEPTOR(int, puts, const char *s) {
1684 SCOPED_TSAN_INTERCEPTOR(puts, s);
1685 MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false);
1686 return REAL(puts)(s);
1687 }
1688
1689 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1690 SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1691 Release(thr, pc, Dir2addr(path));
1692 int res = REAL(rmdir)(path);
1693 return res;
1694 }
1695
1696 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1697 SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
1698 if (dirp) {
1699 int fd = dirfd(dirp);
1700 FdClose(thr, pc, fd);
1701 }
1702 return REAL(closedir)(dirp);
1703 }
1704
1705 #if SANITIZER_LINUX
1706 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1707 SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1708 int fd = REAL(epoll_create)(size);
1709 if (fd >= 0)
1710 FdPollCreate(thr, pc, fd);
1711 return fd;
1712 }
1713
1714 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1715 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1716 int fd = REAL(epoll_create1)(flags);
1717 if (fd >= 0)
1718 FdPollCreate(thr, pc, fd);
1719 return fd;
1720 }
1721
1722 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1723 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1724 if (epfd >= 0)
1725 FdAccess(thr, pc, epfd);
1726 if (epfd >= 0 && fd >= 0)
1727 FdAccess(thr, pc, fd);
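// EPOLL_CTL_ADD releases on the epoll fd so that the FdAcquire in epoll_wait
// establishes happens-before between the thread that registered the fd and
// the thread that later handles its events.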
1728 if (op == EPOLL_CTL_ADD && epfd >= 0)
1729 FdRelease(thr, pc, epfd);
1730 int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1731 return res;
1732 }
1733
1734 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1735 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1736 if (epfd >= 0)
1737 FdAccess(thr, pc, epfd);
1738 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1739 if (res > 0 && epfd >= 0)
1740 FdAcquire(thr, pc, epfd);
1741 return res;
1742 }
1743
1744 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1745 void *sigmask) {
1746 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1747 if (epfd >= 0)
1748 FdAccess(thr, pc, epfd);
1749 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1750 if (res > 0 && epfd >= 0)
1751 FdAcquire(thr, pc, epfd);
1752 return res;
1753 }
1754
1755 #define TSAN_MAYBE_INTERCEPT_EPOLL \
1756 TSAN_INTERCEPT(epoll_create); \
1757 TSAN_INTERCEPT(epoll_create1); \
1758 TSAN_INTERCEPT(epoll_ctl); \
1759 TSAN_INTERCEPT(epoll_wait); \
1760 TSAN_INTERCEPT(epoll_pwait)
1761 #else
1762 #define TSAN_MAYBE_INTERCEPT_EPOLL
1763 #endif
1764
1765 namespace __tsan {
1766
1767 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
1768 bool sigact, int sig, my_siginfo_t *info, void *uctx) {
1769 if (acquire)
1770 Acquire(thr, 0, (uptr)&sigactions[sig]);
1771 // Signals are generally asynchronous, so if we receive a signal while
1772 // ignores are enabled we should disable ignores. This is critical for sync
1773 // and interceptors, because otherwise we can miss synchronization and report
1774 // false races.
1775 int ignore_reads_and_writes = thr->ignore_reads_and_writes;
1776 int ignore_interceptors = thr->ignore_interceptors;
1777 int ignore_sync = thr->ignore_sync;
1778 if (!ctx->after_multithreaded_fork) {
1779 thr->ignore_reads_and_writes = 0;
1780 thr->fast_state.ClearIgnoreBit();
1781 thr->ignore_interceptors = 0;
1782 thr->ignore_sync = 0;
1783 }
1784 // Ensure that the handler does not spoil errno.
1785 const int saved_errno = errno;
1786 errno = 99;
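// 99 is an arbitrary sentinel: if errno no longer equals 99 after the handler
// returns, the handler clobbered errno and a report is produced below
// (subject to the exceptions listed there).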
1787 // This code races with sigaction. Be careful to not read sa_sigaction twice.
1788 // Also need to remember pc for reporting before the call,
1789 // because the handler can reset it.
1790 volatile uptr pc = sigact ?
1791 (uptr)sigactions[sig].sa_sigaction :
1792 (uptr)sigactions[sig].sa_handler;
1793 if (pc != (uptr)SIG_DFL && pc != (uptr)SIG_IGN) {
1794 if (sigact)
1795 ((sigactionhandler_t)pc)(sig, info, uctx);
1796 else
1797 ((sighandler_t)pc)(sig);
1798 }
1799 if (!ctx->after_multithreaded_fork) {
1800 thr->ignore_reads_and_writes = ignore_reads_and_writes;
1801 if (ignore_reads_and_writes)
1802 thr->fast_state.SetIgnoreBit();
1803 thr->ignore_interceptors = ignore_interceptors;
1804 thr->ignore_sync = ignore_sync;
1805 }
1806 // We do not detect errno spoiling for SIGTERM,
1807 // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
1808 // and tsan would report a false positive in such a case.
1809 // It's difficult to properly detect this situation (reraise),
1810 // because in async signal processing case (when handler is called directly
1811 // from rtl_generic_sighandler) we have not yet received the reraised
1812 // signal; and it looks too fragile to intercept all ways to reraise a signal.
1813 if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
1814 VarSizeStackTrace stack;
1815 // StackTrace::GetNextInstructionPc(pc) is used because a return address is
1816 // expected; OutputReport() will undo this.
1817 ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
1818 ThreadRegistryLock l(ctx->thread_registry);
1819 ScopedReport rep(ReportTypeErrnoInSignal);
1820 if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
1821 rep.AddStack(stack, true);
1822 OutputReport(thr, rep);
1823 }
1824 }
1825 errno = saved_errno;
1826 }
1827
1828 void ProcessPendingSignals(ThreadState *thr) {
1829 ThreadSignalContext *sctx = SigCtx(thr);
1830 if (sctx == 0 ||
1831 atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
1832 return;
1833 atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
1834 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
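// Block all signals while the pending handlers run so that new asynchronous
// signals are not delivered recursively; the original mask is restored below.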
1835 internal_sigfillset(&sctx->emptyset);
1836 CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->emptyset, &sctx->oldset));
1837 for (int sig = 0; sig < kSigCount; sig++) {
1838 SignalDesc *signal = &sctx->pending_signals[sig];
1839 if (signal->armed) {
1840 signal->armed = false;
1841 CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
1842 &signal->siginfo, &signal->ctx);
1843 }
1844 }
1845 CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->oldset, 0));
1846 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
1847 }
1848
1849 } // namespace __tsan
1850
1851 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
1852 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
1853 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
1854 // If we are sending a signal to ourselves, we must process it now.
1855 (sctx && sig == sctx->int_signal_send);
1856 }
1857
1858 void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
1859 my_siginfo_t *info, void *ctx) {
1860 ThreadState *thr = cur_thread();
1861 ThreadSignalContext *sctx = SigCtx(thr);
1862 if (sig < 0 || sig >= kSigCount) {
1863 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
1864 return;
1865 }
1866 // Don't mess with synchronous signals.
1867 const bool sync = is_sync_signal(sctx, sig);
1868 if (sync ||
1869 // If we are in a blocking function, we can safely process it now
1870 // (but check if we are in a recursive interceptor,
1871 // i.e. pthread_join()->munmap()).
1872 (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
1873 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
1874 if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
1875 atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
1876 CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
1877 atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
1878 } else {
1879 // Be very conservative with when we do acquire in this case.
1880 // It's unsafe to do acquire in async handlers, because ThreadState
1881 // can be in an inconsistent state.
1882 // SIGSYS looks relatively safe -- it's synchronous and can actually
1883 // need some global state.
1884 bool acq = (sig == SIGSYS);
1885 CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
1886 }
1887 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
1888 return;
1889 }
1890
1891 if (sctx == 0)
1892 return;
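// Asynchronous signal in an unsafe context: defer it. Arm the per-thread
// pending_signals slot; ProcessPendingSignals() will deliver it later at a
// safe point.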
1893 SignalDesc *signal = &sctx->pending_signals[sig];
1894 if (signal->armed == false) {
1895 signal->armed = true;
1896 signal->sigaction = sigact;
1897 if (info)
1898 internal_memcpy(&signal->siginfo, info, sizeof(*info));
1899 if (ctx)
1900 internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
1901 atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
1902 }
1903 }
1904
1905 static void rtl_sighandler(int sig) {
1906 rtl_generic_sighandler(false, sig, 0, 0);
1907 }
1908
1909 static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
1910 rtl_generic_sighandler(true, sig, info, ctx);
1911 }
1912
1913 TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
1914 // Note: if we call REAL(sigaction) directly for any reason without proxying
1915 // the signal handler through rtl_sigaction, very bad things will happen.
1916 // The handler will run synchronously and corrupt tsan per-thread state.
1917 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
1918 if (old)
1919 internal_memcpy(old, &sigactions[sig], sizeof(*old));
1920 if (act == 0)
1921 return 0;
1922 // Copy act into sigactions[sig].
1923 // Can't use struct copy, because compiler can emit call to memcpy.
1924 // Can't use internal_memcpy, because it copies byte-by-byte,
1925 // and the signal handler reads sa_handler concurrently. So it can read
1926 // some bytes from the old value and some bytes from the new value.
1927 // Use volatile to prevent insertion of memcpy.
1928 sigactions[sig].sa_handler = *(volatile sighandler_t*)&act->sa_handler;
1929 sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags;
1930 internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
1931 sizeof(sigactions[sig].sa_mask));
1932 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
1933 sigactions[sig].sa_restorer = act->sa_restorer;
1934 #endif
1935 sigaction_t newact;
1936 internal_memcpy(&newact, act, sizeof(newact));
1937 internal_sigfillset(&newact.sa_mask);
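// The wrapper handler is installed with all signals blocked; the user's
// original mask was saved into sigactions[sig] above.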
1938 if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) {
1939 if (newact.sa_flags & SA_SIGINFO)
1940 newact.sa_sigaction = rtl_sigaction;
1941 else
1942 newact.sa_handler = rtl_sighandler;
1943 }
1944 ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
1945 int res = REAL(sigaction)(sig, &newact, 0);
1946 return res;
1947 }
1948
1949 TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
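// Implemented on top of sigaction so that the handler is registered through
// the interceptor above and thus wrapped by rtl_sighandler.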
1950 sigaction_t act;
1951 act.sa_handler = h;
1952 internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
1953 act.sa_flags = 0;
1954 sigaction_t old;
1955 int res = sigaction(sig, &act, &old);
1956 if (res)
1957 return SIG_ERR;
1958 return old.sa_handler;
1959 }
1960
1961 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
1962 SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
1963 return REAL(sigsuspend)(mask);
1964 }
1965
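// raise/kill/pthread_kill targeting the current thread record the signal
// number in int_signal_send so that is_sync_signal() treats it as synchronous
// and the handler runs immediately instead of being deferred.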
1966 TSAN_INTERCEPTOR(int, raise, int sig) {
1967 SCOPED_TSAN_INTERCEPTOR(raise, sig);
1968 ThreadSignalContext *sctx = SigCtx(thr);
1969 CHECK_NE(sctx, 0);
1970 int prev = sctx->int_signal_send;
1971 sctx->int_signal_send = sig;
1972 int res = REAL(raise)(sig);
1973 CHECK_EQ(sctx->int_signal_send, sig);
1974 sctx->int_signal_send = prev;
1975 return res;
1976 }
1977
1978 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
1979 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
1980 ThreadSignalContext *sctx = SigCtx(thr);
1981 CHECK_NE(sctx, 0);
1982 int prev = sctx->int_signal_send;
1983 if (pid == (int)internal_getpid()) {
1984 sctx->int_signal_send = sig;
1985 }
1986 int res = REAL(kill)(pid, sig);
1987 if (pid == (int)internal_getpid()) {
1988 CHECK_EQ(sctx->int_signal_send, sig);
1989 sctx->int_signal_send = prev;
1990 }
1991 return res;
1992 }
1993
1994 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
1995 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
1996 ThreadSignalContext *sctx = SigCtx(thr);
1997 CHECK_NE(sctx, 0);
1998 int prev = sctx->int_signal_send;
1999 if (tid == pthread_self()) {
2000 sctx->int_signal_send = sig;
2001 }
2002 int res = REAL(pthread_kill)(tid, sig);
2003 if (tid == pthread_self()) {
2004 CHECK_EQ(sctx->int_signal_send, sig);
2005 sctx->int_signal_send = prev;
2006 }
2007 return res;
2008 }
2009
2010 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2011 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2012 // It's intercepted merely to process pending signals.
2013 return REAL(gettimeofday)(tv, tz);
2014 }
2015
2016 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2017 void *hints, void *rv) {
2018 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2019 // We miss atomic synchronization in getaddrinfo,
2020 // and can report a false race between malloc and free
2021 // inside of getaddrinfo. So ignore memory accesses.
2022 ThreadIgnoreBegin(thr, pc);
2023 int res = REAL(getaddrinfo)(node, service, hints, rv);
2024 ThreadIgnoreEnd(thr, pc);
2025 return res;
2026 }
2027
2028 TSAN_INTERCEPTOR(int, fork, int fake) {
2029 if (cur_thread()->in_symbolizer)
2030 return REAL(fork)(fake);
2031 SCOPED_INTERCEPTOR_RAW(fork, fake);
2032 ForkBefore(thr, pc);
2033 int pid;
2034 {
2035 // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
2036 // we'll assert in CheckNoLocks() unless we ignore interceptors.
2037 ScopedIgnoreInterceptors ignore;
2038 pid = REAL(fork)(fake);
2039 }
2040 if (pid == 0) {
2041 // child
2042 ForkChildAfter(thr, pc);
2043 FdOnFork(thr, pc);
2044 } else if (pid > 0) {
2045 // parent
2046 ForkParentAfter(thr, pc);
2047 } else {
2048 // error
2049 ForkParentAfter(thr, pc);
2050 }
2051 return pid;
2052 }
2053
2054 TSAN_INTERCEPTOR(int, vfork, int fake) {
2055 // Some programs (e.g. openjdk) call close for all file descriptors
2056 // in the child process. Under tsan it leads to false positives, because
2057 // address space is shared, so the parent process also thinks that
2058 // the descriptors are closed (while they are actually not).
2059 // This leads to false positives due to missed synchronization.
2060 // Strictly speaking, this is undefined behavior, because the vfork child is not
2061 // allowed to call any functions other than exec/exit. But this is what
2062 // openjdk does, so we want to handle it.
2063 // We could disable interceptors in the child process. But it's not possible
2064 // to simply intercept and wrap vfork, because vfork child is not allowed
2065 // to return from the function that calls vfork, and that's exactly what
2066 // we would do. So this would require some assembly trickery as well.
2067 // Instead we simply turn vfork into fork.
2068 return WRAP(fork)(fake);
2069 }
2070
2071 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2072 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2073 void *data);
2074 struct dl_iterate_phdr_data {
2075 ThreadState *thr;
2076 uptr pc;
2077 dl_iterate_phdr_cb_t cb;
2078 void *data;
2079 };
2080
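// True for application memory whose shadow is not marked as .rodata
// (kShadowRodata); only such ranges are reset by the callback below.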
2081 static bool IsAppNotRodata(uptr addr) {
2082 return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
2083 }
2084
2085 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2086 void *data) {
2087 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2088 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2089 // accessible in dl_iterate_phdr callback. But we don't see synchronization
2090 // inside of dynamic linker, so we "unpoison" it here in order to not
2091 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
2092 // because some libc functions call __libc_dlopen.
2093 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2094 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2095 internal_strlen(info->dlpi_name));
2096 int res = cbdata->cb(info, size, cbdata->data);
2097 // Perform the check one more time in case info->dlpi_name was overwritten
2098 // by user callback.
2099 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2100 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2101 internal_strlen(info->dlpi_name));
2102 return res;
2103 }
2104
2105 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2106 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2107 dl_iterate_phdr_data cbdata;
2108 cbdata.thr = thr;
2109 cbdata.pc = pc;
2110 cbdata.cb = cb;
2111 cbdata.data = data;
2112 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2113 return res;
2114 }
2115 #endif
2116
2117 static int OnExit(ThreadState *thr) {
2118 int status = Finalize(thr);
2119 FlushStreams();
2120 return status;
2121 }
2122
2123 struct TsanInterceptorContext {
2124 ThreadState *thr;
2125 const uptr caller_pc;
2126 const uptr pc;
2127 };
2128
2129 #if !SANITIZER_MAC
2130 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2131 __sanitizer_msghdr *msg) {
2132 int fds[64];
2133 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2134 for (int i = 0; i < cnt; i++)
2135 FdEventCreate(thr, pc, fds[i]);
2136 }
2137 #endif
2138
2139 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2140 // Causes interceptor recursion (getaddrinfo() and fopen())
2141 #undef SANITIZER_INTERCEPT_GETADDRINFO
2142 // These interceptors do not seem to be strictly necessary for tsan.
2143 // But we see cases where the interceptors consume 70% of execution time.
2144 // Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
2145 // First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
2146 // function "writes to" the buffer. Then, the same memory is "written to"
2147 // twice, first as buf and then as pwbufp (both of them refer to the same
2148 // addresses).
2149 #undef SANITIZER_INTERCEPT_GETPWENT
2150 #undef SANITIZER_INTERCEPT_GETPWENT_R
2151 #undef SANITIZER_INTERCEPT_FGETPWENT
2152 #undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
2153 #undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
2154 // We define our own.
2155 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2156 #define NEED_TLS_GET_ADDR
2157 #endif
2158 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2159
2160 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
2161 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
2162 INTERCEPT_FUNCTION_VER(name, ver)
2163
2164 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
2165 MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
2166 ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
2167 true)
2168
2169 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
2170 MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
2171 ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
2172 false)
2173
2174 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
2175 SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
2176 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
2177 ctx = (void *)&_ctx; \
2178 (void) ctx;
2179
2180 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2181 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
2182 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
2183 ctx = (void *)&_ctx; \
2184 (void) ctx;
2185
2186 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2187 Acquire(thr, pc, File2addr(path)); \
2188 if (file) { \
2189 int fd = fileno_unlocked(file); \
2190 if (fd >= 0) FdFileCreate(thr, pc, fd); \
2191 }
2192
2193 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2194 if (file) { \
2195 int fd = fileno_unlocked(file); \
2196 if (fd >= 0) FdClose(thr, pc, fd); \
2197 }
2198
2199 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2200 libignore()->OnLibraryLoaded(filename)
2201
2202 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2203 libignore()->OnLibraryUnloaded()
2204
2205 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2206 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2207
2208 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2209 Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2210
2211 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2212 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2213
2214 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2215 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2216
2217 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2218 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2219
2220 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2221 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2222
2223 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2224 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2225
2226 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2227 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2228
2229 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2230 __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
2231
2232 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2233
2234 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2235 OnExit(((TsanInterceptorContext *) ctx)->thr)
2236
2237 #define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \
2238 MutexLock(((TsanInterceptorContext *)ctx)->thr, \
2239 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2240
2241 #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
2242 MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
2243 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2244
2245 #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
2246 MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
2247 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2248
2249 #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
2250 MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
2251 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2252
2253 #if !SANITIZER_MAC
2254 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2255 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2256 ((TsanInterceptorContext *)ctx)->pc, msg)
2257 #endif
2258
2259 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
2260 if (TsanThread *t = GetCurrentThread()) { \
2261 *begin = t->tls_begin(); \
2262 *end = t->tls_end(); \
2263 } else { \
2264 *begin = *end = 0; \
2265 }
2266
2267 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2268 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2269
2270 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2271 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2272
2273 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2274
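// TSAN_SYSCALL/ScopedSyscall back the syscall hooks below (pulled in from
// sanitizer_common_syscalls.inc); pending signals are processed when the
// scope exits.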
2275 #define TSAN_SYSCALL() \
2276 ThreadState *thr = cur_thread(); \
2277 if (thr->ignore_interceptors) \
2278 return; \
2279 ScopedSyscall scoped_syscall(thr) \
2280 /**/
2281
2282 struct ScopedSyscall {
2283 ThreadState *thr;
2284
2285 explicit ScopedSyscall(ThreadState *thr)
2286 : thr(thr) {
2287 Initialize(thr);
2288 }
2289
2290 ~ScopedSyscall() {
2291 ProcessPendingSignals(thr);
2292 }
2293 };
2294
2295 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
2296 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2297 TSAN_SYSCALL();
2298 MemoryAccessRange(thr, pc, p, s, write);
2299 }
2300
2301 static void syscall_acquire(uptr pc, uptr addr) {
2302 TSAN_SYSCALL();
2303 Acquire(thr, pc, addr);
2304 DPrintf("syscall_acquire(%p)\n", addr);
2305 }
2306
2307 static void syscall_release(uptr pc, uptr addr) {
2308 TSAN_SYSCALL();
2309 DPrintf("syscall_release(%p)\n", addr);
2310 Release(thr, pc, addr);
2311 }
2312
2313 static void syscall_fd_close(uptr pc, int fd) {
2314 TSAN_SYSCALL();
2315 FdClose(thr, pc, fd);
2316 }
2317
2318 static USED void syscall_fd_acquire(uptr pc, int fd) {
2319 TSAN_SYSCALL();
2320 FdAcquire(thr, pc, fd);
2321 DPrintf("syscall_fd_acquire(%p)\n", fd);
2322 }
2323
2324 static USED void syscall_fd_release(uptr pc, int fd) {
2325 TSAN_SYSCALL();
2326 DPrintf("syscall_fd_release(%p)\n", fd);
2327 FdRelease(thr, pc, fd);
2328 }
2329
2330 static void syscall_pre_fork(uptr pc) {
2331 TSAN_SYSCALL();
2332 ForkBefore(thr, pc);
2333 }
2334
2335 static void syscall_post_fork(uptr pc, int pid) {
2336 TSAN_SYSCALL();
2337 if (pid == 0) {
2338 // child
2339 ForkChildAfter(thr, pc);
2340 FdOnFork(thr, pc);
2341 } else if (pid > 0) {
2342 // parent
2343 ForkParentAfter(thr, pc);
2344 } else {
2345 // error
2346 ForkParentAfter(thr, pc);
2347 }
2348 }
2349 #endif
2350
2351 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2352 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2353
2354 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2355 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2356
2357 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2358 do { \
2359 (void)(p); \
2360 (void)(s); \
2361 } while (false)
2362
2363 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2364 do { \
2365 (void)(p); \
2366 (void)(s); \
2367 } while (false)
2368
2369 #define COMMON_SYSCALL_ACQUIRE(addr) \
2370 syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2371
2372 #define COMMON_SYSCALL_RELEASE(addr) \
2373 syscall_release(GET_CALLER_PC(), (uptr)(addr))
2374
2375 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2376
2377 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2378
2379 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2380
2381 #define COMMON_SYSCALL_PRE_FORK() \
2382 syscall_pre_fork(GET_CALLER_PC())
2383
2384 #define COMMON_SYSCALL_POST_FORK(res) \
2385 syscall_post_fork(GET_CALLER_PC(), res)
2386
2387 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2388
2389 #ifdef NEED_TLS_GET_ADDR
2390 // Define own interceptor instead of sanitizer_common's for three reasons:
2391 // 1. It must not process pending signals.
2392 // Signal handlers may contain MOVDQA instruction (see below).
2393 // 2. It must be as simple as possible to not contain MOVDQA.
2394 // 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
2395 // is empty for tsan (meant only for msan).
2396 // Note: __tls_get_addr can be called with mis-aligned stack due to:
2397 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2398 // So the interceptor must work with a mis-aligned stack; in particular, it must
2399 // not execute MOVDQA with stack addresses.
2400 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2401 void *res = REAL(__tls_get_addr)(arg);
2402 ThreadState *thr = cur_thread();
2403 if (!thr)
2404 return res;
2405 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, thr->tls_size);
2406 if (!dtv)
2407 return res;
2408 // New DTLS block has been allocated.
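// Reset its shadow so that stale state left by a previous user of this memory
// does not produce false reports.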
2409 MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2410 return res;
2411 }
2412 #endif
2413
2414 namespace __tsan {
2415
2416 static void finalize(void *arg) {
2417 ThreadState *thr = cur_thread();
2418 int status = Finalize(thr);
2419 // Make sure the output is not lost.
2420 FlushStreams();
2421 if (status)
2422 Die();
2423 }
2424
2425 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2426 static void unreachable() {
2427 Report("FATAL: ThreadSanitizer: unreachable called\n");
2428 Die();
2429 }
2430 #endif
2431
2432 void InitializeInterceptors() {
2433 #if !SANITIZER_MAC
2434 // We need to set this up early, because functions like dlsym() can call it.
2435 REAL(memset) = internal_memset;
2436 REAL(memcpy) = internal_memcpy;
2437 #endif
2438
2439 // Instruct libc malloc to consume less memory.
2440 #if SANITIZER_LINUX
2441 mallopt(1, 0); // M_MXFAST
2442 mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
2443 #endif
2444
2445 InitializeCommonInterceptors();
2446
2447 #if !SANITIZER_MAC
2448 // We cannot use TSAN_INTERCEPT to get the setjmp address,
2449 // because it does &setjmp and setjmp is not present in some versions of libc.
2450 using __interception::GetRealFunctionAddress;
2451 GetRealFunctionAddress("setjmp", (uptr*)&REAL(setjmp), 0, 0);
2452 GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
2453 GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0);
2454 GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
2455 #endif
2456
2457 TSAN_INTERCEPT(longjmp);
2458 TSAN_INTERCEPT(siglongjmp);
2459
2460 TSAN_INTERCEPT(malloc);
2461 TSAN_INTERCEPT(__libc_memalign);
2462 TSAN_INTERCEPT(calloc);
2463 TSAN_INTERCEPT(realloc);
2464 TSAN_INTERCEPT(free);
2465 TSAN_INTERCEPT(cfree);
2466 TSAN_INTERCEPT(mmap);
2467 TSAN_MAYBE_INTERCEPT_MMAP64;
2468 TSAN_INTERCEPT(munmap);
2469 TSAN_MAYBE_INTERCEPT_MEMALIGN;
2470 TSAN_INTERCEPT(valloc);
2471 TSAN_MAYBE_INTERCEPT_PVALLOC;
2472 TSAN_INTERCEPT(posix_memalign);
2473
2474 TSAN_INTERCEPT(strcpy); // NOLINT
2475 TSAN_INTERCEPT(strncpy);
2476 TSAN_INTERCEPT(strdup);
2477
2478 TSAN_INTERCEPT(pthread_create);
2479 TSAN_INTERCEPT(pthread_join);
2480 TSAN_INTERCEPT(pthread_detach);
2481
2482 TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2483 TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2484 TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2485 TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2486 TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2487 TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2488
2489 TSAN_INTERCEPT(pthread_mutex_init);
2490 TSAN_INTERCEPT(pthread_mutex_destroy);
2491 TSAN_INTERCEPT(pthread_mutex_trylock);
2492 TSAN_INTERCEPT(pthread_mutex_timedlock);
2493
2494 TSAN_INTERCEPT(pthread_spin_init);
2495 TSAN_INTERCEPT(pthread_spin_destroy);
2496 TSAN_INTERCEPT(pthread_spin_lock);
2497 TSAN_INTERCEPT(pthread_spin_trylock);
2498 TSAN_INTERCEPT(pthread_spin_unlock);
2499
2500 TSAN_INTERCEPT(pthread_rwlock_init);
2501 TSAN_INTERCEPT(pthread_rwlock_destroy);
2502 TSAN_INTERCEPT(pthread_rwlock_rdlock);
2503 TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2504 TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2505 TSAN_INTERCEPT(pthread_rwlock_wrlock);
2506 TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2507 TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2508 TSAN_INTERCEPT(pthread_rwlock_unlock);
2509
2510 TSAN_INTERCEPT(pthread_barrier_init);
2511 TSAN_INTERCEPT(pthread_barrier_destroy);
2512 TSAN_INTERCEPT(pthread_barrier_wait);
2513
2514 TSAN_INTERCEPT(pthread_once);
2515
2516 TSAN_INTERCEPT(fstat);
2517 TSAN_MAYBE_INTERCEPT___FXSTAT;
2518 TSAN_MAYBE_INTERCEPT_FSTAT64;
2519 TSAN_MAYBE_INTERCEPT___FXSTAT64;
2520 TSAN_INTERCEPT(open);
2521 TSAN_MAYBE_INTERCEPT_OPEN64;
2522 TSAN_INTERCEPT(creat);
2523 TSAN_MAYBE_INTERCEPT_CREAT64;
2524 TSAN_INTERCEPT(dup);
2525 TSAN_INTERCEPT(dup2);
2526 TSAN_INTERCEPT(dup3);
2527 TSAN_MAYBE_INTERCEPT_EVENTFD;
2528 TSAN_MAYBE_INTERCEPT_SIGNALFD;
2529 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2530 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2531 TSAN_INTERCEPT(socket);
2532 TSAN_INTERCEPT(socketpair);
2533 TSAN_INTERCEPT(connect);
2534 TSAN_INTERCEPT(bind);
2535 TSAN_INTERCEPT(listen);
2536 TSAN_MAYBE_INTERCEPT_EPOLL;
2537 TSAN_INTERCEPT(close);
2538 TSAN_MAYBE_INTERCEPT___CLOSE;
2539 TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2540 TSAN_INTERCEPT(pipe);
2541 TSAN_INTERCEPT(pipe2);
2542
2543 TSAN_INTERCEPT(unlink);
2544 TSAN_INTERCEPT(tmpfile);
2545 TSAN_MAYBE_INTERCEPT_TMPFILE64;
2546 TSAN_INTERCEPT(fread);
2547 TSAN_INTERCEPT(fwrite);
2548 TSAN_INTERCEPT(abort);
2549 TSAN_INTERCEPT(puts);
2550 TSAN_INTERCEPT(rmdir);
2551 TSAN_INTERCEPT(closedir);
2552
2553 TSAN_INTERCEPT(sigaction);
2554 TSAN_INTERCEPT(signal);
2555 TSAN_INTERCEPT(sigsuspend);
2556 TSAN_INTERCEPT(raise);
2557 TSAN_INTERCEPT(kill);
2558 TSAN_INTERCEPT(pthread_kill);
2559 TSAN_INTERCEPT(sleep);
2560 TSAN_INTERCEPT(usleep);
2561 TSAN_INTERCEPT(nanosleep);
2562 TSAN_INTERCEPT(gettimeofday);
2563 TSAN_INTERCEPT(getaddrinfo);
2564
2565 TSAN_INTERCEPT(fork);
2566 TSAN_INTERCEPT(vfork);
2567 #if !SANITIZER_ANDROID
2568 TSAN_INTERCEPT(dl_iterate_phdr);
2569 #endif
2570 TSAN_INTERCEPT(on_exit);
2571 TSAN_INTERCEPT(__cxa_atexit);
2572 TSAN_INTERCEPT(_exit);
2573
2574 #ifdef NEED_TLS_GET_ADDR
2575 TSAN_INTERCEPT(__tls_get_addr);
2576 #endif
2577
2578 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2579 // Need to set it up, because interceptors check that the function is resolved.
2580 // But atexit is emitted directly into the module, so it can't be resolved.
2581 REAL(atexit) = (int(*)(void(*)()))unreachable;
2582 #endif
2583
2584 if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
2585 Printf("ThreadSanitizer: failed to setup atexit callback\n");
2586 Die();
2587 }
2588
2589 #if !SANITIZER_MAC
2590 if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
2591 Printf("ThreadSanitizer: failed to create thread key\n");
2592 Die();
2593 }
2594 #endif
2595
2596 FdInit();
2597 }
2598
2599 } // namespace __tsan
2600
2601 // Invisible barrier for tests.
2602 // There were several unsuccessful iterations for this functionality:
2603 // 1. Initially it was implemented in user code using
2604 // REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
2605 // MacOS. Futexes are linux-specific for this matter.
2606 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
2607 // "as-if synchronized via sleep" messages in reports which failed some
2608 // output tests.
2609 // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
2610 // visible events, which led to "failed to restore stack trace" failures.
2611 // Note that no_sanitize_thread attribute does not turn off atomic interception
2612 // so attaching it to the function defined in user code does not help.
2613 // That's why we now have what we have.
2614 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2615 void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
2616 if (count >= (1 << 8)) {
2617 Printf("barrier_init: count is too large (%d)\n", count);
2618 Die();
2619 }
2620 // 8 lsb is thread count, the remaining are count of entered threads.
2621 *barrier = count;
2622 }
2623
2624 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2625 void __tsan_testonly_barrier_wait(u64 *barrier) {
2626 unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
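// The epoch is the number of completed rounds: entered threads (upper bits)
// divided by the thread count (8 lsb). Spin until enough threads arrive to
// advance the barrier to the next epoch.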
2627 unsigned old_epoch = (old >> 8) / (old & 0xff);
2628 for (;;) {
2629 unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
2630 unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
2631 if (cur_epoch != old_epoch)
2632 return;
2633 internal_sched_yield();
2634 }
2635 }
2636