//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only into platform-specific
//     files (*).
//
//   (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};

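// Illustrative sketch (commentary only, not part of the runtime): how the
// FastState accessors above compose. The concrete tid/epoch values are
// hypothetical.
//
//   FastState fs(/*tid=*/3, /*epoch=*/0);
//   fs.IncrementEpoch();    // fs.epoch() == 1
//   fs.SetHistorySize(2);   // fs.GetHistorySize() == 2
//   fs.SetIgnoreBit();      // fs.GetIgnoreBit() == true, fs.tid() is still 3
//   fs.ClearIgnoreBit();
//   // GetTracePos() folds the epoch into the trace ring buffer: with
//   // history_size == 2 the mask covers kTracePartSizeBits + 3 low bits.
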
// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
                                               unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise made inaccessible) we write
  // shadow values with the tid/epoch related to the free and the freed bit
  // set. During memory access processing the freed bit is considered as the
  // msb of the tid, so any access races with a shadow value that has the
  // freed bit set (it is as if it were a write from a thread with which we
  // have never synchronized before). This allows us to detect accesses to
  // freed memory w/o additional overhead in memory access processing and at
  // the same time restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};

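// Illustrative sketch (commentary only, not part of the runtime): how the
// race-detection fast path fills in a Shadow value for one access. The tid,
// epoch, offset and size below are hypothetical; kSizeLog4 is defined later
// in this file.
//
//   FastState fs(/*tid=*/3, /*epoch=*/7);
//   Shadow cur(fs);
//   cur.SetAddr0AndSizeLog(/*addr0=*/4, /*kAccessSizeLog=*/kSizeLog4);
//   cur.SetWrite(/*kAccessIsWrite=*/true);
//   cur.SetAtomic(/*kIsAtomic=*/false);
//   // cur.addr0() == 4, cur.size() == 4, cur.IsWrite(), !cur.IsAtomic().
//   // TwoRangesIntersect(cur, old, 8) would then tell whether this 4-byte
//   // access at offset 4 overlaps an old 8-byte access recorded in `old`.
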
struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like allocator caches, and does not
// participate in race-detection logic (it is invisible to the end user).
// In C++ it is tied to an OS thread just like ThreadState, although ideally
// it should be tied to a CPU (that way we would have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processors than
// ThreadStates (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr;  // currently wired thread, or nullptr
#if !SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;
  DDPhysicalThread *dd_pt;
};

#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the
// current thread, if it does not have one. Intended for interceptors that can
// run at the very end of a thread, when the thread's processor has already
// been destroyed.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
#endif

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, then when we process a write to X from the same thread at
  // epoch=200 we do nothing, because both writes happen in the same 'synch
  // epoch'. That is, if another memory access does not race with the former
  // write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share a cache line with previous fields.
  ThreadState* current;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
#if TSAN_COLLECT_STATS
  u64 stat[StatCnt];
#endif
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
  bool is_dead;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;
#endif
  DDLogicalThread *dd_lt;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

#if !SANITIZER_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#if !SANITIZER_GO
#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
inline void cur_thread_init() { }
#else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
inline void cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
}
inline void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
#endif  // SANITIZER_MAC || SANITIZER_ANDROID
#endif  // SANITIZER_GO

class ThreadContext final : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  ClockAlloc clock_alloc;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};

const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);

class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                       StackTrace stack, const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};

class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag = nullptr);

// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template<typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}

#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack; \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();

#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
#endif

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] += n;
#endif
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] = n;
#endif
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}

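// Illustrative sketch (commentary only, not part of the runtime): the
// wrappers above are convenience entry points for the generic MemoryAccess().
// For example, a plain 4-byte store at `addr` observed at program counter
// `pc` would be reported to the runtime roughly as:
//
//   MemoryWrite(cur_thread(), pc, addr, kSizeLog4);
//
// while an atomic 8-byte load would use:
//
//   MemoryReadAtomic(cur_thread(), pc, addr, kSizeLog8);
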
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

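// Illustrative sketch (commentary only, not part of the runtime): the
// intended Processor lifecycle, matching the comment on struct Processor
// above. A ThreadState must be wired with a Processor before it can handle
// events:
//
//   Processor *proc = ProcCreate();
//   ProcWire(proc, thr);     // thr->proc() now returns proc
//   ... thr handles memory accesses, allocations and sync events ...
//   ProcUnwire(proc, thr);
//   ProcDestroy(proc);
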
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
                   int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);

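// Illustrative sketch (commentary only, not part of the runtime): the typical
// event sequence a write-lock/unlock interceptor would report, assuming `m`
// is the user-visible mutex address:
//
//   MutexCreate(thr, pc, (uptr)m);
//   MutexPreLock(thr, pc, (uptr)m);    // before blocking in the real lock
//   MutexPostLock(thr, pc, (uptr)m);   // after the real lock succeeds
//   ... critical section ...
//   MutexUnlock(thr, pc, (uptr)m);
//   MutexDestroy(thr, pc, (uptr)m);
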
void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of the happens-before relation, it draws a HB edge from all
// threads (where they happen to execute right now) to the current thread. We
// use it to handle Go finalizers. Namely, the finalizer goroutine executes
// AcquireGlobal right before executing finalizers. This provides a coarse,
// but simple approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#if !SANITIZER_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << kEventPCBits);
  *evp = ev;
}

#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0,  // __tsan_switch_to_fiber_no_sync
};

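// Illustrative sketch (commentary only, not part of the runtime): how a
// cooperative scheduler would typically drive the fiber API above, assuming
// `thr` is the currently running ThreadState:
//
//   ThreadState *fiber = FiberCreate(thr, pc, /*flags=*/0);
//   FiberSwitch(thr, pc, fiber, /*flags=*/0);  // synchronizes with the fiber
//   ...
//   FiberSwitch(fiber, pc, thr, FiberSwitchFlagNoSync);  // no HB edge drawn
//   FiberDestroy(thr, pc, fiber);
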
}  // namespace __tsan

#endif  // TSAN_RTL_H