//===-- tsan_sync.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_SYNC_H
#define TSAN_SYNC_H

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
#include "tsan_clock.h"
#include "tsan_mutex.h"
#include "tsan_dense_alloc.h"

namespace __tsan {

// These need to match __tsan_mutex_* flags defined in tsan_interface.h.
// See documentation there as well.
enum MutexFlags {
  // Mirrors of the public __tsan_mutex_* annotation flags.
  MutexFlagLinkerInit = 1 << 0,       // __tsan_mutex_linker_init
  MutexFlagWriteReentrant = 1 << 1,   // __tsan_mutex_write_reentrant
  MutexFlagReadReentrant = 1 << 2,    // __tsan_mutex_read_reentrant
  MutexFlagReadLock = 1 << 3,         // __tsan_mutex_read_lock
  MutexFlagTryLock = 1 << 4,         // __tsan_mutex_try_lock
  MutexFlagTryLockFailed = 1 << 5,    // __tsan_mutex_try_lock_failed
  MutexFlagRecursiveLock = 1 << 6,    // __tsan_mutex_recursive_lock
  MutexFlagRecursiveUnlock = 1 << 7,  // __tsan_mutex_recursive_unlock
  MutexFlagNotStatic = 1 << 8,        // __tsan_mutex_not_static

  // The flags below are runtime-private (no tsan_interface.h counterpart).
  // Mutex API misuse was detected, so don't report any more.
  MutexFlagBroken = 1 << 30,
  // We did not intercept the pre-lock event, so handle it on post-lock.
  MutexFlagDoPreLockOnPostLock = 1 << 29,
  // Union of all mutex creation flags; must list every creation flag above.
  MutexCreationFlagMask = MutexFlagLinkerInit | MutexFlagWriteReentrant |
                          MutexFlagReadReentrant | MutexFlagNotStatic,
};
50 struct SyncVar {
51   SyncVar();
52 
53   static const int kInvalidTid = -1;
54 
55   uptr addr;  // overwritten by DenseSlabAlloc freelist
56   Mutex mtx;
57   u64 uid;  // Globally unique id.
58   u32 creation_stack_id;
59   int owner_tid;  // Set only by exclusive owners.
60   u64 last_lock;
61   int recursion;
62   atomic_uint32_t flags;
63   u32 next;  // in MetaMap
64   DDMutex dd;
65   SyncClock read_clock;  // Used for rw mutexes only.
66   // The clock is placed last, so that it is situated on a different cache line
67   // with the mtx. This reduces contention for hot sync objects.
68   SyncClock clock;
69 
70   void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
71   void Reset(Processor *proc);
72 
GetIdSyncVar73   u64 GetId() const {
74     // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
75     return GetLsb((u64)addr | (uid << 48), 60);
76   }
CheckIdSyncVar77   bool CheckId(u64 uid) const {
78     CHECK_EQ(uid, GetLsb(uid, 14));
79     return GetLsb(this->uid, 14) == uid;
80   }
SplitIdSyncVar81   static uptr SplitId(u64 id, u64 *uid) {
82     *uid = id >> 48;
83     return (uptr)GetLsb(id, 48);
84   }
85 
IsFlagSetSyncVar86   bool IsFlagSet(u32 f) const {
87     return atomic_load_relaxed(&flags) & f;
88   }
89 
SetFlagsSyncVar90   void SetFlags(u32 f) {
91     atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);
92   }
93 
UpdateFlagsSyncVar94   void UpdateFlags(u32 flagz) {
95     // Filter out operation flags.
96     if (!(flagz & MutexCreationFlagMask))
97       return;
98     u32 current = atomic_load_relaxed(&flags);
99     if (current & MutexCreationFlagMask)
100       return;
101     // Note: this can be called from MutexPostReadLock which holds only read
102     // lock on the SyncVar.
103     atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask));
104   }
105 };

/* MetaMap allows to map arbitrary user pointers onto various descriptors.
   Currently it maps pointers to heap block descriptors and sync var descs.
   It uses 1/2 direct shadow, see tsan_platform.h.
*/
111 class MetaMap {
112  public:
113   MetaMap();
114 
115   void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
116   uptr FreeBlock(Processor *proc, uptr p);
117   bool FreeRange(Processor *proc, uptr p, uptr sz);
118   void ResetRange(Processor *proc, uptr p, uptr sz);
119   MBlock* GetBlock(uptr p);
120 
121   SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
122                               uptr addr, bool write_lock);
123   SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
124 
125   void MoveMemory(uptr src, uptr dst, uptr sz);
126 
127   void OnProcIdle(Processor *proc);
128 
129  private:
130   static const u32 kFlagMask  = 3u << 30;
131   static const u32 kFlagBlock = 1u << 30;
132   static const u32 kFlagSync  = 2u << 30;
133   typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
134   typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
135   BlockAlloc block_alloc_;
136   SyncAlloc sync_alloc_;
137   atomic_uint64_t uid_gen_;
138 
139   SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
140                       bool create);
141 };

}  // namespace __tsan

#endif  // TSAN_SYNC_H