//===-- tsan_sync.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

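// Initializes a sync object for the given address and uid: records the
// creation stack (not in Go mode) and registers the mutex with the deadlock
// detector if deadlock detection is enabled.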
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

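// Returns the object to its default state so that it can be reused.
// If proc is null the vector clocks must already be empty (the object was
// never used); otherwise they are released into the processor's clock cache.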
void SyncVar::Reset(Processor *proc) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (proc == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&proc->clock_cache);
    read_clock.Reset(&proc->clock_cache);
  }
}

MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

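// Creates a heap block descriptor for the allocation [p, p+sz) and stores
// its index in the meta shadow cell that corresponds to p.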
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

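// Frees the block descriptor and all sync objects attached to the block
// starting at p. Returns the size of the freed block rounded up to the
// meta shadow cell size, or 0 if there is no block at p.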
uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(proc, p, sz);
  return sz;
}

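// Walks the meta shadow for [p, p+sz) and frees every block and sync object
// found there. Returns true if anything was freed.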
bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    if (idx == 0) {
      // Note: don't write to meta in this case -- the block can be huge.
      continue;
    }
    *meta = 0;
    has_something = true;
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(proc);
        sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort wrt
// freeing of meta objects, because we don't want to page in the whole range
// which can be huge. The function probes pages one-by-one until it finds a
// page without meta objects; at that point it stops freeing meta objects.
// Because thread stacks grow top-down, we do the same starting from the end
// as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
  if (kGoMode) {
    // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
    // so we do the optimization only for C/C++.
    FreeRange(proc, p, sz);
    return;
  }
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(proc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(proc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(proc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something && checked > (128 << 10))
      break;
  }
  // Probe end of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    // Stacks grow down, so sync objects are most likely at the end of the
    // region (if it is a stack). The very end of the stack is TLS and tsan
    // increases TLS by at least 256K, so check at least 512K.
    if (!has_something && checked > (512 << 10))
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the Java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  MmapFixedNoReserve(metap, metasz);
}

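// Returns the heap block descriptor for the address p, or 0 if there is none.
// The meta cell may hold a chain of sync objects in front of the block,
// so the list is traversed until the block entry is found.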
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

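// Returns the sync object for addr, creating it if necessary, with its mtx
// acquired for writing or reading as requested. A typical caller (sketch,
// simplified from the mutex instrumentation) looks like:
//   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
//   ... update s->clock, s->owner_tid, etc. ...
//   s->mtx.Unlock();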
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

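// Returns the sync object for addr only if it already exists, locked as
// requested; returns 0 otherwise.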
SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

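// Common implementation of the two functions above. Sync objects whose
// addresses map to the same meta cell form a singly-linked list. The list is
// searched first; if addr is not found and create is set, a new object is
// prepended with an atomic compare-and-swap. The outer loop retries if the
// cell was changed concurrently by another thread.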
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr->proc());
          sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

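// Moves all meta objects for the range [src, src+sz) to the cells that
// correspond to dst and patches the addresses stored in the sync objects
// (used, e.g., by __tsan_java_move).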
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; there must be no concurrent accesses to the
  // regions (e.g. during stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

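// Flushes the processor-local allocator caches back to the global allocators
// when the processor goes idle.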
void MetaMap::OnProcIdle(Processor *proc) {
  block_alloc_.FlushCache(&proc->block_cache);
  sync_alloc_.FlushCache(&proc->sync_cache);
}

}  // namespace __tsan