//===-- tsan_sync.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

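// Registers a newly created sync object with the runtime deadlock detector
// (defined elsewhere in the TSan runtime, tsan_rtl_mutex.cpp).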
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

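// Initializes the sync object for the given application address.
// The creation stack is recorded for C/C++ only (Go does not use it), and the
// object is registered with the deadlock detector if deadlock detection is on.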
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (!SANITIZER_GO)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

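// Returns the sync object to its initial state and releases its vector clocks
// back to the processor's clock cache. Passing proc == 0 is only valid when
// the clocks are already empty (as in the constructor).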
void SyncVar::Reset(Processor *proc) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  atomic_store_relaxed(&flags, 0);

  if (proc == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&proc->clock_cache);
    read_clock.Reset(&proc->clock_cache);
  }
}

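// MetaMap maps application memory to heap block descriptors (MBlock) and sync
// objects (SyncVar) through the meta shadow region.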
MetaMap::MetaMap()
    : block_alloc_("heap block allocator")
    , sync_alloc_("sync allocator") {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

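// Allocates a heap block descriptor for the block [p, p+sz), records the
// allocating thread and stack, and publishes the descriptor index in the meta
// shadow cell for p.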
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tag = 0;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

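// Frees the heap block starting at p together with any sync objects attached
// to the same meta range. Returns the block size rounded up to the meta shadow
// cell size, or 0 if there is no block at p.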
uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(proc, p, sz);
  return sz;
}

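// Walks the meta shadow for [p, p+sz) and releases every block descriptor and
// sync object referenced from it. Returns true if anything was freed.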
bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    if (idx == 0) {
      // Note: don't write to meta in this case -- the block can be huge.
      continue;
    }
    *meta = 0;
    has_something = true;
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(proc);
        sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort with
// respect to freeing of meta objects, because we don't want to page in the
// whole range, which can be huge. The function probes pages one-by-one until
// it finds a page without meta objects, at which point it stops freeing.
// Because thread stacks grow top-down, we do the same starting from the end
// as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
  if (SANITIZER_GO) {
    // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
    // so we do the optimization only for C/C++.
    FreeRange(proc, p, sz);
    return;
  }
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
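  // One page of meta shadow covers kPageSize bytes of application memory
  // (each kMetaShadowCell application bytes map to kMetaShadowSize meta bytes).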
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(proc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(proc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(proc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something && checked > (128 << 10))
      break;
  }
  // Probe end of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    // Stacks grow down, so sync objects are most likely at the end of the
    // region (if it is a stack). The very end of the stack is TLS and tsan
    // increases TLS by at least 256K, so check at least 512K.
    if (!has_something && checked > (512 << 10))
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the Java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  if (!MmapFixedSuperNoReserve(metap, metasz))
    Die();
}

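// Returns the heap block descriptor covering address p, or 0 if there is none.
// Sync objects chained in the same meta cell are skipped.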
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

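// Returns the sync object for addr, creating it if it does not exist yet.
// The object is returned locked for writing or reading as requested.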
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                              uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

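// Returns the existing sync object for addr (locked), or 0 if there is none.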
SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

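// Looks up (and optionally creates) the sync object for addr in the lock-free
// list rooted at the meta shadow cell. A speculatively allocated object is
// published with a compare-and-swap; if the CAS fails, the scan is retried,
// and the speculative object is freed once an existing one is found instead.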
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr->proc());
          sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

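// Moves all meta info (block descriptors and sync objects) for [src, src+sz)
// to the corresponding range at dst, patching the addresses stored in the
// sync objects. The copy direction is chosen so that overlapping ranges are
// handled correctly.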
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; there are no concurrent accesses to the regions
  // (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

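// Returns the processor's allocator caches to the global block and sync
// allocators when the processor goes idle.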
void MetaMap::OnProcIdle(Processor *proc) {
  block_alloc_.FlushCache(&proc->block_cache);
  sync_alloc_.FlushCache(&proc->sync_cache);
}

}  // namespace __tsan