• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //===-- tsan_interface_java.cc --------------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "tsan_interface_java.h"
15 #include "tsan_rtl.h"
16 #include "tsan_mutex.h"
17 #include "sanitizer_common/sanitizer_internal_defs.h"
18 #include "sanitizer_common/sanitizer_common.h"
19 #include "sanitizer_common/sanitizer_placement_new.h"
20 #include "sanitizer_common/sanitizer_stacktrace.h"
21 
22 using namespace __tsan;  // NOLINT
23 
24 namespace __tsan {
25 
// Fixed address where the Java heap shadow (array of BlockDesc) is mapped.
const uptr kHeapShadow = 0x300000000000ull;
// Granularity of the Java heap: one BlockDesc covers this many bytes.
const uptr kHeapAlignment = 8;
28 
// Shadow descriptor for one kHeapAlignment-sized granule of the Java heap.
// 'begin' is true for the granule that starts a live Java object; that
// descriptor also anchors the list of sync objects inside the object.
struct BlockDesc {
  bool begin;     // true iff this granule starts a live object
  Mutex mtx;      // protects 'head'
  SyncVar *head;  // singly-linked list of sync vars within this block

  BlockDesc()
      : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
      , head() {
    // The shadow is mmap'ed zero-initialized, so 'begin' must still be
    // false when placement-new constructs the descriptor (catches double
    // allocation of the same block).
    CHECK_EQ(begin, false);
    begin = true;
  }

  ~BlockDesc() {
    // Catches double free / free of a never-allocated block.
    CHECK_EQ(begin, true);
    begin = false;
    ThreadState *thr = cur_thread();
    // Destroy every sync object that belonged to this block.
    SyncVar *s = head;
    while (s) {
      SyncVar *s1 = s->next;
      StatInc(thr, StatSyncDestroyed);
      // Lock/unlock pair ensures no other thread still holds the sync
      // object's mutex before it is freed.
      s->mtx.Lock();
      s->mtx.Unlock();
      thr->mset.Remove(s->GetId());
      DestroyAndFree(s);
      s = s1;
    }
  }
};
57 
// Global state for the registered Java heap: its bounds and the shadow
// array holding one BlockDesc per kHeapAlignment-sized granule.
struct JavaContext {
  const uptr heap_begin;
  const uptr heap_size;
  BlockDesc *heap_shadow;

  JavaContext(jptr heap_begin, jptr heap_size)
      : heap_begin(heap_begin)
      , heap_size(heap_size) {
    // One BlockDesc per granule of the Java heap.
    uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
    heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
    // The shadow must land exactly at kHeapShadow; anything else means
    // the address range is already taken and we cannot continue.
    if ((uptr)heap_shadow != kHeapShadow) {
      Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
      Die();
    }
  }
};
74 
// RAII guard for entering the TSan runtime from a __tsan_java_* entry
// point: emits FuncEntry/FuncExit and maintains thr->in_rtl.
class ScopedJavaFunc {
 public:
  ScopedJavaFunc(ThreadState *thr, uptr pc)
      : thr_(thr) {
    Initialize(thr_);
    FuncEntry(thr, pc);
    // Java entry points must not be invoked from within the runtime.
    CHECK_EQ(thr_->in_rtl, 0);
    thr_->in_rtl++;
  }

  ~ScopedJavaFunc() {
    thr_->in_rtl--;
    CHECK_EQ(thr_->in_rtl, 0);
    FuncExit(thr_);
    // FIXME(dvyukov): process pending signals.
  }

 private:
  ThreadState *thr_;
};
95 
// Static storage for the singleton JavaContext; constructed with
// placement new in __tsan_java_init() to avoid dynamic allocation.
// u64 granularity keeps the buffer suitably aligned.
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;  // 0 until __tsan_java_init() is called
98 
getblock(uptr addr)99 static BlockDesc *getblock(uptr addr) {
100   uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
101   return &jctx->heap_shadow[i];
102 }
103 
getmem(BlockDesc * b)104 static uptr USED getmem(BlockDesc *b) {
105   uptr i = b - jctx->heap_shadow;
106   uptr p = jctx->heap_begin + i * kHeapAlignment;
107   CHECK_GE(p, jctx->heap_begin);
108   CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
109   return p;
110 }
111 
getblockbegin(uptr addr)112 static BlockDesc *getblockbegin(uptr addr) {
113   for (BlockDesc *b = getblock(addr);; b--) {
114     CHECK_GE(b, jctx->heap_shadow);
115     if (b->begin)
116       return b;
117   }
118   return 0;
119 }
120 
// Returns the sync object for a Java address, or 0 when addr is outside
// the registered Java heap, or when no sync object exists and 'create'
// is false. On a non-null return, the sync object's own mutex is held
// (write-locked if write_lock, else read-locked); the caller releases it.
SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create) {
  if (jctx == 0 || addr < jctx->heap_begin
      || addr >= jctx->heap_begin + jctx->heap_size)
    return 0;
  BlockDesc *b = getblockbegin(addr);
  DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
  // The block mutex protects the per-block list of sync objects for the
  // duration of lookup/insertion.
  Lock l(&b->mtx);
  SyncVar *s = b->head;
  for (; s; s = s->next) {
    if (s->addr == addr) {
      DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
      break;
    }
  }
  if (s == 0 && create) {
    DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
    s = CTX()->synctab.Create(thr, pc, addr);
    // Prepend to the block's list; safe because b->mtx is held.
    s->next = b->head;
    b->head = s;
  }
  if (s) {
    if (write_lock)
      s->mtx.Lock();
    else
      s->mtx.ReadLock();
  }
  return s;
}
150 
// Intentionally a no-op that returns 0: Java sync objects are destroyed
// only when their enclosing block is freed in __tsan_java_free()
// (see ~BlockDesc), never removed individually.
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
  // We do not destroy Java mutexes other than in __tsan_java_free().
  return 0;
}
155 
156 }  // namespace __tsan
157 
// Common prologue for every __tsan_java_* entry point: captures the
// current thread state, the caller PC and the current PC, and enters
// the runtime via the ScopedJavaFunc RAII guard. Defines 'thr' and 'pc'
// used by the function bodies below.
#define SCOPED_JAVA_FUNC(func) \
  ThreadState *thr = cur_thread(); \
  const uptr caller_pc = GET_CALLER_PC(); \
  const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
  (void)pc; \
  ScopedJavaFunc scoped(thr, caller_pc); \
/**/
165 
// Registers the Java heap [heap_begin, heap_begin+heap_size) and creates
// its shadow. Must be called exactly once, before any other
// __tsan_java_* function; both bounds must be kHeapAlignment-aligned.
void __tsan_java_init(jptr heap_begin, jptr heap_size) {
  SCOPED_JAVA_FUNC(__tsan_java_init);
  DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
  CHECK_EQ(jctx, 0);
  CHECK_GT(heap_begin, 0);
  CHECK_GT(heap_size, 0);
  CHECK_EQ(heap_begin % kHeapAlignment, 0);
  CHECK_EQ(heap_size % kHeapAlignment, 0);
  // Guards against address-space wrap-around of the heap range.
  CHECK_LT(heap_begin, heap_begin + heap_size);
  // Placement-new into static storage; no dynamic allocation.
  jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
}
177 
// Finalizes the runtime (report flushing etc.) and returns the process
// exit status suggested by TSan (non-zero if races were reported).
int  __tsan_java_fini() {
  SCOPED_JAVA_FUNC(__tsan_java_fini);
  DPrintf("#%d: java_fini()\n", thr->tid);
  CHECK_NE(jctx, 0);
  // FIXME(dvyukov): this does not call atexit() callbacks.
  int status = Finalize(thr);
  DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
  return status;
}
187 
// Registers a newly allocated Java object [ptr, ptr+size). Both ptr and
// size must be kHeapAlignment-aligned and lie within the Java heap.
void __tsan_java_alloc(jptr ptr, jptr size) {
  SCOPED_JAVA_FUNC(__tsan_java_alloc);
  DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
  CHECK_NE(jctx, 0);
  CHECK_NE(size, 0);
  CHECK_EQ(ptr % kHeapAlignment, 0);
  CHECK_EQ(size % kHeapAlignment, 0);
  CHECK_GE(ptr, jctx->heap_begin);
  CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);

  // Placement-new marks the first granule's descriptor as an object
  // start (BlockDesc's ctor CHECKs it was not already allocated).
  BlockDesc *b = getblock(ptr);
  new(b) BlockDesc();
}
201 
__tsan_java_free(jptr ptr,jptr size)202 void __tsan_java_free(jptr ptr, jptr size) {
203   SCOPED_JAVA_FUNC(__tsan_java_free);
204   DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
205   CHECK_NE(jctx, 0);
206   CHECK_NE(size, 0);
207   CHECK_EQ(ptr % kHeapAlignment, 0);
208   CHECK_EQ(size % kHeapAlignment, 0);
209   CHECK_GE(ptr, jctx->heap_begin);
210   CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
211 
212   BlockDesc *beg = getblock(ptr);
213   BlockDesc *end = getblock(ptr + size);
214   for (BlockDesc *b = beg; b != end; b++) {
215     if (b->begin)
216       b->~BlockDesc();
217   }
218 }
219 
// Notifies the runtime that the GC moved [src, src+size) to
// [dst, dst+size). The ranges must not overlap. Moves both the
// BlockDesc shadow (including sync objects, whose addresses are
// rebased) and the TSan access shadow.
void __tsan_java_move(jptr src, jptr dst, jptr size) {
  SCOPED_JAVA_FUNC(__tsan_java_move);
  DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
  CHECK_NE(jctx, 0);
  CHECK_NE(size, 0);
  CHECK_EQ(src % kHeapAlignment, 0);
  CHECK_EQ(dst % kHeapAlignment, 0);
  CHECK_EQ(size % kHeapAlignment, 0);
  CHECK_GE(src, jctx->heap_begin);
  CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
  CHECK_GE(dst, jctx->heap_begin);
  CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
  // Source and destination ranges must be disjoint.
  CHECK(dst >= src + size || src >= dst + size);

  // Assuming it's not running concurrently with threads that do
  // memory accesses and mutex operations (stop-the-world phase).
  {  // NOLINT
    // Phase 1: move the BlockDesc shadow.
    BlockDesc *s = getblock(src);
    BlockDesc *d = getblock(dst);
    BlockDesc *send = getblock(src + size);
    for (; s != send; s++, d++) {
      // Destination granules must be unallocated.
      CHECK_EQ(d->begin, false);
      if (s->begin) {
        DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
        new(d) BlockDesc;
        // Transfer the sync list and rebase each sync object's address
        // from the old range to the new one.
        d->head = s->head;
        for (SyncVar *sync = d->head; sync; sync = sync->next) {
          uptr newaddr = sync->addr - src + dst;
          DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
          sync->addr = newaddr;
        }
        // Detach the list before destruction so ~BlockDesc does not
        // free the sync objects that now belong to 'd'.
        s->head = 0;
        s->~BlockDesc();
      }
    }
  }

  {  // NOLINT
    // Phase 2: move the TSan access shadow, clearing the source.
    u64 *s = (u64*)MemToShadow(src);
    u64 *d = (u64*)MemToShadow(dst);
    u64 *send = (u64*)MemToShadow(src + size);
    for (; s != send; s++, d++) {
      *d = *s;
      *s = 0;
    }
  }
}
267 
// Models acquisition of the Java monitor at addr. The monitor is
// (re)created on demand as write-reentrant and linker-initialized.
void __tsan_java_mutex_lock(jptr addr) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
  DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexCreate(thr, pc, addr, true, true, true);
  MutexLock(thr, pc, addr);
}
278 
// Models release of the Java monitor at addr.
void __tsan_java_mutex_unlock(jptr addr) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
  DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexUnlock(thr, pc, addr);
}
288 
// Models a shared (read) acquisition of the Java monitor at addr,
// creating the monitor on demand.
void __tsan_java_mutex_read_lock(jptr addr) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
  DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexCreate(thr, pc, addr, true, true, true);
  MutexReadLock(thr, pc, addr);
}
299 
// Models release of a shared (read) hold of the Java monitor at addr.
void __tsan_java_mutex_read_unlock(jptr addr) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
  DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexReadUnlock(thr, pc, addr);
}
309 
// Models acquiring the Java monitor at addr with an initial recursion
// count of rec (> 0) in one step; used when re-acquiring a monitor
// released by __tsan_java_mutex_unlock_rec (e.g. after Object.wait()).
void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec);
  DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
  CHECK_GT(rec, 0);

  MutexCreate(thr, pc, addr, true, true, true);
  MutexLock(thr, pc, addr, rec);
}
321 
// Fully releases the Java monitor at addr regardless of its current
// recursion count and returns that count, to be passed back to
// __tsan_java_mutex_lock_rec on re-acquisition.
int __tsan_java_mutex_unlock_rec(jptr addr) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec);
  DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  return MutexUnlock(thr, pc, addr, true);
}
331