//===-- tsan_test_util_linux.cc -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Test utils, Linux and FreeBSD implementation.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "tsan_interface.h"
#include "tsan_test_util.h"
#include "tsan_report.h"

#include "gtest/gtest.h"

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

using namespace __tsan;  // NOLINT

static __thread bool expect_report;
static __thread bool expect_report_reported;
static __thread ReportType expect_report_type;

extern "C" void *__interceptor_memcpy(void*, const void*, uptr);
extern "C" void *__interceptor_memset(void*, int, uptr);

static void *BeforeInitThread(void *param) {
  (void)param;
  return 0;
}

static void AtExit() {
}

void TestMutexBeforeInit() {
  // Mutexes must be usable before __tsan_init();
  pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
  pthread_mutex_lock(&mtx);
  pthread_mutex_unlock(&mtx);
  pthread_mutex_destroy(&mtx);
  pthread_t thr;
  pthread_create(&thr, 0, BeforeInitThread, 0);
  pthread_join(thr, 0);
  atexit(AtExit);
}

namespace __tsan {
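// Hook called by the TSan runtime for every generated report. The tests use
// it to verify that exactly the expected report type (if any) is produced.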
bool OnReport(const ReportDesc *rep, bool suppressed) {
  if (expect_report) {
    if (rep->typ != expect_report_type) {
      printf("Expected report of type %d, got type %d\n",
             (int)expect_report_type, (int)rep->typ);
      EXPECT_FALSE("Wrong report type");
      return false;
    }
  } else {
    EXPECT_FALSE("Unexpected report");
    return false;
  }
  expect_report_reported = true;
  return true;
}
}  // namespace __tsan

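// Hands out a fresh, suitably spaced address for each MemLoc so that
// different tests and threads operate on distinct memory locations,
// optionally misaligned by |offset_from_aligned| bytes.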
static void* allocate_addr(int size, int offset_from_aligned = 0) {
  static uintptr_t foo;
  static atomic_uintptr_t uniq = {(uintptr_t)&foo};  // Some real address.
  const int kAlign = 16;
  CHECK(offset_from_aligned < kAlign);
  size = (size + 2 * kAlign) & ~(kAlign - 1);
  uintptr_t addr = atomic_fetch_add(&uniq, size, memory_order_relaxed);
  return (void*)(addr + offset_from_aligned);
}

MemLoc::MemLoc(int offset_from_aligned)
  : loc_(allocate_addr(16, offset_from_aligned)) {
}

MemLoc::~MemLoc() {
}

Mutex::Mutex(Type type)
  : alive_()
  , type_(type) {
}

Mutex::~Mutex() {
  CHECK(!alive_);
}

void Mutex::Init() {
  CHECK(!alive_);
  alive_ = true;
  if (type_ == Normal)
    CHECK_EQ(pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
  else if (type_ == RW)
    CHECK_EQ(pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
  else
    CHECK(0);
}

void Mutex::StaticInit() {
  CHECK(!alive_);
  CHECK(type_ == Normal);
  alive_ = true;
  pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
  memcpy(mtx_, &tmp, sizeof(tmp));
}

void Mutex::Destroy() {
  CHECK(alive_);
  alive_ = false;
  if (type_ == Normal)
    CHECK_EQ(pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
  else if (type_ == RW)
    CHECK_EQ(pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
}

void Mutex::Lock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
  else if (type_ == RW)
    CHECK_EQ(pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
}

bool Mutex::TryLock() {
  CHECK(alive_);
  if (type_ == Normal)
    return pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
  else if (type_ == Spin)
    return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
  else if (type_ == RW)
    return pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
  return false;
}

void Mutex::Unlock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
  else if (type_ == RW)
    CHECK_EQ(pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}

void Mutex::ReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
}

bool Mutex::TryReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  return pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) == 0;
}

void Mutex::ReadUnlock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}

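// A single operation to be performed by a ScopedThread, together with the
// report (if any) that the operation is expected to trigger.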
struct Event {
  enum Type {
    SHUTDOWN,
    READ,
    WRITE,
    VPTR_UPDATE,
    CALL,
    RETURN,
    MUTEX_CREATE,
    MUTEX_DESTROY,
    MUTEX_LOCK,
    MUTEX_TRYLOCK,
    MUTEX_UNLOCK,
    MUTEX_READLOCK,
    MUTEX_TRYREADLOCK,
    MUTEX_READUNLOCK,
    MEMCPY,
    MEMSET
  };
  Type type;
  void *ptr;
  uptr arg;
  uptr arg2;
  bool res;
  bool expect_report;
  ReportType report_type;

  Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
    : type(type)
    , ptr(const_cast<void*>(ptr))
    , arg(arg)
    , arg2(arg2)
    , res()
    , expect_report()
    , report_type() {
  }

  void ExpectReport(ReportType type) {
    expect_report = true;
    report_type = type;
  }
};

struct ScopedThread::Impl {
  pthread_t thread;
  bool main;
  bool detached;
  atomic_uintptr_t event;  // Event*

  static void *ScopedThreadCallback(void *arg);
  void send(Event *ev);
  void HandleEvent(Event *ev);
};

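// Executes one event on the current thread by calling directly into the
// TSan interface, then verifies that an expected report was produced (or
// that no report appeared when none was expected).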
void ScopedThread::Impl::HandleEvent(Event *ev) {
  CHECK_EQ(expect_report, false);
  expect_report = ev->expect_report;
  expect_report_reported = false;
  expect_report_type = ev->report_type;
  switch (ev->type) {
  case Event::READ:
  case Event::WRITE: {
    void (*tsan_mop)(void *addr) = 0;
    if (ev->type == Event::READ) {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_read1; break;
      case 2: tsan_mop = __tsan_read2; break;
      case 4: tsan_mop = __tsan_read4; break;
      case 8: tsan_mop = __tsan_read8; break;
      case 16: tsan_mop = __tsan_read16; break;
      }
    } else {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_write1; break;
      case 2: tsan_mop = __tsan_write2; break;
      case 4: tsan_mop = __tsan_write4; break;
      case 8: tsan_mop = __tsan_write8; break;
      case 16: tsan_mop = __tsan_write16; break;
      }
    }
    CHECK_NE(tsan_mop, 0);
#if defined(__FreeBSD__)
    const int ErrCode = ESOCKTNOSUPPORT;
#else
    const int ErrCode = ECHRNG;
#endif
    errno = ErrCode;
    tsan_mop(ev->ptr);
    CHECK_EQ(ErrCode, errno);  // In no case must errno be changed.
    break;
  }
  case Event::VPTR_UPDATE:
    __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
    break;
  case Event::CALL:
    __tsan_func_entry((void*)((uptr)ev->ptr));
    break;
  case Event::RETURN:
    __tsan_func_exit();
    break;
  case Event::MUTEX_CREATE:
    static_cast<Mutex*>(ev->ptr)->Init();
    break;
  case Event::MUTEX_DESTROY:
    static_cast<Mutex*>(ev->ptr)->Destroy();
    break;
  case Event::MUTEX_LOCK:
    static_cast<Mutex*>(ev->ptr)->Lock();
    break;
  case Event::MUTEX_TRYLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryLock();
    break;
  case Event::MUTEX_UNLOCK:
    static_cast<Mutex*>(ev->ptr)->Unlock();
    break;
  case Event::MUTEX_READLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadLock();
    break;
  case Event::MUTEX_TRYREADLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryReadLock();
    break;
  case Event::MUTEX_READUNLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadUnlock();
    break;
  case Event::MEMCPY:
    __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
    break;
  case Event::MEMSET:
    __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
    break;
  default: CHECK(0);
  }
  if (expect_report && !expect_report_reported) {
    printf("Missed expected report of type %d\n", (int)ev->report_type);
    EXPECT_FALSE("Missed expected race");
  }
  expect_report = false;
}

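// Worker thread body: spins on the single-slot |event| mailbox, executes
// each incoming event and clears the slot to signal completion back to the
// test thread (see send() below). A SHUTDOWN event terminates the loop.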
void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
  __tsan_func_entry(__builtin_return_address(0));
  Impl *impl = (Impl*)arg;
  for (;;) {
    Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire);
    if (ev == 0) {
      pthread_yield();
      continue;
    }
    if (ev->type == Event::SHUTDOWN) {
      atomic_store(&impl->event, 0, memory_order_release);
      break;
    }
    impl->HandleEvent(ev);
    atomic_store(&impl->event, 0, memory_order_release);
  }
  __tsan_func_exit();
  return 0;
}

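// Publishes one event to the worker thread and spins until it has been
// processed; for the main pseudo-thread the event is handled inline.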
void ScopedThread::Impl::send(Event *e) {
  if (main) {
    HandleEvent(e);
  } else {
    CHECK_EQ(atomic_load(&event, memory_order_relaxed), 0);
    atomic_store(&event, (uintptr_t)e, memory_order_release);
    while (atomic_load(&event, memory_order_acquire) != 0)
      pthread_yield();
  }
}

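// When |main| is true events are executed directly on the current thread;
// otherwise a dedicated worker thread with a small stack is spawned.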
ScopedThread::ScopedThread(bool detached, bool main) {
  impl_ = new Impl;
  impl_->main = main;
  impl_->detached = detached;
  atomic_store(&impl_->event, 0, memory_order_relaxed);
  if (!main) {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(
        &attr, detached ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE);
    pthread_attr_setstacksize(&attr, 64*1024);
    pthread_create(&impl_->thread, &attr,
        ScopedThread::Impl::ScopedThreadCallback, impl_);
  }
}

ScopedThread::~ScopedThread() {
  if (!impl_->main) {
    Event event(Event::SHUTDOWN);
    impl_->send(&event);
    if (!impl_->detached)
      pthread_join(impl_->thread, 0);
  }
  delete impl_;
}

void ScopedThread::Detach() {
  CHECK(!impl_->main);
  CHECK(!impl_->detached);
  impl_->detached = true;
  pthread_detach(impl_->thread);
}

void ScopedThread::Access(void *addr, bool is_write,
                          int size, bool expect_race) {
  Event event(is_write ? Event::WRITE : Event::READ, addr, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::VptrUpdate(const MemLoc &vptr,
                              const MemLoc &new_val,
                              bool expect_race) {
  Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Call(void(*pc)()) {
  Event event(Event::CALL, (void*)((uintptr_t)pc));
  impl_->send(&event);
}

void ScopedThread::Return() {
  Event event(Event::RETURN);
  impl_->send(&event);
}

void ScopedThread::Create(const Mutex &m) {
  Event event(Event::MUTEX_CREATE, &m);
  impl_->send(&event);
}

void ScopedThread::Destroy(const Mutex &m) {
  Event event(Event::MUTEX_DESTROY, &m);
  impl_->send(&event);
}

void ScopedThread::Lock(const Mutex &m) {
  Event event(Event::MUTEX_LOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryLock(const Mutex &m) {
  Event event(Event::MUTEX_TRYLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::Unlock(const Mutex &m) {
  Event event(Event::MUTEX_UNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::ReadLock(const Mutex &m) {
  Event event(Event::MUTEX_READLOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryReadLock(const Mutex &m) {
  Event event(Event::MUTEX_TRYREADLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::ReadUnlock(const Mutex &m) {
  Event event(Event::MUTEX_READUNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::Memcpy(void *dst, const void *src, int size,
                          bool expect_race) {
  Event event(Event::MEMCPY, dst, (uptr)src, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Memset(void *dst, int val, int size,
                          bool expect_race) {
  Event event(Event::MEMSET, dst, val, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}