//===-- tsan_test_util_linux.cc -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Test utils, Linux implementation.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "tsan_interface.h"
#include "tsan_test_util.h"
#include "tsan_report.h"

#include "gtest/gtest.h"

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

using namespace __tsan; // NOLINT

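// Per-thread expectation state: HandleEvent() sets these before running an
// event, and OnReport() below checks every report against them.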
static __thread bool expect_report;
static __thread bool expect_report_reported;
static __thread ReportType expect_report_type;

extern "C" void *__interceptor_memcpy(void*, const void*, uptr);
extern "C" void *__interceptor_memset(void*, int, uptr);

static void *BeforeInitThread(void *param) {
  (void)param;
  return 0;
}

static void AtExit() {
}

void TestMutexBeforeInit() {
  // Mutexes must be usable before __tsan_init();
  pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
  pthread_mutex_lock(&mtx);
  pthread_mutex_unlock(&mtx);
  pthread_mutex_destroy(&mtx);
  pthread_t thr;
  pthread_create(&thr, 0, BeforeInitThread, 0);
  pthread_join(thr, 0);
  atexit(AtExit);
}

namespace __tsan {
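// Test hook: the runtime calls OnReport() for every report. Accept it only if
// the current thread expects a report of this type, otherwise fail the test.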
bool OnReport(const ReportDesc *rep, bool suppressed) {
  if (expect_report) {
    if (rep->typ != expect_report_type) {
      printf("Expected report of type %d, got type %d\n",
             (int)expect_report_type, (int)rep->typ);
      EXPECT_FALSE("Wrong report type");
      return false;
    }
  } else {
    EXPECT_FALSE("Unexpected report");
    return false;
  }
  expect_report_reported = true;
  return true;
}
} // namespace __tsan

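// Returns a fresh address range (16-byte aligned plus offset_from_aligned),
// so that every MemLoc refers to a distinct, non-overlapping location.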
static void* allocate_addr(int size, int offset_from_aligned = 0) {
  static uintptr_t foo;
  static atomic_uintptr_t uniq = {(uintptr_t)&foo}; // Some real address.
  const int kAlign = 16;
  CHECK(offset_from_aligned < kAlign);
  size = (size + 2 * kAlign) & ~(kAlign - 1);
  uintptr_t addr = atomic_fetch_add(&uniq, size, memory_order_relaxed);
  return (void*)(addr + offset_from_aligned);
}

MemLoc::MemLoc(int offset_from_aligned)
  : loc_(allocate_addr(16, offset_from_aligned)) {
}

MemLoc::~MemLoc() {
}

Mutex::Mutex(Type type)
  : alive_()
  , type_(type) {
}

Mutex::~Mutex() {
  CHECK(!alive_);
}

void Mutex::Init() {
  CHECK(!alive_);
  alive_ = true;
  if (type_ == Normal)
    CHECK_EQ(pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
  else if (type_ == RW)
    CHECK_EQ(pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
  else
    CHECK(0);
}

void Mutex::StaticInit() {
  CHECK(!alive_);
  CHECK(type_ == Normal);
  alive_ = true;
  pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
  memcpy(mtx_, &tmp, sizeof(tmp));
}

void Mutex::Destroy() {
  CHECK(alive_);
  alive_ = false;
  if (type_ == Normal)
    CHECK_EQ(pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
  else if (type_ == RW)
    CHECK_EQ(pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
}

void Mutex::Lock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
  else if (type_ == RW)
    CHECK_EQ(pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
}

bool Mutex::TryLock() {
  CHECK(alive_);
  if (type_ == Normal)
    return pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
  else if (type_ == Spin)
    return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
  else if (type_ == RW)
    return pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
  return false;
}

void Mutex::Unlock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
  else if (type_ == RW)
    CHECK_EQ(pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}

void Mutex::ReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
}

bool Mutex::TryReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  return pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) == 0;
}

void Mutex::ReadUnlock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}

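// A single operation to be executed by a ScopedThread, together with the
// report (if any) that it is expected to trigger.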
struct Event {
  enum Type {
    SHUTDOWN,
    READ,
    WRITE,
    VPTR_UPDATE,
    CALL,
    RETURN,
    MUTEX_CREATE,
    MUTEX_DESTROY,
    MUTEX_LOCK,
    MUTEX_TRYLOCK,
    MUTEX_UNLOCK,
    MUTEX_READLOCK,
    MUTEX_TRYREADLOCK,
    MUTEX_READUNLOCK,
    MEMCPY,
    MEMSET
  };
  Type type;
  void *ptr;
  uptr arg;
  uptr arg2;
  bool res;
  bool expect_report;
  ReportType report_type;

  Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
    : type(type)
    , ptr(const_cast<void*>(ptr))
    , arg(arg)
    , arg2(arg2)
    , res()
    , expect_report()
    , report_type() {
  }

  void ExpectReport(ReportType type) {
    expect_report = true;
    report_type = type;
  }
};

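// Worker behind a ScopedThread. Events are passed to it one at a time via the
// single atomic 'event' slot; a null slot means the worker is idle.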
struct ScopedThread::Impl {
  pthread_t thread;
  bool main;
  bool detached;
  atomic_uintptr_t event; // Event*

  static void *ScopedThreadCallback(void *arg);
  void send(Event *ev);
  void HandleEvent(Event *ev);
};

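// Executes one event on the calling thread and verifies that the expected
// report, if any, was actually produced.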
void ScopedThread::Impl::HandleEvent(Event *ev) {
  CHECK_EQ(expect_report, false);
  expect_report = ev->expect_report;
  expect_report_reported = false;
  expect_report_type = ev->report_type;
  switch (ev->type) {
  case Event::READ:
  case Event::WRITE: {
    void (*tsan_mop)(void *addr) = 0;
    if (ev->type == Event::READ) {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_read1; break;
      case 2: tsan_mop = __tsan_read2; break;
      case 4: tsan_mop = __tsan_read4; break;
      case 8: tsan_mop = __tsan_read8; break;
      case 16: tsan_mop = __tsan_read16; break;
      }
    } else {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_write1; break;
      case 2: tsan_mop = __tsan_write2; break;
      case 4: tsan_mop = __tsan_write4; break;
      case 8: tsan_mop = __tsan_write8; break;
      case 16: tsan_mop = __tsan_write16; break;
      }
    }
    CHECK_NE(tsan_mop, 0);
    errno = ECHRNG;
    tsan_mop(ev->ptr);
    CHECK_EQ(errno, ECHRNG); // In no case must errno be changed.
    break;
  }
  case Event::VPTR_UPDATE:
    __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
    break;
  case Event::CALL:
    __tsan_func_entry((void*)((uptr)ev->ptr));
    break;
  case Event::RETURN:
    __tsan_func_exit();
    break;
  case Event::MUTEX_CREATE:
    static_cast<Mutex*>(ev->ptr)->Init();
    break;
  case Event::MUTEX_DESTROY:
    static_cast<Mutex*>(ev->ptr)->Destroy();
    break;
  case Event::MUTEX_LOCK:
    static_cast<Mutex*>(ev->ptr)->Lock();
    break;
  case Event::MUTEX_TRYLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryLock();
    break;
  case Event::MUTEX_UNLOCK:
    static_cast<Mutex*>(ev->ptr)->Unlock();
    break;
  case Event::MUTEX_READLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadLock();
    break;
  case Event::MUTEX_TRYREADLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryReadLock();
    break;
  case Event::MUTEX_READUNLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadUnlock();
    break;
  case Event::MEMCPY:
    __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
    break;
  case Event::MEMSET:
    __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
    break;
  default: CHECK(0);
  }
  if (expect_report && !expect_report_reported) {
    printf("Missed expected report of type %d\n", (int)ev->report_type);
    EXPECT_FALSE("Missed expected race");
  }
  expect_report = false;
}

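// Worker loop: spin until an event is published, run it, then clear the slot
// to signal completion. A SHUTDOWN event terminates the loop.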
void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
  __tsan_func_entry(__builtin_return_address(0));
  Impl *impl = (Impl*)arg;
  for (;;) {
    Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire);
    if (ev == 0) {
      pthread_yield();
      continue;
    }
    if (ev->type == Event::SHUTDOWN) {
      atomic_store(&impl->event, 0, memory_order_release);
      break;
    }
    impl->HandleEvent(ev);
    atomic_store(&impl->event, 0, memory_order_release);
  }
  __tsan_func_exit();
  return 0;
}

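// Publishes an event to the worker and spins until it is consumed; for the
// main thread the event is handled inline.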
void ScopedThread::Impl::send(Event *e) {
  if (main) {
    HandleEvent(e);
  } else {
    CHECK_EQ(atomic_load(&event, memory_order_relaxed), 0);
    atomic_store(&event, (uintptr_t)e, memory_order_release);
    while (atomic_load(&event, memory_order_acquire) != 0)
      pthread_yield();
  }
}

ScopedThread::ScopedThread(bool detached, bool main) {
  impl_ = new Impl;
  impl_->main = main;
  impl_->detached = detached;
  atomic_store(&impl_->event, 0, memory_order_relaxed);
  if (!main) {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(
        &attr, detached ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE);
    pthread_attr_setstacksize(&attr, 64*1024);
    pthread_create(&impl_->thread, &attr,
        ScopedThread::Impl::ScopedThreadCallback, impl_);
  }
}

ScopedThread::~ScopedThread() {
  if (!impl_->main) {
    Event event(Event::SHUTDOWN);
    impl_->send(&event);
    if (!impl_->detached)
      pthread_join(impl_->thread, 0);
  }
  delete impl_;
}

void ScopedThread::Detach() {
  CHECK(!impl_->main);
  CHECK(!impl_->detached);
  impl_->detached = true;
  pthread_detach(impl_->thread);
}

void ScopedThread::Access(void *addr, bool is_write,
                          int size, bool expect_race) {
  Event event(is_write ? Event::WRITE : Event::READ, addr, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::VptrUpdate(const MemLoc &vptr,
                              const MemLoc &new_val,
                              bool expect_race) {
  Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Call(void(*pc)()) {
  Event event(Event::CALL, (void*)((uintptr_t)pc));
  impl_->send(&event);
}

void ScopedThread::Return() {
  Event event(Event::RETURN);
  impl_->send(&event);
}

void ScopedThread::Create(const Mutex &m) {
  Event event(Event::MUTEX_CREATE, &m);
  impl_->send(&event);
}

void ScopedThread::Destroy(const Mutex &m) {
  Event event(Event::MUTEX_DESTROY, &m);
  impl_->send(&event);
}

void ScopedThread::Lock(const Mutex &m) {
  Event event(Event::MUTEX_LOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryLock(const Mutex &m) {
  Event event(Event::MUTEX_TRYLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::Unlock(const Mutex &m) {
  Event event(Event::MUTEX_UNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::ReadLock(const Mutex &m) {
  Event event(Event::MUTEX_READLOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryReadLock(const Mutex &m) {
  Event event(Event::MUTEX_TRYREADLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::ReadUnlock(const Mutex &m) {
  Event event(Event::MUTEX_READUNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::Memcpy(void *dst, const void *src, int size,
                          bool expect_race) {
  Event event(Event::MEMCPY, dst, (uptr)src, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Memset(void *dst, int val, int size,
                          bool expect_race) {
  Event event(Event::MEMSET, dst, val, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}