• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //===-- tsan_test_util_posix.cpp ------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 // Test utils, Linux, FreeBSD, NetBSD and Darwin implementation.
12 //===----------------------------------------------------------------------===//
13 
14 #include "sanitizer_common/sanitizer_atomic.h"
15 #include "tsan_interface.h"
16 #include "tsan_posix_util.h"
17 #include "tsan_test_util.h"
18 #include "tsan_report.h"
19 
20 #include "gtest/gtest.h"
21 
22 #include <assert.h>
23 #include <pthread.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <errno.h>
29 
// PC of our immediate caller; passed to tsan entry points so reports can be
// attributed to the test functions.
#define CALLERPC (__builtin_return_address(0))

using namespace __tsan;

// Per-thread expectation state: armed by ScopedThread::Impl::HandleEvent()
// before executing an event, checked and updated by __tsan::OnReport().
static __thread bool expect_report;            // a report is expected for the current event
static __thread bool expect_report_reported;   // set once OnReport() sees the report
static __thread ReportType expect_report_type; // kind of report that is expected
37 
// Do-nothing thread body used to exercise pthread_create/join before
// __tsan_init() has run.
static void *BeforeInitThread(void *arg) {
  (void)arg;  // unused
  return nullptr;
}
42 
// atexit() handler registered before __tsan_init(); intentionally a no-op.
static void AtExit() {}
45 
// Verifies that the intercepted pthread entry points are usable before
// __tsan_init() runs: lock/unlock/destroy a statically initialized mutex,
// create and join a thread, and register an atexit handler.
void TestMutexBeforeInit() {
  // Mutexes must be usable before __tsan_init();
  pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
  __interceptor_pthread_mutex_lock(&mtx);
  __interceptor_pthread_mutex_unlock(&mtx);
  __interceptor_pthread_mutex_destroy(&mtx);
  pthread_t thr;
  __interceptor_pthread_create(&thr, 0, BeforeInitThread, 0);
  __interceptor_pthread_join(thr, 0);
  atexit(AtExit);
}
57 
58 namespace __tsan {
OnReport(const ReportDesc * rep,bool suppressed)59 bool OnReport(const ReportDesc *rep, bool suppressed) {
60   if (expect_report) {
61     if (rep->typ != expect_report_type) {
62       printf("Expected report of type %d, got type %d\n",
63              (int)expect_report_type, (int)rep->typ);
64       EXPECT_TRUE(false) << "Wrong report type";
65       return false;
66     }
67   } else {
68     EXPECT_TRUE(false) << "Unexpected report";
69     return false;
70   }
71   expect_report_reported = true;
72   return true;
73 }
74 }  // namespace __tsan
75 
allocate_addr(int size,int offset_from_aligned=0)76 static void* allocate_addr(int size, int offset_from_aligned = 0) {
77   static uintptr_t foo;
78   static atomic_uintptr_t uniq = {(uintptr_t)&foo};  // Some real address.
79   const int kAlign = 16;
80   CHECK(offset_from_aligned < kAlign);
81   size = (size + 2 * kAlign) & ~(kAlign - 1);
82   uintptr_t addr = atomic_fetch_add(&uniq, size, memory_order_relaxed);
83   return (void*)(addr + offset_from_aligned);
84 }
85 
// Reserves a fresh 16-byte test location, optionally misaligned by
// offset_from_aligned bytes.
MemLoc::MemLoc(int offset_from_aligned)
  : loc_(allocate_addr(16, offset_from_aligned)) {
}
89 
// Locations from allocate_addr() are never reclaimed, so nothing to free.
MemLoc::~MemLoc() {
}
92 
// Records the mutex flavor; the underlying primitive is created lazily in
// Init()/StaticInit().
Mutex::Mutex(Type type)
  : alive_()
  , type_(type) {
}
97 
// The test must call Destroy() before the wrapper goes away.
Mutex::~Mutex() {
  CHECK(!alive_);
}
101 
// Initializes the underlying pthread primitive selected by type_.
// Spin locks are excluded on Darwin (no pthread_spin_* there), so on Apple
// the RW branch chains directly off the Normal check.
void Mutex::Init() {
  CHECK(!alive_);
  alive_ = true;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
  else
    CHECK(0);
}
116 
// Initializes a Normal mutex via PTHREAD_MUTEX_INITIALIZER (static-style
// initialization) instead of pthread_mutex_init, to test that code path.
void Mutex::StaticInit() {
  CHECK(!alive_);
  CHECK(type_ == Normal);
  alive_ = true;
  pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
  memcpy(mtx_, &tmp, sizeof(tmp));
}
124 
// Destroys the underlying primitive matching type_ and marks the wrapper
// dead so the destructor's CHECK passes.
void Mutex::Destroy() {
  CHECK(alive_);
  alive_ = false;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
}
137 
// Acquires the mutex exclusively; RW mutexes take the writer lock.
void Mutex::Lock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
}
149 
// Attempts a non-blocking exclusive acquisition; returns true on success.
// RW mutexes attempt the writer lock.
bool Mutex::TryLock() {
  CHECK(alive_);
  if (type_ == Normal)
    return __interceptor_pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
#ifndef __APPLE__
  else if (type_ == Spin)
    return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
#endif
  else if (type_ == RW)
    return __interceptor_pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
  return false;
}
162 
// Releases a lock acquired by Lock()/TryLock().
void Mutex::Unlock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}
174 
// Acquires the reader side; only valid for RW mutexes.
void Mutex::ReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
}
180 
// Non-blocking reader acquisition; only valid for RW mutexes.
// Returns true if the read lock was taken.
bool Mutex::TryReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  return __interceptor_pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) ==  0;
}
186 
// Releases a reader lock; only valid for RW mutexes.
void Mutex::ReadUnlock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}
192 
// A command posted to a ScopedThread worker, together with its operands,
// the out-result for try-lock operations, and the report expectation.
struct Event {
  enum Type {
    SHUTDOWN,
    READ,
    WRITE,
    VPTR_UPDATE,
    CALL,
    RETURN,
    MUTEX_CREATE,
    MUTEX_DESTROY,
    MUTEX_LOCK,
    MUTEX_TRYLOCK,
    MUTEX_UNLOCK,
    MUTEX_READLOCK,
    MUTEX_TRYREADLOCK,
    MUTEX_READUNLOCK,
    MEMCPY,
    MEMSET
  };
  Type type;
  void *ptr;   // primary operand: accessed address, Mutex*, call pc, or dst
  uptr arg;    // access size, new vptr value, memcpy src, or memset value
  uptr arg2;   // access pc, or memcpy/memset byte count
  bool res;    // out: result of MUTEX_TRYLOCK / MUTEX_TRYREADLOCK
  bool expect_report;      // handling this event must produce a tsan report
  ReportType report_type;  // the kind of report that is expected

  explicit Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
      : type(type),
        ptr(const_cast<void *>(ptr)),
        arg(arg),
        arg2(arg2),
        res(),
        expect_report(),
        report_type() {}

  // Marks this event as one that must trigger a report of the given type.
  void ExpectReport(ReportType type) {
    expect_report = true;
    report_type = type;
  }
};
234 
// Hidden state of a ScopedThread: the worker pthread plus a one-slot
// mailbox through which the test thread hands events to the worker.
struct ScopedThread::Impl {
  pthread_t thread;
  bool main;      // if true, events run on the current thread (no worker)
  bool detached;  // worker created/later made detached; skip join on exit
  atomic_uintptr_t event;  // Event*; 0 means the slot is empty

  static void *ScopedThreadCallback(void *arg);
  void send(Event *ev);
  void HandleEvent(Event *ev);
};
245 
// Executes one event on the current thread. Arms the thread-local report
// expectation, performs the requested operation through the tsan interface
// or the interceptors, then verifies that an expected report actually fired.
void ScopedThread::Impl::HandleEvent(Event *ev) {
  CHECK_EQ(expect_report, false);
  expect_report = ev->expect_report;
  expect_report_reported = false;
  expect_report_type = ev->report_type;
  switch (ev->type) {
  case Event::READ:
  case Event::WRITE: {
    // Pick the size-specific __tsan_{read,write}N_pc entry point.
    void (*tsan_mop)(void *addr, void *pc) = 0;
    if (ev->type == Event::READ) {
      switch (ev->arg /*size*/) {
        case 1:
          tsan_mop = __tsan_read1_pc;
          break;
        case 2:
          tsan_mop = __tsan_read2_pc;
          break;
        case 4:
          tsan_mop = __tsan_read4_pc;
          break;
        case 8:
          tsan_mop = __tsan_read8_pc;
          break;
        case 16:
          tsan_mop = __tsan_read16_pc;
          break;
      }
    } else {
      switch (ev->arg /*size*/) {
        case 1:
          tsan_mop = __tsan_write1_pc;
          break;
        case 2:
          tsan_mop = __tsan_write2_pc;
          break;
        case 4:
          tsan_mop = __tsan_write4_pc;
          break;
        case 8:
          tsan_mop = __tsan_write8_pc;
          break;
        case 16:
          tsan_mop = __tsan_write16_pc;
          break;
      }
    }
    CHECK_NE(tsan_mop, 0);
    // Seed errno with an OS-appropriate value and verify the access entry
    // point does not clobber it (checked by the CHECK_EQ below).
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__NetBSD__)
    const int ErrCode = ESOCKTNOSUPPORT;
#else
    const int ErrCode = ECHRNG;
#endif
    errno = ErrCode;
    tsan_mop(ev->ptr, (void *)ev->arg2);
    CHECK_EQ(ErrCode, errno);  // In no case must errno be changed.
    break;
  }
  case Event::VPTR_UPDATE:
    __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
    break;
  case Event::CALL:
    __tsan_func_entry((void*)((uptr)ev->ptr));
    break;
  case Event::RETURN:
    __tsan_func_exit();
    break;
  case Event::MUTEX_CREATE:
    static_cast<Mutex*>(ev->ptr)->Init();
    break;
  case Event::MUTEX_DESTROY:
    static_cast<Mutex*>(ev->ptr)->Destroy();
    break;
  case Event::MUTEX_LOCK:
    static_cast<Mutex*>(ev->ptr)->Lock();
    break;
  case Event::MUTEX_TRYLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryLock();
    break;
  case Event::MUTEX_UNLOCK:
    static_cast<Mutex*>(ev->ptr)->Unlock();
    break;
  case Event::MUTEX_READLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadLock();
    break;
  case Event::MUTEX_TRYREADLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryReadLock();
    break;
  case Event::MUTEX_READUNLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadUnlock();
    break;
  case Event::MEMCPY:
    __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
    break;
  case Event::MEMSET:
    __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
    break;
  default: CHECK(0);
  }
  // An event that expected a report must have produced it (OnReport sets
  // expect_report_reported).
  if (expect_report && !expect_report_reported) {
    printf("Missed expected report of type %d\n", (int)ev->report_type);
    EXPECT_TRUE(false) << "Missed expected race";
  }
  expect_report = false;
}
350 
// Worker thread loop: spins on the event mailbox (acquire), executes each
// posted event, and clears the slot (release) to signal completion to
// send(). A SHUTDOWN event terminates the loop.
void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
  __tsan_func_entry(CALLERPC);
  Impl *impl = (Impl*)arg;
  for (;;) {
    Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire);
    if (ev == 0) {
      sched_yield();
      continue;
    }
    if (ev->type == Event::SHUTDOWN) {
      atomic_store(&impl->event, 0, memory_order_release);
      break;
    }
    impl->HandleEvent(ev);
    atomic_store(&impl->event, 0, memory_order_release);
  }
  __tsan_func_exit();
  return 0;
}
370 
// Delivers an event: executed inline when this object represents the main
// thread, otherwise published (release) into the worker's mailbox; blocks
// until the worker clears the slot, so the call is synchronous.
void ScopedThread::Impl::send(Event *e) {
  if (main) {
    HandleEvent(e);
  } else {
    CHECK_EQ(atomic_load(&event, memory_order_relaxed), 0);
    atomic_store(&event, (uintptr_t)e, memory_order_release);
    while (atomic_load(&event, memory_order_acquire) != 0)
      sched_yield();
  }
}
381 
ScopedThread(bool detached,bool main)382 ScopedThread::ScopedThread(bool detached, bool main) {
383   impl_ = new Impl;
384   impl_->main = main;
385   impl_->detached = detached;
386   atomic_store(&impl_->event, 0, memory_order_relaxed);
387   if (!main) {
388     pthread_attr_t attr;
389     pthread_attr_init(&attr);
390     pthread_attr_setdetachstate(
391         &attr, detached ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE);
392     pthread_attr_setstacksize(&attr, 64*1024);
393     __interceptor_pthread_create(&impl_->thread, &attr,
394         ScopedThread::Impl::ScopedThreadCallback, impl_);
395   }
396 }
397 
// Shuts down the worker (if any) by posting SHUTDOWN, joins it unless it
// was detached, and frees the impl.
ScopedThread::~ScopedThread() {
  if (!impl_->main) {
    Event event(Event::SHUTDOWN);
    impl_->send(&event);
    if (!impl_->detached)
      __interceptor_pthread_join(impl_->thread, 0);
  }
  delete impl_;
}
407 
// Detaches the worker thread through the interceptor; the destructor will
// then skip the join.
void ScopedThread::Detach() {
  CHECK(!impl_->main);
  CHECK(!impl_->detached);
  impl_->detached = true;
  __interceptor_pthread_detach(impl_->thread);
}
414 
Access(void * addr,bool is_write,int size,bool expect_race)415 void ScopedThread::Access(void *addr, bool is_write,
416                           int size, bool expect_race) {
417   Event event(is_write ? Event::WRITE : Event::READ, addr, size,
418               (uptr)CALLERPC);
419   if (expect_race)
420     event.ExpectReport(ReportTypeRace);
421   impl_->send(&event);
422 }
423 
VptrUpdate(const MemLoc & vptr,const MemLoc & new_val,bool expect_race)424 void ScopedThread::VptrUpdate(const MemLoc &vptr,
425                               const MemLoc &new_val,
426                               bool expect_race) {
427   Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
428   if (expect_race)
429     event.ExpectReport(ReportTypeRace);
430   impl_->send(&event);
431 }
432 
Call(void (* pc)())433 void ScopedThread::Call(void(*pc)()) {
434   Event event(Event::CALL, (void*)((uintptr_t)pc));
435   impl_->send(&event);
436 }
437 
Return()438 void ScopedThread::Return() {
439   Event event(Event::RETURN);
440   impl_->send(&event);
441 }
442 
Create(const Mutex & m)443 void ScopedThread::Create(const Mutex &m) {
444   Event event(Event::MUTEX_CREATE, &m);
445   impl_->send(&event);
446 }
447 
Destroy(const Mutex & m)448 void ScopedThread::Destroy(const Mutex &m) {
449   Event event(Event::MUTEX_DESTROY, &m);
450   impl_->send(&event);
451 }
452 
Lock(const Mutex & m)453 void ScopedThread::Lock(const Mutex &m) {
454   Event event(Event::MUTEX_LOCK, &m);
455   impl_->send(&event);
456 }
457 
TryLock(const Mutex & m)458 bool ScopedThread::TryLock(const Mutex &m) {
459   Event event(Event::MUTEX_TRYLOCK, &m);
460   impl_->send(&event);
461   return event.res;
462 }
463 
Unlock(const Mutex & m)464 void ScopedThread::Unlock(const Mutex &m) {
465   Event event(Event::MUTEX_UNLOCK, &m);
466   impl_->send(&event);
467 }
468 
ReadLock(const Mutex & m)469 void ScopedThread::ReadLock(const Mutex &m) {
470   Event event(Event::MUTEX_READLOCK, &m);
471   impl_->send(&event);
472 }
473 
TryReadLock(const Mutex & m)474 bool ScopedThread::TryReadLock(const Mutex &m) {
475   Event event(Event::MUTEX_TRYREADLOCK, &m);
476   impl_->send(&event);
477   return event.res;
478 }
479 
ReadUnlock(const Mutex & m)480 void ScopedThread::ReadUnlock(const Mutex &m) {
481   Event event(Event::MUTEX_READUNLOCK, &m);
482   impl_->send(&event);
483 }
484 
Memcpy(void * dst,const void * src,int size,bool expect_race)485 void ScopedThread::Memcpy(void *dst, const void *src, int size,
486                           bool expect_race) {
487   Event event(Event::MEMCPY, dst, (uptr)src, size);
488   if (expect_race)
489     event.ExpectReport(ReportTypeRace);
490   impl_->send(&event);
491 }
492 
Memset(void * dst,int val,int size,bool expect_race)493 void ScopedThread::Memset(void *dst, int val, int size,
494                           bool expect_race) {
495   Event event(Event::MEMSET, dst, val, size);
496   if (expect_race)
497     event.ExpectReport(ReportTypeRace);
498   impl_->send(&event);
499 }
500