//===-- tsan_fd.cc --------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_fd.h"
#include "tsan_rtl.h"
#include <sanitizer_common/sanitizer_atomic.h>

namespace __tsan {

const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;

struct FdSync {
  atomic_uint64_t rc;
};

struct FdDesc {
  FdSync *sync;
  int creation_tid;
  u32 creation_stack;
};

struct FdContext {
  atomic_uintptr_t tab[kTableSizeL1];
  // Addresses used for synchronization.
  FdSync globsync;
  FdSync filesync;
  FdSync socksync;
  u64 connectsync;
};

static FdContext fdctx;

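// Allocates a new FdSync with an initial reference count of 1.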
static FdSync *allocsync() {
  FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
  atomic_store(&s->rc, 1, memory_order_relaxed);
  return s;
}

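// Takes an additional reference, unless s is one of the immortal global
// FdSync objects (marked with rc == (u64)-1).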
static FdSync *ref(FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
  return s;
}

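// Drops a reference; when the count reaches zero, removes the associated
// SyncVar and frees the FdSync. The immortal globals are never freed.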
static void unref(ThreadState *thr, uptr pc, FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
      CHECK_NE(s, &fdctx.globsync);
      CHECK_NE(s, &fdctx.filesync);
      CHECK_NE(s, &fdctx.socksync);
      SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
      if (v)
        DestroyAndFree(v);
      internal_free(s);
    }
  }
}

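// Returns the descriptor slot for fd. The descriptor table is two-level:
// the second-level array of kTableSizeL2 entries is allocated lazily and
// published with a compare-and-swap.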
static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
  CHECK_LT(fd, kTableSize);
  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
  uptr l1 = atomic_load(pl1, memory_order_consume);
  if (l1 == 0) {
    uptr size = kTableSizeL2 * sizeof(FdDesc);
    void *p = internal_alloc(MBlockFD, size);
    internal_memset(p, 0, size);
    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
      l1 = (uptr)p;
    else
      internal_free(p);
  }
  return &((FdDesc*)l1)[fd % kTableSizeL2];  // NOLINT
}

// s must be already ref'ed.
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
  if (flags()->io_sync == 0) {
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  // To catch races between fd usage and open.
  MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
}

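// Marks the global FdSync objects as immortal (rc == -1), so that ref/unref
// never free them.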
void FdInit() {
  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
}

void FdOnFork(ThreadState *thr, uptr pc) {
  // On fork() we need to reset all fd's, because the child is going to
  // close all of them, and that would cause races between the previous
  // read/write and the close.
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
      FdDesc *d = &tab[l2];
      MemoryResetRange(thr, pc, (uptr)d, 8);
    }
  }
}

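// Checks whether addr points into the descriptor table; if it does, reports
// which fd the address belongs to and where that fd was created.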
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
      FdDesc *d = &tab[l2];
      *fd = l1 * kTableSizeL1 + l2;
      *tid = d->creation_tid;
      *stack = d->creation_stack;
      return true;
    }
  }
  return false;
}

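// Used on read-like operations on fd: reads the descriptor and acquires the
// fd's sync object, if any.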
void FdAcquire(ThreadState *thr, uptr pc, int fd) {
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  if (s)
    Acquire(thr, pc, (uptr)s);
}

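// Used on write-like operations on fd: releases the fd's sync object, if any,
// and reads the descriptor.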
void FdRelease(ThreadState *thr, uptr pc, int fd) {
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
  if (s)
    Release(thr, pc, (uptr)s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}

void FdAccess(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
  FdDesc *d = fddesc(thr, pc, fd);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}

void FdClose(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
  FdDesc *d = fddesc(thr, pc, fd);
  // To catch races between fd usage and close.
  MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
  // We need to clear the descriptor, because if we do not intercept some call
  // that creates an fd, we will hit false positives.
  MemoryResetRange(thr, pc, (uptr)d, 8);
  unref(thr, pc, d->sync);
  d->sync = 0;
  d->creation_tid = 0;
  d->creation_stack = 0;
}

void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, &fdctx.filesync);
}

void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
  // Ignore the case when the user dups a not yet connected socket.
  FdDesc *od = fddesc(thr, pc, oldfd);
  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
  FdClose(thr, pc, newfd);
  init(thr, pc, newfd, ref(od->sync));
}

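// Both ends of the pipe share a single FdSync, so operations on the read end
// synchronize with operations on the write end.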
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
  DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
  FdSync *s = allocsync();
  init(thr, pc, rfd, ref(s));
  init(thr, pc, wfd, ref(s));
  unref(thr, pc, s);
}

void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, allocsync());
}

void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, 0);
}

void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, 0);
}

void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, allocsync());
}

void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
  // It can be a UDP socket.
  init(thr, pc, fd, &fdctx.socksync);
}

void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
  // Synchronize connect->accept.
  Acquire(thr, pc, (uptr)&fdctx.connectsync);
  init(thr, pc, newfd, &fdctx.socksync);
}

void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
  // Synchronize connect->accept.
  Release(thr, pc, (uptr)&fdctx.connectsync);
}

void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
  init(thr, pc, fd, &fdctx.socksync);
}

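// File2addr/Dir2addr map a path to an address used for synchronization;
// currently all paths collapse to a single static variable.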
uptr File2addr(char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

uptr Dir2addr(char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

}  // namespace __tsan