//===-- tsan_rtl.cc -------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}

static Context *ctx;
Context *CTX() {
  return ctx;
}

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

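// Writes the current thread counts followed by a memory profile snapshot
// to the memory profile file descriptor.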
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
      i, n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
  WriteMemoryProfile(buf.data(), buf.size());
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}

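// Background thread: once per second flushes shadow memory, writes the memory
// profile and flushes the symbolizer cache, as requested by the corresponding
// flags.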
static void BackgroundThread(void *arg) {
  ScopedInRtl in_rtl;
  Context *ctx = CTX();
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, (int)internal_getpid());
    uptr openrv = OpenFile(filename.data(), true);
    if (internal_iserror(openrv)) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          &filename[0]);
    } else {
      mprof_fd = openrv;
    }
  }

  u64 last_flush = NanoTime();
  for (int i = 0; ; i++) {
    SleepForSeconds(1);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  internal_start_thread(&BackgroundThread, 0);

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
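// Returns a stack depot id for the thread's current shadow stack;
// if pc is non-zero, it is temporarily pushed as the topmost frame.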
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

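// Records the starting epoch, call stack and mutex set in the header
// of the thread's current trace part.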
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

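// Stores *s into the shadow slot and zeroes *s, so that the caller
// can see that the value has already been stored.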
ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

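// Records the two conflicting shadow values in the thread state
// and triggers race reporting.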
static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

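// Compares the access described by cur with the shadow values stored in the
// cell and updates them; the per-slot logic lives in
// tsan_update_shadow_word_inl.h.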
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, which allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // we did not find any races and have already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

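// Breaks a potentially unaligned access into smaller accesses that
// do not cross an 8-byte shadow cell boundary.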
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

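// Main entry point for instrumented memory accesses: adds the access
// to the trace and checks/updates the shadow memory for the address.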
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

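// Sets the shadow of the range [addr, addr+size) to val,
// clearing the remaining shadow slots of each cell.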
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve do not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least the first kPageSize/2 of shadow, continuing to a page
    // boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

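// Adjusts the counter of ignored memory accesses and updates the ignore bit
// in the thread's fast state accordingly.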
void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif