• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2   This file is part of ThreadSanitizer, a dynamic data race detector
3   based on Valgrind.
4 
5   Copyright (C) 2008-2010 Google Inc
6      opensource@google.com
7   Copyright (C) 2007-2008 OpenWorks LLP
8       info@open-works.co.uk
9 
10   This program is free software; you can redistribute it and/or
11   modify it under the terms of the GNU General Public License as
12   published by the Free Software Foundation; either version 2 of the
13   License, or (at your option) any later version.
14 
15   This program is distributed in the hope that it will be useful, but
16   WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18   General Public License for more details.
19 
20   You should have received a copy of the GNU General Public License
21   along with this program; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
23   02111-1307, USA.
24 
25   The GNU General Public License is contained in the file COPYING.
26 */
27 
28 // Author: Konstantin Serebryany.
29 // Parts of the code in this file are derived from Helgrind,
30 // a data race detector written by Julian Seward.
31 // Note that the rest of ThreadSanitizer code is not derived from Helgrind
32 // and is published under the BSD license.
33 
34 #include "ts_valgrind.h"
35 #include "valgrind.h"
36 #include "ts_valgrind_client_requests.h"
37 #include "thread_sanitizer.h"
38 #include "ts_trace_info.h"
39 #include "ts_race_verifier.h"
40 #include "common_util.h"
41 
42 #include "coregrind/pub_core_basics.h"
43 #include "coregrind/pub_core_machine.h"
44 #include "coregrind/pub_core_clreq.h"
45 #include "pub_tool_libcsetjmp.h"
46 #include "coregrind/pub_core_threadstate.h"
47 #include "pub_tool_libcproc.h"
48 
49 
50 //---------------------- C++ malloc support -------------- {{{1
// Route C++ operator new/delete and the C allocation entry points to
// Valgrind's core allocator (the tool is not linked against a real libc).
// The cost-center string passed to VG_(malloc) is taken from the top of
// g_malloc_stack (maintained by ScopedMallocCostCenter).
void *operator new (size_t size) {
  return VG_(malloc)((HChar*)g_malloc_stack.Top(), size);
}
void *operator new [](size_t size) {
  return VG_(malloc)((HChar*)g_malloc_stack.Top(), size);
}
void operator delete (void *p) {
  VG_(free)(p);
}
void operator delete [](void *p) {
  VG_(free)(p);
}

extern "C" void *malloc(size_t size) {
  return VG_(malloc)((HChar*)g_malloc_stack.Top(), size);
}

extern "C" void free(void *ptr) {
  VG_(free)(ptr);
}

extern "C" void* realloc(void *ptr, size_t size) {
  return VG_(realloc)((HChar*)g_malloc_stack.Top(), ptr, size);
}
75 
76 
77 //---------------------- Utils ------------------- {{{1
78 
// Minimal libc replacements forwarding to Valgrind's core facilities.
extern "C" int puts(const char *s) {
  Printf("%s", s);
  return 1;  // non-negative return signals success, as for libc puts
}

extern "C" void exit(int e) { VG_(exit)(e); }

#ifdef VGO_darwin
// Darwin needs abort(); CHECK(0) reports the failure and terminates.
extern "C" void abort() { CHECK(0); }
#endif
89 
90 
91 // TODO: make this rtn public
92 extern "C" {
93   Bool VG_(get_fnname_no_cxx_demangle) ( Addr a, Char* buf, Int nbuf );
94 }
95 
96 
97 const int kBuffSize = 1024 * 10 - 1;
98 // not thread-safe.
99 static char g_buff1[kBuffSize+1];
100 static char g_buff2[kBuffSize+1];
101 
PcToRtnName(uintptr_t pc,bool demangle)102 string PcToRtnName(uintptr_t pc, bool demangle) {
103   if (demangle) {
104     if(VG_(get_fnname)(pc, (Char*)g_buff1, kBuffSize)) {
105       return g_buff1;
106     }
107   } else {
108     if(VG_(get_fnname_no_cxx_demangle)(pc, (Char*)g_buff1, kBuffSize)) {
109       return g_buff1;
110     }
111   }
112   return "(no symbols)";
113 }
114 
PcToStrings(uintptr_t pc,bool demangle,string * img_name,string * rtn_name,string * file_name,int * line_no)115 void PcToStrings(uintptr_t pc, bool demangle,
116                 string *img_name, string *rtn_name,
117                 string *file_name, int *line_no) {
118   const int kBuffSize = 1024 * 10 - 1;
119   Bool has_dirname = False;
120 
121   if (VG_(get_filename_linenum)
122       (pc, (Char*)g_buff1, kBuffSize, (Char*)g_buff2, kBuffSize,
123        &has_dirname, (UInt*)line_no) &&
124       has_dirname) {
125     *file_name = string(g_buff2) + "/" + g_buff1;
126   } else {
127     VG_(get_linenum)(pc, (UInt *)line_no);
128     if (VG_(get_filename)(pc, (Char*)g_buff1, kBuffSize)) {
129       *file_name = g_buff1;
130     }
131   }
132   *file_name = ConvertToPlatformIndependentPath(*file_name);
133 
134   *rtn_name = PcToRtnName(pc, demangle);
135 
136   if (VG_(get_objname)(pc, (Char*)g_buff1, kBuffSize)) {
137     *img_name = g_buff1;
138   }
139 }
140 
141 
142 
// Identity conversion: Valgrind already demangles symbol names for us,
// so there is nothing left to do beyond building a string.
string Demangle(const char *str) {
  return string(str);
}
146 
// libc replacement: forward strlen to Valgrind's implementation.
extern "C"
size_t strlen(const char *s) {
  return VG_(strlen)((const Char*)s);
}
151 
// Return the ThreadId of the thread currently running client code.
// Reads VG_(running_tid) directly to skip a function call on a hot path.
static inline ThreadId GetVgTid() {
  extern ThreadId VG_(running_tid); // HACK: avoid calling get_running_tid()
  ThreadId res = VG_(running_tid);
  //DCHECK(res == VG_(get_running_tid)());
  return res;
}
158 
// Return the guest program counter for |vg_tid|, read straight from the
// VEX guest state (cheaper than VG_(get_IP); debug-checked equal).
static inline uintptr_t GetVgPc(ThreadId vg_tid) {
  Addr pc = VG_(threads)[vg_tid].arch.vex.VG_INSTR_PTR;
  DCHECK(pc == VG_(get_IP)(vg_tid));
  return pc;
  //return (uintptr_t)VG_(get_IP)(vg_tid);
}

// Return the guest stack pointer for |vg_tid| from the VEX guest state.
static inline uintptr_t GetVgSp(ThreadId vg_tid) {
  Addr sp = VG_(threads)[vg_tid].arch.vex.VG_STACK_PTR;
  DCHECK(sp == VG_(get_SP)(vg_tid));
  return sp;
}
171 
#ifdef VGP_arm_linux
// Return the guest link register (R14) for |vg_tid|; ARM-only, used for
// shadow stack maintenance in evh__delete_frame.
static inline uintptr_t GetVgLr(ThreadId vg_tid) {
  return (uintptr_t)VG_(threads)[vg_tid].arch.vex.guest_R14;
}
#endif
177 
// Last PC recorded for the current thread (single-threaded under
// Valgrind's scheduler, so one global suffices).
static uintptr_t g_current_pc;

uintptr_t GetPcOfCurrentThread() {
  return g_current_pc;
}
183 
GetThreadStack(int tid,uintptr_t * min_addr,uintptr_t * max_addr)184 void GetThreadStack(int tid, uintptr_t *min_addr, uintptr_t *max_addr) {
185   // tid is not used because we call it from the current thread anyway.
186   uintptr_t stack_max  = VG_(thread_get_stack_max)(GetVgTid());
187   uintptr_t stack_size = VG_(thread_get_stack_size)(GetVgTid());
188   uintptr_t stack_min  = stack_max - stack_size;
189   *min_addr = stack_min;
190   *max_addr = stack_max;
191 }
192 
// One frame of the shadow call stack: PC just after the call instruction
// and the SP before the call (used to detect frame exit by SP movement).
struct CallStackRecord {
  Addr pc;
  Addr sp;
#ifdef VGP_arm_linux
  // We need to store LR in order to keep the shadow stack consistent.
  Addr lr;
#endif
};
201 
// Upper bound on memory operations recorded per trace; sized for tleb below.
const size_t kMaxMopsPerTrace = 2048;

// Per-Valgrind-thread state. One instance per slot of VG_N_THREADS;
// slots are reused (see Clear()) when a thread id is recycled.
struct ValgrindThread {
  int32_t zero_based_uniq_tid;   // unique TS tid; -1 when the slot is free
  TSanThread *ts_thread;         // engine-side thread object
  uint32_t literace_sampling;    // cached copy of G_flags->literace_sampling
  vector<CallStackRecord> call_stack;  // shadow call stack

  int ignore_accesses;           // >0: memory accesses are ignored
  int ignore_sync;               // >0: sync events are ignored (nestable)
  int in_signal_handler;         // >0: currently inside a signal handler

  // thread-local event buffer (tleb).
  uintptr_t tleb[kMaxMopsPerTrace];
  TraceInfo *trace_info;         // trace currently being recorded, or NULL

  // PC (as in trace_info->pc()) of the trace currently being verified.
  // 0 if outside of the verification sleep loop.
  // -1 in the last iteration of the loop.
  uintptr_t verifier_current_pc;

  // End time of the current verification loop.
  unsigned verifier_wakeup_time_ms;

  ValgrindThread() {
    Clear();
  }

  // Reset the slot to its pristine state; called on construction and
  // whenever a Valgrind thread id is (re)used for a new thread.
  void Clear() {
    ts_thread = NULL;
    zero_based_uniq_tid = -1;
    literace_sampling = G_flags->literace_sampling;  // cache it.
    ignore_accesses = 0;
    ignore_sync = 0;
    in_signal_handler = 0;
    call_stack.clear();
    trace_info = NULL;
    verifier_current_pc = 0;
    verifier_wakeup_time_ms = 0;
  }
};
243 
244 // If true, ignore all accesses in all threads.
245 extern bool global_ignore;
246 
247 // Array of VG_N_THREADS
248 static ValgrindThread *g_valgrind_threads = 0;
249 static map<uintptr_t, int> *g_ptid_to_ts_tid;
250 
251 // maintains a uniq thread id (first thread will have id=0)
252 static int32_t g_uniq_thread_id_counter = 0;
253 
// Map a Valgrind thread id to the ThreadSanitizer tid.
// The slot must belong to a live thread (uniq tid >= 0).
static int32_t VgTidToTsTid(ThreadId vg_tid) {
  DCHECK(vg_tid < VG_N_THREADS);
  DCHECK(vg_tid >= 1);
  DCHECK(g_valgrind_threads);
  DCHECK(g_valgrind_threads[vg_tid].zero_based_uniq_tid >= 0);
  return g_valgrind_threads[vg_tid].zero_based_uniq_tid;
}
261 
// Raw tool options collected by ts_process_cmd_line_option, parsed later.
static vector<string> *g_command_line_options = 0;
// Lazily allocate G_flags and the saved-options vector; safe to call
// multiple times.
static void InitCommandLineOptions() {
  if(G_flags == NULL) {
    G_flags = new FLAGS;
  }
  if (g_command_line_options == NULL) {
    g_command_line_options = new vector<string>;
  }
}
271 
// Valgrind callback for each tool option: just record it; parsing
// happens once all options are available (ts_post_clo_init).
Bool ts_process_cmd_line_option (Char* arg) {
  InitCommandLineOptions();
  g_command_line_options->push_back((char*)arg);
  return True;
}

// Valgrind callback: print tool usage (--help).
void ts_print_usage (void) {
  InitCommandLineOptions();
  ThreadSanitizerParseFlags(g_command_line_options);

  ThreadSanitizerPrintUsage();
}

// Valgrind callback: print debugging usage (--help-debug).
void ts_print_debug_usage(void) {
  ThreadSanitizerPrintUsage();
}
288 
289 extern int VG_(clo_error_exitcode);
290 
// Tool initialization after command-line processing: parse TS flags,
// import relevant core Valgrind options (backtrace depth, error exit
// code, suppressions, --fullpath-after), print the startup banner, and
// allocate the per-thread bookkeeping structures.
void ts_post_clo_init(void) {
  ScopedMallocCostCenter malloc_cc(__FUNCTION__);
  InitCommandLineOptions();
  ThreadSanitizerParseFlags(g_command_line_options);

  // we get num-callers from valgrind flags.
  G_flags->num_callers = VG_(clo_backtrace_size);
  if (!G_flags->error_exitcode)
    G_flags->error_exitcode = VG_(clo_error_exitcode);

  // These core options have no public accessors; declare them locally.
  extern Int   VG_(clo_n_suppressions);
  extern Int   VG_(clo_gen_suppressions);
  extern Char* VG_(clo_suppressions)[];
  extern Int   VG_(clo_n_fullpath_after);
  extern Char* VG_(clo_fullpath_after)[];
  // get the suppressions from Valgrind
  for (int i = 0; i < VG_(clo_n_suppressions); i++) {
    G_flags->suppressions.push_back((char*)VG_(clo_suppressions)[i]);
  }
  // get the --fullpath-after prefixes from Valgrind and treat them as
  // --file-prefix-to-cut arguments.
  for (int i = 0; i < VG_(clo_n_fullpath_after); i++) {
    G_flags->file_prefix_to_cut.push_back((char*)VG_(clo_fullpath_after)[i]);
  }
  G_flags->generate_suppressions |= VG_(clo_gen_suppressions) >= 1;

  if (G_flags->html) {
    Report("<pre>\n"
           "<br id=race0>"
           "<a href=\"#race1\">Go to first race report</a>\n");
  }
  Report("ThreadSanitizerValgrind r%s: %s\n",
         TS_VERSION,
         G_flags->pure_happens_before ? "hybrid=no" : "hybrid=yes");
  if (DEBUG_MODE) {
    Report("INFO: Debug build\n");
  }
  if (G_flags->max_mem_in_mb) {
    Report("INFO: ThreadSanitizer memory limit: %dMB\n",
           (int)G_flags->max_mem_in_mb);
  }
  ThreadSanitizerInit();

  // One slot per possible Valgrind thread id.
  g_valgrind_threads = new ValgrindThread[VG_N_THREADS];
  g_ptid_to_ts_tid = new map<uintptr_t, int>;

  if (g_race_verifier_active) {
    RaceVerifierInit(G_flags->race_verifier, G_flags->race_verifier_extra);
    global_ignore = true;  // verifier mode suppresses normal analysis
  }
}
342 
343 // Remember, valgrind is essentially single-threaded.
344 // Each time we switch to another thread, we set the global g_cur_tleb
345 // to the tleb of the current thread. This allows to load the tleb in one
346 // instruction.
// Points to the tleb of the thread currently running client code, so the
// instrumented code can reach its event buffer with a single load.
static uintptr_t *g_cur_tleb;
// Thread-switch hook: retarget g_cur_tleb at the incoming thread's tleb.
static void OnStartClientCode(ThreadId vg_tid, ULong nDisp) {
  ValgrindThread *thr = &g_valgrind_threads[vg_tid];
  g_cur_tleb = thr->tleb;
}
352 
// Hand the recorded memory operations (thr->tleb) of the current trace
// to the analysis engine. With |keep_trace_info| the trace stays current
// (used when flushing mid-trace from UpdateCallStack); without it, or
// when the trace is skipped, trace_info is cleared.
INLINE void FlushMops(ValgrindThread *thr, bool keep_trace_info = false) {
  DCHECK(!g_race_verifier_active || global_ignore);
  TraceInfo *t = thr->trace_info;
  if (!t) return;
  if (!keep_trace_info) {
    thr->trace_info = NULL;
  }

  // Drop the whole trace if accesses are being ignored or LiteRace
  // sampling decides to skip it.
  if (global_ignore || thr->ignore_accesses ||
       (thr->literace_sampling &&
        t->LiteRaceSkipTraceRealTid(thr->zero_based_uniq_tid, thr->literace_sampling))) {
    thr->trace_info = NULL;
    return;
  }

  size_t n = t->n_mops();
  DCHECK(n > 0);
  uintptr_t *tleb = thr->tleb;
  DCHECK(thr->ts_thread);
  ThreadSanitizerHandleTrace(thr->ts_thread, t, tleb);
}
374 
ShowCallStack(ValgrindThread * thr)375 static void ShowCallStack(ValgrindThread *thr) {
376   size_t n = thr->call_stack.size();
377   Printf("        ");
378   for (size_t i = n - 1; i > n - 10 && i >= 0; i--) {
379     Printf("{pc=%p sp=%p}, ", thr->call_stack[i].pc, thr->call_stack[i].sp);
380   }
381   Printf("\n");
382 }
383 
// Pop shadow-stack frames whose recorded SP is at or below |sp| (their
// stack space has been released), emitting a routine-exit event per
// frame. Pending mops are flushed first so event ordering is preserved.
static INLINE void UpdateCallStack(ValgrindThread *thr, uintptr_t sp) {
  DCHECK(!g_race_verifier_active);
  if (thr->trace_info) FlushMops(thr, true /* keep_trace_info */);
  vector<CallStackRecord> &call_stack = thr->call_stack;
  while (!call_stack.empty()) {
    CallStackRecord &record = call_stack.back();
    Addr cur_top = record.sp;
    if (sp < cur_top) break;  // topmost frame is still live
    call_stack.pop_back();
    int32_t ts_tid = thr->zero_based_uniq_tid;
    ThreadSanitizerHandleRtnExit(ts_tid);
    if (debug_rtn) {
      Printf("T%d: [%ld]<< pc=%p sp=%p cur_sp=%p %s\n",
             ts_tid, thr->call_stack.size(), record.pc,
             record.sp, sp,
             PcToRtnNameAndFilePos(record.pc).c_str());
      ShowCallStack(thr);
    }
  }
}
404 
VG_REGPARM(1)
// Instrumentation hook at the start of an "interesting" superblock:
// flush the previous trace, sync the shadow stack with the current SP,
// then make |trace_info| current with a zeroed tleb.
static void OnTrace(TraceInfo *trace_info) {
  DCHECK(!g_race_verifier_active);
  //trace_info->counter()++;
  if (global_ignore) return;
  ThreadId vg_tid = GetVgTid();
  ValgrindThread *thr = &g_valgrind_threads[vg_tid];

  // First, flush the old trace_info.
  if (thr->trace_info) {
    FlushMops(thr);
  }

  UpdateCallStack(thr, GetVgSp(vg_tid));

  // Start the new trace, zero the contents of tleb.
  size_t n = trace_info->n_mops();
  uintptr_t *tleb = thr->tleb;
  for (size_t i = 0; i < n; i++)
    tleb[i] = 0;
  thr->trace_info = trace_info;
  DCHECK(thr->trace_info);
  DCHECK(thr->trace_info->n_mops() <= kMaxMopsPerTrace);
}
429 
// Build one event and feed it to the analysis engine.
// In debug builds --dry-run >= 1 short-circuits event processing.
static inline void Put(EventType type, int32_t tid, uintptr_t pc,
                       uintptr_t a, uintptr_t info) {
  if (DEBUG_MODE && G_flags->dry_run >= 1) return;
  Event event(type, tid, pc, a, info);
  ThreadSanitizerHandleOneEvent(&event);
}
436 
// Handle a routine call observed by the instrumentation: push a frame on
// the shadow stack and notify the engine. The arguments describe the
// machine state just after the call instruction executed.
static void rtn_call(Addr sp_post_call_insn, Addr pc_post_call_insn,
                     IGNORE_BELOW_RTN ignore_below) {
  DCHECK(!g_race_verifier_active);
  if (global_ignore) return;
  ThreadId vg_tid = GetVgTid();
  ValgrindThread *thr = &g_valgrind_threads[vg_tid];
  int ts_tid = thr->zero_based_uniq_tid;
  CallStackRecord record;
  record.pc = pc_post_call_insn;
  record.sp = sp_post_call_insn + 4;  // sp before call.
  // NOTE(review): the fixed "+4" looks 32-bit specific; presumably only
  // the relative ordering of saved SPs matters (see UpdateCallStack) —
  // confirm for 64-bit targets.
  UpdateCallStack(thr, record.sp);
#ifdef VGP_arm_linux
  record.lr = GetVgLr(vg_tid);
#endif
  thr->call_stack.push_back(record);
  // If the shadow stack grows too high this usually means it is not cleaned
  // properly. Or this may be a very deep recursion.
  DCHECK(thr->call_stack.size() < 10000);
  uintptr_t call_pc = GetVgPc(vg_tid);
  if (thr->trace_info) FlushMops(thr);
  ThreadSanitizerHandleRtnCall(ts_tid, call_pc, record.pc,
                               ignore_below);

  if (debug_rtn) {
    Printf("T%d: [%ld]>> pc=%p sp=%p %s\n",
           ts_tid, thr->call_stack.size(), (void*)record.pc,
           (void*)record.sp,
           PcToRtnNameAndFilePos(record.pc).c_str());
    ShowCallStack(thr);
  }
}
468 
// Instrumentation entry points for routine calls; the three variants
// differ only in the ignore-below policy forwarded to rtn_call.
VG_REGPARM(2) void evh__rtn_call_ignore_unknown ( Addr sp, Addr pc) {
  rtn_call(sp, pc, IGNORE_BELOW_RTN_UNKNOWN);
}
VG_REGPARM(2) void evh__rtn_call_ignore_yes ( Addr sp, Addr pc) {
  rtn_call(sp, pc, IGNORE_BELOW_RTN_YES);
}
VG_REGPARM(2) void evh__rtn_call_ignore_no ( Addr sp, Addr pc) {
  rtn_call(sp, pc, IGNORE_BELOW_RTN_NO);
}
478 
#ifdef VGP_arm_linux
// Handle shadow stack frame deletion on ARM.
// Instrumented code calls this function for each non-call jump out of
// a superblock. If the |sp_post_call_insn| (the jump target address) is equal
// to a link register value of one or more frames on top of the shadow stack,
// those frames are popped out.
// TODO(glider): there may be problems with optimized recursive functions that
// don't change PC, SP and LR.
VG_REGPARM(2)
void evh__delete_frame ( Addr sp_post_call_insn,
                         Addr pc_post_call_insn) {
  DCHECK(!g_race_verifier_active);
  ThreadId vg_tid = GetVgTid();
  ValgrindThread *thr = &g_valgrind_threads[vg_tid];
  if (thr->trace_info) FlushMops(thr);
  vector<CallStackRecord> &call_stack = thr->call_stack;
  int32_t ts_tid = VgTidToTsTid(vg_tid);
  // Pop every consecutive top frame whose saved LR matches the jump target.
  while (!call_stack.empty()) {
    CallStackRecord &record = call_stack.back();
    if (record.lr != pc_post_call_insn) break;
    call_stack.pop_back();
    ThreadSanitizerHandleRtnExit(ts_tid);
  }
}
#endif
504 
// Tool shutdown hook: finalize the engine (this prints the reports) and,
// if races were found, override the process exit code.
void ts_fini(Int exitcode) {
  ThreadSanitizerFini();
  if (g_race_verifier_active) {
    RaceVerifierFini();
  }
  if (G_flags->error_exitcode && GetNumberOfFoundErrors() > 0) {
    exit(G_flags->error_exitcode);
  }
}
514 
515 
// Low-level thread creation hook: reset the child's slot, assign a fresh
// unique TS tid, emit THR_START and bind the engine-side thread object.
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child ) {
  tl_assert(parent != child);
  ValgrindThread *thr = &g_valgrind_threads[child];
  //  Printf("thread_create: %d->%d\n", parent, child);
  // A slot still marked live means the previous thread never reported exit.
  if (thr->zero_based_uniq_tid != -1) {
    Printf("ThreadSanitizer WARNING: reusing TID %d w/o exiting thread\n",
           child);
  }
  thr->Clear();
  thr->zero_based_uniq_tid = g_uniq_thread_id_counter++;
  // Printf("VG: T%d: VG_THR_START: parent=%d\n", VgTidToTsTid(child), VgTidToTsTid(parent));
  Put(THR_START, VgTidToTsTid(child), 0, 0,
      parent > 0 ? VgTidToTsTid(parent) : 0);
  thr->ts_thread = ThreadSanitizerGetThreadByTid(thr->zero_based_uniq_tid);
  CHECK(thr->ts_thread);
}
532 
// A workqueue task is about to run: emit a WAIT on the work item so it
// is happens-before-paired with the poster's signal.
void evh__pre_workq_task_start(ThreadId vg_tid, Addr workitem) {
  uintptr_t pc = GetVgPc(vg_tid);
  int32_t ts_tid = VgTidToTsTid(vg_tid);
  ValgrindThread *thr = &g_valgrind_threads[vg_tid];
  FlushMops(thr);
  Put(WAIT, ts_tid, pc, workitem, 0);
}

// First client instruction of a thread: emit THR_FIRST_INSN.
void evh__pre_thread_first_insn(const ThreadId vg_tid) {
  ValgrindThread *thr = &g_valgrind_threads[vg_tid];
  FlushMops(thr);
  Put(THR_FIRST_INSN, VgTidToTsTid(vg_tid), GetVgPc(vg_tid), 0, 0);
}
546 
547 
evh__pre_thread_ll_exit(ThreadId quit_tid)548 void evh__pre_thread_ll_exit ( ThreadId quit_tid ) {
549 //  Printf("thread_exit: %d\n", quit_tid);
550 //  Printf("T%d quiting thread; stack size=%ld\n",
551 //         VgTidToTsTid(quit_tid),
552 //         (int)g_valgrind_threads[quit_tid].call_stack.size());
553   ValgrindThread *thr = &g_valgrind_threads[quit_tid];
554   FlushMops(thr);
555   Put(THR_END, VgTidToTsTid(quit_tid), 0, 0, 0);
556   g_valgrind_threads[quit_tid].zero_based_uniq_tid = -1;
557 }
558 
559   extern "C" void VG_(show_all_errors)();
560 
561 // Whether we are currently ignoring sync events for the given thread at the
562 // given address.
// Whether we are currently ignoring sync events for the given thread at
// the given address.
static inline Bool ignoring_sync(ThreadId vg_tid, uintptr_t addr) {
  // We ignore locking events if ignore_sync != 0 and if we are not
  // inside a signal handler.
  return (g_valgrind_threads[vg_tid].ignore_sync &&
          !g_valgrind_threads[vg_tid].in_signal_handler) ||
      ThreadSanitizerIgnoreForNacl(addr);
}
570 
// Central dispatcher for client requests (annotations and dynamic
// annotations from the program under test). Returns True if the request
// was ours; False lets Valgrind route it elsewhere.
Bool ts_handle_client_request(ThreadId vg_tid, UWord* args, UWord* ret) {
  if (args[0] == VG_USERREQ__NACL_MEM_START) {
    // This will get truncated on x86-32, but we don't support it with NaCl
    // anyway.
    const uintptr_t kFourGig = (uintptr_t)0x100000000ULL;
    uintptr_t mem_start = args[1];
    uintptr_t mem_end = mem_start + kFourGig;
    ThreadSanitizerNaclUntrustedRegion(mem_start, mem_end);
    return True;
  }
  if (!VG_IS_TOOL_USERREQ('T', 'S', args[0]))
    return False;
  int32_t ts_tid = VgTidToTsTid(vg_tid);
  // Ignore almost everything in race verifier mode.
  if (g_race_verifier_active) {
    if (args[0] == TSREQ_EXPECT_RACE) {
      Put(EXPECT_RACE, ts_tid, /*descr=*/args[2],
          /*p=*/args[1], 0);
    }
    *ret = 0;
    return True;
  }
  // Normal mode: flush pending mops and sync the shadow stack before
  // translating the request into engine events.
  ValgrindThread *thr = &g_valgrind_threads[vg_tid];
  if (thr->trace_info) FlushMops(thr);
  UpdateCallStack(thr, GetVgSp(vg_tid));
  *ret = 0;
  uintptr_t pc = GetVgPc(vg_tid);
  switch (args[0]) {
    case TSREQ_SET_MY_PTHREAD_T:
      (*g_ptid_to_ts_tid)[args[1]] = ts_tid;
      break;
    case TSREQ_THR_STACK_TOP:
      Put(THR_STACK_TOP, ts_tid, pc, args[1], 0);
      break;
    case TSREQ_PTHREAD_JOIN_POST:
      Put(THR_JOIN_AFTER, ts_tid, pc, (*g_ptid_to_ts_tid)[args[1]], 0);
      break;
    case TSREQ_CLEAN_MEMORY:
      // Modeled as a fresh MALLOC over the range, which clears its state.
      Put(MALLOC, ts_tid, pc, /*ptr=*/args[1], /*size=*/args[2]);
      break;
    case TSREQ_MAIN_IN:
      g_has_entered_main = true;
      // Report("INFO: Entred main(); argc=%d\n", (int)args[1]);
      break;
    case TSREQ_MAIN_OUT:
      g_has_exited_main = true;
      if (G_flags->exit_after_main) {
        Report("INFO: Exited main(); ret=%d\n", (int)args[1]);
        VG_(show_all_errors)();
        ThreadSanitizerFini();
        if (g_race_verifier_active) {
          RaceVerifierFini();
        }
        exit((int)args[1]);
      }
      break;
    case TSREQ_MALLOC:
      // Printf("Malloc: %p %ld\n", args[1], args[2]);
      Put(MALLOC, ts_tid, pc, /*ptr=*/args[1], /*size=*/args[2]);
      break;
    case TSREQ_FREE:
      // Printf("Free: %p\n", args[1]);
      Put(FREE, ts_tid, pc, /*ptr=*/args[1], 0);
      break;
    case TSREQ_MMAP:
      Put(MMAP, ts_tid, pc, /*ptr=*/args[1], /*size=*/args[2]);
      break;
    case TSREQ_MUNMAP:
      Put(MUNMAP, ts_tid, pc, /*ptr=*/args[1], /*size=*/args[2]);
      break;
    case TSREQ_BENIGN_RACE:
      Put(BENIGN_RACE, ts_tid, /*descr=*/args[3],
          /*p=*/args[1], /*size=*/args[2]);
      break;
    case TSREQ_EXPECT_RACE:
      Put(EXPECT_RACE, ts_tid, /*descr=*/args[2], /*p=*/args[1], 0);
      break;
    case TSREQ_FLUSH_EXPECTED_RACES:
      Put(FLUSH_EXPECTED_RACES, ts_tid, 0, 0, 0);
      break;
    case TSREQ_PCQ_CREATE:
      Put(PCQ_CREATE, ts_tid, pc, /*pcq=*/args[1], 0);
      break;
    case TSREQ_PCQ_DESTROY:
      Put(PCQ_DESTROY, ts_tid, pc, /*pcq=*/args[1], 0);
      break;
    case TSREQ_PCQ_PUT:
      Put(PCQ_PUT, ts_tid, pc, /*pcq=*/args[1], 0);
      break;
    case TSREQ_PCQ_GET:
      Put(PCQ_GET, ts_tid, pc, /*pcq=*/args[1], 0);
      break;
    case TSREQ_TRACE_MEM:
      Put(TRACE_MEM, ts_tid, pc, /*mem=*/args[1], 0);
      break;
    case TSREQ_MUTEX_IS_USED_AS_CONDVAR:
      Put(HB_LOCK, ts_tid, pc, /*lock=*/args[1], 0);
      break;
    case TSREQ_MUTEX_IS_NOT_PHB:
      Put(NON_HB_LOCK, ts_tid, pc, /*lock=*/args[1], 0);
      break;
    case TSREQ_GLOBAL_IGNORE_ON:
      Report("INFO: GLOBAL IGNORE ON\n");
      global_ignore = true;
      break;
    case TSREQ_GLOBAL_IGNORE_OFF:
      Report("INFO: GLOBAL IGNORE OFF\n");
      global_ignore = false;
      break;
    case TSREQ_IGNORE_READS_BEGIN:
      Put(IGNORE_READS_BEG, ts_tid, pc, 0, 0);
      break;
    case TSREQ_IGNORE_READS_END:
      Put(IGNORE_READS_END, ts_tid, pc, 0, 0);
      break;
    case TSREQ_IGNORE_WRITES_BEGIN:
      Put(IGNORE_WRITES_BEG, ts_tid, pc, 0, 0);
      break;
    case TSREQ_IGNORE_WRITES_END:
      Put(IGNORE_WRITES_END, ts_tid, pc, 0, 0);
      break;
    case TSREQ_SET_THREAD_NAME:
      Put(SET_THREAD_NAME, ts_tid, pc, /*name=*/args[1], 0);
      break;
    case TSREQ_SET_STACKTOP_STACKSIZE:
      Put(THR_STACK_TOP, ts_tid, pc, /*addr=*/args[1], /*size=*/args[2]);
      break;
    // The ignore counters below nest; the CHECKs catch unbalanced END calls.
    case TSREQ_IGNORE_ALL_ACCESSES_BEGIN:
      g_valgrind_threads[vg_tid].ignore_accesses++;
      break;
    case TSREQ_IGNORE_ALL_ACCESSES_END:
      g_valgrind_threads[vg_tid].ignore_accesses--;
      CHECK(g_valgrind_threads[vg_tid].ignore_accesses >= 0);
      break;
    case TSREQ_IGNORE_ALL_SYNC_BEGIN:
      g_valgrind_threads[vg_tid].ignore_sync++;
      break;
    case TSREQ_IGNORE_ALL_SYNC_END:
      g_valgrind_threads[vg_tid].ignore_sync--;
      CHECK(g_valgrind_threads[vg_tid].ignore_sync >= 0);
      break;
    case TSREQ_PUBLISH_MEMORY_RANGE:
      Put(PUBLISH_RANGE, ts_tid, pc, /*mem=*/args[1], /*size=*/args[2]);
      break;
    case TSREQ_UNPUBLISH_MEMORY_RANGE:
      Put(UNPUBLISH_RANGE, ts_tid, pc, /*mem=*/args[1], /*size=*/args[2]);
      break;
    case TSREQ_PRINT_MEMORY_USAGE:
    case TSREQ_PRINT_STATS:
    case TSREQ_RESET_STATS:
    case TSREQ_PTH_API_ERROR:
      break;
    case TSREQ_PTHREAD_RWLOCK_CREATE_POST:
      if (ignoring_sync(vg_tid, args[1]))
        break;
      Put(LOCK_CREATE, ts_tid, pc, /*lock=*/args[1], 0);
      break;
    case TSREQ_PTHREAD_RWLOCK_DESTROY_PRE:
      if (ignoring_sync(vg_tid, args[1]))
        break;
      Put(LOCK_DESTROY, ts_tid, pc, /*lock=*/args[1], 0);
      break;
    case TSREQ_PTHREAD_RWLOCK_LOCK_POST:
      if (ignoring_sync(vg_tid, args[1]))
        break;
      // args[2] distinguishes write (non-zero) from read acquisition.
      Put(args[2] ? WRITER_LOCK : READER_LOCK, ts_tid, pc, /*lock=*/args[1], 0);
      break;
    case TSREQ_PTHREAD_RWLOCK_UNLOCK_PRE:
      if (ignoring_sync(vg_tid, args[1]))
        break;
      Put(UNLOCK, ts_tid, pc, /*lock=*/args[1], 0);
      break;
    case TSREQ_PTHREAD_SPIN_LOCK_INIT_OR_UNLOCK:
      Put(UNLOCK_OR_INIT, ts_tid, pc, /*lock=*/args[1], 0);
      break;
    case TSREQ_POSIX_SEM_INIT_POST:
    case TSREQ_POSIX_SEM_DESTROY_PRE:
      break;
    case TSREQ_SIGNAL:
      if (ignoring_sync(vg_tid, args[1]))
        break;
      Put(SIGNAL, ts_tid, pc, args[1], 0);
      break;
    case TSREQ_WAIT:
      if (ignoring_sync(vg_tid, args[1]))
        break;
      Put(WAIT, ts_tid, pc, args[1], 0);
      break;
    case TSREQ_CYCLIC_BARRIER_INIT:
      Put(CYCLIC_BARRIER_INIT, ts_tid, pc, args[1], args[2]);
      break;
    case TSREQ_CYCLIC_BARRIER_WAIT_BEFORE:
      Put(CYCLIC_BARRIER_WAIT_BEFORE, ts_tid, pc, args[1], 0);
      break;
    case TSREQ_CYCLIC_BARRIER_WAIT_AFTER:
      Put(CYCLIC_BARRIER_WAIT_AFTER, ts_tid, pc, args[1], 0);
      break;
    case TSREQ_GET_MY_SEGMENT:
      break;
    case TSREQ_GET_THREAD_ID:
      *ret = ts_tid;
      break;
    case TSREQ_GET_VG_THREAD_ID:
      *ret = vg_tid;
      break;
    case TSREQ_GET_SEGMENT_ID:
      break;
    case TSREQ_THREAD_SANITIZER_QUERY:
      *ret = (UWord)ThreadSanitizerQuery((const char *)args[1]);
      break;
    case TSREQ_FLUSH_STATE:
      Put(FLUSH_STATE, ts_tid, pc, 0, 0);
      break;
    default: CHECK(0);
  }
  return True;
}
788 
// Entering a signal handler; while inside, sync events are not ignored
// (see ignoring_sync).
static void SignalIn(ThreadId vg_tid, Int sigNo, Bool alt_stack) {
  g_valgrind_threads[vg_tid].in_signal_handler++;
  // Nested signal handlers are not expected (debug-checked only).
  DCHECK(g_valgrind_threads[vg_tid].in_signal_handler == 1);
//  int32_t ts_tid = VgTidToTsTid(vg_tid);
//  Printf("T%d %s\n", ts_tid, __FUNCTION__);
}

// Leaving a signal handler; the counter must stay balanced.
static void SignalOut(ThreadId vg_tid, Int sigNo) {
  g_valgrind_threads[vg_tid].in_signal_handler--;
  CHECK(g_valgrind_threads[vg_tid].in_signal_handler >= 0);
  DCHECK(g_valgrind_threads[vg_tid].in_signal_handler == 0);
//  int32_t ts_tid = VgTidToTsTid(vg_tid);
//  Printf("T%d %s\n", ts_tid, __FUNCTION__);
}
803 
804 
805 // ---------------------------- RaceVerifier    ---------------------------{{{1
806 
807 /**
808  * In race verifier mode _every_ IRSB is instrumented with a sleep loop at the
809  * beginning (but, of course, in most cases it is not executed).
810  * Its code logically looks like
811  *  irsb_start:
812  *   bool need_sleep = OnTraceVerify1();
813  *   if (need_sleep) {
814  *     sched_yield();
815  *     goto irsb_start;
816  *   }
817  *   OnTraceVerify2(trace_info);
818  *
819  * This loop verifies mops from the _previous_ trace_info and sets up the new
820  * trace info in OnTraceVerify2. Only IRSBs with "interesting" mops have
821  * non-zero trace_info.
822  */
823 
824 /**
825  * Race verification loop.
826  * On the first pass (for a trace_info), if there are mops to be verified,
827  * register them with RaceVerifier and calculate the wake up time.
828  * On the following passes, check the wake up time against the clock.
829  * The loop state is kept in ValgrindThread.
830  * Returns true if need to sleep more, false if the loop must be ended.
831  */
VG_REGPARM(1)
// First half of the race-verification sleep loop (see the block comment
// above). Returns non-zero to keep sleeping, zero to fall through to
// OnTraceVerify2.
static uint32_t OnTraceVerify1() {
  DCHECK(g_race_verifier_active);
  ThreadId vg_tid = GetVgTid();

  // First, flush the old trace_info.
  ValgrindThread *thr = &g_valgrind_threads[vg_tid];

  // thr->trace_info is the trace info for the previous superblock.
  if (!thr->trace_info)
    // Nothing to do here.
    return 0;

  if (!thr->verifier_current_pc) {
    // This is the first iteration of the sleep loop.
    // Register memory accesses.
    int sleep_time_ms = RaceVerifierGetSleepTime(thr->trace_info->pc());
    if (!sleep_time_ms) {
      // This trace's PC is not under verification; drop it.
      thr->trace_info = NULL;
      return 0;
    }
    size_t n = thr->trace_info->n_mops();
    uintptr_t* tleb = thr->tleb;
    int need_sleep = 0;
    for (size_t i = 0; i < n; ++i) {
      uintptr_t addr = tleb[i];
      if (addr) {
        MopInfo *mop = thr->trace_info->GetMop(i);
        need_sleep += RaceVerifierStartAccess(thr->zero_based_uniq_tid, addr,
            mop->pc(), mop->is_write());
      }
    }
    // Setup the sleep timer.
    thr->verifier_current_pc = thr->trace_info->pc();
    if (need_sleep) {
      unsigned now = VG_(read_millisecond_timer)();
      thr->verifier_wakeup_time_ms = now + sleep_time_ms;
      return 1;
    } else {
      // -1 marks the final iteration (checked in OnTraceVerify2).
      thr->verifier_current_pc = (unsigned)-1;
      return 0;
    }
  } else {
    // Continuation of the sleep loop.
    DCHECK(thr->verifier_current_pc == thr->trace_info->pc());
    unsigned now = VG_(read_millisecond_timer)();
    if (now < thr->verifier_wakeup_time_ms) {
      // sleep more
      return 1;
    } else {
      // done, go straight to OnTraceVerify2
      thr->verifier_current_pc = (unsigned)-1;
      return 0;
    }
  }
}
888 
889 /**
890  * Race verification loop exit.
891  * Unregisters mops with the RaceVerifier.
892  * Sets up the new trace_info.
893  */
VG_REGPARM(1)
// Dirty helper inserted after the OnTraceVerify1 loop.
// Unregisters the previous trace's mops from the RaceVerifier, resets the
// per-thread loop state, installs the new trace_info and zeroes the TLEB
// slots that the new trace will fill in.
static void OnTraceVerify2(TraceInfo *trace_info) {
  DCHECK(g_race_verifier_active);
  ThreadId vg_tid = GetVgTid();
  ValgrindThread *thr = &g_valgrind_threads[vg_tid];

  // If there was a previous trace, OnTraceVerify1 must have finished its
  // sleep loop ((unsigned)-1 sentinel) before we get here.
  DCHECK(!thr->trace_info || thr->verifier_current_pc == (unsigned)-1);
  thr->verifier_current_pc = 0;
  thr->verifier_wakeup_time_ms = 0;

  if (thr->trace_info) {
    // Unregister accesses from the old trace_info.
    size_t n = thr->trace_info->n_mops();
    uintptr_t* tleb = thr->tleb;
    for (size_t i = 0; i < n; ++i) {
      // Only mops that actually executed have a non-zero address recorded.
      uintptr_t addr = tleb[i];
      if (addr) {
        MopInfo *mop = thr->trace_info->GetMop(i);
        RaceVerifierEndAccess(thr->zero_based_uniq_tid, addr,
            mop->pc(), mop->is_write());
      }
    }
  }

  // Start the new trace, zero the contents of tleb.
  thr->trace_info = trace_info;
  if (trace_info) {
    size_t n = trace_info->n_mops();
    uintptr_t *tleb = thr->tleb;
    for (size_t i = 0; i < n; i++)
      tleb[i] = 0;
    DCHECK(thr->trace_info->n_mops() <= kMaxMopsPerTrace);
  }
}
928 
929 /**
930  * Add a race verification preamble to the IRSB.
931  */
// Add a race verification preamble to the IRSB.
// Emits three pieces of IR, in order:
//   1. a dirty call to OnTraceVerify1, whose uint32_t result lands in a temp;
//   2. a conditional exit back to cur_pc (the superblock start) when that
//      result is non-zero, i.e. "sleep more and retry";
//   3. a dirty call to OnTraceVerify2(trace_info) to commit the new trace.
static void ts_instrument_trace_entry_verify(IRSB *bbOut,
    VexGuestLayout* layout, TraceInfo *trace_info, uintptr_t cur_pc) {
   HChar*   hName = (HChar*)"OnTraceVerify1";
   void *callback = (void*)OnTraceVerify1;
   IRExpr **args = mkIRExprVec_0();
   IRTemp need_sleep = newIRTemp(bbOut->tyenv, Ity_I32);
   IRDirty* di = unsafeIRDirty_1_N(need_sleep, 0, hName,
       VG_(fnptr_to_fnentry)(callback), args);
   addStmtToIRSB( bbOut, IRStmt_Dirty(di));

   // need_sleep_i1 = (need_sleep != 0) -- IRStmt_Exit requires an Ity_I1 guard.
   IRTemp need_sleep_i1 = newIRTemp(bbOut->tyenv, Ity_I1);
   IRStmt* cmp_stmt = IRStmt_WrTmp(need_sleep_i1,
       IRExpr_Binop(Iop_CmpNE32,
           IRExpr_RdTmp(need_sleep),
           IRExpr_Const(IRConst_U32(0))));
   addStmtToIRSB(bbOut, cmp_stmt);

   // Guest-word-sized exit target; Ijk_YieldNoRedir yields the thread so
   // other threads can make progress while this one "sleeps".
   IRConst* exit_dst = layout->sizeof_IP == 8 ?
       IRConst_U64(cur_pc) : IRConst_U32(cur_pc);
   IRStmt* exit_stmt = IRStmt_Exit(IRExpr_RdTmp(need_sleep_i1),
       Ijk_YieldNoRedir, exit_dst);
   addStmtToIRSB(bbOut, exit_stmt);

   hName = (HChar*)"OnTraceVerify2";
   callback = (void*)OnTraceVerify2;
   args = mkIRExprVec_1(mkIRExpr_HWord((HWord)trace_info));
   di = unsafeIRDirty_0_N(1, hName, VG_(fnptr_to_fnentry)(callback), args);
   addStmtToIRSB( bbOut, IRStmt_Dirty(di));
}
961 
962 
963 // ---------------------------- Instrumentation ---------------------------{{{1
964 
gen_Get_SP(IRSB * bbOut,VexGuestLayout * layout,Int hWordTy_szB)965 static IRTemp gen_Get_SP ( IRSB*           bbOut,
966                            VexGuestLayout* layout,
967                            Int             hWordTy_szB )
968 {
969   IRExpr* sp_expr;
970   IRTemp  sp_temp;
971   IRType  sp_type;
972   /* This in effect forces the host and guest word sizes to be the
973      same. */
974   tl_assert(hWordTy_szB == layout->sizeof_SP);
975   sp_type = layout->sizeof_SP == 8 ? Ity_I64 : Ity_I32;
976   sp_expr = IRExpr_Get( layout->offset_SP, sp_type );
977   sp_temp = newIRTemp( bbOut->tyenv, sp_type );
978   addStmtToIRSB( bbOut, IRStmt_WrTmp( sp_temp, sp_expr ) );
979   return sp_temp;
980 }
981 
ts_instrument_trace_entry(IRSB * bbOut,TraceInfo * trace_info)982 static void ts_instrument_trace_entry(IRSB *bbOut, TraceInfo *trace_info) {
983    CHECK(trace_info);
984    HChar*   hName = (HChar*)"OnTrace";
985    void *callback = (void*)OnTrace;
986    IRExpr **args = mkIRExprVec_1(mkIRExpr_HWord((HWord)trace_info));
987    IRDirty* di = unsafeIRDirty_0_N( 1,
988                            hName,
989                            VG_(fnptr_to_fnentry)(callback),
990                            args);
991    addStmtToIRSB( bbOut, IRStmt_Dirty(di));
992 }
993 
// Instrument the superblock's final jump to keep the shadow call stack
// consistent.  On most targets only Ijk_Call jumps matter; on ARM a plain
// jump may also be a function return, so frame deletion is instrumented too.
static void ts_instrument_final_jump (
                                /*MOD*/IRSB* sbOut,
                                IRExpr* next,
                                IRJumpKind jumpkind,
                                VexGuestLayout* layout,
                                IRType gWordTy, IRType hWordTy ) {

#ifndef VGP_arm_linux
  // On non-ARM systems we instrument only function calls.
  if (jumpkind != Ijk_Call) return;
#else
  if (jumpkind != Ijk_Call) {
    // On an ARM system a non-call jump may possibly exit a function.
    // Pass the post-jump SP and the jump target so the handler can pop
    // shadow frames if this really was a return.
    IRTemp sp_post_call_insn
        = gen_Get_SP( sbOut, layout, sizeofIRType(hWordTy) );
    IRExpr **args = mkIRExprVec_2(
        IRExpr_RdTmp(sp_post_call_insn),
        next
        );
    IRDirty* di = unsafeIRDirty_0_N(
        2/*regparms*/,
        (char*)"evh__delete_frame",
        VG_(fnptr_to_fnentry)((void*) &evh__delete_frame ),
        args );
    addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
    return;  // do not fall through
  }
#endif
  {
    // Default handler: the call target is not known at instrumentation time.
    const char *fn_name = "evh__rtn_call_ignore_unknown";
    void *fn = (void*)&evh__rtn_call_ignore_unknown;
    // Instrument the call instruction to keep the shadow stack consistent.
    IRTemp sp_post_call_insn
        = gen_Get_SP( sbOut, layout, sizeofIRType(hWordTy) );
    IRExpr **args = mkIRExprVec_2(
        IRExpr_RdTmp(sp_post_call_insn),
        next
        );
    if (next->tag == Iex_Const) {
      // Direct call with a constant target: decide statically whether
      // accesses below the callee should be ignored, and pick the matching
      // specialized handler.
      IRConst *con = next->Iex.Const.con;
      uintptr_t target = 0;
      if (con->tag == Ico_U32 || con->tag == Ico_U64) {
        target = con->tag == Ico_U32 ? con->Ico.U32 : con->Ico.U64;
        bool ignore = ThreadSanitizerIgnoreAccessesBelowFunction(target);
        if (ignore) {
          fn_name = "evh__rtn_call_ignore_yes";
          fn = (void*)&evh__rtn_call_ignore_yes;
        } else {
          fn_name = "evh__rtn_call_ignore_no";
          fn = (void*)&evh__rtn_call_ignore_no;
        }
      }
    }
    IRDirty* di = unsafeIRDirty_0_N(
        2/*regparms*/,
        (char*)fn_name,
        VG_(fnptr_to_fnentry)(fn),
        args );
    addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
  }
}
1055 
1056 // Generate exprs/stmts that make g_cur_tleb[idx] = x.
gen_store_to_tleb(IRSB * bbOut,IRTemp tleb_temp,uintptr_t idx,IRExpr * x,IRType tyAddr)1057 static void gen_store_to_tleb(IRSB *bbOut, IRTemp tleb_temp,
1058                               uintptr_t idx, IRExpr *x, IRType tyAddr) {
1059   CHECK(tleb_temp != IRTemp_INVALID);
1060   IRExpr *idx_expr  = mkIRExpr_HWord(idx * sizeof(uintptr_t));
1061   IRExpr *tleb_plus_idx_expr = IRExpr_Binop(
1062       sizeof(uintptr_t) == 8 ? Iop_Add64 : Iop_Add32,
1063       IRExpr_RdTmp(tleb_temp), idx_expr);
1064   IRTemp temp = newIRTemp(bbOut->tyenv, tyAddr);
1065   IRStmt *temp_stmt = IRStmt_WrTmp(temp, tleb_plus_idx_expr);
1066   IRStmt *store_stmt = IRStmt_Store(Iend_LE, IRExpr_RdTmp(temp), x);
1067 
1068   addStmtToIRSB(bbOut, temp_stmt);
1069   addStmtToIRSB(bbOut, store_stmt);
1070 }
1071 
instrument_mem_access(TraceInfo * trace_info,IRTemp tleb_temp,uintptr_t pc,size_t * trace_idx,IRSB * bbOut,IRStmt * st,IRExpr * addr,Int szB,Bool isStore,Bool dtor_head,Int hWordTy_szB)1072 static void instrument_mem_access ( TraceInfo *trace_info,
1073                                     IRTemp tleb_temp,
1074                                     uintptr_t pc,
1075                                     size_t  *trace_idx,
1076                                     IRSB*   bbOut,
1077                                     IRStmt* st,
1078                                     IRExpr* addr,
1079                                     Int     szB,
1080                                     Bool    isStore,
1081                                     Bool    dtor_head,
1082                                     Int     hWordTy_szB ) {
1083   IRType   tyAddr   = Ity_INVALID;
1084 
1085   tl_assert(isIRAtom(addr));
1086   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
1087 
1088   tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
1089   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
1090 
1091   if (szB == 28) {
1092     // Ignore weird-sized accesses for now.
1093     // See http://code.google.com/p/data-race-test/issues/detail?id=36
1094     return;
1095   }
1096 
1097   bool check_ident_store = false;
1098 
1099   if (st->tag == Ist_Store && dtor_head &&
1100       typeOfIRExpr(bbOut->tyenv, st->Ist.Store.data) == tyAddr) {
1101     check_ident_store = true;
1102   }
1103 
1104   size_t next_trace_idx = *trace_idx + 1;
1105 
1106   if (next_trace_idx > kMaxMopsPerTrace) {
1107     if (next_trace_idx == kMaxMopsPerTrace) {
1108       Report("INFO: too many mops in trace: %p %s\n", pc,
1109              PcToRtnName(pc, true).c_str());
1110     }
1111     return;
1112   }
1113 
1114   if (!trace_info) {
1115     // not instrumenting yet.
1116     *trace_idx = next_trace_idx;
1117     return;
1118   }
1119 
1120   IRExpr *expr_to_store = NULL;
1121 
1122   if (check_ident_store) {
1123     int is_64 = (sizeof(void*) == 8);
1124     // generate expression (*addr == new_value ? 0 : addr):
1125 
1126     // old_value = *addr
1127     IRExpr *addr_load_expr = IRExpr_Load(Iend_LE, tyAddr, addr);
1128     IRTemp star_addr = newIRTemp(bbOut->tyenv, tyAddr);
1129     IRStmt *star_addr_stmt = IRStmt_WrTmp(star_addr, addr_load_expr);
1130     addStmtToIRSB(bbOut, star_addr_stmt);
1131     // sub = (old_value - new_value)
1132     IRTemp sub = newIRTemp(bbOut->tyenv, tyAddr);
1133     IRExpr *sub_expr = IRExpr_Binop((IROp)(Iop_Sub32 + is_64),
1134                                     IRExpr_RdTmp(star_addr),
1135                                     st->Ist.Store.data);
1136     IRStmt *sub_stmt = IRStmt_WrTmp(sub, sub_expr);
1137     addStmtToIRSB(bbOut, sub_stmt);
1138     // mask = (sub==0) ? 0 : -1
1139     IRTemp mask = newIRTemp(bbOut->tyenv, tyAddr);
1140     IRExpr *mask_expr = IRExpr_Unop((IROp)(Iop_CmpwNEZ32 + is_64),
1141                                     IRExpr_RdTmp(sub));
1142     IRStmt *mask_stmt = IRStmt_WrTmp(mask, mask_expr);
1143     addStmtToIRSB(bbOut, mask_stmt);
1144 
1145     // res = mask & addr
1146     IRTemp and_tmp = newIRTemp(bbOut->tyenv, tyAddr);
1147     IRExpr *and_expr = IRExpr_Binop((IROp)(Iop_And32 + is_64),
1148                                     IRExpr_RdTmp(mask), addr);
1149     IRStmt *and_stmt = IRStmt_WrTmp(and_tmp, and_expr);
1150     addStmtToIRSB(bbOut, and_stmt);
1151 
1152     expr_to_store = IRExpr_RdTmp(and_tmp);
1153   } else {
1154     expr_to_store = addr;
1155   }
1156 
1157   // OnMop: g_cur_tleb[idx] = expr_to_store
1158   gen_store_to_tleb(bbOut, tleb_temp, *trace_idx, expr_to_store, tyAddr);
1159   // Create a mop {pc, size, is_write}
1160   MopInfo *mop = trace_info->GetMop(*trace_idx);
1161   new (mop) MopInfo(pc, szB, isStore, false);
1162   (*trace_idx)++;
1163 
1164   CHECK(*trace_idx == next_trace_idx);
1165 }
1166 
// Dispatch on the IR statement tag and instrument any memory accesses it
// contains via instrument_mem_access.  *cur_pc tracks the guest pc (updated
// at each IMark); *idx is the running mop index within the trace.  With
// trace_info == NULL this only counts mops.
void instrument_statement (IRStmt* st, IRSB* bbIn, IRSB* bbOut, IRType hWordTy,
                           TraceInfo *trace_info, IRTemp tleb_temp,
                           size_t *idx, uintptr_t *cur_pc, bool dtor_head) {
  switch (st->tag) {
    case Ist_NoOp:
    case Ist_AbiHint:
    case Ist_Put:
    case Ist_PutI:
    case Ist_Exit:
      /* None of these can contain any memory references. */
      break;

    case Ist_IMark:
      // Instruction boundary: remember its guest address as the pc for
      // any mops that follow.
      *cur_pc = st->Ist.IMark.addr;
      break;

    case Ist_MBE:
      //instrument_memory_bus_event( bbOut, st->Ist.MBE.event );
      switch (st->Ist.MBE.event) {
        case Imbe_Fence:
          break; /* not interesting */
        default:
          ppIRStmt(st);
          tl_assert(0);
      }
      break;

    case Ist_CAS:
      // Compare-and-swap accesses are not instrumented here.
      break;

    case Ist_Store:
      instrument_mem_access(trace_info, tleb_temp, *cur_pc, idx,
        bbOut, st,
        st->Ist.Store.addr,
        sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
        True/*isStore*/, dtor_head,
        sizeofIRType(hWordTy)
      );
      break;

    case Ist_WrTmp: {
      // Only WrTmp statements whose rhs is a load touch memory.
      IRExpr* data = st->Ist.WrTmp.data;
      if (data->tag == Iex_Load) {
        instrument_mem_access(trace_info, tleb_temp, *cur_pc, idx,
            bbOut, st,
            data->Iex.Load.addr,
            sizeofIRType(data->Iex.Load.ty),
            False/*!isStore*/, dtor_head,
            sizeofIRType(hWordTy)
            );
      }
      break;
    }

    case Ist_LLSC: {
      /* Ignore load-linked's and store-conditionals. */
      break;
    }

    case Ist_Dirty: {
      Int      dataSize;
      IRDirty* d = st->Ist.Dirty.details;
      if (d->mFx != Ifx_None) {
        /* This dirty helper accesses memory.  Collect the
           details. */
        tl_assert(d->mAddr != NULL);
        tl_assert(d->mSize != 0);
        dataSize = d->mSize;
        // Ifx_Modify counts as both a read and a write.
        if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
          instrument_mem_access(trace_info, tleb_temp, *cur_pc, idx,
            bbOut, st, d->mAddr, dataSize, False/*!isStore*/, dtor_head,
            sizeofIRType(hWordTy)
          );
        }
        if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
          instrument_mem_access(trace_info, tleb_temp, *cur_pc, idx,
            bbOut, st, d->mAddr, dataSize, True/*isStore*/, dtor_head,
            sizeofIRType(hWordTy)
          );
        }
      } else {
        tl_assert(d->mAddr == NULL);
        tl_assert(d->mSize == 0);
      }
      break;
    }

    default:
      ppIRStmt(st);
      tl_assert(0);
  } /* switch (st->tag) */
}
1259 
// Valgrind instrumentation callback: translate superblock bbIn into bbOut
// with ThreadSanitizer instrumentation.  Works in two passes over the
// statements: pass 1 counts the mops (so a TraceInfo of the right size can
// be allocated), pass 2 emits the trace preamble plus per-mop IR and copies
// the original statements through.
static IRSB* ts_instrument ( VgCallbackClosure* closure,
                             IRSB* bbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy) {
  if (G_flags->dry_run >= 2) return bbIn;
  Int   i;
  IRSB* bbOut;
  uintptr_t pc = closure->readdr;

  char objname[kBuffSize];
  if (VG_(get_objname)(pc, (Char*)objname, kBuffSize)) {
    if (StringMatch("*/ld-2*", objname)) {
      // we want to completely ignore ld-so.
      return bbIn;
    }
  }

  bool instrument_memory = ThreadSanitizerWantToInstrumentSblock(pc);

  if (gWordTy != hWordTy) {
    /* We don't currently support this case. */
    VG_(tool_panic)((Char*)"host/guest word size mismatch");
  }

  /* Set up BB */
  bbOut           = emptyIRSB();
  bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
  bbOut->next     = deepCopyIRExpr(bbIn->next);
  bbOut->jumpkind = bbIn->jumpkind;

  // Copy verbatim any IR preamble preceding the first IMark
  i = 0;
  while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
    addStmtToIRSB( bbOut, bbIn->stmts[i] );
    i++;
  }
  int first = i;
  size_t n_mops = 0;
  uintptr_t cur_pc = pc;

  // Filled in during pass 2, just after the trace preamble is emitted.
  IRTemp tleb_temp = IRTemp_INVALID;

  bool dtor_head = false;
  char buff[1000];
  // get_fnname_w_offset returns demangled name with optional "+offset" prefix.
  // If we have "::~" and don't have "+", this SB is the first in this dtor.
  // We do all this stuff to avoid benign races on vptr:
  // http://code.google.com/p/data-race-test/wiki/PopularDataRaces#Data_race_on_vptr
  if (VG_(get_fnname_w_offset)(pc, (Char*)buff, sizeof(buff)) &&
      VG_(strstr)((Char*)buff, (Char*)"::~") != NULL) {
    char *offset_str = (char*)VG_(strchr)((Char*)buff, '+');
    if (offset_str == NULL) {
      // we are in the first BB of DTOR.
      dtor_head = true;
    } else {
      // We are not in the first BB.
      // On x86_64 (it seems like) the vfptr is updated only in the first BB.
      // On x86 with -fPIC, the vfptr may be updated in the second BB
      // (because -fPIC adds a call which splits the first BB).
      // See http://code.google.com/p/chromium/issues/detail?id=61199
#ifdef VGA_x86
      char *end;
      size_t offset = my_strtol(offset_str + 1, &end, 10);
      if (offset <= 32) {
        dtor_head = true;
      }
#endif
    }
  }


  uintptr_t instrument_pc = 0; // if != 0, instrument only the instruction at this address
  if (g_race_verifier_active) {
    // NOTE: only the first extent of the superblock is consulted here.
    uintptr_t min_pc = vge->base[0];
    uintptr_t max_pc = min_pc + vge->len[0];
    bool verify_trace = RaceVerifierGetAddresses(min_pc, max_pc, &instrument_pc);
    if (!verify_trace)
      instrument_memory = false;
  }

  // count mops
  if (instrument_memory) {
    // Pass 1: trace_info == NULL, so instrument_statement only bumps n_mops.
    for (i = first; i < bbIn->stmts_used; i++) {
      IRStmt* st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      if (st->tag == Ist_IMark)
        cur_pc = st->Ist.IMark.addr;
      if (!instrument_pc || cur_pc == instrument_pc)
        instrument_statement(st, bbIn, bbOut, hWordTy,
            NULL, tleb_temp, &n_mops, &cur_pc, dtor_head);
    } /* iterate over bbIn->stmts */
  }
  TraceInfo *trace_info = NULL;
  if (n_mops > 0) {
    trace_info = TraceInfo::NewTraceInfo(n_mops, pc);
  }
  size_t n_mops_done = 0;
  bool need_to_insert_on_trace = n_mops > 0 || g_race_verifier_active;
  // instrument mops and copy the rest of BB to the new one.
  for (i = first; i < bbIn->stmts_used; i++) {
    IRStmt* st = bbIn->stmts[i];
    tl_assert(st);
    tl_assert(isFlatIRStmt(st));
    // Emit the trace preamble once, before the first non-IMark statement.
    if (st->tag != Ist_IMark && need_to_insert_on_trace) {
      if (g_race_verifier_active) {
        ts_instrument_trace_entry_verify(bbOut, layout, trace_info,
            closure->readdr);
      } else {
        ts_instrument_trace_entry(bbOut, trace_info);
      }
      need_to_insert_on_trace = false;
      // Generate temp for *g_cur_tleb.
      IRType   tyAddr = sizeof(uintptr_t) == 8 ?  Ity_I64 : Ity_I32;
      IRExpr *tleb_ptr_expr = mkIRExpr_HWord((HWord)&g_cur_tleb);
      IRExpr *tleb_expr = IRExpr_Load(Iend_LE, tyAddr, tleb_ptr_expr);
      tleb_temp = newIRTemp(bbOut->tyenv, tyAddr);
      IRStmt *stmt = IRStmt_WrTmp(tleb_temp, tleb_expr);
      addStmtToIRSB(bbOut, stmt);
    }
    if (instrument_memory) {
      // Pass 2: same walk as pass 1, but now trace_info/tleb_temp are live.
      if (st->tag == Ist_IMark)
        cur_pc = st->Ist.IMark.addr;
      if (!instrument_pc || cur_pc == instrument_pc)
        instrument_statement(st, bbIn, bbOut, hWordTy,
            trace_info, tleb_temp, &n_mops_done, &cur_pc, dtor_head);
    }
    addStmtToIRSB( bbOut, st );
  } /* iterate over bbIn->stmts */
  // Both passes must have seen exactly the same set of mops.
  CHECK(n_mops == n_mops_done);
  if (!g_race_verifier_active)
    ts_instrument_final_jump(bbOut, bbIn->next, bbIn->jumpkind, layout, gWordTy, hWordTy);
  return bbOut;
}
1395 
1396 extern "C"
// Valgrind tool entry point, called before command-line options are
// processed.  Registers tool metadata, the instrumentation/fini callbacks
// and all event-tracking hooks with the Valgrind core.
void ts_pre_clo_init(void) {
  VG_(details_name)            ((Char*)"ThreadSanitizer");
  VG_(details_version)         ((Char*)NULL);
  VG_(details_description)     ((Char*)"a data race detector");
  VG_(details_copyright_author)(
      (Char*)"Copyright (C) 2008-2010, and GNU GPL'd, by Google Inc.");
  VG_(details_bug_reports_to)  ((Char*)"data-race-test@googlegroups.com");

  VG_(basic_tool_funcs)        (ts_post_clo_init,
                                ts_instrument,
                                ts_fini);

  VG_(needs_client_requests)     (ts_handle_client_request);

  VG_(needs_command_line_options)(ts_process_cmd_line_option,
                                  ts_print_usage,
                                  ts_print_debug_usage);
   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   if (!g_race_verifier_active) {
     // These hooks are not needed when only verifying previously found races.
     VG_(track_workq_task_start)( evh__pre_workq_task_start );
     VG_(track_pre_thread_first_insn)( evh__pre_thread_first_insn );
   }

   // Disable VEX loop unrolling and basic-block chasing: both would merge or
   // duplicate guest code across superblocks, confusing trace accounting.
   VG_(clo_vex_control).iropt_unroll_thresh = 0;
   VG_(clo_vex_control).guest_chase_thresh = 0;

   VG_(track_pre_deliver_signal) (&SignalIn);
   VG_(track_post_deliver_signal)(&SignalOut);

   VG_(track_start_client_code)( OnStartClientCode );
}
1430 
// Declare ts_pre_clo_init as this tool's entry point and embed the
// core/tool interface version for Valgrind's compatibility check.
VG_DETERMINE_INTERFACE_VERSION(ts_pre_clo_init)

// {{{1 end
// vim:shiftwidth=2:softtabstop=2:expandtab
1435