• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2   This file is part of drd, a thread error detector.
3 
4   Copyright (C) 2006-2017 Bart Van Assche <bvanassche@acm.org>.
5 
6   This program is free software; you can redistribute it and/or
7   modify it under the terms of the GNU General Public License as
8   published by the Free Software Foundation; either version 2 of the
9   License, or (at your option) any later version.
10 
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15 
16   You should have received a copy of the GNU General Public License
17   along with this program; if not, write to the Free Software
18   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19   02111-1307, USA.
20 
21   The GNU General Public License is contained in the file COPYING.
22 */
23 
24 
25 #include "drd_barrier.h"
26 #include "drd_clientobj.h"
27 #include "drd_clientreq.h"
28 #include "drd_cond.h"
29 #include "drd_error.h"
30 #include "drd_hb.h"
31 #include "drd_load_store.h"
32 #include "drd_malloc_wrappers.h"
33 #include "drd_mutex.h"
34 #include "drd_rwlock.h"
35 #include "drd_segment.h"
36 #include "drd_semaphore.h"
37 #include "drd_suppression.h"
38 #include "drd_thread.h"
39 #include "libvex_guest_offsets.h"
40 #include "pub_drd_bitmap.h"
41 #include "pub_tool_vki.h"         // Must be included before pub_tool_libcproc
42 #include "pub_tool_basics.h"
43 #include "pub_tool_libcassert.h"  // tl_assert()
44 #include "pub_tool_libcbase.h"    // VG_(strcmp)
45 #include "pub_tool_libcprint.h"   // VG_(printf)
46 #include "pub_tool_libcproc.h"    // VG_(getenv)()
47 #include "pub_tool_machine.h"
48 #include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
49 #include "pub_tool_options.h"     // command line options
50 #include "pub_tool_replacemalloc.h"
51 #include "pub_tool_threadstate.h" // VG_(get_running_tid)()
52 #include "pub_tool_tooliface.h"
53 #include "pub_tool_aspacemgr.h"   // VG_(am_is_valid_for_client)
54 
55 
/* Local variables. */

static Bool s_print_stats;      // --drd-stats: print DRD statistics in DRD_(fini)().
static Bool s_var_info;         // --var-info: request variable debug info from the core.
static Bool s_show_stack_usage; // --show-stack-usage: report stack usage at thread exit.
static Bool s_trace_alloc;      // --trace-alloc: trace memory allocations/deallocations.
static Bool trace_sectsuppr;    // --trace-sectsuppr: trace section-based suppressions.
63 
64 
/**
 * Implement the needs_command_line_options tool interface for drd.
 *
 * Parses a single command line argument and, when it is a drd option,
 * forwards the parsed value to the subsystem that owns the setting.
 *
 * @param[in] arg One "--option[=value]" argument string.
 * @return True if the option was recognized by drd; otherwise the result of
 *         the malloc-replacement option parser.
 */
static Bool DRD_(process_cmd_line_option)(const HChar* arg)
{
   /*
    * Each local below starts at -1, meaning "not specified on the command
    * line", so that only options that actually appeared trigger a
    * subsystem update further down.
    */
   int check_stack_accesses   = -1;
   int join_list_vol          = -1;
   int exclusive_threshold_ms = -1;
   int first_race_only        = -1;
   int report_signal_unlocked = -1;
   int segment_merging        = -1;
   int segment_merge_interval = -1;
   int shared_threshold_ms    = -1;
   int show_confl_seg         = -1;
   int trace_barrier          = -1;
   int trace_clientobj        = -1;
   int trace_cond             = -1;
   int trace_csw              = -1;
   int trace_fork_join        = -1;
   int trace_hb               = -1;
   int trace_conflict_set     = -1;
   int trace_conflict_set_bm  = -1;
   int trace_mutex            = -1;
   int trace_rwlock           = -1;
   int trace_segment          = -1;
   int trace_semaphore        = -1;
   int trace_suppression      = -1;
   const HChar* trace_address = 0;
   const HChar* ptrace_address= 0;

   /*
    * First pass: match the option string. Some options write directly into
    * globals (e.g. s_print_stats, DRD_(g_free_is_write)); the rest are
    * captured in the locals above.
    */
   if      VG_BOOL_CLO(arg, "--check-stack-var",     check_stack_accesses) {}
   else if VG_INT_CLO (arg, "--join-list-vol",       join_list_vol) {}
   else if VG_BOOL_CLO(arg, "--drd-stats",           s_print_stats) {}
   else if VG_BOOL_CLO(arg, "--first-race-only",     first_race_only) {}
   else if VG_BOOL_CLO(arg, "--free-is-write",       DRD_(g_free_is_write)) {}
   else if VG_BOOL_CLO(arg,"--report-signal-unlocked",report_signal_unlocked)
   {}
   else if VG_BOOL_CLO(arg, "--segment-merging",     segment_merging) {}
   else if VG_INT_CLO (arg, "--segment-merging-interval", segment_merge_interval)
   {}
   else if VG_BOOL_CLO(arg, "--show-confl-seg",      show_confl_seg) {}
   else if VG_BOOL_CLO(arg, "--show-stack-usage",    s_show_stack_usage) {}
   else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
   DRD_(ignore_thread_creation)) {}
   else if VG_BOOL_CLO(arg, "--trace-alloc",         s_trace_alloc) {}
   else if VG_BOOL_CLO(arg, "--trace-barrier",       trace_barrier) {}
   else if VG_BOOL_CLO(arg, "--trace-clientobj",     trace_clientobj) {}
   else if VG_BOOL_CLO(arg, "--trace-cond",          trace_cond) {}
   else if VG_BOOL_CLO(arg, "--trace-conflict-set",  trace_conflict_set) {}
   else if VG_BOOL_CLO(arg, "--trace-conflict-set-bm", trace_conflict_set_bm){}
   else if VG_BOOL_CLO(arg, "--trace-csw",           trace_csw) {}
   else if VG_BOOL_CLO(arg, "--trace-fork-join",     trace_fork_join) {}
   else if VG_BOOL_CLO(arg, "--trace-hb",            trace_hb) {}
   else if VG_BOOL_CLO(arg, "--trace-mutex",         trace_mutex) {}
   else if VG_BOOL_CLO(arg, "--trace-rwlock",        trace_rwlock) {}
   else if VG_BOOL_CLO(arg, "--trace-sectsuppr",     trace_sectsuppr) {}
   else if VG_BOOL_CLO(arg, "--trace-segment",       trace_segment) {}
   else if VG_BOOL_CLO(arg, "--trace-semaphore",     trace_semaphore) {}
   else if VG_BOOL_CLO(arg, "--trace-suppr",         trace_suppression) {}
   else if VG_BOOL_CLO(arg, "--var-info",            s_var_info) {}
   else if VG_BOOL_CLO(arg, "--verify-conflict-set", DRD_(verify_conflict_set))
   {}
   else if VG_INT_CLO (arg, "--exclusive-threshold", exclusive_threshold_ms) {}
   else if VG_STR_CLO (arg, "--ptrace-addr",         ptrace_address) {}
   else if VG_INT_CLO (arg, "--shared-threshold",    shared_threshold_ms)    {}
   else if VG_STR_CLO (arg, "--trace-addr",          trace_address) {}
   else
      /* Not a drd option: let the malloc replacement machinery try it. */
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   /* Second pass: forward every value that was actually specified. */
   if (check_stack_accesses != -1)
      DRD_(set_check_stack_accesses)(check_stack_accesses);
   if (exclusive_threshold_ms != -1)
   {
      /* One threshold drives both mutexes and write-locked rwlocks. */
      DRD_(mutex_set_lock_threshold)(exclusive_threshold_ms);
      DRD_(rwlock_set_exclusive_threshold)(exclusive_threshold_ms);
   }
   if (first_race_only != -1)
   {
      DRD_(set_first_race_only)(first_race_only);
   }
   if (join_list_vol != -1)
      DRD_(thread_set_join_list_vol)(join_list_vol);
   if (report_signal_unlocked != -1)
   {
      DRD_(cond_set_report_signal_unlocked)(report_signal_unlocked);
   }
   if (shared_threshold_ms != -1)
   {
      DRD_(rwlock_set_shared_threshold)(shared_threshold_ms);
   }
   if (segment_merging != -1)
      DRD_(thread_set_segment_merging)(segment_merging);
   if (segment_merge_interval != -1)
      DRD_(thread_set_segment_merge_interval)(segment_merge_interval);
   if (show_confl_seg != -1)
      DRD_(set_show_conflicting_segments)(show_confl_seg);
   if (trace_address) {
      /* --trace-addr traces a single byte; address is parsed as hex. */
      const Addr addr = VG_(strtoll16)(trace_address, 0);
      DRD_(start_tracing_address_range)(addr, addr + 1, False);
   }
   if (ptrace_address) {
      /* --ptrace-addr accepts "<addr>[+<length>]"; without "+<length>" a
         single byte is traced. The '+' is overwritten in place to split
         the string before parsing both halves as hex. */
      HChar *plus = VG_(strchr)(ptrace_address, '+');
      Addr addr, length;
      if (plus)
         *plus = '\0';
      addr = VG_(strtoll16)(ptrace_address, 0);
      length = plus ? VG_(strtoll16)(plus + 1, 0) : 1;
      DRD_(start_tracing_address_range)(addr, addr + length, True);
   }
   if (trace_barrier != -1)
      DRD_(barrier_set_trace)(trace_barrier);
   if (trace_clientobj != -1)
      DRD_(clientobj_set_trace)(trace_clientobj);
   if (trace_cond != -1)
      DRD_(cond_set_trace)(trace_cond);
   if (trace_csw != -1)
      DRD_(thread_trace_context_switches)(trace_csw);
   if (trace_fork_join != -1)
      DRD_(thread_set_trace_fork_join)(trace_fork_join);
   if (trace_hb != -1)
      DRD_(hb_set_trace)(trace_hb);
   if (trace_conflict_set != -1)
      DRD_(thread_trace_conflict_set)(trace_conflict_set);
   if (trace_conflict_set_bm != -1)
      DRD_(thread_trace_conflict_set_bm)(trace_conflict_set_bm);
   if (trace_mutex != -1)
      DRD_(mutex_set_trace)(trace_mutex);
   if (trace_rwlock != -1)
      DRD_(rwlock_set_trace)(trace_rwlock);
   if (trace_segment != -1)
      DRD_(sg_set_trace)(trace_segment);
   if (trace_semaphore != -1)
      DRD_(semaphore_set_trace)(trace_semaphore);
   if (trace_suppression != -1)
      DRD_(suppression_set_trace)(trace_suppression);

   return True;
}
203 
/**
 * Implement the tool interface's usage printing: list the drd command line
 * options shown by --help. The two %-placeholders are filled in with the
 * current segment merge interval and the --ignore-thread-creation default.
 */
static void DRD_(print_usage)(void)
{
   VG_(printf)(
"    --check-stack-var=yes|no  Whether or not to report data races on\n"
"                              stack variables [no].\n"
"    --exclusive-threshold=<n> Print an error message if any mutex or\n"
"                              writer lock is held longer than the specified\n"
"                              time (in milliseconds) [off].\n"
"    --first-race-only=yes|no  Only report the first data race that occurs on\n"
"                              a memory location instead of all races [no].\n"
"    --free-is-write=yes|no    Whether to report races between freeing memory\n"
"                              and subsequent accesses of that memory[no].\n"
"    --join-list-vol=<n>       Number of threads to delay cleanup for [10].\n"
"    --report-signal-unlocked=yes|no Whether to report calls to\n"
"                              pthread_cond_signal() where the mutex associated\n"
"                              with the signal via pthread_cond_wait() is not\n"
"                              locked at the time the signal is sent [yes].\n"
"    --segment-merging=yes|no  Controls segment merging [yes].\n"
"        Segment merging is an algorithm to limit memory usage of the\n"
"        data race detection algorithm. Disabling segment merging may\n"
"        improve the accuracy of the so-called 'other segments' displayed\n"
"        in race reports but can also trigger an out of memory error.\n"
"    --segment-merging-interval=<n> Perform segment merging every time n new\n"
"        segments have been created. Default: %d.\n"
"    --shared-threshold=<n>    Print an error message if a reader lock\n"
"                              is held longer than the specified time (in\n"
"                              milliseconds) [off]\n"
"    --show-confl-seg=yes|no   Show conflicting segments in race reports [yes].\n"
"    --show-stack-usage=yes|no Print stack usage at thread exit time [no].\n"
"    --ignore-thread-creation=yes|no Ignore activities during thread \n"
"                              creation [%s].\n"
"\n"
"  drd options for monitoring process behavior:\n"
"    --ptrace-addr=<address>[+<length>] Trace all load and store activity for\n"
"                              the specified address range and keep doing that\n"
"                              even after the memory at that address has been\n"
"                              freed and reallocated [off].\n"
"    --trace-addr=<address>    Trace all load and store activity for the\n"
"                              specified address [off].\n"
"    --trace-alloc=yes|no      Trace all memory allocations and deallocations\n"
"                              [no].\n"
"    --trace-barrier=yes|no    Trace all barrier activity [no].\n"
"    --trace-cond=yes|no       Trace all condition variable activity [no].\n"
"    --trace-fork-join=yes|no  Trace all thread fork/join activity [no].\n"
"    --trace-hb=yes|no         Trace ANNOTATE_HAPPENS_BEFORE() etc. [no].\n"
"    --trace-mutex=yes|no      Trace all mutex activity [no].\n"
"    --trace-rwlock=yes|no     Trace all reader-writer lock activity[no].\n"
"    --trace-semaphore=yes|no  Trace all semaphore activity [no].\n",
DRD_(thread_get_segment_merge_interval)(),
DRD_(ignore_thread_creation) ? "yes" : "no"
);
}
256 
DRD_(print_debug_usage)257 static void DRD_(print_debug_usage)(void)
258 {
259    VG_(printf)(
260 "    --drd-stats=yes|no        Print statistics about DRD activity [no].\n"
261 "    --trace-clientobj=yes|no  Trace all client object activity [no].\n"
262 "    --trace-csw=yes|no        Trace all scheduler context switches [no].\n"
263 "    --trace-conflict-set=yes|no Trace all conflict set updates [no].\n"
264 "    --trace-conflict-set-bm=yes|no Trace all conflict set bitmap\n"
265 "                              updates [no]. Note: enabling this option\n"
266 "                              will generate a lot of output !\n"
267 "    --trace-sectsuppr=yes|no  Trace which the dynamic library sections on\n"
268 "                              which data race detection is suppressed.\n"
269 "    --trace-segment=yes|no    Trace segment actions [no].\n"
270 "    --trace-suppr=yes|no      Trace all address suppression actions [no].\n"
271 "    --verify-conflict-set=yes|no Verify conflict set consistency [no].\n"
272 );
273 }
274 
275 
276 //
277 // Implements the thread-related core callbacks.
278 //
279 
drd_pre_mem_read(const CorePart part,const ThreadId tid,const HChar * const s,const Addr a,const SizeT size)280 static void drd_pre_mem_read(const CorePart part,
281                              const ThreadId tid,
282                              const HChar* const s,
283                              const Addr a,
284                              const SizeT size)
285 {
286    DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
287    if (size > 0)
288    {
289       DRD_(trace_load)(a, size);
290    }
291 }
292 
drd_pre_mem_read_asciiz(const CorePart part,const ThreadId tid,const HChar * const s,const Addr a)293 static void drd_pre_mem_read_asciiz(const CorePart part,
294                                     const ThreadId tid,
295                                     const HChar* const s,
296                                     const Addr a)
297 {
298    const HChar* p = (void*)a;
299    SizeT size = 0;
300 
301    // Don't segfault if the string starts in an obviously stupid
302    // place.  Actually we should check the whole string, not just
303    // the start address, but that's too much trouble.  At least
304    // checking the first byte is better than nothing.  See #255009.
305    if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
306       return;
307 
308    /* Note: the expression '*p' reads client memory and may crash if the */
309    /* client provided an invalid pointer !                               */
310    while (*p)
311    {
312       p++;
313       size++;
314    }
315    if (size > 0)
316    {
317       DRD_(trace_load)(a, size);
318    }
319 }
320 
drd_post_mem_write(const CorePart part,const ThreadId tid,const Addr a,const SizeT size)321 static void drd_post_mem_write(const CorePart part,
322                                const ThreadId tid,
323                                const Addr a,
324                                const SizeT size)
325 {
326    DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
327    if (size > 0)
328    {
329       DRD_(trace_store)(a, size);
330    }
331 }
332 
333 static __inline__
drd_start_using_mem(const Addr a1,const SizeT len,const Bool is_stack_mem)334 void drd_start_using_mem(const Addr a1, const SizeT len,
335                          const Bool is_stack_mem)
336 {
337    const Addr a2 = a1 + len;
338 
339    tl_assert(a1 <= a2);
340 
341    if (!is_stack_mem && s_trace_alloc)
342       DRD_(trace_msg)("Started using memory range 0x%lx + %lu%s",
343                       a1, len, DRD_(running_thread_inside_pthread_create)()
344                       ? " (inside pthread_create())" : "");
345 
346    if (!is_stack_mem && DRD_(g_free_is_write))
347       DRD_(thread_stop_using_mem)(a1, a2);
348 
349    if (UNLIKELY(DRD_(any_address_is_traced)()))
350    {
351       DRD_(trace_mem_access)(a1, len, eStart, 0, 0);
352    }
353 
354    if (UNLIKELY(DRD_(running_thread_inside_pthread_create)()))
355    {
356       DRD_(start_suppression)(a1, a2, "pthread_create()");
357    }
358 }
359 
/**
 * Mark [a1,a1+len[ as in use as non-stack memory. The execontext argument
 * ec_uniq is ignored by DRD.
 */
static void drd_start_using_mem_w_ecu(const Addr a1,
                                      const SizeT len,
                                      UInt ec_uniq)
{
   drd_start_using_mem(a1, len, /*is_stack_mem=*/False);
}
366 
/**
 * Mark [a1,a1+len[ as in use as non-stack memory. The thread ID argument
 * tid is ignored by DRD.
 */
static void drd_start_using_mem_w_tid(const Addr a1,
                                      const SizeT len,
                                      ThreadId tid)
{
   drd_start_using_mem(a1, len, /*is_stack_mem=*/False);
}
373 
374 static __inline__
drd_stop_using_mem(const Addr a1,const SizeT len,const Bool is_stack_mem)375 void drd_stop_using_mem(const Addr a1, const SizeT len,
376                         const Bool is_stack_mem)
377 {
378    const Addr a2 = a1 + len;
379 
380    tl_assert(a1 <= a2);
381 
382    if (UNLIKELY(DRD_(any_address_is_traced)()))
383       DRD_(trace_mem_access)(a1, len, eEnd, 0, 0);
384 
385    if (!is_stack_mem && s_trace_alloc)
386       DRD_(trace_msg)("Stopped using memory range 0x%lx + %lu",
387                       a1, len);
388 
389    if (!is_stack_mem || DRD_(get_check_stack_accesses)())
390    {
391       if (is_stack_mem || !DRD_(g_free_is_write))
392 	 DRD_(thread_stop_using_mem)(a1, a2);
393       else if (DRD_(g_free_is_write))
394 	 DRD_(trace_store)(a1, len);
395       DRD_(clientobj_stop_using_mem)(a1, a2);
396       DRD_(suppression_stop_using_mem)(a1, a2);
397    }
398 }
399 
400 static __inline__
drd_stop_using_nonstack_mem(const Addr a1,const SizeT len)401 void drd_stop_using_nonstack_mem(const Addr a1, const SizeT len)
402 {
403    drd_stop_using_mem(a1, len, False);
404 }
405 
406 /**
407  * Discard all information DRD has about memory accesses and client objects
408  * in the specified address range.
409  */
DRD_(clean_memory)410 void DRD_(clean_memory)(const Addr a1, const SizeT len)
411 {
412    const Bool is_stack_memory = DRD_(thread_address_on_any_stack)(a1);
413    drd_stop_using_mem(a1, len, is_stack_memory);
414    drd_start_using_mem(a1, len, is_stack_memory);
415 }
416 
417 /**
418  * Suppress data race reports on all addresses contained in .plt, .got and
419  * .got.plt sections inside the address range [ a, a + len [. The data in
420  * these sections is modified by _dl_relocate_object() every time a function
421  * in a shared library is called for the first time. Since the first call
422  * to a function in a shared library can happen from a multithreaded context,
423  * such calls can cause conflicting accesses. See also Ulrich Drepper's
424  * paper "How to Write Shared Libraries" for more information about relocation
425  * (http://people.redhat.com/drepper/dsohowto.pdf).
426  * Note: the contents of the .got section is only modified by the MIPS resolver.
427  */
DRD_(suppress_relocation_conflicts)428 static void DRD_(suppress_relocation_conflicts)(const Addr a, const SizeT len)
429 {
430    const DebugInfo* di;
431 
432    if (trace_sectsuppr)
433       VG_(dmsg)("Evaluating range @ 0x%lx size %lu\n", a, len);
434 
435    for (di = VG_(next_DebugInfo)(0); di; di = VG_(next_DebugInfo)(di)) {
436       Addr  avma;
437       SizeT size;
438 
439       if (trace_sectsuppr)
440 	 VG_(dmsg)("Examining %s / %s\n", VG_(DebugInfo_get_filename)(di),
441 		   VG_(DebugInfo_get_soname)(di));
442 
443       /*
444        * Suppress the race report on the libpthread global variable
445        * __pthread_multiple_threads. See also
446        * http://bugs.kde.org/show_bug.cgi?id=323905.
447        */
448       avma = VG_(DebugInfo_get_bss_avma)(di);
449       size = VG_(DebugInfo_get_bss_size)(di);
450       tl_assert((avma && size) || (avma == 0 && size == 0));
451       if (size > 0 &&
452           VG_(strcmp)(VG_(DebugInfo_get_soname)(di), "libpthread.so.0") == 0) {
453 	 if (trace_sectsuppr)
454 	    VG_(dmsg)("Suppressing .bss @ 0x%lx size %lu\n", avma, size);
455          tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectBSS);
456          DRD_(start_suppression)(avma, avma + size, ".bss");
457       }
458 
459       avma = VG_(DebugInfo_get_plt_avma)(di);
460       size = VG_(DebugInfo_get_plt_size)(di);
461       tl_assert((avma && size) || (avma == 0 && size == 0));
462       if (size > 0) {
463 	 if (trace_sectsuppr)
464 	    VG_(dmsg)("Suppressing .plt @ 0x%lx size %lu\n", avma, size);
465          tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectPLT);
466          DRD_(start_suppression)(avma, avma + size, ".plt");
467       }
468 
469       avma = VG_(DebugInfo_get_gotplt_avma)(di);
470       size = VG_(DebugInfo_get_gotplt_size)(di);
471       tl_assert((avma && size) || (avma == 0 && size == 0));
472       if (size > 0) {
473 	 if (trace_sectsuppr)
474 	    VG_(dmsg)("Suppressing .got.plt @ 0x%lx size %lu\n", avma, size);
475          tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectGOTPLT);
476          DRD_(start_suppression)(avma, avma + size, ".gotplt");
477       }
478 
479       avma = VG_(DebugInfo_get_got_avma)(di);
480       size = VG_(DebugInfo_get_got_size)(di);
481       tl_assert((avma && size) || (avma == 0 && size == 0));
482       if (size > 0) {
483 	 if (trace_sectsuppr)
484 	    VG_(dmsg)("Suppressing .got @ 0x%lx size %lu\n", avma, size);
485          tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectGOT);
486          DRD_(start_suppression)(avma, avma + size, ".got");
487       }
488    }
489 }
490 
491 static
drd_start_using_mem_w_perms(const Addr a,const SizeT len,const Bool rr,const Bool ww,const Bool xx,ULong di_handle)492 void drd_start_using_mem_w_perms(const Addr a, const SizeT len,
493                                  const Bool rr, const Bool ww, const Bool xx,
494                                  ULong di_handle)
495 {
496    DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
497 
498    drd_start_using_mem(a, len, False);
499 
500    DRD_(suppress_relocation_conflicts)(a, len);
501 }
502 
503 /**
504  * Called by the core when the stack of a thread grows, to indicate that
505  * the addresses in range [ a, a + len [ may now be used by the client.
506  * Assumption: stacks grow downward.
507  */
508 static __inline__
drd_start_using_mem_stack2(const DrdThreadId tid,const Addr a,const SizeT len)509 void drd_start_using_mem_stack2(const DrdThreadId tid, const Addr a,
510                                 const SizeT len)
511 {
512    DRD_(thread_set_stack_min)(tid, a - VG_STACK_REDZONE_SZB);
513    drd_start_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
514                        True);
515 }
516 
517 static __inline__
drd_start_using_mem_stack(const Addr a,const SizeT len)518 void drd_start_using_mem_stack(const Addr a, const SizeT len)
519 {
520    drd_start_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
521 }
522 
523 /**
524  * Called by the core when the stack of a thread shrinks, to indicate that
525  * the addresses [ a, a + len [ are no longer accessible for the client.
526  * Assumption: stacks grow downward.
527  */
528 static __inline__
drd_stop_using_mem_stack2(const DrdThreadId tid,const Addr a,const SizeT len)529 void drd_stop_using_mem_stack2(const DrdThreadId tid, const Addr a,
530                                const SizeT len)
531 {
532    DRD_(thread_set_stack_min)(tid, a + len - VG_STACK_REDZONE_SZB);
533    drd_stop_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
534                       True);
535 }
536 
537 static __inline__
drd_stop_using_mem_stack(const Addr a,const SizeT len)538 void drd_stop_using_mem_stack(const Addr a, const SizeT len)
539 {
540    drd_stop_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
541 }
542 
543 static
on_alt_stack(const Addr a)544 Bool on_alt_stack(const Addr a)
545 {
546    ThreadId vg_tid;
547    Addr alt_min;
548    SizeT alt_size;
549 
550    vg_tid = VG_(get_running_tid)();
551    alt_min = VG_(thread_get_altstack_min)(vg_tid);
552    alt_size = VG_(thread_get_altstack_size)(vg_tid);
553    return (SizeT)(a - alt_min) < alt_size;
554 }
555 
556 static
drd_start_using_mem_alt_stack(const Addr a,const SizeT len)557 void drd_start_using_mem_alt_stack(const Addr a, const SizeT len)
558 {
559    if (!on_alt_stack(a))
560       drd_start_using_mem_stack(a, len);
561 }
562 
563 static
drd_stop_using_mem_alt_stack(const Addr a,const SizeT len)564 void drd_stop_using_mem_alt_stack(const Addr a, const SizeT len)
565 {
566    if (!on_alt_stack(a))
567       drd_stop_using_mem_stack(a, len);
568 }
569 
570 /**
571  * Callback function invoked by the Valgrind core before a signal is delivered.
572  */
573 static
drd_pre_deliver_signal(const ThreadId vg_tid,const Int sigNo,const Bool alt_stack)574 void drd_pre_deliver_signal(const ThreadId vg_tid, const Int sigNo,
575                             const Bool alt_stack)
576 {
577    DrdThreadId drd_tid;
578 
579    drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
580    DRD_(thread_set_on_alt_stack)(drd_tid, alt_stack);
581    if (alt_stack)
582    {
583       /*
584        * As soon a signal handler has been invoked on the alternate stack,
585        * switch to stack memory handling functions that can handle the
586        * alternate stack.
587        */
588       VG_(track_new_mem_stack)(drd_start_using_mem_alt_stack);
589       VG_(track_die_mem_stack)(drd_stop_using_mem_alt_stack);
590    }
591 }
592 
593 /**
594  * Callback function invoked by the Valgrind core after a signal is delivered,
595  * at least if the signal handler did not longjmp().
596  */
597 static
drd_post_deliver_signal(const ThreadId vg_tid,const Int sigNo)598 void drd_post_deliver_signal(const ThreadId vg_tid, const Int sigNo)
599 {
600    DrdThreadId drd_tid;
601 
602    drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
603    DRD_(thread_set_on_alt_stack)(drd_tid, False);
604    if (DRD_(thread_get_threads_on_alt_stack)() == 0)
605    {
606       VG_(track_new_mem_stack)(drd_start_using_mem_stack);
607       VG_(track_die_mem_stack)(drd_stop_using_mem_stack);
608    }
609 }
610 
611 /**
612  * Callback function called by the Valgrind core before a stack area is
613  * being used by a signal handler.
614  *
615  * @param[in] a   Start of address range - VG_STACK_REDZONE_SZB.
616  * @param[in] len Address range length + VG_STACK_REDZONE_SZB.
617  * @param[in] tid Valgrind thread ID for whom the signal frame is being
618  *                constructed.
619  */
drd_start_using_mem_stack_signal(const Addr a,const SizeT len,ThreadId tid)620 static void drd_start_using_mem_stack_signal(const Addr a, const SizeT len,
621                                              ThreadId tid)
622 {
623    DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
624    drd_start_using_mem(a + VG_STACK_REDZONE_SZB, len - VG_STACK_REDZONE_SZB,
625                        True);
626 }
627 
/** Counterpart of drd_start_using_mem_stack_signal(): invoked when a signal
    handler's stack area is released again. */
static void drd_stop_using_mem_stack_signal(Addr a, SizeT len)
{
   drd_stop_using_mem(a + VG_STACK_REDZONE_SZB, len - VG_STACK_REDZONE_SZB,
                      /*is_stack_mem=*/True);
}
633 
634 static
drd_pre_thread_create(const ThreadId creator,const ThreadId created)635 void drd_pre_thread_create(const ThreadId creator, const ThreadId created)
636 {
637    const DrdThreadId drd_creator = DRD_(VgThreadIdToDrdThreadId)(creator);
638    tl_assert(created != VG_INVALID_THREADID);
639    DRD_(thread_pre_create)(drd_creator, created);
640    if (DRD_(IsValidDrdThreadId)(drd_creator))
641    {
642       DRD_(thread_new_segment)(drd_creator);
643    }
644    if (DRD_(thread_get_trace_fork_join)())
645    {
646       DRD_(trace_msg)("drd_pre_thread_create creator = %u, created = %u",
647                       drd_creator, created);
648    }
649 }
650 
651 /**
652  * Called by Valgrind's core before any loads or stores are performed on
653  * the context of thread "created".
654  */
655 static
drd_post_thread_create(const ThreadId vg_created)656 void drd_post_thread_create(const ThreadId vg_created)
657 {
658    DrdThreadId drd_created;
659    Addr stack_max;
660 
661    tl_assert(vg_created != VG_INVALID_THREADID);
662 
663    drd_created = DRD_(thread_post_create)(vg_created);
664 
665    /* Set up red zone before the code in glibc's clone.S is run. */
666    stack_max = DRD_(thread_get_stack_max)(drd_created);
667    drd_start_using_mem_stack2(drd_created, stack_max, 0);
668 
669    if (DRD_(thread_get_trace_fork_join)())
670    {
671       DRD_(trace_msg)("drd_post_thread_create created = %u", drd_created);
672    }
673    if (! DRD_(get_check_stack_accesses)())
674    {
675       DRD_(start_suppression)(DRD_(thread_get_stack_max)(drd_created)
676                               - DRD_(thread_get_stack_size)(drd_created),
677                               DRD_(thread_get_stack_max)(drd_created),
678                               "stack");
679    }
680 }
681 
/* Called after a thread has performed its last memory access. */
static void drd_thread_finished(ThreadId vg_tid)
{
   DrdThreadId drd_tid;

   /*
    * Ignore if invoked because thread creation failed. See e.g.
    * coregrind/m_syswrap/syswrap-amd64-linux.c
    */
   if (VG_(get_running_tid)() != vg_tid)
      return;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   tl_assert(drd_tid != DRD_INVALID_THREADID);
   if (DRD_(thread_get_trace_fork_join)())
   {
      DRD_(trace_msg)("drd_thread_finished tid = %u%s", drd_tid,
                      DRD_(thread_get_joinable)(drd_tid)
                      ? "" : " (which is a detached thread)");
   }
   /* Optionally report stack usage (--show-stack-usage); suppressed for XML
      output. */
   if (s_show_stack_usage && !VG_(clo_xml)) {
      const SizeT stack_size = DRD_(thread_get_stack_size)(drd_tid);
      /* Usage = distance between the stack top and the lowest stack pointer
         value observed for this thread. */
      const SizeT used_stack
         = (DRD_(thread_get_stack_max)(drd_tid)
            - DRD_(thread_get_stack_min_min)(drd_tid));
      VG_(message)(Vg_UserMsg,
                   "thread %u%s finished and used %lu bytes out of %lu"
                   " on its stack. Margin: %lu bytes.\n",
                   drd_tid,
                   DRD_(thread_get_joinable)(drd_tid)
                   ? "" : " (which is a detached thread)",
                   used_stack, stack_size, stack_size - used_stack);

   }
   /* Release the part of the stack that is still marked as in use. */
   drd_stop_using_mem(DRD_(thread_get_stack_min)(drd_tid),
                      DRD_(thread_get_stack_max)(drd_tid)
                      - DRD_(thread_get_stack_min)(drd_tid),
                      True);
   /* Stop recording loads and stores for this thread before finishing it. */
   DRD_(thread_set_record_loads)(drd_tid, False);
   DRD_(thread_set_record_stores)(drd_tid, False);
   DRD_(thread_finished)(drd_tid);
}
724 
725 /*
726  * Called immediately after fork for the child process only. 'tid' is the
727  * only surviving thread in the child process. Cleans up thread state.
728  * See also http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_atfork.html for a detailed discussion of using fork() in combination with mutexes.
729  */
730 static
drd__atfork_child(ThreadId tid)731 void drd__atfork_child(ThreadId tid)
732 {
733    DRD_(drd_thread_atfork_child)(tid);
734 }
735 
736 
737 //
738 // Implementation of the tool interface.
739 //
740 
DRD_(post_clo_init)741 static void DRD_(post_clo_init)(void)
742 {
743 #if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
744    /* fine */
745 #else
746    VG_(printf)("\nWARNING: DRD has not yet been tested on this operating system.\n\n");
747 #  endif
748 
749    if (s_var_info)
750    {
751       VG_(needs_var_info)();
752    }
753 }
754 
drd_start_client_code(const ThreadId tid,const ULong bbs_done)755 static void drd_start_client_code(const ThreadId tid, const ULong bbs_done)
756 {
757    tl_assert(tid == VG_(get_running_tid)());
758    DRD_(thread_set_vg_running_tid)(tid);
759 }
760 
DRD_(fini)761 static void DRD_(fini)(Int exitcode)
762 {
763    // thread_print_all();
764    if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
765       VG_(message)(Vg_UserMsg, "For counts of detected and suppressed errors, "
766                    "rerun with: -v\n");
767    }
768 
769    if ((VG_(clo_stats) || s_print_stats) && !VG_(clo_xml))
770    {
771       ULong pu = DRD_(thread_get_update_conflict_set_count)();
772       ULong pu_seg_cr = DRD_(thread_get_update_conflict_set_new_sg_count)();
773       ULong pu_mtx_cv = DRD_(thread_get_update_conflict_set_sync_count)();
774       ULong pu_join   = DRD_(thread_get_update_conflict_set_join_count)();
775 
776       VG_(message)(Vg_UserMsg,
777                    "   thread: %llu context switches.\n",
778                    DRD_(thread_get_context_switch_count)());
779       VG_(message)(Vg_UserMsg,
780                    "confl set: %llu full updates and %llu partial updates;\n",
781                    DRD_(thread_get_compute_conflict_set_count)(),
782                    pu);
783       VG_(message)(Vg_UserMsg,
784                    "           %llu partial updates during segment creation,\n",
785                    pu_seg_cr);
786       VG_(message)(Vg_UserMsg,
787                    "           %llu because of mutex/sema/cond.var. operations,\n",
788                    pu_mtx_cv);
789       VG_(message)(Vg_UserMsg,
790                    "           %llu because of barrier/rwlock operations and\n",
791 		   pu - pu_seg_cr - pu_mtx_cv - pu_join);
792       VG_(message)(Vg_UserMsg,
793                    "           %llu partial updates because of thread join"
794                    " operations.\n",
795                    pu_join);
796       VG_(message)(Vg_UserMsg,
797                    " segments: created %llu segments, max %llu alive,\n",
798                    DRD_(sg_get_segments_created_count)(),
799                    DRD_(sg_get_max_segments_alive_count)());
800       VG_(message)(Vg_UserMsg,
801                    "           %llu discard points and %llu merges.\n",
802                    DRD_(thread_get_discard_ordered_segments_count)(),
803                    DRD_(sg_get_segment_merge_count)());
804       VG_(message)(Vg_UserMsg,
805                    "segmnt cr: %llu mutex, %llu rwlock, %llu semaphore and"
806                    " %llu barrier.\n",
807                    DRD_(get_mutex_segment_creation_count)(),
808                    DRD_(get_rwlock_segment_creation_count)(),
809                    DRD_(get_semaphore_segment_creation_count)(),
810                    DRD_(get_barrier_segment_creation_count)());
811       VG_(message)(Vg_UserMsg,
812                    "  bitmaps: %llu level one"
813                    " and %llu level two bitmaps were allocated.\n",
814                    DRD_(bm_get_bitmap_creation_count)(),
815                    DRD_(bm_get_bitmap2_creation_count)());
816       VG_(message)(Vg_UserMsg,
817                    "    mutex: %llu non-recursive lock/unlock events.\n",
818                    DRD_(get_mutex_lock_count)());
819       DRD_(print_malloc_stats)();
820    }
821 
822    DRD_(bm_module_cleanup)();
823 }
824 
825 static
drd_pre_clo_init(void)826 void drd_pre_clo_init(void)
827 {
828    // Basic tool stuff.
829    VG_(details_name)            ("drd");
830    VG_(details_version)         (NULL);
831    VG_(details_description)     ("a thread error detector");
832    VG_(details_copyright_author)("Copyright (C) 2006-2017, and GNU GPL'd,"
833                                  " by Bart Van Assche.");
834    VG_(details_bug_reports_to)  (VG_BUGS_TO);
835 
836    VG_(basic_tool_funcs)        (DRD_(post_clo_init),
837                                  DRD_(instrument),
838                                  DRD_(fini));
839 
840    // Command line stuff.
841    VG_(needs_command_line_options)(DRD_(process_cmd_line_option),
842                                    DRD_(print_usage),
843                                    DRD_(print_debug_usage));
844    VG_(needs_xml_output)          ();
845 
846    // Error handling.
847    DRD_(register_error_handlers)();
848 
849    // Core event tracking.
850    VG_(track_pre_mem_read)         (drd_pre_mem_read);
851    VG_(track_pre_mem_read_asciiz)  (drd_pre_mem_read_asciiz);
852    VG_(track_post_mem_write)       (drd_post_mem_write);
853    VG_(track_new_mem_brk)          (drd_start_using_mem_w_tid);
854    VG_(track_new_mem_mmap)         (drd_start_using_mem_w_perms);
855    VG_(track_new_mem_stack)        (drd_start_using_mem_stack);
856    VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal);
857    VG_(track_new_mem_startup)      (drd_start_using_mem_w_perms);
858    VG_(track_die_mem_brk)          (drd_stop_using_nonstack_mem);
859    VG_(track_die_mem_munmap)       (drd_stop_using_nonstack_mem);
860    VG_(track_die_mem_stack)        (drd_stop_using_mem_stack);
861    VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal);
862    VG_(track_pre_deliver_signal)   (drd_pre_deliver_signal);
863    VG_(track_post_deliver_signal)  (drd_post_deliver_signal);
864    VG_(track_start_client_code)    (drd_start_client_code);
865    VG_(track_pre_thread_ll_create) (drd_pre_thread_create);
866    VG_(track_pre_thread_first_insn)(drd_post_thread_create);
867    VG_(track_pre_thread_ll_exit)   (drd_thread_finished);
868    VG_(atfork)                     (NULL/*pre*/, NULL/*parent*/,
869 				    drd__atfork_child/*child*/);
870 
871    // Other stuff.
872    DRD_(register_malloc_wrappers)(drd_start_using_mem_w_ecu,
873                                   drd_stop_using_nonstack_mem);
874 
875    DRD_(bm_module_init)();
876 
877    DRD_(clientreq_init)();
878 
879    DRD_(suppression_init)();
880 
881    DRD_(clientobj_init)();
882 
883    DRD_(thread_init)();
884 
885    {
886       HChar* const smi = VG_(getenv)("DRD_SEGMENT_MERGING_INTERVAL");
887       if (smi)
888          DRD_(thread_set_segment_merge_interval)(VG_(strtoll10)(smi, NULL));
889    }
890 
891    if (VG_(getenv)("DRD_VERIFY_CONFLICT_SET"))
892       DRD_(verify_conflict_set) = True;
893 
894 }
895 
896 
897 VG_DETERMINE_INTERFACE_VERSION(drd_pre_clo_init)
898