/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2012 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_barrier.h"
#include "drd_clientobj.h"
#include "drd_clientreq.h"
#include "drd_cond.h"
#include "drd_error.h"
#include "drd_hb.h"
#include "drd_load_store.h"
#include "drd_malloc_wrappers.h"
#include "drd_mutex.h"
#include "drd_rwlock.h"
#include "drd_segment.h"
#include "drd_semaphore.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "libvex_guest_offsets.h"
#include "pub_drd_bitmap.h"
#include "pub_tool_vki.h"         // Must be included before pub_tool_libcproc
#include "pub_tool_basics.h"
#include "pub_tool_debuginfo.h"   // VG_(describe_IP)()
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strcmp)
#include "pub_tool_libcprint.h"   // VG_(printf)
#include "pub_tool_libcproc.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // command line options
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()
#include "pub_tool_tooliface.h"
#include "pub_tool_aspacemgr.h"   // VG_(am_is_valid_for_client)


/* Local variables. */

static Bool s_print_stats;      /* --drd-stats: print statistics at exit.    */
static Bool s_var_info;         /* --var-info: request variable debug info.  */
static Bool s_show_stack_usage; /* --show-stack-usage: report at thread exit.*/
static Bool s_trace_alloc;      /* --trace-alloc: trace (de)allocations.     */


/**
 * Implement the needs_command_line_options callback for drd.
 */
static Bool DRD_(process_cmd_line_option)(Char* arg)
{
   int check_stack_accesses   = -1;
   int join_list_vol          = -1;
   int exclusive_threshold_ms = -1;
   int first_race_only        = -1;
   int report_signal_unlocked = -1;
   int segment_merging        = -1;
   int segment_merge_interval = -1;
   int shared_threshold_ms    = -1;
   int show_confl_seg         = -1;
   int trace_barrier          = -1;
   int trace_clientobj        = -1;
   int trace_cond             = -1;
   int trace_csw              = -1;
   int trace_fork_join        = -1;
   int trace_hb               = -1;
   int trace_conflict_set     = -1;
   int trace_conflict_set_bm  = -1;
   int trace_mutex            = -1;
   int trace_rwlock           = -1;
   int trace_segment          = -1;
   int trace_semaphore        = -1;
   int trace_suppression      = -1;
   Char* trace_address        = 0;
   Char* ptrace_address       = 0;

   if VG_BOOL_CLO(arg, "--check-stack-var",     check_stack_accesses) {}
   else if VG_INT_CLO (arg, "--join-list-vol",  join_list_vol) {}
   else if VG_BOOL_CLO(arg, "--drd-stats",      s_print_stats) {}
   else if VG_BOOL_CLO(arg, "--first-race-only", first_race_only) {}
   else if VG_BOOL_CLO(arg, "--free-is-write",  DRD_(g_free_is_write)) {}
   else if VG_BOOL_CLO(arg, "--report-signal-unlocked", report_signal_unlocked)
   {}
   else if VG_BOOL_CLO(arg, "--segment-merging", segment_merging) {}
   else if VG_INT_CLO (arg, "--segment-merging-interval", segment_merge_interval)
   {}
   else if VG_BOOL_CLO(arg, "--show-confl-seg", show_confl_seg) {}
   else if VG_BOOL_CLO(arg, "--show-stack-usage", s_show_stack_usage) {}
   else if VG_BOOL_CLO(arg, "--trace-alloc",    s_trace_alloc) {}
   else if VG_BOOL_CLO(arg, "--trace-barrier",  trace_barrier) {}
   else if VG_BOOL_CLO(arg, "--trace-clientobj", trace_clientobj) {}
   else if VG_BOOL_CLO(arg, "--trace-cond",     trace_cond) {}
   else if VG_BOOL_CLO(arg, "--trace-conflict-set", trace_conflict_set) {}
   else if VG_BOOL_CLO(arg, "--trace-conflict-set-bm", trace_conflict_set_bm) {}
   else if VG_BOOL_CLO(arg, "--trace-csw",      trace_csw) {}
   else if VG_BOOL_CLO(arg, "--trace-fork-join", trace_fork_join) {}
   else if VG_BOOL_CLO(arg, "--trace-hb",       trace_hb) {}
   else if VG_BOOL_CLO(arg, "--trace-mutex",    trace_mutex) {}
   else if VG_BOOL_CLO(arg, "--trace-rwlock",   trace_rwlock) {}
   else if VG_BOOL_CLO(arg, "--trace-segment",  trace_segment) {}
   else if VG_BOOL_CLO(arg, "--trace-semaphore", trace_semaphore) {}
   else if VG_BOOL_CLO(arg, "--trace-suppr",    trace_suppression) {}
   else if VG_BOOL_CLO(arg, "--var-info",       s_var_info) {}
   else if VG_INT_CLO (arg, "--exclusive-threshold", exclusive_threshold_ms) {}
   else if VG_STR_CLO (arg, "--ptrace-addr",    ptrace_address) {}
   else if VG_INT_CLO (arg, "--shared-threshold", shared_threshold_ms) {}
   else if VG_STR_CLO (arg, "--trace-addr",     trace_address) {}
   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   if (check_stack_accesses != -1)
      DRD_(set_check_stack_accesses)(check_stack_accesses);
   if (exclusive_threshold_ms != -1)
   {
      DRD_(mutex_set_lock_threshold)(exclusive_threshold_ms);
      DRD_(rwlock_set_exclusive_threshold)(exclusive_threshold_ms);
   }
   if (first_race_only != -1)
   {
      DRD_(set_first_race_only)(first_race_only);
   }
   if (join_list_vol != -1)
      DRD_(thread_set_join_list_vol)(join_list_vol);
   if (report_signal_unlocked != -1)
   {
      DRD_(cond_set_report_signal_unlocked)(report_signal_unlocked);
   }
   if (shared_threshold_ms != -1)
   {
      DRD_(rwlock_set_shared_threshold)(shared_threshold_ms);
   }
   if (segment_merging != -1)
      DRD_(thread_set_segment_merging)(segment_merging);
   if (segment_merge_interval != -1)
      DRD_(thread_set_segment_merge_interval)(segment_merge_interval);
   if (show_confl_seg != -1)
      DRD_(set_show_conflicting_segments)(show_confl_seg);
   if (trace_address) {
      const Addr addr = VG_(strtoll16)(trace_address, 0);
      DRD_(start_tracing_address_range)(addr, addr + 1, False);
   }
   if (ptrace_address) {
      const Addr addr = VG_(strtoll16)(ptrace_address, 0);
      DRD_(start_tracing_address_range)(addr, addr + 1, True);
   }
   if (trace_barrier != -1)
      DRD_(barrier_set_trace)(trace_barrier);
   if (trace_clientobj != -1)
      DRD_(clientobj_set_trace)(trace_clientobj);
   if (trace_cond != -1)
      DRD_(cond_set_trace)(trace_cond);
   if (trace_csw != -1)
      DRD_(thread_trace_context_switches)(trace_csw);
   if (trace_fork_join != -1)
      DRD_(thread_set_trace_fork_join)(trace_fork_join);
   if (trace_hb != -1)
      DRD_(hb_set_trace)(trace_hb);
   if (trace_conflict_set != -1)
      DRD_(thread_trace_conflict_set)(trace_conflict_set);
   if (trace_conflict_set_bm != -1)
      DRD_(thread_trace_conflict_set_bm)(trace_conflict_set_bm);
   if (trace_mutex != -1)
      DRD_(mutex_set_trace)(trace_mutex);
   if (trace_rwlock != -1)
      DRD_(rwlock_set_trace)(trace_rwlock);
   if (trace_segment != -1)
      DRD_(sg_set_trace)(trace_segment);
   if (trace_semaphore != -1)
      DRD_(semaphore_set_trace)(trace_semaphore);
   if (trace_suppression != -1)
      DRD_(suppression_set_trace)(trace_suppression);

   return True;
}
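
/*
 * Example invocation (the program name below is illustrative only):
 *
 *   valgrind --tool=drd --check-stack-var=yes --trace-mutex=yes ./myapp
 *
 * Each yes|no option is parsed by one of the VG_BOOL_CLO() branches above;
 * anything DRD does not recognize is passed on to
 * VG_(replacement_malloc_process_cmd_line_option)().
 */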

static void DRD_(print_usage)(void)
{
   VG_(printf)(
"    --check-stack-var=yes|no  Whether or not to report data races on\n"
"                              stack variables [no].\n"
"    --exclusive-threshold=<n> Print an error message if any mutex or\n"
"                              writer lock is held longer than the specified\n"
"                              time (in milliseconds) [off].\n"
"    --first-race-only=yes|no  Only report the first data race that occurs on\n"
"                              a memory location instead of all races [no].\n"
"    --free-is-write=yes|no    Whether to report races between freeing memory\n"
"                              and subsequent accesses of that memory [no].\n"
"    --join-list-vol=<n>       Number of threads to delay cleanup for [10].\n"
"    --report-signal-unlocked=yes|no Whether to report calls to\n"
"                              pthread_cond_signal() where the mutex associated\n"
"                              with the signal via pthread_cond_wait() is not\n"
"                              locked at the time the signal is sent [yes].\n"
"    --segment-merging=yes|no  Controls segment merging [yes].\n"
"        Segment merging is an algorithm to limit memory usage of the\n"
"        data race detection algorithm. Disabling segment merging may\n"
"        improve the accuracy of the so-called 'other segments' displayed\n"
"        in race reports but can also trigger an out of memory error.\n"
"    --segment-merging-interval=<n> Perform segment merging every time n new\n"
"        segments have been created. Default: %d.\n"
"    --shared-threshold=<n>    Print an error message if a reader lock\n"
"                              is held longer than the specified time (in\n"
"                              milliseconds) [off].\n"
"    --show-confl-seg=yes|no   Show conflicting segments in race reports [yes].\n"
"    --show-stack-usage=yes|no Print stack usage at thread exit time [no].\n"
"\n"
"  drd options for monitoring process behavior:\n"
"    --ptrace-addr=<address>   Trace all load and store activity for the\n"
"                              specified address and keep doing that even after\n"
"                              the memory at that address has been freed and\n"
"                              reallocated [off].\n"
"    --trace-addr=<address>    Trace all load and store activity for the\n"
"                              specified address [off].\n"
"    --trace-alloc=yes|no      Trace all memory allocations and deallocations\n"
"                              [no].\n"
"    --trace-barrier=yes|no    Trace all barrier activity [no].\n"
"    --trace-cond=yes|no       Trace all condition variable activity [no].\n"
"    --trace-fork-join=yes|no  Trace all thread fork/join activity [no].\n"
"    --trace-hb=yes|no         Trace ANNOTATE_HAPPENS_BEFORE() etc. [no].\n"
"    --trace-mutex=yes|no      Trace all mutex activity [no].\n"
"    --trace-rwlock=yes|no     Trace all reader-writer lock activity [no].\n"
"    --trace-semaphore=yes|no  Trace all semaphore activity [no].\n",
   DRD_(thread_get_segment_merge_interval)()
   );
}

static void DRD_(print_debug_usage)(void)
{
   VG_(printf)(
"    --drd-stats=yes|no        Print statistics about DRD activity [no].\n"
"    --trace-clientobj=yes|no  Trace all client object activity [no].\n"
"    --trace-csw=yes|no        Trace all scheduler context switches [no].\n"
"    --trace-conflict-set=yes|no Trace all conflict set updates [no].\n"
"    --trace-conflict-set-bm=yes|no Trace all conflict set bitmap\n"
"                              updates [no]. Note: enabling this option\n"
"                              will generate a lot of output!\n"
"    --trace-segment=yes|no    Trace segment actions [no].\n"
"    --trace-suppr=yes|no      Trace all address suppression actions [no].\n"
   );
}


//
// Implements the thread-related core callbacks.
//

static void drd_pre_mem_read(const CorePart part,
                             const ThreadId tid,
                             Char* const s,
                             const Addr a,
                             const SizeT size)
{
   if (size > 0)
   {
      DRD_(trace_load)(a, size);
   }
}

static void drd_pre_mem_read_asciiz(const CorePart part,
                                    const ThreadId tid,
                                    Char* const s,
                                    const Addr a)
{
   const char* p = (void*)a;
   SizeT size = 0;

   // Don't segfault if the string starts in an obviously stupid
   // place. Actually we should check the whole string, not just
   // the start address, but that's too much trouble. At least
   // checking the first byte is better than nothing. See #255009.
   if (!VG_(am_is_valid_for_client)(a, 1, VKI_PROT_READ))
      return;

   /* Note: the expression '*p' reads client memory and may crash if the
      client provided an invalid pointer! */
   while (*p)
   {
      p++;
      size++;
   }
   if (size > 0)
   {
      DRD_(trace_load)(a, size);
   }
}

static void drd_post_mem_write(const CorePart part,
                               const ThreadId tid,
                               const Addr a,
                               const SizeT size)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   if (size > 0)
   {
      DRD_(trace_store)(a, size);
   }
}

static __inline__
void drd_start_using_mem(const Addr a1, const SizeT len,
                         const Bool is_stack_mem)
{
   const Addr a2 = a1 + len;

   tl_assert(a1 <= a2);

   if (!is_stack_mem && s_trace_alloc)
      DRD_(trace_msg)("Started using memory range 0x%lx + %ld%s",
                      a1, len, DRD_(running_thread_inside_pthread_create)()
                      ? " (inside pthread_create())" : "");

#if 0
   if (!is_stack_mem && DRD_(g_free_is_write))
      DRD_(thread_stop_using_mem)(a1, a2);
#else
   /*
    * Sometimes it happens that a client starts using a memory range that has
    * been accessed before but for which drd_stop_using_mem() has not been
    * called for the entire range. It is not yet clear whether this is an
    * out-of-range access by the client, an issue in the Valgrind core or an
    * issue in DRD. Prevent this issue from triggering false positive reports
    * by always clearing accesses for newly allocated memory ranges. See also
    * http://bugs.kde.org/show_bug.cgi?id=297147.
    */
   DRD_(thread_stop_using_mem)(a1, a2);
#endif

   if (UNLIKELY(DRD_(any_address_is_traced)()))
   {
      DRD_(trace_mem_access)(a1, len, eStart, 0, 0);
   }

   if (UNLIKELY(DRD_(running_thread_inside_pthread_create)()))
   {
      DRD_(start_suppression)(a1, a2, "pthread_create()");
   }
}

static void drd_start_using_mem_w_ecu(const Addr a1,
                                      const SizeT len,
                                      UInt ec_uniq)
{
   drd_start_using_mem(a1, len, False);
}

static void drd_start_using_mem_w_tid(const Addr a1,
                                      const SizeT len,
                                      ThreadId tid)
{
   drd_start_using_mem(a1, len, False);
}

static __inline__
void drd_stop_using_mem(const Addr a1, const SizeT len,
                        const Bool is_stack_mem)
{
   const Addr a2 = a1 + len;

   tl_assert(a1 <= a2);

   if (UNLIKELY(DRD_(any_address_is_traced)()))
      DRD_(trace_mem_access)(a1, len, eEnd, 0, 0);

   if (!is_stack_mem && s_trace_alloc)
      DRD_(trace_msg)("Stopped using memory range 0x%lx + %ld",
                      a1, len);

   if (!is_stack_mem || DRD_(get_check_stack_accesses)())
   {
      if (is_stack_mem || !DRD_(g_free_is_write))
         DRD_(thread_stop_using_mem)(a1, a2);
      else if (DRD_(g_free_is_write))
         DRD_(trace_store)(a1, len);
      DRD_(clientobj_stop_using_mem)(a1, a2);
      DRD_(suppression_stop_using_mem)(a1, a2);
   }
}

static __inline__
void drd_stop_using_nonstack_mem(const Addr a1, const SizeT len)
{
   drd_stop_using_mem(a1, len, False);
}

/**
 * Discard all information DRD has about memory accesses and client objects
 * in the specified address range.
 */
void DRD_(clean_memory)(const Addr a1, const SizeT len)
{
   const Bool is_stack_memory = DRD_(thread_address_on_any_stack)(a1);
   drd_stop_using_mem(a1, len, is_stack_memory);
   drd_start_using_mem(a1, len, is_stack_memory);
}
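
/*
 * A minimal client-side sketch of how this function can be reached, assuming
 * the ANNOTATE_NEW_MEMORY() macro from <valgrind/drd.h>, which issues the
 * corresponding client request (buffer and size are hypothetical):
 *
 *   char* buf = my_pool_alloc(pool, 64);   // hypothetical custom allocator
 *   ANNOTATE_NEW_MEMORY(buf, 64);          // discard stale access history
 */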

static const Bool trace_sectsuppr = False;

/**
 * Suppress data race reports on all addresses contained in .plt and
 * .got.plt sections inside the address range [ a, a + len [. The data in
 * these sections is modified by _dl_relocate_object() every time a function
 * in a shared library is called for the first time. Since the first call
 * to a function in a shared library can happen from a multithreaded context,
 * such calls can cause conflicting accesses. See also Ulrich Drepper's
 * paper "How to Write Shared Libraries" for more information about relocation
 * (http://people.redhat.com/drepper/dsohowto.pdf).
 */
static void DRD_(suppress_relocation_conflicts)(const Addr a, const SizeT len)
{
   const DebugInfo* di;

   if (trace_sectsuppr)
      VG_(dmsg)("Evaluating range @ 0x%lx size %ld\n", a, len);

   for (di = VG_(next_DebugInfo)(0); di; di = VG_(next_DebugInfo)(di)) {
      Addr avma;
      SizeT size;

      if (trace_sectsuppr)
         VG_(dmsg)("Examining %s / %s\n", VG_(DebugInfo_get_filename)(di),
                   VG_(DebugInfo_get_soname)(di));

      avma = VG_(DebugInfo_get_plt_avma)(di);
      size = VG_(DebugInfo_get_plt_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .plt @ 0x%lx size %ld\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, 0, avma) == Vg_SectPLT);
         DRD_(start_suppression)(avma, avma + size, ".plt");
      }

      avma = VG_(DebugInfo_get_gotplt_avma)(di);
      size = VG_(DebugInfo_get_gotplt_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .got.plt @ 0x%lx size %ld\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, 0, avma) == Vg_SectGOTPLT);
         DRD_(start_suppression)(avma, avma + size, ".gotplt");
      }
   }
}

static
void drd_start_using_mem_w_perms(const Addr a, const SizeT len,
                                 const Bool rr, const Bool ww, const Bool xx,
                                 ULong di_handle)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());

   drd_start_using_mem(a, len, False);

   DRD_(suppress_relocation_conflicts)(a, len);
}

/**
 * Called by the core when the stack of a thread grows, to indicate that
 * the addresses in range [ a, a + len [ may now be used by the client.
 * Assumption: stacks grow downward.
 */
static __inline__
void drd_start_using_mem_stack2(const DrdThreadId tid, const Addr a,
                                const SizeT len)
{
   DRD_(thread_set_stack_min)(tid, a - VG_STACK_REDZONE_SZB);
   drd_start_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
                       True);
}

static __inline__
void drd_start_using_mem_stack(const Addr a, const SizeT len)
{
   drd_start_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
}

/**
 * Called by the core when the stack of a thread shrinks, to indicate that
 * the addresses [ a, a + len [ are no longer accessible for the client.
 * Assumption: stacks grow downward.
 */
static __inline__
void drd_stop_using_mem_stack2(const DrdThreadId tid, const Addr a,
                               const SizeT len)
{
   DRD_(thread_set_stack_min)(tid, a + len - VG_STACK_REDZONE_SZB);
   drd_stop_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
                      True);
}

static __inline__
void drd_stop_using_mem_stack(const Addr a, const SizeT len)
{
   drd_stop_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
}

static
Bool on_alt_stack(const Addr a)
{
   ThreadId vg_tid;
   Addr alt_min;
   SizeT alt_size;

   vg_tid = VG_(get_running_tid)();
   alt_min = VG_(thread_get_altstack_min)(vg_tid);
   alt_size = VG_(thread_get_altstack_size)(vg_tid);
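
   /* This relies on unsigned wrap-around: if a < alt_min, the subtraction
      a - alt_min wraps to a huge SizeT value, so the single comparison also
      rejects addresses below the alternate stack. */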
   return (SizeT)(a - alt_min) < alt_size;
}

static
void drd_start_using_mem_alt_stack(const Addr a, const SizeT len)
{
   if (!on_alt_stack(a))
      drd_start_using_mem_stack(a, len);
}

static
void drd_stop_using_mem_alt_stack(const Addr a, const SizeT len)
{
   if (!on_alt_stack(a))
      drd_stop_using_mem_stack(a, len);
}

/**
 * Callback function invoked by the Valgrind core before a signal is delivered.
 */
static
void drd_pre_deliver_signal(const ThreadId vg_tid, const Int sigNo,
                            const Bool alt_stack)
{
   DrdThreadId drd_tid;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   DRD_(thread_set_on_alt_stack)(drd_tid, alt_stack);
   if (alt_stack)
   {
      /*
       * As soon as a signal handler has been invoked on the alternate stack,
       * switch to stack memory handling functions that can handle the
       * alternate stack.
       */
      VG_(track_new_mem_stack)(drd_start_using_mem_alt_stack);
      VG_(track_die_mem_stack)(drd_stop_using_mem_alt_stack);
   }
}

/**
 * Callback function invoked by the Valgrind core after a signal is delivered,
 * at least if the signal handler did not longjmp().
 */
static
void drd_post_deliver_signal(const ThreadId vg_tid, const Int sigNo)
{
   DrdThreadId drd_tid;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   DRD_(thread_set_on_alt_stack)(drd_tid, False);
   if (DRD_(thread_get_threads_on_alt_stack)() == 0)
   {
      VG_(track_new_mem_stack)(drd_start_using_mem_stack);
      VG_(track_die_mem_stack)(drd_stop_using_mem_stack);
   }
}

/**
 * Callback function invoked by the Valgrind core before a stack area is
 * used by a signal handler.
 *
 * @param[in] a   Start of address range.
 * @param[in] len Address range length.
 * @param[in] tid Valgrind thread ID for whom the signal frame is being
 *                constructed.
 */
static void drd_start_using_mem_stack_signal(const Addr a, const SizeT len,
                                             ThreadId tid)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   drd_start_using_mem(a, len, True);
}

static void drd_stop_using_mem_stack_signal(Addr a, SizeT len)
{
   drd_stop_using_mem(a, len, True);
}

static
void drd_pre_thread_create(const ThreadId creator, const ThreadId created)
{
   const DrdThreadId drd_creator = DRD_(VgThreadIdToDrdThreadId)(creator);
   tl_assert(created != VG_INVALID_THREADID);
   DRD_(thread_pre_create)(drd_creator, created);
   if (DRD_(IsValidDrdThreadId)(drd_creator))
   {
      DRD_(thread_new_segment)(drd_creator);
   }
   if (DRD_(thread_get_trace_fork_join)())
   {
      DRD_(trace_msg)("drd_pre_thread_create creator = %d, created = %d",
                      drd_creator, created);
   }
}

/**
 * Called by Valgrind's core before any loads or stores are performed on
 * the context of thread "created".
 */
static
void drd_post_thread_create(const ThreadId vg_created)
{
   DrdThreadId drd_created;
   Addr stack_max;

   tl_assert(vg_created != VG_INVALID_THREADID);

   drd_created = DRD_(thread_post_create)(vg_created);

   /* Set up red zone before the code in glibc's clone.S is run. */
   stack_max = DRD_(thread_get_stack_max)(drd_created);
   drd_start_using_mem_stack2(drd_created, stack_max, 0);

   if (DRD_(thread_get_trace_fork_join)())
   {
      DRD_(trace_msg)("drd_post_thread_create created = %d", drd_created);
   }
   if (! DRD_(get_check_stack_accesses)())
   {
      DRD_(start_suppression)(DRD_(thread_get_stack_max)(drd_created)
                              - DRD_(thread_get_stack_size)(drd_created),
                              DRD_(thread_get_stack_max)(drd_created),
                              "stack");
   }
}

/* Called after a thread has performed its last memory access. */
static void drd_thread_finished(ThreadId vg_tid)
{
   DrdThreadId drd_tid;

   /*
    * Ignore if invoked because thread creation failed. See e.g.
    * coregrind/m_syswrap/syswrap-amd64-linux.c
    */
   if (VG_(get_running_tid)() != vg_tid)
      return;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   tl_assert(drd_tid != DRD_INVALID_THREADID);
   if (DRD_(thread_get_trace_fork_join)())
   {
      DRD_(trace_msg)("drd_thread_finished tid = %d%s", drd_tid,
                      DRD_(thread_get_joinable)(drd_tid)
                      ? "" : " (which is a detached thread)");
   }
   if (s_show_stack_usage && !VG_(clo_xml)) {
      const SizeT stack_size = DRD_(thread_get_stack_size)(drd_tid);
      const SizeT used_stack
         = (DRD_(thread_get_stack_max)(drd_tid)
            - DRD_(thread_get_stack_min_min)(drd_tid));
      VG_(message)(Vg_UserMsg,
                   "thread %d%s finished and used %ld bytes out of %ld"
                   " on its stack. Margin: %ld bytes.\n",
                   drd_tid,
                   DRD_(thread_get_joinable)(drd_tid)
                   ? "" : " (which is a detached thread)",
                   used_stack, stack_size, stack_size - used_stack);
   }
   drd_stop_using_mem(DRD_(thread_get_stack_min)(drd_tid),
                      DRD_(thread_get_stack_max)(drd_tid)
                      - DRD_(thread_get_stack_min)(drd_tid),
                      True);
   DRD_(thread_set_record_loads)(drd_tid, False);
   DRD_(thread_set_record_stores)(drd_tid, False);
   DRD_(thread_finished)(drd_tid);
}

/*
 * Called immediately after fork for the child process only. 'tid' is the
 * only surviving thread in the child process. Cleans up thread state.
 * See also
 * http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_atfork.html
 * for a detailed discussion of using fork() in combination with mutexes.
 */
static
void drd__atfork_child(ThreadId tid)
{
   DRD_(drd_thread_atfork_child)(tid);
}


//
// Implementation of the tool interface.
//

static void DRD_(post_clo_init)(void)
{
#if defined(VGO_linux) || defined(VGO_darwin)
   /* fine */
#else
   VG_(printf)("\nWARNING: DRD has not yet been tested on this operating system.\n\n");
#endif

   if (s_var_info)
   {
      VG_(needs_var_info)();
   }
}

static void drd_start_client_code(const ThreadId tid, const ULong bbs_done)
{
   tl_assert(tid == VG_(get_running_tid)());
   DRD_(thread_set_vg_running_tid)(tid);
}

static void DRD_(fini)(Int exitcode)
{
   // thread_print_all();
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if ((VG_(clo_stats) || s_print_stats) && !VG_(clo_xml))
   {
      ULong pu = DRD_(thread_get_update_conflict_set_count)();
      ULong pu_seg_cr = DRD_(thread_get_update_conflict_set_new_sg_count)();
      ULong pu_mtx_cv = DRD_(thread_get_update_conflict_set_sync_count)();
      ULong pu_join = DRD_(thread_get_update_conflict_set_join_count)();

      VG_(message)(Vg_UserMsg,
                   "   thread: %lld context switches.\n",
                   DRD_(thread_get_context_switch_count)());
      VG_(message)(Vg_UserMsg,
                   "confl set: %lld full updates and %lld partial updates;\n",
                   DRD_(thread_get_compute_conflict_set_count)(),
                   pu);
      VG_(message)(Vg_UserMsg,
                   "           %lld partial updates during segment creation,\n",
                   pu_seg_cr);
      VG_(message)(Vg_UserMsg,
                   "           %lld because of mutex/sema/cond.var. operations,\n",
                   pu_mtx_cv);
      VG_(message)(Vg_UserMsg,
                   "           %lld because of barrier/rwlock operations and\n",
                   pu - pu_seg_cr - pu_mtx_cv - pu_join);
      VG_(message)(Vg_UserMsg,
                   "           %lld partial updates because of thread join"
                   " operations.\n",
                   pu_join);
      VG_(message)(Vg_UserMsg,
                   " segments: created %lld segments, max %lld alive,\n",
                   DRD_(sg_get_segments_created_count)(),
                   DRD_(sg_get_max_segments_alive_count)());
      VG_(message)(Vg_UserMsg,
                   "           %lld discard points and %lld merges.\n",
                   DRD_(thread_get_discard_ordered_segments_count)(),
                   DRD_(sg_get_segment_merge_count)());
      VG_(message)(Vg_UserMsg,
                   "segmnt cr: %lld mutex, %lld rwlock, %lld semaphore and"
                   " %lld barrier.\n",
                   DRD_(get_mutex_segment_creation_count)(),
                   DRD_(get_rwlock_segment_creation_count)(),
                   DRD_(get_semaphore_segment_creation_count)(),
                   DRD_(get_barrier_segment_creation_count)());
      VG_(message)(Vg_UserMsg,
                   "  bitmaps: %lld level one"
                   " and %lld level two bitmaps were allocated.\n",
                   DRD_(bm_get_bitmap_creation_count)(),
                   DRD_(bm_get_bitmap2_creation_count)());
      VG_(message)(Vg_UserMsg,
                   "    mutex: %lld non-recursive lock/unlock events.\n",
                   DRD_(get_mutex_lock_count)());
      DRD_(print_malloc_stats)();
   }

   DRD_(bm_module_cleanup)();
}

static
void drd_pre_clo_init(void)
{
   // Basic tool stuff.
   VG_(details_name)            ("drd");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)("Copyright (C) 2006-2012, and GNU GPL'd,"
                                 " by Bart Van Assche.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);

   VG_(basic_tool_funcs)        (DRD_(post_clo_init),
                                 DRD_(instrument),
                                 DRD_(fini));

   // Command line stuff.
   VG_(needs_command_line_options)(DRD_(process_cmd_line_option),
                                   DRD_(print_usage),
                                   DRD_(print_debug_usage));
   VG_(needs_xml_output)          ();

   // Error handling.
   DRD_(register_error_handlers)();

   // Core event tracking.
   VG_(track_pre_mem_read)         (drd_pre_mem_read);
   VG_(track_pre_mem_read_asciiz)  (drd_pre_mem_read_asciiz);
   VG_(track_post_mem_write)       (drd_post_mem_write);
   VG_(track_new_mem_brk)          (drd_start_using_mem_w_tid);
   VG_(track_new_mem_mmap)         (drd_start_using_mem_w_perms);
   VG_(track_new_mem_stack)        (drd_start_using_mem_stack);
   VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal);
   VG_(track_new_mem_startup)      (drd_start_using_mem_w_perms);
   VG_(track_die_mem_brk)          (drd_stop_using_nonstack_mem);
   VG_(track_die_mem_munmap)       (drd_stop_using_nonstack_mem);
   VG_(track_die_mem_stack)        (drd_stop_using_mem_stack);
   VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal);
   VG_(track_pre_deliver_signal)   (drd_pre_deliver_signal);
   VG_(track_post_deliver_signal)  (drd_post_deliver_signal);
   VG_(track_start_client_code)    (drd_start_client_code);
   VG_(track_pre_thread_ll_create) (drd_pre_thread_create);
   VG_(track_pre_thread_first_insn)(drd_post_thread_create);
   VG_(track_pre_thread_ll_exit)   (drd_thread_finished);
   VG_(atfork)                     (NULL/*pre*/, NULL/*parent*/,
                                    drd__atfork_child/*child*/);

   // Other stuff.
   DRD_(register_malloc_wrappers)(drd_start_using_mem_w_ecu,
                                  drd_stop_using_nonstack_mem);

   DRD_(bm_module_init)();

   DRD_(clientreq_init)();

   DRD_(suppression_init)();

   DRD_(clientobj_init)();

   DRD_(thread_init)();

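   /*
    * Allow the segment merge interval to be overridden via the environment.
    * An illustrative example (the value 4096 is hypothetical):
    *
    *   DRD_SEGMENT_MERGING_INTERVAL=4096 valgrind --tool=drd ./myapp
    *
    * Note that --segment-merging-interval is processed later, during command
    * line option parsing, and hence takes precedence over this variable.
    */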
   {
      Char* const smi = VG_(getenv)("DRD_SEGMENT_MERGING_INTERVAL");
      if (smi)
         DRD_(thread_set_segment_merge_interval)(VG_(strtoll10)(smi, NULL));
   }
}


VG_DETERMINE_INTERFACE_VERSION(drd_pre_clo_init)