1 /*
2 This file is part of drd, a thread error detector.
3
4 Copyright (C) 2006-2017 Bart Van Assche <bvanassche@acm.org>.
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 02111-1307, USA.
20
21 The GNU General Public License is contained in the file COPYING.
22 */
23
24
25 #include "drd_error.h"
26 #include "drd_barrier.h"
27 #include "drd_clientobj.h"
28 #include "drd_cond.h"
29 #include "drd_mutex.h"
30 #include "drd_segment.h"
31 #include "drd_semaphore.h"
32 #include "drd_suppression.h"
33 #include "drd_thread.h"
34 #include "pub_tool_vki.h"
35 #include "pub_tool_basics.h" // Addr, SizeT
36 #include "pub_tool_libcassert.h" // tl_assert()
37 #include "pub_tool_libcbase.h" // VG_(strlen)()
38 #include "pub_tool_libcprint.h" // VG_(printf)()
39 #include "pub_tool_machine.h"
40 #include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)()
41 #include "pub_tool_options.h" // VG_(clo_backtrace_size)
42 #include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
43
44
45
/* Local functions. */

/* Segment list manipulation -- definitions further down in this file. */
static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
/* Conflict set computation and verification. */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid);
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);
53
54
/* Local variables. */

/* Number of context switches; incremented by DRD_(thread_set_running_tid)(). */
static ULong s_context_switch_count;
/* Number of invocations of thread_discard_ordered_segments(). */
static ULong s_discard_ordered_segments_count;
/* Statistics counters about conflict set (re)computation. */
static ULong s_compute_conflict_set_count;
static ULong s_update_conflict_set_count;
static ULong s_update_conflict_set_new_sg_count;
static ULong s_update_conflict_set_sync_count;
static ULong s_update_conflict_set_join_count;
/* Statistics counters about conflict set bitmap creation. */
static ULong s_conflict_set_bitmap_creation_count;
static ULong s_conflict_set_bitmap2_creation_count;
/* Valgrind thread ID of the currently scheduled thread. */
static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
/* DRD thread ID of the currently scheduled thread. */
DrdThreadId DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
/* Per-thread administration; allocated by DRD_(thread_init)(). */
ThreadInfo* DRD_(g_threadinfo);
/* Conflict set of the currently scheduled thread; recomputed on every
   context switch by thread_compute_conflict_set(). */
struct bitmap* DRD_(g_conflict_set);
/* Option flag: verify the conflict set after updates. */
Bool DRD_(verify_conflict_set);
/* Tracing flags; see the setter functions below. */
static Bool s_trace_context_switches = False;
static Bool s_trace_conflict_set = False;
static Bool s_trace_conflict_set_bm = False;
static Bool s_trace_fork_join = False;
/* Segment merging configuration. */
static Bool s_segment_merging = True;
static Bool s_new_segments_since_last_merge;
static int s_segment_merge_interval = 10;
/* Upper bound on the number of threads whose deletion is delayed; see
   DRD_(thread_delayed_delete)(). */
static unsigned s_join_list_vol = 10;
/* Sequence numbers delimiting the delayed-deletion window. */
static unsigned s_deletion_head;
static unsigned s_deletion_tail;
#if defined(VGO_solaris)
/* NOTE(review): thread creation is ignored by default on Solaris only --
   presumably because of runtime-linker/libc-internal thread activity;
   confirm against the DRD manual. */
Bool DRD_(ignore_thread_creation) = True;
#else
Bool DRD_(ignore_thread_creation) = False;
#endif /* VGO_solaris */
86
87
88 /* Function definitions. */
89
90 /** Enables/disables context switch tracing. */
DRD_(thread_trace_context_switches)91 void DRD_(thread_trace_context_switches)(const Bool t)
92 {
93 tl_assert(t == False || t == True);
94 s_trace_context_switches = t;
95 }
96
97 /** Enables/disables conflict set tracing. */
DRD_(thread_trace_conflict_set)98 void DRD_(thread_trace_conflict_set)(const Bool t)
99 {
100 tl_assert(t == False || t == True);
101 s_trace_conflict_set = t;
102 }
103
104 /** Enables/disables conflict set bitmap tracing. */
DRD_(thread_trace_conflict_set_bm)105 void DRD_(thread_trace_conflict_set_bm)(const Bool t)
106 {
107 tl_assert(t == False || t == True);
108 s_trace_conflict_set_bm = t;
109 }
110
/** Report whether fork/join tracing is currently enabled. */
Bool DRD_(thread_get_trace_fork_join)(void)
{
   return s_trace_fork_join;
}
116
117 /** Enables/disables fork/join tracing. */
DRD_(thread_set_trace_fork_join)118 void DRD_(thread_set_trace_fork_join)(const Bool t)
119 {
120 tl_assert(t == False || t == True);
121 s_trace_fork_join = t;
122 }
123
124 /** Enables/disables segment merging. */
DRD_(thread_set_segment_merging)125 void DRD_(thread_set_segment_merging)(const Bool m)
126 {
127 tl_assert(m == False || m == True);
128 s_segment_merging = m;
129 }
130
/** Return the interval at which segment merging is performed. */
int DRD_(thread_get_segment_merge_interval)(void)
{
   return s_segment_merge_interval;
}
136
/** Set the interval at which segment merging is performed. */
void DRD_(thread_set_segment_merge_interval)(const int i)
{
   s_segment_merge_interval = i;
}
142
DRD_(thread_set_join_list_vol)143 void DRD_(thread_set_join_list_vol)(const int jlv)
144 {
145 s_join_list_vol = jlv;
146 }
147
/**
 * Allocate and default-initialize the global thread information array.
 * Must be called once, before any other function in this file is used.
 * Note: VG_(malloc)() aborts on allocation failure, so no NULL check is
 * needed here.
 */
void DRD_(thread_init)(void)
{
   DRD_(g_threadinfo) = VG_(malloc)("drd.main.ti.1",
                                    DRD_N_THREADS * sizeof DRD_(g_threadinfo)[0]);
   for (UInt i = 0; i < DRD_N_THREADS; ++i) {
      /* Assigning a zero-initialized static instance default-initializes
         every member of the newly allocated element. */
      static ThreadInfo initval;
      DRD_(g_threadinfo)[i] = initval;
   }
}
157
158 /**
159 * Convert Valgrind's ThreadId into a DrdThreadId.
160 *
161 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
162 * Valgrind ThreadId does not yet exist.
163 */
DRD_(VgThreadIdToDrdThreadId)164 DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
165 {
166 UInt i;
167
168 if (tid == VG_INVALID_THREADID)
169 return DRD_INVALID_THREADID;
170
171 for (i = 1; i < DRD_N_THREADS; i++)
172 {
173 if (DRD_(g_threadinfo)[i].vg_thread_exists == True
174 && DRD_(g_threadinfo)[i].vg_threadid == tid)
175 {
176 return i;
177 }
178 }
179
180 return DRD_INVALID_THREADID;
181 }
182
183 /** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
DRD_(VgThreadIdToNewDrdThreadId)184 static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
185 {
186 UInt i;
187
188 tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
189
190 for (i = 1; i < DRD_N_THREADS; i++)
191 {
192 if (!DRD_(g_threadinfo)[i].valid)
193 {
194 tl_assert(! DRD_(IsValidDrdThreadId)(i));
195
196 DRD_(g_threadinfo)[i].valid = True;
197 DRD_(g_threadinfo)[i].vg_thread_exists = True;
198 DRD_(g_threadinfo)[i].vg_threadid = tid;
199 DRD_(g_threadinfo)[i].pt_threadid = INVALID_POSIX_THREADID;
200 DRD_(g_threadinfo)[i].stack_min = 0;
201 DRD_(g_threadinfo)[i].stack_min_min = 0;
202 DRD_(g_threadinfo)[i].stack_startup = 0;
203 DRD_(g_threadinfo)[i].stack_max = 0;
204 DRD_(thread_set_name)(i, "");
205 DRD_(g_threadinfo)[i].on_alt_stack = False;
206 DRD_(g_threadinfo)[i].is_recording_loads = True;
207 DRD_(g_threadinfo)[i].is_recording_stores = True;
208 DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
209 DRD_(g_threadinfo)[i].synchr_nesting = 0;
210 DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
211 DRD_(g_threadinfo)[i].creator_thread = DRD_INVALID_THREADID;
212 #if defined (VGO_solaris)
213 DRD_(g_threadinfo)[i].bind_guard_flag = 0;
214 #endif /* VGO_solaris */
215
216 tl_assert(DRD_(g_threadinfo)[i].sg_first == NULL);
217 tl_assert(DRD_(g_threadinfo)[i].sg_last == NULL);
218
219 tl_assert(DRD_(IsValidDrdThreadId)(i));
220
221 return i;
222 }
223 }
224
225 VG_(printf)(
226 "\nSorry, but the maximum number of threads supported by DRD has been exceeded."
227 "Aborting.\n");
228
229 tl_assert(False);
230
231 return DRD_INVALID_THREADID;
232 }
233
234 /** Convert a POSIX thread ID into a DRD thread ID. */
DRD_(PtThreadIdToDrdThreadId)235 DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
236 {
237 UInt i;
238
239 if (tid != INVALID_POSIX_THREADID)
240 {
241 for (i = 1; i < DRD_N_THREADS; i++)
242 {
243 if (DRD_(g_threadinfo)[i].posix_thread_exists
244 && DRD_(g_threadinfo)[i].pt_threadid == tid)
245 {
246 return i;
247 }
248 }
249 }
250 return DRD_INVALID_THREADID;
251 }
252
253 /** Convert a DRD thread ID into a Valgrind thread ID. */
DRD_(DrdThreadIdToVgThreadId)254 ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
255 {
256 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
257 && tid != DRD_INVALID_THREADID);
258
259 return (DRD_(g_threadinfo)[tid].vg_thread_exists
260 ? DRD_(g_threadinfo)[tid].vg_threadid
261 : VG_INVALID_THREADID);
262 }
263
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct.
 * @return True if sane, False if not.
 */
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
   Segment* p;

   /* Forward walk: each node's successor must point back to it, and the
      walk must terminate exactly at sg_last. */
   for (p = ti->sg_first; p; p = p->thr_next) {
      if (p->thr_next && p->thr_next->thr_prev != p)
         return False;
      if (p->thr_next == 0 && p != ti->sg_last)
         return False;
   }
   /* Backward walk: mirror image of the forward check, ending at sg_first. */
   for (p = ti->sg_last; p; p = p->thr_prev) {
      if (p->thr_prev && p->thr_prev->thr_next != p)
         return False;
      if (p->thr_prev == 0 && p != ti->sg_first)
         return False;
   }
   return True;
}
#endif
289
/**
 * Create the first segment for a newly started thread.
 *
 * This function is called from the handler installed via
 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
 * from the context of the creator thread, before the new thread has been
 * created.
 *
 * @param[in] creator    DRD thread ID of the creator thread.
 * @param[in] vg_created Valgrind thread ID of the created thread.
 *
 * @return DRD thread ID of the created thread.
 */
DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
                                    const ThreadId vg_created)
{
   DrdThreadId created;

   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
   created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   /* A brand-new thread must not have any segments associated with it yet. */
   tl_assert(DRD_(g_threadinfo)[created].sg_first == NULL);
   tl_assert(DRD_(g_threadinfo)[created].sg_last == NULL);

   if (creator != DRD_INVALID_THREADID) {
      if (DRD_(ignore_thread_creation)) {
         tl_assert(DRD_(thread_get_synchr_nesting_count)(created) == 0);
         DRD_(thread_enter_synchr)(created);
         /* Counterpart in DRD_(thread_set_pthreadid)(). */
      }
   }
   DRD_(g_threadinfo)[created].creator_thread = creator;

   /* Create an initial segment for the newly created thread. */
   thread_append_segment(created, DRD_(sg_new)(creator, created));

   return created;
}
330
/**
 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
 * after the thread has been created and before any client instructions are run
 * on the newly created thread, e.g. from the handler installed via
 * VG_(track_pre_thread_first_insn)().
 *
 * @param[in] vg_created Valgrind thread ID of the newly created thread.
 *
 * @return DRD thread ID for the new thread.
 */
DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
{
   const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);

   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   /* stack_max is the top of the stack; all other stack watermarks start
      out equal to it and move down as the stack grows. */
   DRD_(g_threadinfo)[created].stack_max
      = VG_(thread_get_stack_max)(vg_created);
   DRD_(g_threadinfo)[created].stack_startup
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_size
      = VG_(thread_get_stack_size)(vg_created);
   tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);

   return created;
}
362
/**
 * Put thread 'tid' on the delayed-deletion list and, once the list holds
 * s_join_list_vol entries or more, delete the oldest entry on that list --
 * presumably so that information about recently finished threads remains
 * available for a while (NOTE(review): rationale inferred; confirm).
 */
static void DRD_(thread_delayed_delete)(const DrdThreadId tid)
{
   UInt j;

   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   /* Tag this thread with the next deletion sequence number. */
   DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++;
#if 0
   VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid);
#endif
   if (s_deletion_head - s_deletion_tail >= s_join_list_vol) {
      /* Find the thread tagged with sequence number s_deletion_tail -- the
         oldest entry on the list -- and delete it for real. */
      for (j = 0; j < DRD_N_THREADS; ++j) {
         if (DRD_(IsValidDrdThreadId)(j)
             && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail)
         {
            s_deletion_tail++;
#if 0
            VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j);
#endif
            DRD_(thread_delete)(j, False);
            break;
         }
      }
   }
}
388
/**
 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
 * after thread drd_joiner joined thread drd_joinee.
 */
void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
{
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));

   /* Start new segments and fold the joinee's vector clock into the
      joiner's, so accesses after the join happen-after all accesses of the
      joinee. */
   DRD_(thread_new_segment)(drd_joiner);
   DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
   DRD_(thread_new_segment)(drd_joinee);

   if (s_trace_fork_join)
   {
      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
      const unsigned msg_size = 256;
      HChar* msg;

      msg = VG_(malloc)("drd.main.dptj.1", msg_size);

      VG_(snprintf)(msg, msg_size,
                    "drd_post_thread_join joiner = %u, joinee = %u",
                    drd_joiner, drd_joinee);
      /* Append the joiner's new vector clock if the joiner still has a
         Valgrind thread ID. */
      if (joiner)
      {
         HChar* vc;

         vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                       ", new vc: %s", vc);
         VG_(free)(vc);
      }
      DRD_(trace_msg)("%pS", msg);
      VG_(free)(msg);
   }

   if (! DRD_(get_check_stack_accesses)())
   {
      /* Stop suppressing accesses to the joinee's stack range. */
      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
                               - DRD_(thread_get_stack_size)(drd_joinee),
                               DRD_(thread_get_stack_max)(drd_joinee));
   }
   DRD_(clientobj_delete_thread)(drd_joinee);
   DRD_(thread_delayed_delete)(drd_joinee);
}
435
436 /**
437 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
438 * and accesses this data structure from multiple threads without locking.
439 * Any conflicting accesses in the range stack_startup..stack_max will be
440 * ignored.
441 */
DRD_(thread_set_stack_startup)442 void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
443 const Addr stack_startup)
444 {
445 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
446 && tid != DRD_INVALID_THREADID);
447 tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
448 tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
449 DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
450 }
451
452 /** Return the stack pointer for the specified thread. */
DRD_(thread_get_stack_min)453 Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
454 {
455 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
456 && tid != DRD_INVALID_THREADID);
457 return DRD_(g_threadinfo)[tid].stack_min;
458 }
459
460 /**
461 * Return the lowest value that was ever assigned to the stack pointer
462 * for the specified thread.
463 */
DRD_(thread_get_stack_min_min)464 Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
465 {
466 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
467 && tid != DRD_INVALID_THREADID);
468 return DRD_(g_threadinfo)[tid].stack_min_min;
469 }
470
471 /** Return the top address for the stack of the specified thread. */
DRD_(thread_get_stack_max)472 Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
473 {
474 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
475 && tid != DRD_INVALID_THREADID);
476 return DRD_(g_threadinfo)[tid].stack_max;
477 }
478
479 /** Return the maximum stack size for the specified thread. */
DRD_(thread_get_stack_size)480 SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
481 {
482 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
483 && tid != DRD_INVALID_THREADID);
484 return DRD_(g_threadinfo)[tid].stack_size;
485 }
486
DRD_(thread_get_on_alt_stack)487 Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
488 {
489 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
490 && tid != DRD_INVALID_THREADID);
491 return DRD_(g_threadinfo)[tid].on_alt_stack;
492 }
493
DRD_(thread_set_on_alt_stack)494 void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
495 const Bool on_alt_stack)
496 {
497 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
498 && tid != DRD_INVALID_THREADID);
499 tl_assert(on_alt_stack == !!on_alt_stack);
500 DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
501 }
502
DRD_(thread_get_threads_on_alt_stack)503 Int DRD_(thread_get_threads_on_alt_stack)(void)
504 {
505 int n = 0;
506
507 for (UInt i = 1; i < DRD_N_THREADS; i++)
508 n += DRD_(g_threadinfo)[i].on_alt_stack;
509 return n;
510 }
511
/**
 * Clean up thread-specific data structures.
 *
 * @param[in] tid      DRD thread ID of the thread to delete.
 * @param[in] detached True if the thread being deleted was detached.
 */
void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
{
   Segment* sg;
   Segment* sg_prev;

   tl_assert(DRD_(IsValidDrdThreadId)(tid));

   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
   /* Release every segment of this thread, walking the list backward. */
   for (sg = DRD_(g_threadinfo)[tid].sg_last; sg; sg = sg_prev) {
      sg_prev = sg->thr_prev;
      sg->thr_next = NULL;
      sg->thr_prev = NULL;
      DRD_(sg_put)(sg);
   }
   DRD_(g_threadinfo)[tid].valid = False;
   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   /* The detached flag is only expected to be set for detached threads. */
   if (detached)
      DRD_(g_threadinfo)[tid].detached_posix_thread = False;
   else
      tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
   DRD_(g_threadinfo)[tid].sg_first = NULL;
   DRD_(g_threadinfo)[tid].sg_last = NULL;

   tl_assert(!DRD_(IsValidDrdThreadId)(tid));
}
541
/**
 * Called after a thread performed its last memory access and before
 * thread_delete() is called. Note: thread_delete() is only called for
 * joinable threads, not for detached threads.
 *
 * @param[in] tid DRD thread ID of the thread that finished.
 */
void DRD_(thread_finished)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   DRD_(g_threadinfo)[tid].vg_thread_exists = False;

   if (DRD_(g_threadinfo)[tid].detached_posix_thread)
   {
      /*
       * Once a detached thread has finished, its stack is deallocated and
       * should no longer be taken into account when computing the conflict set.
       */
      DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;

      /*
       * For a detached thread, calling pthread_exit() invalidates the
       * POSIX thread ID associated with the detached thread. For joinable
       * POSIX threads however, the POSIX thread ID remains live after the
       * pthread_exit() call until pthread_join() is called.
       */
      DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   }
}
571
/** Called just after fork() in the child process. */
void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
{
   unsigned i;

   /* Only the forking thread survives in the child process; discard the
      DRD administration of every other thread. */
   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (i == tid)
         continue;
      if (DRD_(IsValidDrdThreadId(i)))
         DRD_(thread_delete)(i, True);
      tl_assert(!DRD_(IsValidDrdThreadId(i)));
   }

   /* Reset the conflict set of the surviving thread. */
   DRD_(bm_cleanup)(DRD_(g_conflict_set));
   DRD_(bm_init)(DRD_(g_conflict_set));
}
589
590 /** Called just before pthread_cancel(). */
DRD_(thread_pre_cancel)591 void DRD_(thread_pre_cancel)(const DrdThreadId tid)
592 {
593 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
594 && tid != DRD_INVALID_THREADID);
595 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
596
597 if (DRD_(thread_get_trace_fork_join)())
598 DRD_(trace_msg)("[%u] drd_thread_pre_cancel %u",
599 DRD_(g_drd_running_tid), tid);
600 }
601
/**
 * Store the POSIX thread ID for the specified thread.
 *
 * @note This function can be called multiple times for the same thread -- see
 * also the comment block preceding the pthread_create() wrapper in
 * drd_pthread_intercepts.c.
 */
void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Repeated calls may only re-register the same POSIX thread ID. */
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
             || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
   tl_assert(ptid != INVALID_POSIX_THREADID);
   /* Nothing left to do if the POSIX thread ID was already registered. */
   if (DRD_(g_threadinfo)[tid].posix_thread_exists) {
      tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == ptid);
      return;
   }
   DRD_(g_threadinfo)[tid].posix_thread_exists = True;
   DRD_(g_threadinfo)[tid].pt_threadid = ptid;

   if (DRD_(g_threadinfo)[tid].creator_thread != DRD_INVALID_THREADID) {
      if (DRD_(ignore_thread_creation)) {
         /* Counterpart of the DRD_(thread_enter_synchr)() call in
            DRD_(thread_pre_create)(). */
         DRD_(thread_leave_synchr)(tid);
         tl_assert(DRD_(thread_get_synchr_nesting_count)(tid) == 0);
      }
   }
}
630
631 /** Returns true for joinable threads and false for detached threads. */
DRD_(thread_get_joinable)632 Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
633 {
634 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
635 && tid != DRD_INVALID_THREADID);
636 return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
637 }
638
/** Store the thread mode: joinable or detached. */
#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
/* There is a cse related issue in gcc for MIPS. Optimization level
   has to be lowered, so cse related optimizations are not
   included.*/
__attribute__((optimize("O1")))
#endif
void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Only normalized booleans (0 or 1) are accepted. */
   tl_assert((!! joinable) == joinable);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);

   DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
}
655
/** Tells DRD that the calling thread is about to enter pthread_create(). */
void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;

   if (DRD_(ignore_thread_creation)) {
      /* Counterpart of the DRD_(thread_leave_synchr)() call in
         DRD_(thread_left_pthread_create)(). */
      tl_assert(DRD_(thread_get_synchr_nesting_count)(tid) == 0);
      DRD_(thread_enter_synchr)(tid);
   }
}
671
/** Tells DRD that the calling thread has left pthread_create(). */
void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;

   if (DRD_(ignore_thread_creation)) {
      /* Counterpart of the DRD_(thread_enter_synchr)() call in
         DRD_(thread_entering_pthread_create)(). */
      DRD_(thread_leave_synchr)(tid);
      tl_assert(DRD_(thread_get_synchr_nesting_count)(tid) == 0);
   }
}
687
#if defined(VGO_solaris)
/** Handles the bind_guard() intercept. */
void DRD_(thread_entering_rtld_bind_guard)(const DrdThreadId tid, int flags)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   /* Enter the synchronization region at most once per bind flag; record
      the flag so that the matching bind_clear() leaves it again. */
   if ((bindflag & DRD_(g_threadinfo)[tid].bind_guard_flag) == 0) {
      DRD_(g_threadinfo)[tid].bind_guard_flag |= bindflag;
      DRD_(thread_enter_synchr)(tid);
   }
}
701
/**
 * Handles the bind_clear() intercept.
 * Call to bind_clear(0) is typically used to determine value of bind_flags.
 */
void DRD_(thread_leaving_rtld_bind_clear)(const DrdThreadId tid, int flags)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   /* Only leave the synchronization region if the matching bind_guard()
      actually entered it for this flag. */
   if ((DRD_(g_threadinfo)[tid].bind_guard_flag & bindflag) != 0) {
      DRD_(g_threadinfo)[tid].bind_guard_flag &= ~bindflag;
      DRD_(thread_leave_synchr)(tid);
   }
}
#endif /* VGO_solaris */
718
719 /** Obtain the thread number and the user-assigned thread name. */
DRD_(thread_get_name)720 const HChar* DRD_(thread_get_name)(const DrdThreadId tid)
721 {
722 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
723 && tid != DRD_INVALID_THREADID);
724
725 return DRD_(g_threadinfo)[tid].name;
726 }
727
/** Set the name of the specified thread. */
void DRD_(thread_set_name)(const DrdThreadId tid, const HChar* const name)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   /* An empty or null name yields "Thread <tid>"; otherwise the
      user-assigned name is appended in parentheses. */
   if (name == NULL || name[0] == 0)
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %u",
                    tid);
   else
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %u (%s)",
                    tid, name);
   /* Guarantee NUL termination even if the name was truncated. */
   DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
}
746
/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);

   /* Only perform the (expensive) update when a context switch happened. */
   if (vg_tid != s_vg_running_tid)
   {
      DRD_(thread_set_running_tid)(vg_tid,
                                   DRD_(VgThreadIdToDrdThreadId)(vg_tid));
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}
764
/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
                                  const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      if (s_trace_context_switches
          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
      {
         VG_(message)(Vg_DebugMsg,
                      "Context switch from thread %u to thread %u;"
                      " segments: %llu\n",
                      DRD_(g_drd_running_tid), drd_tid,
                      DRD_(sg_get_segments_alive_count)());
      }
      s_vg_running_tid = vg_tid;
      DRD_(g_drd_running_tid) = drd_tid;
      /* The conflict set is relative to the running thread, so it must be
         recomputed on every context switch. */
      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
      s_context_switch_count++;
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}
795
796 /**
797 * Increase the synchronization nesting counter. Must be called before the
798 * client calls a synchronization function.
799 */
DRD_(thread_enter_synchr)800 int DRD_(thread_enter_synchr)(const DrdThreadId tid)
801 {
802 tl_assert(DRD_(IsValidDrdThreadId)(tid));
803 return DRD_(g_threadinfo)[tid].synchr_nesting++;
804 }
805
806 /**
807 * Decrease the synchronization nesting counter. Must be called after the
808 * client left a synchronization function.
809 */
DRD_(thread_leave_synchr)810 int DRD_(thread_leave_synchr)(const DrdThreadId tid)
811 {
812 tl_assert(DRD_(IsValidDrdThreadId)(tid));
813 tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
814 return --DRD_(g_threadinfo)[tid].synchr_nesting;
815 }
816
817 /** Returns the synchronization nesting counter. */
DRD_(thread_get_synchr_nesting_count)818 int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
819 {
820 tl_assert(DRD_(IsValidDrdThreadId)(tid));
821 return DRD_(g_threadinfo)[tid].synchr_nesting;
822 }
823
/** Append a new segment at the end of the segment list. */
static
void thread_append_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   // add at tail
   sg->thr_prev = DRD_(g_threadinfo)[tid].sg_last;
   sg->thr_next = NULL;
   if (DRD_(g_threadinfo)[tid].sg_last)
      DRD_(g_threadinfo)[tid].sg_last->thr_next = sg;
   DRD_(g_threadinfo)[tid].sg_last = sg;
   /* An empty list also needs its head pointer initialized. */
   if (DRD_(g_threadinfo)[tid].sg_first == NULL)
      DRD_(g_threadinfo)[tid].sg_first = sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}
848
/**
 * Remove a segment from the segment list of thread threadid, and free the
 * associated memory.
 */
static
void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   /* Unlink sg from the doubly linked list, updating the head and tail
      pointers if sg is the first and/or last element. */
   if (sg->thr_prev)
      sg->thr_prev->thr_next = sg->thr_next;
   if (sg->thr_next)
      sg->thr_next->thr_prev = sg->thr_prev;
   if (sg == DRD_(g_threadinfo)[tid].sg_first)
      DRD_(g_threadinfo)[tid].sg_first = sg->thr_next;
   if (sg == DRD_(g_threadinfo)[tid].sg_last)
      DRD_(g_threadinfo)[tid].sg_last = sg->thr_prev;
   /* Drop this thread's reference to the segment. */
   DRD_(sg_put)(sg);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}
877
878 /**
879 * Returns a pointer to the vector clock of the most recent segment associated
880 * with thread 'tid'.
881 */
DRD_(thread_get_vc)882 VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
883 {
884 Segment* latest_sg;
885
886 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
887 && tid != DRD_INVALID_THREADID);
888 latest_sg = DRD_(g_threadinfo)[tid].sg_last;
889 tl_assert(latest_sg);
890 return &latest_sg->vc;
891 }
892
/**
 * Return the latest segment of thread 'tid' and increment its reference count.
 *
 * @param[in,out] sg  Pointer to a segment pointer; the previously referenced
 *                    segment (if any) is released and replaced by the latest
 *                    segment of thread 'tid'.
 */
void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
{
   Segment* latest_sg;

   tl_assert(sg);
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   latest_sg = DRD_(g_threadinfo)[tid].sg_last;
   tl_assert(latest_sg);

   /* Release the old reference before taking one on the latest segment.
      NOTE(review): this assumes DRD_(sg_put)(NULL) is a no-op -- confirm
      in drd_segment.c. */
   DRD_(sg_put)(*sg);
   *sg = DRD_(sg_get)(latest_sg);
}
909
/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].sg_last;
      if (latest_sg) {
         /* The first clock found initializes the result; subsequent clocks
            are folded in via the element-wise minimum. */
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_min)(vc, &latest_sg->vc);
         first = False;
      }
   }
}
935
/**
 * Compute the maximum of all latest vector clocks of all threads.
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].sg_last;
      if (latest_sg) {
         /* The first clock found initializes the result; subsequent clocks
            are folded in via the element-wise maximum (vc_combine). */
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_combine)(vc, &latest_sg->vc);
         first = False;
      }
   }
}
960
/**
 * Discard all segments that have a defined order against the latest vector
 * clock of all threads -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
   unsigned i;
   VectorClock thread_vc_min;

   s_discard_ordered_segments_count++;

   /* Any segment whose vector clock is <= the minimum of all threads'
      latest vector clocks is ordered before every future access. */
   DRD_(vc_init)(&thread_vc_min, 0, 0);
   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
   if (DRD_(sg_get_trace)())
   {
      HChar *vc_min, *vc_max;
      VectorClock thread_vc_max;

      DRD_(vc_init)(&thread_vc_max, 0, 0);
      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
      vc_min = DRD_(vc_aprint)(&thread_vc_min);
      vc_max = DRD_(vc_aprint)(&thread_vc_max);
      VG_(message)(Vg_DebugMsg,
                   "Discarding ordered segments -- min vc is %s, max vc is %s\n",
                   vc_min, vc_max);
      VG_(free)(vc_min);
      VG_(free)(vc_max);
      DRD_(vc_cleanup)(&thread_vc_max);
   }

   for (i = 0; i < DRD_N_THREADS; i++) {
      Segment* sg;
      Segment* sg_next;

      /* Walk thread i's segment list from the oldest segment on and discard
         leading segments that are ordered before the minimum vector clock.
         The requirement that sg->thr_next is non-NULL (assigned inside the
         loop condition) guarantees that a thread's last segment is never
         discarded. */
      for (sg = DRD_(g_threadinfo)[i].sg_first;
           sg && (sg_next = sg->thr_next)
              && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
           sg = sg_next)
      {
         thread_discard_segment(i, sg);
      }
   }
   DRD_(vc_cleanup)(&thread_vc_min);
}
1006
/**
 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
 * by Mark Christiaens e.a. The property equiv(sg1, sg2) holds if and only if
 * all segments in the set CS are ordered consistently against both sg1 and
 * sg2. The set CS is defined as the set of segments that can immediately
 * precede future segments via inter-thread synchronization operations. In
 * DRD the set CS consists of the latest segment of each thread combined with
 * all segments for which the reference count is strictly greater than one.
 * The code below is an optimized version of the following (note: the
 * pseudo-code uses the older field names 'first'/'last'/'next'):
 *
 * for (i = 0; i < DRD_N_THREADS; i++)
 * {
 *    Segment* sg;
 *
 *    for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
 *    {
 *       if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
 *       {
 *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
 *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
 *              || DRD_(vc_lte)(&sg->vc, &sg1->vc)
 *              != DRD_(vc_lte)(&sg->vc, &sg2->vc))
 *          {
 *             return False;
 *          }
 *       }
 *    }
 * }
 */
static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
                                               Segment* const sg1,
                                               Segment* const sg2)
{
   unsigned i;

   /* sg2 must immediately follow sg1 in the same thread's segment list,
      and neither may be the last segment of that thread. */
   tl_assert(sg1->thr_next);
   tl_assert(sg2->thr_next);
   tl_assert(sg1->thr_next == sg2);
   tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

      /* Forward scan: within one thread the vector clocks grow from
         sg_first to sg_last, so once a CS segment sg with sg2->vc <= sg->vc
         is reached, all later CS segments are ordered after both sg1 and
         sg2. If before that point some CS segment is ordered after sg1 but
         not after sg2, the ordering is inconsistent. */
      for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
         if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
            if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
               break;
            if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
               return False;
         }
      }
      /* Backward scan (symmetric): once a CS segment ordered before sg1 is
         reached, all earlier CS segments are ordered before both sg1 and
         sg2. A CS segment ordered before sg2 but not before sg1 means the
         ordering is inconsistent. */
      for (sg = DRD_(g_threadinfo)[i].sg_last; sg; sg = sg->thr_prev) {
         if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
            if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
               break;
            if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
               return False;
         }
      }
   }
   return True;
}
1070
/**
 * Merge all segments that may be merged without triggering false positives
 * or discarding real data races. For the theoretical background of segment
 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
 * and Koen De Bosschere. Bounding the number of segment histories during
 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
 * pp 1221-1238, September 2002. This paper contains a proof that merging
 * consecutive segments for which the property equiv(s1,s2) holds can be
 * merged without reducing the accuracy of datarace detection. Furthermore
 * it is also proven that the total number of all segments will never grow
 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
 * every time a new segment is created. The property equiv(s1, s2) is defined
 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
 * clocks of segments s and s1 are ordered in the same way as those of segments
 * s and s2. The set CS is defined as the set of existing segments s that have
 * the potential to conflict with not yet created segments, either because the
 * segment s is the latest segment of a thread or because it can become the
 * immediate predecessor of a new segment due to a synchronization operation.
 */
static void thread_merge_segments(void)
{
   unsigned i;

   /* Reset the counter that triggers this merge pass. */
   s_new_segments_since_last_merge = 0;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif

      for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
         /* Only segments that no client object still references
            (refcount == 1) and that are not the last segment of the thread
            are merge candidates. */
         if (DRD_(sg_get_refcnt)(sg) == 1 && sg->thr_next) {
            Segment* const sg_next = sg->thr_next;
            if (DRD_(sg_get_refcnt)(sg_next) == 1
                && sg_next->thr_next
                && thread_consistent_segment_ordering(i, sg, sg_next))
            {
               /* Merge sg and sg_next into sg. thread_discard_segment() is
                  expected to unlink sg_next so that sg->thr_next afterwards
                  points past it (definition not in view -- verify). */
               DRD_(sg_merge)(sg, sg_next);
               thread_discard_segment(i, sg_next);
            }
         }
      }

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
   }
}
1123
1124 /**
1125 * Create a new segment for the specified thread, and discard any segments
1126 * that cannot cause races anymore.
1127 */
DRD_(thread_new_segment)1128 void DRD_(thread_new_segment)(const DrdThreadId tid)
1129 {
1130 Segment* last_sg;
1131 Segment* new_sg;
1132
1133 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1134 && tid != DRD_INVALID_THREADID);
1135 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1136
1137 last_sg = DRD_(g_threadinfo)[tid].sg_last;
1138 new_sg = DRD_(sg_new)(tid, tid);
1139 thread_append_segment(tid, new_sg);
1140 if (tid == DRD_(g_drd_running_tid) && last_sg)
1141 {
1142 DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
1143 s_update_conflict_set_new_sg_count++;
1144 }
1145
1146 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1147
1148 if (s_segment_merging
1149 && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1150 {
1151 thread_discard_ordered_segments();
1152 thread_merge_segments();
1153 }
1154 }
1155
1156 /** Call this function after thread 'joiner' joined thread 'joinee'. */
DRD_(thread_combine_vc_join)1157 void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
1158 {
1159 tl_assert(joiner != joinee);
1160 tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
1161 && joiner != DRD_INVALID_THREADID);
1162 tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
1163 && joinee != DRD_INVALID_THREADID);
1164 tl_assert(DRD_(g_threadinfo)[joiner].sg_first);
1165 tl_assert(DRD_(g_threadinfo)[joiner].sg_last);
1166 tl_assert(DRD_(g_threadinfo)[joinee].sg_first);
1167 tl_assert(DRD_(g_threadinfo)[joinee].sg_last);
1168
1169 if (DRD_(sg_get_trace)())
1170 {
1171 HChar *str1, *str2;
1172 str1 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
1173 str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joinee));
1174 VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
1175 str1, str2);
1176 VG_(free)(str1);
1177 VG_(free)(str2);
1178 }
1179 if (joiner == DRD_(g_drd_running_tid)) {
1180 VectorClock old_vc;
1181
1182 DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(joiner));
1183 DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
1184 DRD_(thread_get_vc)(joinee));
1185 DRD_(thread_update_conflict_set)(joiner, &old_vc);
1186 s_update_conflict_set_join_count++;
1187 DRD_(vc_cleanup)(&old_vc);
1188 } else {
1189 DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
1190 DRD_(thread_get_vc)(joinee));
1191 }
1192
1193 thread_discard_ordered_segments();
1194
1195 if (DRD_(sg_get_trace)()) {
1196 HChar* str;
1197
1198 str = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
1199 VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
1200 VG_(free)(str);
1201 }
1202 }
1203
1204 /**
1205 * Update the vector clock of the last segment of thread tid with the
1206 * the vector clock of segment sg.
1207 */
thread_combine_vc_sync(DrdThreadId tid,const Segment * sg)1208 static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
1209 {
1210 const VectorClock* const vc = &sg->vc;
1211
1212 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1213 && tid != DRD_INVALID_THREADID);
1214 tl_assert(DRD_(g_threadinfo)[tid].sg_first);
1215 tl_assert(DRD_(g_threadinfo)[tid].sg_last);
1216 tl_assert(sg);
1217 tl_assert(vc);
1218
1219 if (tid != sg->tid) {
1220 VectorClock old_vc;
1221
1222 DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
1223 DRD_(vc_combine)(DRD_(thread_get_vc)(tid), vc);
1224 if (DRD_(sg_get_trace)()) {
1225 HChar *str1, *str2;
1226 str1 = DRD_(vc_aprint)(&old_vc);
1227 str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
1228 VG_(message)(Vg_DebugMsg, "thread %u: vc %s -> %s\n", tid, str1, str2);
1229 VG_(free)(str1);
1230 VG_(free)(str2);
1231 }
1232
1233 thread_discard_ordered_segments();
1234
1235 DRD_(thread_update_conflict_set)(tid, &old_vc);
1236 s_update_conflict_set_sync_count++;
1237
1238 DRD_(vc_cleanup)(&old_vc);
1239 } else {
1240 tl_assert(DRD_(vc_lte)(vc, DRD_(thread_get_vc)(tid)));
1241 }
1242 }
1243
1244 /**
1245 * Create a new segment for thread tid and update the vector clock of the last
1246 * segment of this thread with the vector clock of segment sg. Call this
1247 * function after thread tid had to wait because of thread synchronization
1248 * until the memory accesses in the segment sg finished.
1249 */
DRD_(thread_new_segment_and_combine_vc)1250 void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
1251 {
1252 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1253 && tid != DRD_INVALID_THREADID);
1254 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1255 tl_assert(sg);
1256
1257 thread_append_segment(tid, DRD_(sg_new)(tid, tid));
1258
1259 thread_combine_vc_sync(tid, sg);
1260
1261 if (s_segment_merging
1262 && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1263 {
1264 thread_discard_ordered_segments();
1265 thread_merge_segments();
1266 }
1267 }
1268
1269 /**
1270 * Call this function whenever a thread is no longer using the memory
1271 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
1272 * increase.
1273 */
DRD_(thread_stop_using_mem)1274 void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
1275 {
1276 Segment* p;
1277
1278 for (p = DRD_(g_sg_list); p; p = p->g_next)
1279 DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
1280
1281 DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
1282 }
1283
1284 /** Specify whether memory loads should be recorded. */
DRD_(thread_set_record_loads)1285 void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
1286 {
1287 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1288 && tid != DRD_INVALID_THREADID);
1289 tl_assert(enabled == !! enabled);
1290
1291 DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
1292 }
1293
1294 /** Specify whether memory stores should be recorded. */
DRD_(thread_set_record_stores)1295 void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
1296 {
1297 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1298 && tid != DRD_INVALID_THREADID);
1299 tl_assert(enabled == !! enabled);
1300
1301 DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
1302 }
1303
1304 /**
1305 * Print the segment information for all threads.
1306 *
1307 * This function is only used for debugging purposes.
1308 */
DRD_(thread_print_all)1309 void DRD_(thread_print_all)(void)
1310 {
1311 UInt i;
1312 Segment* p;
1313
1314 for (i = 0; i < DRD_N_THREADS; i++)
1315 {
1316 p = DRD_(g_threadinfo)[i].sg_first;
1317 if (p) {
1318 VG_(printf)("**************\n"
1319 "* thread %3u (%d/%u/%u/%u/0x%lx/%d) *\n"
1320 "**************\n",
1321 i,
1322 DRD_(g_threadinfo)[i].valid,
1323 DRD_(g_threadinfo)[i].vg_thread_exists,
1324 DRD_(g_threadinfo)[i].vg_threadid,
1325 DRD_(g_threadinfo)[i].posix_thread_exists,
1326 DRD_(g_threadinfo)[i].pt_threadid,
1327 DRD_(g_threadinfo)[i].detached_posix_thread);
1328 for ( ; p; p = p->thr_next)
1329 DRD_(sg_print)(p);
1330 }
1331 }
1332 }
1333
1334 /** Show a call stack involved in a data race. */
show_call_stack(const DrdThreadId tid,ExeContext * const callstack)1335 static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
1336 {
1337 const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
1338
1339 if (vg_tid != VG_INVALID_THREADID) {
1340 if (callstack)
1341 VG_(pp_ExeContext)(callstack);
1342 else
1343 VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
1344 } else {
1345 if (!VG_(clo_xml))
1346 VG_(message)(Vg_UserMsg,
1347 " (thread finished, call stack no longer available)\n");
1348 }
1349 }
1350
/**
 * Print information about all segments of other threads that conflict with
 * segment 'p' of thread 'tid' for an access of type 'access_type' to the
 * address range [ addr, addr + size [.
 */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < DRD_N_THREADS; i++) {
      if (i != tid) {
         Segment* q;

         for (q = DRD_(g_threadinfo)[i].sg_last; q; q = q->thr_prev) {
            /*
             * Since q iterates over the segments of thread i in order of
             * decreasing vector clocks, if q->vc <= p->vc, then
             * q->next->vc <= p->vc will also hold. Hence, break out of the
             * loop once this condition is met.
             */
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            /* Only segments unordered against p can race with it. */
            if (!DRD_(vc_lte)(&p->vc, &q->vc)) {
               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
                                              access_type)) {
                  Segment* q_next;

                  tl_assert(q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  <other_segment_start>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment start (thread %u)\n", i);
                  show_call_stack(i, q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_start>\n"
                                     "  <other_segment_end>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment end (thread %u)\n", i);
                  /* The stack trace of the start of the next segment marks
                     the end of q; passing 0 makes show_call_stack() fall
                     back to the thread's live stack trace. */
                  q_next = q->thr_next;
                  show_call_stack(i, q_next ? q_next->stacktrace : 0);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_end>\n");
               }
            }
         }
      }
   }
}
1406
1407 /** Print information about all segments involved in a data race. */
DRD_(thread_report_conflicting_segments)1408 void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
1409 const Addr addr,
1410 const SizeT size,
1411 const BmAccessTypeT access_type)
1412 {
1413 Segment* p;
1414
1415 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1416 && tid != DRD_INVALID_THREADID);
1417
1418 for (p = DRD_(g_threadinfo)[tid].sg_first; p; p = p->thr_next) {
1419 if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
1420 thread_report_conflicting_segments_segment(tid, addr, size,
1421 access_type, p);
1422 }
1423 }
1424
1425 /**
1426 * Verify whether the conflict set for thread tid is up to date. Only perform
1427 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
1428 */
thread_conflict_set_up_to_date(const DrdThreadId tid)1429 static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
1430 {
1431 Bool result;
1432 struct bitmap* computed_conflict_set = 0;
1433
1434 if (!DRD_(verify_conflict_set))
1435 return True;
1436
1437 thread_compute_conflict_set(&computed_conflict_set, tid);
1438 result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
1439 if (! result)
1440 {
1441 VG_(printf)("actual conflict set:\n");
1442 DRD_(bm_print)(DRD_(g_conflict_set));
1443 VG_(printf)("\n");
1444 VG_(printf)("computed conflict set:\n");
1445 DRD_(bm_print)(computed_conflict_set);
1446 VG_(printf)("\n");
1447 }
1448 DRD_(bm_delete)(computed_conflict_set);
1449 return result;
1450 }
1451
/**
 * Compute the conflict set: a bitmap that represents the union of all memory
 * accesses of all segments that are unordered to the current segment of the
 * thread tid.
 *
 * @param conflict_set in/out: if *conflict_set is non-NULL it is reused
 *                     (cleaned up and re-initialized); otherwise a new
 *                     bitmap is allocated.
 * @param tid          thread whose conflict set is computed; must equal
 *                     DRD_(g_drd_running_tid).
 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   /* The bm creation counters are cumulative globals: subtract them here
      and add them back below so only the bitmaps created inside this
      function are attributed to conflict-set computation. */
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   if (*conflict_set) {
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   } else {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %u with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   /* Only the latest segment of thread tid is considered. The extra scope
      below appears to be a remnant of a former loop over all of tid's
      segments. */
   p = DRD_(g_threadinfo)[tid].sg_last;
   {
      unsigned j;

      if (s_trace_conflict_set) {
         HChar* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%u] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      for (j = 0; j < DRD_N_THREADS; j++) {
         if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
            Segment* q;

            /* Merge the access bitmap of every segment of thread j that is
               unordered with respect to p (neither q <= p nor p <= q). */
            for (q = DRD_(g_threadinfo)[j].sg_last; q; q = q->thr_prev) {
               if (!DRD_(vc_lte)(&q->vc, &p->vc)
                   && !DRD_(vc_lte)(&p->vc, &q->vc)) {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%u] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               } else {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%u] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm) {
      VG_(message)(Vg_DebugMsg, "[%u] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%u] end of new conflict set.\n", tid);
   }
}
1546
/**
 * Update the conflict set after the vector clock of thread tid has been
 * updated from old_vc to its current value, either because a new segment has
 * been created or because of a synchronization operation.
 *
 * Instead of recomputing the conflict set from scratch, only the segments
 * whose conflict-set membership may have changed -- those not ordered before
 * old_vc -- are reconsidered.
 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %u with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = DRD_(thread_get_vc)(tid);
   /* A thread's vector clock only advances. */
   tl_assert(DRD_(vc_lte)(old_vc, new_vc));

   /* Phase 1: mark the address ranges of all segments whose membership in
      the conflict set changed between old_vc and new_vc. */
   DRD_(bm_unmark)(DRD_(g_conflict_set));

   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

      /* Segments of thread j not ordered before new_vc: such a segment was
         in the old conflict set iff it was not ordered after old_vc, and is
         in the new one iff it is not ordered after new_vc. */
      for (q = DRD_(g_threadinfo)[j].sg_last;
           q && !DRD_(vc_lte)(&q->vc, new_vc);
           q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%u] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }

      /* Continue with the segments ordered before new_vc but not before
         old_vc. These satisfy q->vc <= new_vc, so they can only leave the
         conflict set, never enter it. */
      for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(&q->vc, new_vc)
            && !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%u] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }
   }

   /* Phase 2: clear the marked ranges, then re-merge the bitmaps of all
      segments that are unordered against tid's latest segment into the
      marked ranges. */
   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

   p = DRD_(g_threadinfo)[tid].sg_last;
   for (j = 0; j < DRD_N_THREADS; j++) {
      if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
         Segment* q;
         for (q = DRD_(g_threadinfo)[j].sg_last;
              q && !DRD_(vc_lte)(&q->vc, &p->vc);
              q = q->thr_prev) {
            if (!DRD_(vc_lte)(&p->vc, &q->vc))
               DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
      }
   }

   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%u] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%u] end of updated conflict set.\n", tid);
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}
1661
/** Report the number of context switches performed (value of the
 *  s_context_switch_count counter).
 */
ULong DRD_(thread_get_context_switch_count)(void)
{
   return s_context_switch_count;
}
1667
/** Report how many times thread_discard_ordered_segments() has run. */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   return s_discard_ordered_segments_count;
}
1673
1674 /** Return how many times the conflict set has been updated entirely. */
DRD_(thread_get_compute_conflict_set_count)1675 ULong DRD_(thread_get_compute_conflict_set_count)()
1676 {
1677 return s_compute_conflict_set_count;
1678 }
1679
/** Return how many times the conflict set has been updated partially
 *  (see DRD_(thread_update_conflict_set)()).
 */
ULong DRD_(thread_get_update_conflict_set_count)(void)
{
   return s_update_conflict_set_count;
}
1685
/**
 * Return how many times the conflict set has been updated partially
 * because a new segment has been created.
 */
ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
{
   return s_update_conflict_set_new_sg_count;
}
1694
/**
 * Return how many times the conflict set has been updated partially
 * because of combining vector clocks due to synchronization operations
 * other than reader/writer lock or barrier operations.
 */
ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
{
   return s_update_conflict_set_sync_count;
}
1704
/**
 * Return how many times the conflict set has been updated partially
 * because of thread joins.
 */
ULong DRD_(thread_get_update_conflict_set_join_count)(void)
{
   return s_update_conflict_set_join_count;
}
1713
/**
 * Return the number of first-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
{
   return s_conflict_set_bitmap_creation_count;
}
1722
/**
 * Return the number of second-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
{
   return s_conflict_set_bitmap2_creation_count;
}
1731