/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_error.h"
#include "drd_barrier.h"
#include "drd_clientobj.h"
#include "drd_cond.h"
#include "drd_mutex.h"
#include "drd_segment.h"
#include "drd_semaphore.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "pub_tool_vki.h"
#include "pub_tool_basics.h"      // Addr, SizeT
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strlen)()
#include "pub_tool_libcprint.h"   // VG_(printf)()
#include "pub_tool_libcproc.h"    // VG_(getenv)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()



/* Local functions. */

static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid);
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);

/* Local variables. */

static ULong s_context_switch_count;
static ULong s_discard_ordered_segments_count;
static ULong s_compute_conflict_set_count;
static ULong s_update_conflict_set_count;
static ULong s_update_conflict_set_new_sg_count;
static ULong s_update_conflict_set_sync_count;
static ULong s_update_conflict_set_join_count;
static ULong s_conflict_set_bitmap_creation_count;
static ULong s_conflict_set_bitmap2_creation_count;
static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
DrdThreadId DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
ThreadInfo DRD_(g_threadinfo)[DRD_N_THREADS];
struct bitmap* DRD_(g_conflict_set);
static Bool s_trace_context_switches = False;
static Bool s_trace_conflict_set = False;
static Bool s_trace_conflict_set_bm = False;
static Bool s_trace_fork_join = False;
static Bool s_segment_merging = True;
/* Counter, not a flag: incremented once per new segment and compared against
   s_segment_merge_interval, hence the integer type. */
static unsigned s_new_segments_since_last_merge;
static int s_segment_merge_interval = 10;
static unsigned s_join_list_vol = 10;
static unsigned s_deletion_head;
static unsigned s_deletion_tail;


/* Function definitions. */

/** Enables/disables context switch tracing. */
void DRD_(thread_trace_context_switches)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_context_switches = t;
}

/** Enables/disables conflict set tracing. */
void DRD_(thread_trace_conflict_set)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_conflict_set = t;
}

/** Enables/disables conflict set bitmap tracing. */
void DRD_(thread_trace_conflict_set_bm)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_conflict_set_bm = t;
}

/** Report whether fork/join tracing is enabled. */
Bool DRD_(thread_get_trace_fork_join)(void)
{
   return s_trace_fork_join;
}

/** Enables/disables fork/join tracing. */
void DRD_(thread_set_trace_fork_join)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_fork_join = t;
}

/** Enables/disables segment merging. */
void DRD_(thread_set_segment_merging)(const Bool m)
{
   tl_assert(m == False || m == True);
   s_segment_merging = m;
}

/** Get the segment merging interval. */
int DRD_(thread_get_segment_merge_interval)(void)
{
   return s_segment_merge_interval;
}

/** Set the segment merging interval. */
void DRD_(thread_set_segment_merge_interval)(const int i)
{
   s_segment_merge_interval = i;
}

/**
 * Set how many finished threads may accumulate on the delayed-deletion list
 * before the oldest entry is actually deleted.
 */
void DRD_(thread_set_join_list_vol)(const int jlv)
{
   s_join_list_vol = jlv;
}

/** Initialize the thread list. Nothing to do at this time. */
void DRD_(thread_init)(void)
{
}

/**
 * Convert Valgrind's ThreadId into a DrdThreadId.
 *
 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
 *         Valgrind ThreadId does not yet exist.
 */
DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
{
   int i;

   if (tid == VG_INVALID_THREADID)
      return DRD_INVALID_THREADID;

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (DRD_(g_threadinfo)[i].vg_thread_exists == True
          && DRD_(g_threadinfo)[i].vg_threadid == tid)
      {
         return i;
      }
   }

   return DRD_INVALID_THREADID;
}

/** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
{
   int i;

   tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (!DRD_(g_threadinfo)[i].valid)
      {
         tl_assert(! DRD_(IsValidDrdThreadId)(i));

         DRD_(g_threadinfo)[i].valid = True;
         DRD_(g_threadinfo)[i].vg_thread_exists = True;
         DRD_(g_threadinfo)[i].vg_threadid = tid;
         DRD_(g_threadinfo)[i].pt_threadid = INVALID_POSIX_THREADID;
         DRD_(g_threadinfo)[i].stack_min = 0;
         DRD_(g_threadinfo)[i].stack_min_min = 0;
         DRD_(g_threadinfo)[i].stack_startup = 0;
         DRD_(g_threadinfo)[i].stack_max = 0;
         DRD_(thread_set_name)(i, "");
         DRD_(g_threadinfo)[i].on_alt_stack = False;
         DRD_(g_threadinfo)[i].is_recording_loads = True;
         DRD_(g_threadinfo)[i].is_recording_stores = True;
         DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
         DRD_(g_threadinfo)[i].synchr_nesting = 0;
         DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
         tl_assert(DRD_(g_threadinfo)[i].sg_first == NULL);
         tl_assert(DRD_(g_threadinfo)[i].sg_last == NULL);

         tl_assert(DRD_(IsValidDrdThreadId)(i));

         return i;
      }
   }

   VG_(printf)(
      "\nSorry, but the maximum number of threads supported by DRD has been"
      " exceeded. Aborting.\n");

   tl_assert(False);

   return DRD_INVALID_THREADID;
}

/** Convert a POSIX thread ID into a DRD thread ID. */
DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
{
   int i;

   if (tid != INVALID_POSIX_THREADID)
   {
      for (i = 1; i < DRD_N_THREADS; i++)
      {
         if (DRD_(g_threadinfo)[i].posix_thread_exists
             && DRD_(g_threadinfo)[i].pt_threadid == tid)
         {
            return i;
         }
      }
   }
   return DRD_INVALID_THREADID;
}

/** Convert a DRD thread ID into a Valgrind thread ID. */
ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   return (DRD_(g_threadinfo)[tid].vg_thread_exists
           ? DRD_(g_threadinfo)[tid].vg_threadid
           : VG_INVALID_THREADID);
}

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct.
 * @return True if sane, False if not.
 */
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
   Segment* p;

   for (p = ti->sg_first; p; p = p->thr_next) {
      if (p->thr_next && p->thr_next->thr_prev != p)
         return False;
      if (p->thr_next == 0 && p != ti->sg_last)
         return False;
   }
   for (p = ti->sg_last; p; p = p->thr_prev) {
      if (p->thr_prev && p->thr_prev->thr_next != p)
         return False;
      if (p->thr_prev == 0 && p != ti->sg_first)
         return False;
   }
   return True;
}
#endif

/**
 * Create the first segment for a newly started thread.
 *
 * This function is called from the handler installed via
 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
 * from the context of the creator thread, before the new thread has been
 * created.
 *
 * @param[in] creator    DRD thread ID of the creator thread.
 * @param[in] vg_created Valgrind thread ID of the created thread.
 *
 * @return DRD thread ID of the created thread.
 */
DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
                                    const ThreadId vg_created)
{
   DrdThreadId created;

   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
   created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   tl_assert(DRD_(g_threadinfo)[created].sg_first == NULL);
   tl_assert(DRD_(g_threadinfo)[created].sg_last == NULL);
   /* Create an initial segment for the newly created thread. */
   thread_append_segment(created, DRD_(sg_new)(creator, created));

   return created;
}

/**
 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
 * after the thread has been created and before any client instructions are
 * run on the newly created thread, e.g. from the handler installed via
 * VG_(track_pre_thread_first_insn)().
 *
 * @param[in] vg_created Valgrind thread ID of the newly created thread.
 *
 * @return DRD thread ID for the new thread.
 */
DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
{
   const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);

   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   DRD_(g_threadinfo)[created].stack_max
      = VG_(thread_get_stack_max)(vg_created);
   DRD_(g_threadinfo)[created].stack_startup
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_size
      = VG_(thread_get_stack_size)(vg_created);
   tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);

   return created;
}
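
/*
 * Illustrative sketch (not part of the original file): the two functions
 * above are typically wired up from the tool's main module. The callback
 * names below are hypothetical; see drd_main.c for the actual wiring.
 */
#if 0
static void example_pre_thread_create(const ThreadId creator,
                                      const ThreadId created)
{
   /* Runs in the creator thread, before the new thread exists. */
   DRD_(thread_pre_create)(DRD_(VgThreadIdToDrdThreadId)(creator), created);
}

static void example_post_thread_first_insn(const ThreadId vg_created)
{
   /* Runs before the first client instruction of the new thread. */
   DRD_(thread_post_create)(vg_created);
}

/* During tool initialization: */
VG_(track_pre_thread_ll_create)(example_pre_thread_create);
VG_(track_pre_thread_first_insn)(example_post_thread_first_insn);
#endif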

/** Mark a thread as finished and schedule it for delayed deletion. */
static void DRD_(thread_delayed_delete)(const DrdThreadId tid)
{
   int j;

   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++;
#if 0
   VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid);
#endif
   if (s_deletion_head - s_deletion_tail >= s_join_list_vol) {
      for (j = 0; j < DRD_N_THREADS; ++j) {
         if (DRD_(IsValidDrdThreadId)(j)
             && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail)
         {
            s_deletion_tail++;
#if 0
            VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j);
#endif
            DRD_(thread_delete)(j, False);
            break;
         }
      }
   }
}
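
/*
 * Example of the delayed-deletion mechanism above (illustrative): with
 * s_join_list_vol == 10, the ThreadInfo entries of the ten most recently
 * finished threads stay valid. When an eleventh thread finishes, the entry
 * whose deletion_seq equals s_deletion_tail -- the oldest one -- is deleted
 * and the tail sequence number advances by one.
 */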

/**
 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
 * after thread drd_joiner joined thread drd_joinee.
 */
void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
{
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));

   DRD_(thread_new_segment)(drd_joiner);
   DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
   DRD_(thread_new_segment)(drd_joinee);

   if (s_trace_fork_join)
   {
      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
      const unsigned msg_size = 256;
      HChar* msg;

      msg = VG_(malloc)("drd.main.dptj.1", msg_size);
      tl_assert(msg);
      VG_(snprintf)(msg, msg_size,
                    "drd_post_thread_join joiner = %d, joinee = %d",
                    drd_joiner, drd_joinee);
      if (joiner)
      {
         HChar* vc;

         vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                       ", new vc: %s", vc);
         VG_(free)(vc);
      }
      DRD_(trace_msg)("%pS", msg);
      VG_(free)(msg);
   }

   if (! DRD_(get_check_stack_accesses)())
   {
      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
                               - DRD_(thread_get_stack_size)(drd_joinee),
                               DRD_(thread_get_stack_max)(drd_joinee));
   }
   DRD_(clientobj_delete_thread)(drd_joinee);
   DRD_(thread_delayed_delete)(drd_joinee);
}

/**
 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
 * and accesses this data structure from multiple threads without locking.
 * Any conflicting accesses in the range stack_startup..stack_max will be
 * ignored.
 */
void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
                                    const Addr stack_startup)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
   tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
   DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
}
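
/*
 * Stack layout sketch for the NPTL case described above (illustrative; the
 * stack grows towards lower addresses):
 *
 *   stack_max     -> +----------------------+  highest address
 *                    | struct pthread       |  accessed without locking;
 *   stack_startup -> +----------------------+  conflicting accesses ignored
 *                    | client stack frames  |  race-checked as usual
 *   stack_min     -> +----------------------+  current stack pointer
 */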

/** Return the stack pointer for the specified thread. */
Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_min;
}

/**
 * Return the lowest value that was ever assigned to the stack pointer
 * for the specified thread.
 */
Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_min_min;
}

/** Return the top address for the stack of the specified thread. */
Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_max;
}

/** Return the maximum stack size for the specified thread. */
SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_size;
}

/** Report whether the specified thread is running on an alternate stack. */
Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].on_alt_stack;
}

/** Store whether the specified thread is running on an alternate stack. */
void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
                                   const Bool on_alt_stack)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(on_alt_stack == !!on_alt_stack);
   DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
}

/** Count the threads that are currently running on an alternate stack. */
Int DRD_(thread_get_threads_on_alt_stack)(void)
{
   int i, n = 0;

   for (i = 1; i < DRD_N_THREADS; i++)
      n += DRD_(g_threadinfo)[i].on_alt_stack;
   return n;
}

/**
 * Clean up thread-specific data structures.
 */
void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
{
   Segment* sg;
   Segment* sg_prev;

   tl_assert(DRD_(IsValidDrdThreadId)(tid));

   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
   for (sg = DRD_(g_threadinfo)[tid].sg_last; sg; sg = sg_prev) {
      sg_prev = sg->thr_prev;
      sg->thr_next = NULL;
      sg->thr_prev = NULL;
      DRD_(sg_put)(sg);
   }
   DRD_(g_threadinfo)[tid].valid = False;
   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   if (detached)
      DRD_(g_threadinfo)[tid].detached_posix_thread = False;
   else
      tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
   DRD_(g_threadinfo)[tid].sg_first = NULL;
   DRD_(g_threadinfo)[tid].sg_last = NULL;

   tl_assert(!DRD_(IsValidDrdThreadId)(tid));
}

/**
 * Called after a thread performed its last memory access and before
 * thread_delete() is called. Note: thread_delete() is only called for
 * joinable threads, not for detached threads.
 */
void DRD_(thread_finished)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   DRD_(g_threadinfo)[tid].vg_thread_exists = False;

   if (DRD_(g_threadinfo)[tid].detached_posix_thread)
   {
      /*
       * Once a detached thread has finished, its stack is deallocated and
       * should no longer be taken into account when computing the conflict
       * set.
       */
      DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;

      /*
       * For a detached thread, calling pthread_exit() invalidates the
       * POSIX thread ID associated with the detached thread. For joinable
       * POSIX threads however, the POSIX thread ID remains live after the
       * pthread_exit() call until pthread_join() is called.
       */
      DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   }
}

/** Called just after fork() in the child process. */
void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
{
   unsigned i;

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (i == tid)
         continue;
      if (DRD_(IsValidDrdThreadId(i)))
         DRD_(thread_delete)(i, True);
      tl_assert(!DRD_(IsValidDrdThreadId(i)));
   }
}

/** Called just before pthread_cancel(). */
void DRD_(thread_pre_cancel)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);

   if (DRD_(thread_get_trace_fork_join)())
      DRD_(trace_msg)("[%d] drd_thread_pre_cancel %d",
                      DRD_(g_drd_running_tid), tid);
}

/**
 * Store the POSIX thread ID for the specified thread.
 *
 * @note This function can be called two times for the same thread -- see also
 * the comment block preceding the pthread_create() wrapper in
 * drd_pthread_intercepts.c.
 */
void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
             || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
   tl_assert(ptid != INVALID_POSIX_THREADID);
   DRD_(g_threadinfo)[tid].posix_thread_exists = True;
   DRD_(g_threadinfo)[tid].pt_threadid = ptid;
}

/** Returns true for joinable threads and false for detached threads. */
Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
}

/** Store the thread mode: joinable or detached. */
#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
/* There is a CSE-related issue in GCC for MIPS: the optimization level has
   to be lowered here so that CSE-related optimizations are not applied. */
__attribute__((optimize("O1")))
#endif
void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(!! joinable == joinable);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);

   DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
}

/** Tells DRD that the calling thread is about to enter pthread_create(). */
void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
}

/** Tells DRD that the calling thread has left pthread_create(). */
void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
}

/** Obtain the thread number and the user-assigned thread name. */
const HChar* DRD_(thread_get_name)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   return DRD_(g_threadinfo)[tid].name;
}

/** Set the name of the specified thread. */
void DRD_(thread_set_name)(const DrdThreadId tid, const HChar* const name)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   if (name == NULL || name[0] == 0)
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %d",
                    tid);
   else
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %d (%s)",
                    tid, name);
   DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
}

/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      DRD_(thread_set_running_tid)(vg_tid,
                                   DRD_(VgThreadIdToDrdThreadId)(vg_tid));
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}

/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
                                  const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      if (s_trace_context_switches
          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
      {
         VG_(message)(Vg_DebugMsg,
                      "Context switch from thread %d to thread %d;"
                      " segments: %llu\n",
                      DRD_(g_drd_running_tid), drd_tid,
                      DRD_(sg_get_segments_alive_count)());
      }
      s_vg_running_tid = vg_tid;
      DRD_(g_drd_running_tid) = drd_tid;
      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
      s_context_switch_count++;
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}

/**
 * Increase the synchronization nesting counter. Must be called before the
 * client calls a synchronization function.
 */
int DRD_(thread_enter_synchr)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   return DRD_(g_threadinfo)[tid].synchr_nesting++;
}

/**
 * Decrease the synchronization nesting counter. Must be called after the
 * client left a synchronization function.
 */
int DRD_(thread_leave_synchr)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
   return --DRD_(g_threadinfo)[tid].synchr_nesting;
}

/** Returns the synchronization nesting counter. */
int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   return DRD_(g_threadinfo)[tid].synchr_nesting;
}
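
/*
 * Hedged usage sketch: the pthread intercepts bracket synchronization calls
 * with the two counters above so that memory accesses performed inside e.g.
 * pthread_mutex_lock() itself are not race-checked. The code below is
 * illustrative only; variable names are hypothetical.
 */
#if 0
DRD_(thread_enter_synchr)(drd_tid);  /* before invoking the real function */
ret = pthread_mutex_lock(mutex);     /* accesses inside are not checked   */
DRD_(thread_leave_synchr)(drd_tid);  /* after the real function returned  */
#endif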

/** Append a new segment at the end of the segment list. */
static
void thread_append_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   // add at tail
   sg->thr_prev = DRD_(g_threadinfo)[tid].sg_last;
   sg->thr_next = NULL;
   if (DRD_(g_threadinfo)[tid].sg_last)
      DRD_(g_threadinfo)[tid].sg_last->thr_next = sg;
   DRD_(g_threadinfo)[tid].sg_last = sg;
   if (DRD_(g_threadinfo)[tid].sg_first == NULL)
      DRD_(g_threadinfo)[tid].sg_first = sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}

/**
 * Remove a segment from the segment list of thread tid and free the
 * associated memory.
 */
static
void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   if (sg->thr_prev)
      sg->thr_prev->thr_next = sg->thr_next;
   if (sg->thr_next)
      sg->thr_next->thr_prev = sg->thr_prev;
   if (sg == DRD_(g_threadinfo)[tid].sg_first)
      DRD_(g_threadinfo)[tid].sg_first = sg->thr_next;
   if (sg == DRD_(g_threadinfo)[tid].sg_last)
      DRD_(g_threadinfo)[tid].sg_last = sg->thr_prev;
   DRD_(sg_put)(sg);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}

/**
 * Returns a pointer to the vector clock of the most recent segment associated
 * with thread 'tid'.
 */
VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
{
   Segment* latest_sg;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   latest_sg = DRD_(g_threadinfo)[tid].sg_last;
   tl_assert(latest_sg);
   return &latest_sg->vc;
}

/**
 * Return the latest segment of thread 'tid' and increment its reference
 * count.
 */
void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
{
   Segment* latest_sg;

   tl_assert(sg);
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   latest_sg = DRD_(g_threadinfo)[tid].sg_last;
   tl_assert(latest_sg);

   DRD_(sg_put)(*sg);
   *sg = DRD_(sg_get)(latest_sg);
}

/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].sg_last;
      if (latest_sg) {
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_min)(vc, &latest_sg->vc);
         first = False;
      }
   }
}

/**
 * Compute the maximum of all latest vector clocks of all threads.
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].sg_last;
      if (latest_sg) {
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_combine)(vc, &latest_sg->vc);
         first = False;
      }
   }
}
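
/*
 * Worked example for the two functions above (illustrative values): if the
 * latest vector clocks of two threads are
 *
 *    thread 1: [ 1: 3, 2: 1 ]
 *    thread 2: [ 1: 2, 2: 4 ]
 *
 * then the component-wise minimum is [ 1: 2, 2: 1 ] and the component-wise
 * maximum is [ 1: 3, 2: 4 ]. Any segment ordered before the minimum is
 * ordered before every future segment of every thread, which is what
 * thread_discard_ordered_segments() below relies on.
 */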

/**
 * Discard all segments that have a defined order against the latest vector
 * clock of all threads -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
   unsigned i;
   VectorClock thread_vc_min;

   s_discard_ordered_segments_count++;

   DRD_(vc_init)(&thread_vc_min, 0, 0);
   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
   if (DRD_(sg_get_trace)())
   {
      HChar *vc_min, *vc_max;
      VectorClock thread_vc_max;

      DRD_(vc_init)(&thread_vc_max, 0, 0);
      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
      vc_min = DRD_(vc_aprint)(&thread_vc_min);
      vc_max = DRD_(vc_aprint)(&thread_vc_max);
      VG_(message)(Vg_DebugMsg,
                   "Discarding ordered segments -- min vc is %s, max vc is %s\n",
                   vc_min, vc_max);
      VG_(free)(vc_min);
      VG_(free)(vc_max);
      DRD_(vc_cleanup)(&thread_vc_max);
   }

   for (i = 0; i < DRD_N_THREADS; i++) {
      Segment* sg;
      Segment* sg_next;

      for (sg = DRD_(g_threadinfo)[i].sg_first;
           sg && (sg_next = sg->thr_next)
              && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
           sg = sg_next)
      {
         thread_discard_segment(i, sg);
      }
   }
   DRD_(vc_cleanup)(&thread_vc_min);
}

/**
 * An implementation of the property 'equiv(sg1, sg2)' as defined in the
 * paper by Mark Christiaens et al. The property equiv(sg1, sg2) holds if and
 * only if all segments in the set CS are ordered consistently against both
 * sg1 and sg2. The set CS is defined as the set of segments that can
 * immediately precede future segments via inter-thread synchronization
 * operations. In DRD the set CS consists of the latest segment of each
 * thread combined with all segments for which the reference count is
 * strictly greater than one. The code below is an optimized version of the
 * following:
 *
 * for (i = 0; i < DRD_N_THREADS; i++)
 * {
 *    Segment* sg;
 *
 *    for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next)
 *    {
 *       if (sg == DRD_(g_threadinfo)[i].sg_last
 *           || DRD_(sg_get_refcnt)(sg) > 1)
 *       {
 *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
 *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
 *              ||    DRD_(vc_lte)(&sg->vc, &sg1->vc)
 *                 != DRD_(vc_lte)(&sg->vc, &sg2->vc))
 *          {
 *             return False;
 *          }
 *       }
 *    }
 * }
 */
static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
                                               Segment* const sg1,
                                               Segment* const sg2)
{
   unsigned i;

   tl_assert(sg1->thr_next);
   tl_assert(sg2->thr_next);
   tl_assert(sg1->thr_next == sg2);
   tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

      for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
         if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
            if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
               break;
            if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
               return False;
         }
      }
      for (sg = DRD_(g_threadinfo)[i].sg_last; sg; sg = sg->thr_prev) {
         if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
            if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
               break;
            if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
               return False;
         }
      }
   }
   return True;
}
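
/*
 * Illustrative example of the equiv() property (hypothetical vector clocks):
 * let sg1->vc = [ 1: 2 ] and sg2->vc = [ 1: 3 ] be consecutive segments of
 * thread 1. A segment sg of another thread with vc = [ 1: 1, 2: 5 ] is
 * unordered against both sg1 and sg2, so it does not distinguish them. A
 * segment with vc = [ 1: 2, 2: 5 ], however, is ordered after sg1 but
 * unordered against sg2, so the two segments are ordered inconsistently
 * against it and must not be merged.
 */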

/**
 * Merge all segments that may be merged without triggering false positives
 * or discarding real data races. For the theoretical background of segment
 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
 * and Koen De Bosschere. Bounding the number of segment histories during
 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
 * pp 1221-1238, September 2002. This paper contains a proof that consecutive
 * segments for which the property equiv(s1, s2) holds can be merged without
 * reducing the accuracy of data race detection. Furthermore it is also
 * proven that the total number of all segments will never grow unbounded if
 * all segments s1, s2 for which equiv(s1, s2) holds are merged every time a
 * new segment is created. The property equiv(s1, s2) is defined as follows:
 * equiv(s1, s2) <=> for all segments in the set CS, the vector clocks of
 * segments s and s1 are ordered in the same way as those of segments s and
 * s2. The set CS is defined as the set of existing segments s that have the
 * potential to conflict with not yet created segments, either because the
 * segment s is the latest segment of a thread or because it can become the
 * immediate predecessor of a new segment due to a synchronization operation.
 */
static void thread_merge_segments(void)
{
   unsigned i;

   s_new_segments_since_last_merge = 0;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif

      for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
         if (DRD_(sg_get_refcnt)(sg) == 1 && sg->thr_next) {
            Segment* const sg_next = sg->thr_next;
            if (DRD_(sg_get_refcnt)(sg_next) == 1
                && sg_next->thr_next
                && thread_consistent_segment_ordering(i, sg, sg_next))
            {
               /* Merge sg and sg_next into sg. */
               DRD_(sg_merge)(sg, sg_next);
               thread_discard_segment(i, sg_next);
            }
         }
      }

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
   }
}
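
/*
 * Net effect of the loop above: each merge collapses two consecutive
 * segments into one, combining their memory access information. Because
 * equiv() held for the pair, no data race can be missed, and repeated
 * merging keeps the number of live segments bounded (see the paper cited
 * above).
 */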

/**
 * Create a new segment for the specified thread, and discard any segments
 * that cannot cause races anymore.
 */
void DRD_(thread_new_segment)(const DrdThreadId tid)
{
   Segment* last_sg;
   Segment* new_sg;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   last_sg = DRD_(g_threadinfo)[tid].sg_last;
   new_sg = DRD_(sg_new)(tid, tid);
   thread_append_segment(tid, new_sg);
   if (tid == DRD_(g_drd_running_tid) && last_sg)
   {
      DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
      s_update_conflict_set_new_sg_count++;
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}

/** Call this function after thread 'joiner' joined thread 'joinee'. */
void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_first);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_last);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_first);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_last);

   if (DRD_(sg_get_trace)())
   {
      HChar *str1, *str2;
      str1 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joinee));
      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
                   str1, str2);
      VG_(free)(str1);
      VG_(free)(str2);
   }
   if (joiner == DRD_(g_drd_running_tid)) {
      VectorClock old_vc;

      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(joiner));
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
      DRD_(thread_update_conflict_set)(joiner, &old_vc);
      s_update_conflict_set_join_count++;
      DRD_(vc_cleanup)(&old_vc);
   } else {
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
   }

   thread_discard_ordered_segments();

   if (DRD_(sg_get_trace)()) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
      VG_(free)(str);
   }
}
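
/*
 * Worked example (illustrative values): if just before the join the vector
 * clocks are joiner = [ 1: 5, 2: 2 ] and joinee = [ 1: 3, 2: 7 ], combining
 * them (component-wise maximum) yields [ 1: 5, 2: 7 ] for the joiner. All
 * segments of the joinee are now ordered before the joiner's next segment,
 * so accesses performed after the join cannot race with the joinee.
 */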

/**
 * Update the vector clock of the last segment of thread tid with the
 * vector clock of segment sg.
 */
static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
{
   const VectorClock* const vc = &sg->vc;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].sg_first);
   tl_assert(DRD_(g_threadinfo)[tid].sg_last);
   tl_assert(sg);
   tl_assert(vc);

   if (tid != sg->tid) {
      VectorClock old_vc;

      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
      DRD_(vc_combine)(DRD_(thread_get_vc)(tid), vc);
      if (DRD_(sg_get_trace)()) {
         HChar *str1, *str2;
         str1 = DRD_(vc_aprint)(&old_vc);
         str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
         VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
         VG_(free)(str1);
         VG_(free)(str2);
      }

      thread_discard_ordered_segments();

      DRD_(thread_update_conflict_set)(tid, &old_vc);
      s_update_conflict_set_sync_count++;

      DRD_(vc_cleanup)(&old_vc);
   } else {
      tl_assert(DRD_(vc_lte)(vc, DRD_(thread_get_vc)(tid)));
   }
}

/**
 * Create a new segment for thread tid and update the vector clock of the
 * last segment of this thread with the vector clock of segment sg. Call this
 * function after thread tid had to wait because of thread synchronization
 * until the memory accesses in the segment sg finished.
 */
void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   tl_assert(sg);

   thread_append_segment(tid, DRD_(sg_new)(tid, tid));

   thread_combine_vc_sync(tid, sg);

   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}

/**
 * Call this function whenever a thread is no longer using the memory
 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 * increase.
 */
void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
{
   Segment* p;

   for (p = DRD_(g_sg_list); p; p = p->g_next)
      DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);

   DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
}
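
/*
 * Hedged usage sketch: the memory-management intercepts invoke the function
 * above with a half-open address range, e.g. when a heap block is freed
 * (variable names below are hypothetical):
 */
#if 0
DRD_(thread_stop_using_mem)((Addr)ptr, (Addr)ptr + size);
#endif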

/** Specify whether memory loads should be recorded. */
void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
}

/** Specify whether memory stores should be recorded. */
void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
}

/**
 * Print the segment information for all threads.
 *
 * This function is only used for debugging purposes.
 */
void DRD_(thread_print_all)(void)
{
   unsigned i;
   Segment* p;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      p = DRD_(g_threadinfo)[i].sg_first;
      if (p) {
         VG_(printf)("**************\n"
                     "* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n"
                     "**************\n",
                     i,
                     DRD_(g_threadinfo)[i].valid,
                     DRD_(g_threadinfo)[i].vg_thread_exists,
                     DRD_(g_threadinfo)[i].vg_threadid,
                     DRD_(g_threadinfo)[i].posix_thread_exists,
                     DRD_(g_threadinfo)[i].pt_threadid,
                     DRD_(g_threadinfo)[i].detached_posix_thread);
         for ( ; p; p = p->thr_next)
            DRD_(sg_print)(p);
      }
   }
}

/** Show a call stack involved in a data race. */
static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
{
   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);

   if (vg_tid != VG_INVALID_THREADID) {
      if (callstack)
         VG_(pp_ExeContext)(callstack);
      else
         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
   } else {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      " (thread finished, call stack no longer available)\n");
   }
}

/** Print information about the segments involved in a data race. */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < DRD_N_THREADS; i++) {
      if (i != tid) {
         Segment* q;

         for (q = DRD_(g_threadinfo)[i].sg_last; q; q = q->thr_prev) {
            /*
             * Since q iterates over the segments of thread i in order of
             * decreasing vector clocks, if q->vc <= p->vc, then
             * q->thr_prev->vc <= p->vc will also hold. Hence, break out of
             * the loop once this condition is met.
             */
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            if (!DRD_(vc_lte)(&p->vc, &q->vc)) {
               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
                                              access_type)) {
                  Segment* q_next;

                  tl_assert(q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)(" <other_segment_start>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment start (thread %d)\n", i);
                  show_call_stack(i, q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)(" </other_segment_start>\n"
                                     " <other_segment_end>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment end (thread %d)\n", i);
                  q_next = q->thr_next;
                  show_call_stack(i, q_next ? q_next->stacktrace : 0);
                  if (VG_(clo_xml))
                     VG_(printf_xml)(" </other_segment_end>\n");
               }
            }
         }
      }
   }
}

/** Print information about all segments involved in a data race. */
void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
                                              const Addr addr,
                                              const SizeT size,
                                              const BmAccessTypeT access_type)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   for (p = DRD_(g_threadinfo)[tid].sg_first; p; p = p->thr_next) {
      if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
         thread_report_conflicting_segments_segment(tid, addr, size,
                                                    access_type, p);
   }
}

/**
 * Verify whether the conflict set for thread tid is up to date. Only perform
 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
 */
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
{
   static int do_verify_conflict_set = -1;
   Bool result;
   struct bitmap* computed_conflict_set = 0;

   if (do_verify_conflict_set < 0)
      do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;

   if (do_verify_conflict_set == 0)
      return True;

   thread_compute_conflict_set(&computed_conflict_set, tid);
   result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
   if (! result)
   {
      VG_(printf)("actual conflict set:\n");
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(printf)("\n");
      VG_(printf)("computed conflict set:\n");
      DRD_(bm_print)(computed_conflict_set);
      VG_(printf)("\n");
   }
   DRD_(bm_delete)(computed_conflict_set);
   return result;
}
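
/*
 * Example: the verification above is enabled by defining the environment
 * variable before starting Valgrind; only its presence is tested, so any
 * value will do:
 *
 *   DRD_VERIFY_CONFLICT_SET=1 valgrind --tool=drd ./myprog
 */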

/**
 * Compute the conflict set: a bitmap that represents the union of all memory
 * accesses of all segments that are unordered to the current segment of the
 * thread tid.
 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   if (*conflict_set) {
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   } else {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   p = DRD_(g_threadinfo)[tid].sg_last;
   {
      unsigned j;

      if (s_trace_conflict_set) {
         HChar* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      for (j = 0; j < DRD_N_THREADS; j++) {
         if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
            Segment* q;

            for (q = DRD_(g_threadinfo)[j].sg_last; q; q = q->thr_prev) {
               if (!DRD_(vc_lte)(&q->vc, &p->vc)
                   && !DRD_(vc_lte)(&p->vc, &q->vc)) {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               } else {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm) {
      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
   }
}
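
/*
 * Worked example (illustrative values): let the running thread's latest
 * segment have vc p = [ 1: 4, 2: 1 ] and let another thread own a segment
 * with vc q = [ 1: 2, 2: 3 ]. Neither q <= p nor p <= q holds, so the two
 * segments are unordered and q's accesses are merged into the conflict set.
 * A segment with vc [ 1: 2, 2: 1 ], by contrast, satisfies vc <= p and is
 * ignored: it happened before p.
 */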

/**
 * Update the conflict set after the vector clock of thread tid has been
 * updated from old_vc to its current value, either because a new segment has
 * been created or because of a synchronization operation.
 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = DRD_(thread_get_vc)(tid);
   tl_assert(DRD_(vc_lte)(old_vc, new_vc));

   DRD_(bm_unmark)(DRD_(g_conflict_set));

   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

      for (q = DRD_(g_threadinfo)[j].sg_last;
           q && !DRD_(vc_lte)(&q->vc, new_vc);
           q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }

      for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(&q->vc, new_vc)
              && !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }
   }

   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

   p = DRD_(g_threadinfo)[tid].sg_last;
   for (j = 0; j < DRD_N_THREADS; j++) {
      if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
         Segment* q;
         for (q = DRD_(g_threadinfo)[j].sg_last;
              q && !DRD_(vc_lte)(&q->vc, &p->vc);
              q = q->thr_prev) {
            if (!DRD_(vc_lte)(&p->vc, &q->vc))
               DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
      }
   }

   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}

/** Report the number of context switches performed. */
ULong DRD_(thread_get_context_switch_count)(void)
{
   return s_context_switch_count;
}

/** Report the number of ordered segments that have been discarded. */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   return s_discard_ordered_segments_count;
}

/** Return how many times the conflict set has been recomputed entirely. */
ULong DRD_(thread_get_compute_conflict_set_count)(void)
{
   return s_compute_conflict_set_count;
}

/** Return how many times the conflict set has been updated partially. */
ULong DRD_(thread_get_update_conflict_set_count)(void)
{
   return s_update_conflict_set_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because a new segment has been created.
 */
ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
{
   return s_update_conflict_set_new_sg_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because of combining vector clocks due to synchronization operations
 * other than reader/writer lock or barrier operations.
 */
ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
{
   return s_update_conflict_set_sync_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because of thread joins.
 */
ULong DRD_(thread_get_update_conflict_set_join_count)(void)
{
   return s_update_conflict_set_join_count;
}

/**
 * Return the number of first-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
{
   return s_conflict_set_bitmap_creation_count;
}

/**
 * Return the number of second-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
{
   return s_conflict_set_bitmap2_creation_count;
}