1 /* -*- mode: C; c-basic-offset: 3; indent-tabs-mode: nil; -*- */
2 /*
3 This file is part of drd, a thread error detector.
4
5 Copyright (C) 2006-2011 Bart Van Assche <bvanassche@acm.org>.
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2 of the
10 License, or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
20 02111-1307, USA.
21
22 The GNU General Public License is contained in the file COPYING.
23 */
24
25
26 #include "drd_error.h"
27 #include "drd_barrier.h"
28 #include "drd_clientobj.h"
29 #include "drd_cond.h"
30 #include "drd_mutex.h"
31 #include "drd_segment.h"
32 #include "drd_semaphore.h"
33 #include "drd_suppression.h"
34 #include "drd_thread.h"
35 #include "pub_tool_vki.h"
36 #include "pub_tool_basics.h" // Addr, SizeT
37 #include "pub_tool_libcassert.h" // tl_assert()
38 #include "pub_tool_libcbase.h" // VG_(strlen)()
39 #include "pub_tool_libcprint.h" // VG_(printf)()
40 #include "pub_tool_libcproc.h" // VG_(getenv)()
41 #include "pub_tool_machine.h"
42 #include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)()
43 #include "pub_tool_options.h" // VG_(clo_backtrace_size)
44 #include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
45
46
47
/* Local functions. */

static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid);
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);


/* Local variables. */

/* Statistics counters. */
static ULong s_context_switch_count;
static ULong s_discard_ordered_segments_count;
static ULong s_compute_conflict_set_count;
static ULong s_update_conflict_set_count;
static ULong s_update_conflict_set_new_sg_count;
static ULong s_update_conflict_set_sync_count;
static ULong s_update_conflict_set_join_count;
static ULong s_conflict_set_bitmap_creation_count;
static ULong s_conflict_set_bitmap2_creation_count;
/* Valgrind thread ID of the currently scheduled client thread. */
static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
/* DRD thread ID of the currently scheduled client thread. */
DrdThreadId DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
/* Per-thread bookkeeping, indexed by DRD thread ID. */
ThreadInfo DRD_(g_threadinfo)[DRD_N_THREADS];
/* Conflict set of the currently scheduled thread -- recomputed on context
   switch by thread_compute_conflict_set(). */
struct bitmap* DRD_(g_conflict_set);
/* Tracing flags -- toggled via the DRD_(thread_trace_*)() /
   DRD_(thread_set_trace_*)() setters below. */
static Bool s_trace_context_switches = False;
static Bool s_trace_conflict_set = False;
static Bool s_trace_conflict_set_bm = False;
static Bool s_trace_fork_join = False;
/* Segment merging policy -- see thread_merge_segments(). */
static Bool s_segment_merging = True;
static Bool s_new_segments_since_last_merge;
static int s_segment_merge_interval = 10;
/* Delayed thread deletion state -- see DRD_(thread_delayed_delete)():
   threads get a deletion sequence number in [s_deletion_tail,
   s_deletion_head) and are reclaimed once that window exceeds
   s_join_list_vol entries. */
static unsigned s_join_list_vol = 10;
static unsigned s_deletion_head;
static unsigned s_deletion_tail;
82
83
84 /* Function definitions. */
85
86 /** Enables/disables context switch tracing. */
DRD_(thread_trace_context_switches)87 void DRD_(thread_trace_context_switches)(const Bool t)
88 {
89 tl_assert(t == False || t == True);
90 s_trace_context_switches = t;
91 }
92
93 /** Enables/disables conflict set tracing. */
DRD_(thread_trace_conflict_set)94 void DRD_(thread_trace_conflict_set)(const Bool t)
95 {
96 tl_assert(t == False || t == True);
97 s_trace_conflict_set = t;
98 }
99
100 /** Enables/disables conflict set bitmap tracing. */
DRD_(thread_trace_conflict_set_bm)101 void DRD_(thread_trace_conflict_set_bm)(const Bool t)
102 {
103 tl_assert(t == False || t == True);
104 s_trace_conflict_set_bm = t;
105 }
106
107 /** Report whether fork/join tracing is enabled. */
DRD_(thread_get_trace_fork_join)108 Bool DRD_(thread_get_trace_fork_join)(void)
109 {
110 return s_trace_fork_join;
111 }
112
113 /** Enables/disables fork/join tracing. */
DRD_(thread_set_trace_fork_join)114 void DRD_(thread_set_trace_fork_join)(const Bool t)
115 {
116 tl_assert(t == False || t == True);
117 s_trace_fork_join = t;
118 }
119
120 /** Enables/disables segment merging. */
DRD_(thread_set_segment_merging)121 void DRD_(thread_set_segment_merging)(const Bool m)
122 {
123 tl_assert(m == False || m == True);
124 s_segment_merging = m;
125 }
126
127 /** Get the segment merging interval. */
DRD_(thread_get_segment_merge_interval)128 int DRD_(thread_get_segment_merge_interval)(void)
129 {
130 return s_segment_merge_interval;
131 }
132
133 /** Set the segment merging interval. */
DRD_(thread_set_segment_merge_interval)134 void DRD_(thread_set_segment_merge_interval)(const int i)
135 {
136 s_segment_merge_interval = i;
137 }
138
DRD_(thread_set_join_list_vol)139 void DRD_(thread_set_join_list_vol)(const int jlv)
140 {
141 s_join_list_vol = jlv;
142 }
143
144 /**
145 * Convert Valgrind's ThreadId into a DrdThreadId.
146 *
147 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
148 * Valgrind ThreadId does not yet exist.
149 */
DRD_(VgThreadIdToDrdThreadId)150 DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
151 {
152 int i;
153
154 if (tid == VG_INVALID_THREADID)
155 return DRD_INVALID_THREADID;
156
157 for (i = 1; i < DRD_N_THREADS; i++)
158 {
159 if (DRD_(g_threadinfo)[i].vg_thread_exists == True
160 && DRD_(g_threadinfo)[i].vg_threadid == tid)
161 {
162 return i;
163 }
164 }
165
166 return DRD_INVALID_THREADID;
167 }
168
169 /** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
DRD_(VgThreadIdToNewDrdThreadId)170 static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
171 {
172 int i;
173
174 tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
175
176 for (i = 1; i < DRD_N_THREADS; i++)
177 {
178 if (!DRD_(g_threadinfo)[i].valid)
179 {
180 tl_assert(! DRD_(IsValidDrdThreadId)(i));
181
182 DRD_(g_threadinfo)[i].valid = True;
183 DRD_(g_threadinfo)[i].vg_thread_exists = True;
184 DRD_(g_threadinfo)[i].vg_threadid = tid;
185 DRD_(g_threadinfo)[i].pt_threadid = INVALID_POSIX_THREADID;
186 DRD_(g_threadinfo)[i].stack_min = 0;
187 DRD_(g_threadinfo)[i].stack_min_min = 0;
188 DRD_(g_threadinfo)[i].stack_startup = 0;
189 DRD_(g_threadinfo)[i].stack_max = 0;
190 DRD_(thread_set_name)(i, "");
191 DRD_(g_threadinfo)[i].on_alt_stack = False;
192 DRD_(g_threadinfo)[i].is_recording_loads = True;
193 DRD_(g_threadinfo)[i].is_recording_stores = True;
194 DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
195 DRD_(g_threadinfo)[i].synchr_nesting = 0;
196 DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
197 tl_assert(DRD_(g_threadinfo)[i].first == 0);
198 tl_assert(DRD_(g_threadinfo)[i].last == 0);
199
200 tl_assert(DRD_(IsValidDrdThreadId)(i));
201
202 return i;
203 }
204 }
205
206 VG_(printf)(
207 "\nSorry, but the maximum number of threads supported by DRD has been exceeded."
208 "Aborting.\n");
209
210 tl_assert(False);
211
212 return DRD_INVALID_THREADID;
213 }
214
215 /** Convert a POSIX thread ID into a DRD thread ID. */
DRD_(PtThreadIdToDrdThreadId)216 DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
217 {
218 int i;
219
220 if (tid != INVALID_POSIX_THREADID)
221 {
222 for (i = 1; i < DRD_N_THREADS; i++)
223 {
224 if (DRD_(g_threadinfo)[i].posix_thread_exists
225 && DRD_(g_threadinfo)[i].pt_threadid == tid)
226 {
227 return i;
228 }
229 }
230 }
231 return DRD_INVALID_THREADID;
232 }
233
234 /** Convert a DRD thread ID into a Valgrind thread ID. */
DRD_(DrdThreadIdToVgThreadId)235 ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
236 {
237 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
238 && tid != DRD_INVALID_THREADID);
239
240 return (DRD_(g_threadinfo)[tid].vg_thread_exists
241 ? DRD_(g_threadinfo)[tid].vg_threadid
242 : VG_INVALID_THREADID);
243 }
244
245 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
246 /**
247 * Sanity check of the doubly linked list of segments referenced by a
248 * ThreadInfo struct.
249 * @return True if sane, False if not.
250 */
DRD_(sane_ThreadInfo)251 static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
252 {
253 Segment* p;
254
255 for (p = ti->first; p; p = p->next) {
256 if (p->next && p->next->prev != p)
257 return False;
258 if (p->next == 0 && p != ti->last)
259 return False;
260 }
261 for (p = ti->last; p; p = p->prev) {
262 if (p->prev && p->prev->next != p)
263 return False;
264 if (p->prev == 0 && p != ti->first)
265 return False;
266 }
267 return True;
268 }
269 #endif
270
271 /**
272 * Create the first segment for a newly started thread.
273 *
274 * This function is called from the handler installed via
275 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
276 * from the context of the creator thread, before the new thread has been
277 * created.
278 *
279 * @param[in] creator DRD thread ID of the creator thread.
280 * @param[in] vg_created Valgrind thread ID of the created thread.
281 *
282 * @return DRD thread ID of the created thread.
283 */
DRD_(thread_pre_create)284 DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
285 const ThreadId vg_created)
286 {
287 DrdThreadId created;
288
289 tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
290 created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
291 tl_assert(0 <= (int)created && created < DRD_N_THREADS
292 && created != DRD_INVALID_THREADID);
293
294 tl_assert(DRD_(g_threadinfo)[created].first == 0);
295 tl_assert(DRD_(g_threadinfo)[created].last == 0);
296 /* Create an initial segment for the newly created thread. */
297 thread_append_segment(created, DRD_(sg_new)(creator, created));
298
299 return created;
300 }
301
302 /**
303 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
304 * after the thread has been created and before any client instructions are run
305 * on the newly created thread, e.g. from the handler installed via
306 * VG_(track_pre_thread_first_insn)().
307 *
308 * @param[in] vg_created Valgrind thread ID of the newly created thread.
309 *
310 * @return DRD thread ID for the new thread.
311 */
DRD_(thread_post_create)312 DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
313 {
314 const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
315
316 tl_assert(0 <= (int)created && created < DRD_N_THREADS
317 && created != DRD_INVALID_THREADID);
318
319 DRD_(g_threadinfo)[created].stack_max
320 = VG_(thread_get_stack_max)(vg_created);
321 DRD_(g_threadinfo)[created].stack_startup
322 = DRD_(g_threadinfo)[created].stack_max;
323 DRD_(g_threadinfo)[created].stack_min
324 = DRD_(g_threadinfo)[created].stack_max;
325 DRD_(g_threadinfo)[created].stack_min_min
326 = DRD_(g_threadinfo)[created].stack_max;
327 DRD_(g_threadinfo)[created].stack_size
328 = VG_(thread_get_stack_size)(vg_created);
329 tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
330
331 return created;
332 }
333
/**
 * Put thread 'tid' on the delayed-deletion list by assigning it the next
 * deletion sequence number. Once the list holds at least s_join_list_vol
 * entries, the oldest entry (the one whose deletion_seq equals
 * s_deletion_tail) is actually deleted.
 */
static void DRD_(thread_delayed_delete)(const DrdThreadId tid)
{
   int j;

   /* The thread is gone from the client's point of view, but its slot and
      segments are kept alive until it ages out of the deletion window. */
   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++;
#if 0
   VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid);
#endif
   /* If the window [s_deletion_tail, s_deletion_head) is full, reclaim the
      oldest delayed thread. */
   if (s_deletion_head - s_deletion_tail >= s_join_list_vol) {
      for (j = 0; j < DRD_N_THREADS; ++j) {
         if (DRD_(IsValidDrdThreadId)(j)
             && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail)
         {
            s_deletion_tail++;
#if 0
            VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j);
#endif
            DRD_(thread_delete)(j, False);
            break;
         }
      }
   }
}
359
360 /**
361 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
362 * after thread drd_joiner joined thread drd_joinee.
363 */
void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
{
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));

   /* Order matters: first start a new segment for the joiner, then merge
      the joinee's vector clock into the joiner's, then give the joinee a
      fresh segment as well. */
   DRD_(thread_new_segment)(drd_joiner);
   DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
   DRD_(thread_new_segment)(drd_joinee);

   if (s_trace_fork_join)
   {
      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
      const unsigned msg_size = 256;
      char* msg;

      /* Build the trace message in a heap buffer and free it afterwards. */
      msg = VG_(malloc)("drd.main.dptj.1", msg_size);
      tl_assert(msg);
      VG_(snprintf)(msg, msg_size,
                    "drd_post_thread_join joiner = %d, joinee = %d",
                    drd_joiner, drd_joinee);
      /* Only append the new vector clock if the joiner still has a live
         Valgrind thread ID. */
      if (joiner)
      {
         char* vc;

         /* DRD_(vc_aprint)() allocates; release the string after use. */
         vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                       ", new vc: %s", vc);
         VG_(free)(vc);
      }
      DRD_(trace_msg)("%pS", msg);
      VG_(free)(msg);
   }

   /* If stack accesses are not being checked, end the suppression range
      that covers the joinee's stack. */
   if (! DRD_(get_check_stack_accesses)())
   {
      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
                               - DRD_(thread_get_stack_size)(drd_joinee),
                               DRD_(thread_get_stack_max)(drd_joinee));
   }
   /* Clean up client objects owned by the joinee and schedule the joinee's
      thread slot for delayed deletion. */
   DRD_(clientobj_delete_thread)(drd_joinee);
   DRD_(thread_delayed_delete)(drd_joinee);
}
406
407 /**
408 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
409 * and accesses this data structure from multiple threads without locking.
410 * Any conflicting accesses in the range stack_startup..stack_max will be
411 * ignored.
412 */
DRD_(thread_set_stack_startup)413 void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
414 const Addr stack_startup)
415 {
416 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
417 && tid != DRD_INVALID_THREADID);
418 tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
419 tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
420 DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
421 }
422
423 /** Return the stack pointer for the specified thread. */
DRD_(thread_get_stack_min)424 Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
425 {
426 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
427 && tid != DRD_INVALID_THREADID);
428 return DRD_(g_threadinfo)[tid].stack_min;
429 }
430
431 /**
432 * Return the lowest value that was ever assigned to the stack pointer
433 * for the specified thread.
434 */
DRD_(thread_get_stack_min_min)435 Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
436 {
437 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
438 && tid != DRD_INVALID_THREADID);
439 return DRD_(g_threadinfo)[tid].stack_min_min;
440 }
441
442 /** Return the top address for the stack of the specified thread. */
DRD_(thread_get_stack_max)443 Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
444 {
445 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
446 && tid != DRD_INVALID_THREADID);
447 return DRD_(g_threadinfo)[tid].stack_max;
448 }
449
450 /** Return the maximum stack size for the specified thread. */
DRD_(thread_get_stack_size)451 SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
452 {
453 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
454 && tid != DRD_INVALID_THREADID);
455 return DRD_(g_threadinfo)[tid].stack_size;
456 }
457
DRD_(thread_get_on_alt_stack)458 Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
459 {
460 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
461 && tid != DRD_INVALID_THREADID);
462 return DRD_(g_threadinfo)[tid].on_alt_stack;
463 }
464
DRD_(thread_set_on_alt_stack)465 void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
466 const Bool on_alt_stack)
467 {
468 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
469 && tid != DRD_INVALID_THREADID);
470 tl_assert(on_alt_stack == !!on_alt_stack);
471 DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
472 }
473
DRD_(thread_get_threads_on_alt_stack)474 Int DRD_(thread_get_threads_on_alt_stack)(void)
475 {
476 int i, n = 0;
477
478 for (i = 1; i < DRD_N_THREADS; i++)
479 n += DRD_(g_threadinfo)[i].on_alt_stack;
480 return n;
481 }
482
483 /**
484 * Clean up thread-specific data structures.
485 */
void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
{
   Segment* sg;
   Segment* sg_prev;

   tl_assert(DRD_(IsValidDrdThreadId)(tid));

   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
   /* Release all segments, walking the list back to front. Each node's
      links are cleared before its reference is dropped. */
   for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev)
   {
      sg_prev = sg->prev;
      sg->prev = 0;
      sg->next = 0;
      DRD_(sg_put)(sg);
   }
   DRD_(g_threadinfo)[tid].valid = False;
   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   /* For a detached thread the flag is cleared here; a joinable thread must
      never have had it set in the first place. */
   if (detached)
      DRD_(g_threadinfo)[tid].detached_posix_thread = False;
   else
      tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
   DRD_(g_threadinfo)[tid].first = 0;
   DRD_(g_threadinfo)[tid].last = 0;

   /* After cleanup, the slot must no longer count as a valid thread. */
   tl_assert(! DRD_(IsValidDrdThreadId)(tid));
}
513
514 /**
515 * Called after a thread performed its last memory access and before
516 * thread_delete() is called. Note: thread_delete() is only called for
517 * joinable threads, not for detached threads.
518 */
DRD_(thread_finished)519 void DRD_(thread_finished)(const DrdThreadId tid)
520 {
521 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
522 && tid != DRD_INVALID_THREADID);
523
524 DRD_(g_threadinfo)[tid].vg_thread_exists = False;
525
526 if (DRD_(g_threadinfo)[tid].detached_posix_thread)
527 {
528 /*
529 * Once a detached thread has finished, its stack is deallocated and
530 * should no longer be taken into account when computing the conflict set.
531 */
532 DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;
533
534 /*
535 * For a detached thread, calling pthread_exit() invalidates the
536 * POSIX thread ID associated with the detached thread. For joinable
537 * POSIX threads however, the POSIX thread ID remains live after the
538 * pthread_exit() call until pthread_join() is called.
539 */
540 DRD_(g_threadinfo)[tid].posix_thread_exists = False;
541 }
542 }
543
544 /** Called just after fork() in the child process. */
DRD_(drd_thread_atfork_child)545 void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
546 {
547 unsigned i;
548
549 for (i = 1; i < DRD_N_THREADS; i++)
550 {
551 if (i == tid)
552 continue;
553 if (DRD_(IsValidDrdThreadId(i)))
554 DRD_(thread_delete)(i, True);
555 tl_assert(!DRD_(IsValidDrdThreadId(i)));
556 }
557 }
558
559 /** Called just before pthread_cancel(). */
DRD_(thread_pre_cancel)560 void DRD_(thread_pre_cancel)(const DrdThreadId tid)
561 {
562 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
563 && tid != DRD_INVALID_THREADID);
564 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
565
566 if (DRD_(thread_get_trace_fork_join)())
567 DRD_(trace_msg)("[%d] drd_thread_pre_cancel %d",
568 DRD_(g_drd_running_tid), tid);
569 }
570
571 /**
572 * Store the POSIX thread ID for the specified thread.
573 *
574 * @note This function can be called two times for the same thread -- see also
575 * the comment block preceding the pthread_create() wrapper in
576 * drd_pthread_intercepts.c.
577 */
DRD_(thread_set_pthreadid)578 void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
579 {
580 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
581 && tid != DRD_INVALID_THREADID);
582 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
583 || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
584 tl_assert(ptid != INVALID_POSIX_THREADID);
585 DRD_(g_threadinfo)[tid].posix_thread_exists = True;
586 DRD_(g_threadinfo)[tid].pt_threadid = ptid;
587 }
588
589 /** Returns true for joinable threads and false for detached threads. */
DRD_(thread_get_joinable)590 Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
591 {
592 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
593 && tid != DRD_INVALID_THREADID);
594 return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
595 }
596
597 /** Store the thread mode: joinable or detached. */
DRD_(thread_set_joinable)598 void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
599 {
600 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
601 && tid != DRD_INVALID_THREADID);
602 tl_assert(!! joinable == joinable);
603 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
604
605 DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
606 }
607
608 /** Tells DRD that the calling thread is about to enter pthread_create(). */
DRD_(thread_entering_pthread_create)609 void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
610 {
611 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
612 && tid != DRD_INVALID_THREADID);
613 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
614 tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);
615
616 DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
617 }
618
619 /** Tells DRD that the calling thread has left pthread_create(). */
DRD_(thread_left_pthread_create)620 void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
621 {
622 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
623 && tid != DRD_INVALID_THREADID);
624 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
625 tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);
626
627 DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
628 }
629
630 /** Obtain the thread number and the user-assigned thread name. */
DRD_(thread_get_name)631 const char* DRD_(thread_get_name)(const DrdThreadId tid)
632 {
633 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
634 && tid != DRD_INVALID_THREADID);
635
636 return DRD_(g_threadinfo)[tid].name;
637 }
638
639 /** Set the name of the specified thread. */
DRD_(thread_set_name)640 void DRD_(thread_set_name)(const DrdThreadId tid, const char* const name)
641 {
642 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
643 && tid != DRD_INVALID_THREADID);
644
645 if (name == NULL || name[0] == 0)
646 VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
647 sizeof(DRD_(g_threadinfo)[tid].name),
648 "Thread %d",
649 tid);
650 else
651 VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
652 sizeof(DRD_(g_threadinfo)[tid].name),
653 "Thread %d (%s)",
654 tid, name);
655 DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
656 }
657
658 /**
659 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
660 * conflict set.
661 */
DRD_(thread_set_vg_running_tid)662 void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
663 {
664 tl_assert(vg_tid != VG_INVALID_THREADID);
665
666 if (vg_tid != s_vg_running_tid)
667 {
668 DRD_(thread_set_running_tid)(vg_tid,
669 DRD_(VgThreadIdToDrdThreadId)(vg_tid));
670 }
671
672 tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
673 tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
674 }
675
676 /**
677 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
678 * conflict set.
679 */
void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
                                  const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      if (s_trace_context_switches
          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
      {
         VG_(message)(Vg_DebugMsg,
                      "Context switch from thread %d to thread %d;"
                      " segments: %llu\n",
                      DRD_(g_drd_running_tid), drd_tid,
                      DRD_(sg_get_segments_alive_count)());
      }
      /* Update both 'running thread' globals before recomputing the
         conflict set -- thread_compute_conflict_set() is invoked for the
         newly scheduled thread. */
      s_vg_running_tid = vg_tid;
      DRD_(g_drd_running_tid) = drd_tid;
      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
      s_context_switch_count++;
   }

   /* Postcondition: a valid thread is running according to both IDs. */
   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}
706
707 /**
708 * Increase the synchronization nesting counter. Must be called before the
709 * client calls a synchronization function.
710 */
DRD_(thread_enter_synchr)711 int DRD_(thread_enter_synchr)(const DrdThreadId tid)
712 {
713 tl_assert(DRD_(IsValidDrdThreadId)(tid));
714 return DRD_(g_threadinfo)[tid].synchr_nesting++;
715 }
716
717 /**
718 * Decrease the synchronization nesting counter. Must be called after the
719 * client left a synchronization function.
720 */
DRD_(thread_leave_synchr)721 int DRD_(thread_leave_synchr)(const DrdThreadId tid)
722 {
723 tl_assert(DRD_(IsValidDrdThreadId)(tid));
724 tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
725 return --DRD_(g_threadinfo)[tid].synchr_nesting;
726 }
727
728 /** Returns the synchronization nesting counter. */
DRD_(thread_get_synchr_nesting_count)729 int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
730 {
731 tl_assert(DRD_(IsValidDrdThreadId)(tid));
732 return DRD_(g_threadinfo)[tid].synchr_nesting;
733 }
734
735 /** Append a new segment at the end of the segment list. */
736 static
thread_append_segment(const DrdThreadId tid,Segment * const sg)737 void thread_append_segment(const DrdThreadId tid, Segment* const sg)
738 {
739 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
740 && tid != DRD_INVALID_THREADID);
741
742 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
743 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
744 #endif
745
746 sg->prev = DRD_(g_threadinfo)[tid].last;
747 sg->next = 0;
748 if (DRD_(g_threadinfo)[tid].last)
749 DRD_(g_threadinfo)[tid].last->next = sg;
750 DRD_(g_threadinfo)[tid].last = sg;
751 if (DRD_(g_threadinfo)[tid].first == 0)
752 DRD_(g_threadinfo)[tid].first = sg;
753
754 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
755 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
756 #endif
757 }
758
759 /**
760 * Remove a segment from the segment list of thread threadid, and free the
761 * associated memory.
762 */
763 static
thread_discard_segment(const DrdThreadId tid,Segment * const sg)764 void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
765 {
766 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
767 && tid != DRD_INVALID_THREADID);
768
769 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
770 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
771 #endif
772
773 if (sg->prev)
774 sg->prev->next = sg->next;
775 if (sg->next)
776 sg->next->prev = sg->prev;
777 if (sg == DRD_(g_threadinfo)[tid].first)
778 DRD_(g_threadinfo)[tid].first = sg->next;
779 if (sg == DRD_(g_threadinfo)[tid].last)
780 DRD_(g_threadinfo)[tid].last = sg->prev;
781 DRD_(sg_put)(sg);
782
783 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
784 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
785 #endif
786 }
787
788 /**
789 * Returns a pointer to the vector clock of the most recent segment associated
790 * with thread 'tid'.
791 */
DRD_(thread_get_vc)792 VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
793 {
794 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
795 && tid != DRD_INVALID_THREADID);
796 tl_assert(DRD_(g_threadinfo)[tid].last);
797 return &DRD_(g_threadinfo)[tid].last->vc;
798 }
799
800 /**
801 * Return the latest segment of thread 'tid' and increment its reference count.
802 */
DRD_(thread_get_latest_segment)803 void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
804 {
805 tl_assert(sg);
806 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
807 && tid != DRD_INVALID_THREADID);
808 tl_assert(DRD_(g_threadinfo)[tid].last);
809
810 DRD_(sg_put)(*sg);
811 *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last);
812 }
813
814 /**
815 * Compute the minimum of all latest vector clocks of all threads
816 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
817 *
818 * @param vc pointer to a vectorclock, holds result upon return.
819 */
DRD_(thread_compute_minimum_vc)820 static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
821 {
822 unsigned i;
823 Bool first;
824 Segment* latest_sg;
825
826 first = True;
827 for (i = 0; i < DRD_N_THREADS; i++)
828 {
829 latest_sg = DRD_(g_threadinfo)[i].last;
830 if (latest_sg)
831 {
832 if (first)
833 DRD_(vc_assign)(vc, &latest_sg->vc);
834 else
835 DRD_(vc_min)(vc, &latest_sg->vc);
836 first = False;
837 }
838 }
839 }
840
841 /**
842 * Compute the maximum of all latest vector clocks of all threads.
843 *
844 * @param vc pointer to a vectorclock, holds result upon return.
845 */
DRD_(thread_compute_maximum_vc)846 static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
847 {
848 unsigned i;
849 Bool first;
850 Segment* latest_sg;
851
852 first = True;
853 for (i = 0; i < DRD_N_THREADS; i++)
854 {
855 latest_sg = DRD_(g_threadinfo)[i].last;
856 if (latest_sg)
857 {
858 if (first)
859 DRD_(vc_assign)(vc, &latest_sg->vc);
860 else
861 DRD_(vc_combine)(vc, &latest_sg->vc);
862 first = False;
863 }
864 }
865 }
866
867 /**
868 * Discard all segments that have a defined order against the latest vector
869 * clock of all threads -- these segments can no longer be involved in a
870 * data race.
871 */
static void thread_discard_ordered_segments(void)
{
   unsigned i;
   VectorClock thread_vc_min;

   s_discard_ordered_segments_count++;

   DRD_(vc_init)(&thread_vc_min, 0, 0);
   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
   if (DRD_(sg_get_trace)())
   {
      char *vc_min, *vc_max;
      VectorClock thread_vc_max;

      /* The maximum clock is only computed for the trace message. */
      DRD_(vc_init)(&thread_vc_max, 0, 0);
      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
      /* DRD_(vc_aprint)() allocates; free both strings after printing. */
      vc_min = DRD_(vc_aprint)(&thread_vc_min);
      vc_max = DRD_(vc_aprint)(&thread_vc_max);
      VG_(message)(Vg_DebugMsg,
                   "Discarding ordered segments -- min vc is %s, max vc is %s\n",
                   vc_min, vc_max);
      VG_(free)(vc_min);
      VG_(free)(vc_max);
      DRD_(vc_cleanup)(&thread_vc_max);
   }

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;
      Segment* sg_next;
      /* Note: sg_next is captured in the loop condition before the segment
         is discarded, and the '(sg_next = sg->next)' test also guarantees
         that the last segment of a thread is never discarded. */
      for (sg = DRD_(g_threadinfo)[i].first;
           sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
           sg = sg_next)
      {
         thread_discard_segment(i, sg);
      }
   }
   DRD_(vc_cleanup)(&thread_vc_min);
}
911
912 /**
913 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
914 * by Mark Christiaens e.a. The property equiv(sg1, sg2) holds if and only if
915 * all segments in the set CS are ordered consistently against both sg1 and
916 * sg2. The set CS is defined as the set of segments that can immediately
917 * precede future segments via inter-thread synchronization operations. In
918 * DRD the set CS consists of the latest segment of each thread combined with
919 * all segments for which the reference count is strictly greater than one.
920 * The code below is an optimized version of the following:
921 *
922 * for (i = 0; i < DRD_N_THREADS; i++)
923 * {
924 * Segment* sg;
925 *
926 * for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
927 * {
928 * if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
929 * {
930 * if ( DRD_(vc_lte)(&sg1->vc, &sg->vc)
931 * != DRD_(vc_lte)(&sg2->vc, &sg->vc)
932 * || DRD_(vc_lte)(&sg->vc, &sg1->vc)
933 * != DRD_(vc_lte)(&sg->vc, &sg2->vc))
934 * {
935 * return False;
936 * }
937 * }
938 * }
939 * }
940 */
static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
                                               Segment* const sg1,
                                               Segment* const sg2)
{
   unsigned i;

   /* sg1 and sg2 must be consecutive segments of the same thread, with
      sg1 happening before sg2. */
   tl_assert(sg1->next);
   tl_assert(sg2->next);
   tl_assert(sg1->next == sg2);
   tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

      /* Forward scan: look for a segment sg in CS (last segment of the
         thread, i.e. sg->next == 0, or refcount > 1) with
         vc_lte(sg1, sg) != vc_lte(sg2, sg). Since segments within one
         thread are ordered, once vc_lte(sg2, sg) holds it holds for all
         later segments as well, hence the break. */
      for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
      {
         if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
         {
            if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
               break;
            if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
               return False;
         }
      }
      /* Backward scan: symmetric check for vc_lte(sg, sg1) versus
         vc_lte(sg, sg2). Once vc_lte(sg, sg1) holds it also holds for all
         earlier segments, hence the break. */
      for (sg = DRD_(g_threadinfo)[i].last; sg; sg = sg->prev)
      {
         if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
         {
            if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
               break;
            if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
               return False;
         }
      }
   }
   return True;
}
979
/**
 * Merge all segments that may be merged without triggering false positives
 * or discarding real data races. For the theoretical background of segment
 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
 * and Koen De Bosschere. Bounding the number of segment histories during
 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
 * pp 1221-1238, September 2002. This paper contains a proof that
 * consecutive segments for which the property equiv(s1,s2) holds can be
 * merged without reducing the accuracy of datarace detection. Furthermore
 * it is also proven that the total number of all segments will never grow
 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
 * every time a new segment is created. The property equiv(s1, s2) is defined
 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
 * clocks of segments s and s1 are ordered in the same way as those of segments
 * s and s2. The set CS is defined as the set of existing segments s that have
 * the potential to conflict with not yet created segments, either because the
 * segment s is the latest segment of a thread or because it can become the
 * immediate predecessor of a new segment due to a synchronization operation.
 */
static void thread_merge_segments(void)
{
   unsigned i;

   /* Reset the counter that triggers periodic merging. */
   s_new_segments_since_last_merge = 0;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif

      for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
      {
         /*
          * Merge sg with its successor only if both have a reference count
          * of one, the successor is not the thread's last segment
          * (sg->next->next) and merging keeps the ordering of the remaining
          * segments consistent (equiv(sg, sg->next) — see above).
          */
         if (DRD_(sg_get_refcnt)(sg) == 1
             && sg->next
             && DRD_(sg_get_refcnt)(sg->next) == 1
             && sg->next->next
             && thread_consistent_segment_ordering(i, sg, sg->next))
         {
            /* Merge sg and sg->next into sg. */
            DRD_(sg_merge)(sg, sg->next);
            thread_discard_segment(i, sg->next);
         }
      }

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
   }
}
1032
/**
 * Create a new segment for the specified thread, and discard any segments
 * that cannot cause races anymore.
 */
void DRD_(thread_new_segment)(const DrdThreadId tid)
{
   Segment* last_sg;
   Segment* new_sg;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   last_sg = DRD_(g_threadinfo)[tid].last;
   new_sg = DRD_(sg_new)(tid, tid);
   thread_append_segment(tid, new_sg);
   /*
    * If a segment was appended to the running thread, bring the conflict
    * set up to date incrementally, starting from the vector clock of the
    * previous last segment.
    */
   if (tid == DRD_(g_drd_running_tid) && last_sg)
   {
      DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
      s_update_conflict_set_new_sg_count++;
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   /* Discard and merge segments periodically to bound memory usage. */
   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}
1064
/**
 * Call this function after thread 'joiner' joined thread 'joinee'.
 *
 * Combines the vector clock of the joinee's last segment into the joiner's
 * last segment, and discards segments that have become ordered before all
 * threads.
 */
void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[joiner].last);
   tl_assert(DRD_(g_threadinfo)[joinee].last);

   if (DRD_(sg_get_trace)())
   {
      char *str1, *str2;
      str1 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
      str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joinee].last->vc);
      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
                   str1, str2);
      VG_(free)(str1);
      VG_(free)(str2);
   }
   if (joiner == DRD_(g_drd_running_tid))
   {
      VectorClock old_vc;

      /*
       * The running thread's vector clock is about to change; remember the
       * old one so the conflict set can be updated incrementally
       * (DRD_(thread_update_conflict_set)() only works for the running
       * thread).
       */
      DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[joiner].last->vc);
      DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
                       &DRD_(g_threadinfo)[joinee].last->vc);
      DRD_(thread_update_conflict_set)(joiner, &old_vc);
      s_update_conflict_set_join_count++;
      DRD_(vc_cleanup)(&old_vc);
   }
   else
   {
      /* Not the running thread: no conflict-set update is needed. */
      DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
                       &DRD_(g_threadinfo)[joinee].last->vc);
   }

   thread_discard_ordered_segments();

   if (DRD_(sg_get_trace)())
   {
      char* str;
      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
      VG_(free)(str);
   }
}
1113
/**
 * Update the vector clock of the last segment of thread tid with the
 * vector clock of segment sg.
 */
static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
{
   const VectorClock* const vc = &sg->vc;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].last);
   tl_assert(sg);
   tl_assert(vc);

   if (tid != sg->tid)
   {
      VectorClock old_vc;

      /* Remember the old vector clock for the incremental conflict-set
       * update below.
       */
      DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[tid].last->vc);
      DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc, vc);
      if (DRD_(sg_get_trace)())
      {
         char *str1, *str2;
         str1 = DRD_(vc_aprint)(&old_vc);
         str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
         VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
         VG_(free)(str1);
         VG_(free)(str2);
      }

      thread_discard_ordered_segments();

      DRD_(thread_update_conflict_set)(tid, &old_vc);
      s_update_conflict_set_sync_count++;

      DRD_(vc_cleanup)(&old_vc);
   }
   else
   {
      /*
       * Combining a thread's vector clock with that of one of its own
       * segments must be a no-op: that segment's vector clock is already
       * included in the thread's latest vector clock.
       */
      tl_assert(DRD_(vc_lte)(vc, &DRD_(g_threadinfo)[tid].last->vc));
   }
}
1156
/**
 * Create a new segment for thread tid and update the vector clock of the last
 * segment of this thread with the vector clock of segment sg. Call this
 * function after thread tid had to wait because of thread synchronization
 * until the memory accesses in the segment sg finished.
 */
void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   tl_assert(sg);

   thread_append_segment(tid, DRD_(sg_new)(tid, tid));

   /* Handles the conflict-set update and segment discarding internally. */
   thread_combine_vc_sync(tid, sg);

   /* Discard and merge segments periodically to bound memory usage. */
   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}
1181
1182 /**
1183 * Call this function whenever a thread is no longer using the memory
1184 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
1185 * increase.
1186 */
DRD_(thread_stop_using_mem)1187 void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
1188 {
1189 unsigned i;
1190 Segment* p;
1191
1192 for (i = 0; i < DRD_N_THREADS; i++)
1193 for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
1194 DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
1195
1196 DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
1197 }
1198
/** Specify whether memory loads should be recorded for thread tid. */
void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* 'enabled' must be a proper Bool, i.e. either 0 or 1. */
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
}
1208
/** Specify whether memory stores should be recorded for thread tid. */
void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* 'enabled' must be a proper Bool, i.e. either 0 or 1. */
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
}
1218
1219 /**
1220 * Print the segment information for all threads.
1221 *
1222 * This function is only used for debugging purposes.
1223 */
DRD_(thread_print_all)1224 void DRD_(thread_print_all)(void)
1225 {
1226 unsigned i;
1227 Segment* p;
1228
1229 for (i = 0; i < DRD_N_THREADS; i++)
1230 {
1231 if (DRD_(g_threadinfo)[i].first)
1232 {
1233 VG_(printf)("**************\n"
1234 "* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n"
1235 "**************\n",
1236 i,
1237 DRD_(g_threadinfo)[i].valid,
1238 DRD_(g_threadinfo)[i].vg_thread_exists,
1239 DRD_(g_threadinfo)[i].vg_threadid,
1240 DRD_(g_threadinfo)[i].posix_thread_exists,
1241 DRD_(g_threadinfo)[i].pt_threadid,
1242 DRD_(g_threadinfo)[i].detached_posix_thread);
1243 for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
1244 {
1245 DRD_(sg_print)(p);
1246 }
1247 }
1248 }
1249 }
1250
1251 /** Show a call stack involved in a data race. */
show_call_stack(const DrdThreadId tid,ExeContext * const callstack)1252 static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
1253 {
1254 const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
1255
1256 if (vg_tid != VG_INVALID_THREADID) {
1257 if (callstack)
1258 VG_(pp_ExeContext)(callstack);
1259 else
1260 VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
1261 } else {
1262 if (!VG_(clo_xml))
1263 VG_(message)(Vg_UserMsg,
1264 " (thread finished, call stack no longer available)\n");
1265 }
1266 }
1267
/**
 * Print information about the segments involved in a data race.
 *
 * For segment p of thread tid, print the start and end call stacks of every
 * segment of another thread that is unordered to p and whose access bitmap
 * conflicts with an access of type access_type to [ addr, addr + size [.
 */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      if (i != tid)
      {
         Segment* q;
         for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev)
         {
            /*
             * Since q iterates over the segments of thread i in order of
             * decreasing vector clocks, if q->vc <= p->vc, then
             * q->next->vc <= p->vc will also hold. Hence, break out of the
             * loop once this condition is met.
             */
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            /* Only segments unordered to p can be involved in a race. */
            if (! DRD_(vc_lte)(&p->vc, &q->vc))
            {
               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
                                              access_type))
               {
                  tl_assert(q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  <other_segment_start>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment start (thread %d)\n", i);
                  show_call_stack(i, q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_start>\n"
                                     "  <other_segment_end>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment end (thread %d)\n", i);
                  /* The stack trace of q's successor marks where q ended;
                   * show_call_stack() falls back to the live stack when
                   * q has no successor. */
                  show_call_stack(i, q->next ? q->next->stacktrace : 0);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_end>\n");
               }
            }
         }
      }
   }
}
1324
1325 /** Print information about all segments involved in a data race. */
DRD_(thread_report_conflicting_segments)1326 void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
1327 const Addr addr,
1328 const SizeT size,
1329 const BmAccessTypeT access_type)
1330 {
1331 Segment* p;
1332
1333 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1334 && tid != DRD_INVALID_THREADID);
1335
1336 for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
1337 {
1338 if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
1339 {
1340 thread_report_conflicting_segments_segment(tid, addr, size,
1341 access_type, p);
1342 }
1343 }
1344 }
1345
1346 /**
1347 * Verify whether the conflict set for thread tid is up to date. Only perform
1348 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
1349 */
thread_conflict_set_up_to_date(const DrdThreadId tid)1350 static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
1351 {
1352 static int do_verify_conflict_set = -1;
1353 Bool result;
1354 struct bitmap* computed_conflict_set = 0;
1355
1356 if (do_verify_conflict_set < 0)
1357 do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;
1358
1359 if (do_verify_conflict_set == 0)
1360 return True;
1361
1362 thread_compute_conflict_set(&computed_conflict_set, tid);
1363 result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
1364 if (! result)
1365 {
1366 VG_(printf)("actual conflict set:\n");
1367 DRD_(bm_print)(DRD_(g_conflict_set));
1368 VG_(printf)("\n");
1369 VG_(printf)("computed conflict set:\n");
1370 DRD_(bm_print)(computed_conflict_set);
1371 VG_(printf)("\n");
1372 }
1373 DRD_(bm_delete)(computed_conflict_set);
1374 return result;
1375 }
1376
/**
 * Compute the conflict set: a bitmap that represents the union of all memory
 * accesses of all segments that are unordered to the current segment of the
 * thread tid.
 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   /*
    * Subtract the current bitmap creation counters here and add them back
    * after the computation, such that the statistics counters only account
    * for the bitmaps created during this computation.
    */
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   /* Start from an empty bitmap, reusing an existing one if provided. */
   if (*conflict_set)
   {
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   }
   else
   {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set)
   {
      char* str;

      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   /* p is the current (last) segment of thread tid. */
   p = DRD_(g_threadinfo)[tid].last;
   {
      unsigned j;

      if (s_trace_conflict_set)
      {
         char* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      /*
       * Merge the access bitmap of every segment of every other thread that
       * is unordered to p (neither q->vc <= p->vc nor p->vc <= q->vc).
       */
      for (j = 0; j < DRD_N_THREADS; j++)
      {
         if (j != tid && DRD_(IsValidDrdThreadId)(j))
         {
            Segment* q;
            for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
            {
               if (! DRD_(vc_lte)(&q->vc, &p->vc)
                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
               {
                  if (s_trace_conflict_set)
                  {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               }
               else
               {
                  if (s_trace_conflict_set)
                  {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   /* See the matching subtraction at the top of this function. */
   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
   }
}
1484
/**
 * Update the conflict set after the vector clock of thread tid has been
 * updated from old_vc to its current value, either because a new segment has
 * been created or because of a synchronization operation.
 *
 * Works incrementally: only the accesses of segments whose ordering relative
 * to thread tid may have changed because of the clock advance from old_vc to
 * new_vc are re-evaluated.
 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   /* Incremental updates are only supported for the running thread. */
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set)
   {
      char* str;

      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = &DRD_(g_threadinfo)[tid].last->vc;
   /* The vector clock may only have advanced, never gone backwards. */
   tl_assert(DRD_(vc_lte)(old_vc, new_vc));

   DRD_(bm_unmark)(DRD_(g_conflict_set));

   /*
    * Phase one: mark the addresses of all segments whose membership of the
    * conflict set may have changed because of the vector clock update. Both
    * loops below iterate over the segments of thread j in order of
    * decreasing vector clocks, so the first loop covers the segments not
    * ordered before new_vc and the second loop continues with the segments
    * ordered before new_vc but not before old_vc.
    */
   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

      for (q = DRD_(g_threadinfo)[j].last;
           q && !DRD_(vc_lte)(&q->vc, new_vc);
           q = q->prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            char* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         /* Only mark segments whose membership actually changed. */
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }

      for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         /* Here q->vc <= new_vc may hold, so test both directions. */
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(&q->vc, new_vc)
            && !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            char* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }
   }

   /* Phase two: clear all marked addresses from the conflict set ... */
   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

   /*
    * ... and re-merge the marked accesses of all segments that are
    * unordered to the current segment of thread tid.
    */
   p = DRD_(g_threadinfo)[tid].last;
   for (j = 0; j < DRD_N_THREADS; j++)
   {
      if (j != tid && DRD_(IsValidDrdThreadId)(j))
      {
         Segment* q;
         for (q = DRD_(g_threadinfo)[j].last;
              q && !DRD_(vc_lte)(&q->vc, &p->vc);
              q = q->prev) {
            if (!DRD_(vc_lte)(&p->vc, &q->vc))
               DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
      }
   }

   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}
1602
/** Report the number of context switches performed (statistics counter). */
ULong DRD_(thread_get_context_switch_count)(void)
{
   return s_context_switch_count;
}
1608
/** Report the number of ordered segments that have been discarded
 *  (statistics counter). */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   return s_discard_ordered_segments_count;
}
1614
1615 /** Return how many times the conflict set has been updated entirely. */
DRD_(thread_get_compute_conflict_set_count)1616 ULong DRD_(thread_get_compute_conflict_set_count)()
1617 {
1618 return s_compute_conflict_set_count;
1619 }
1620
/** Return how many times the conflict set has been updated partially
 *  (statistics counter). */
ULong DRD_(thread_get_update_conflict_set_count)(void)
{
   return s_update_conflict_set_count;
}
1626
/**
 * Return how many times the conflict set has been updated partially
 * because a new segment has been created (statistics counter).
 */
ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
{
   return s_update_conflict_set_new_sg_count;
}
1635
/**
 * Return how many times the conflict set has been updated partially
 * because of combining vector clocks due to synchronization operations
 * other than reader/writer lock or barrier operations (statistics counter).
 */
ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
{
   return s_update_conflict_set_sync_count;
}
1645
/**
 * Return how many times the conflict set has been updated partially
 * because of thread joins (statistics counter).
 */
ULong DRD_(thread_get_update_conflict_set_join_count)(void)
{
   return s_update_conflict_set_join_count;
}
1654
/**
 * Return the number of first-level bitmaps that have been created during
 * conflict set updates (statistics counter).
 */
ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
{
   return s_conflict_set_bitmap_creation_count;
}
1663
/**
 * Return the number of second-level bitmaps that have been created during
 * conflict set updates (statistics counter).
 */
ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
{
   return s_conflict_set_bitmap2_creation_count;
}
1672