/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_basics.h"
#include "drd_clientobj.h"
#include "drd_error.h"
#include "drd_mutex.h"
#include "pub_tool_vki.h"
#include "pub_tool_errormgr.h"    /* VG_(maybe_record_error)()     */
#include "pub_tool_libcassert.h"  /* tl_assert()                   */
#include "pub_tool_libcbase.h"    /* VG_(strlen)                   */
#include "pub_tool_libcprint.h"   /* VG_(message)()                */
#include "pub_tool_libcproc.h"    /* VG_(read_millisecond_timer)() */
#include "pub_tool_machine.h"     /* VG_(get_IP)()                 */
#include "pub_tool_threadstate.h" /* VG_(get_running_tid)()        */

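/*
 * Note: the functions below are typically driven by the mutex client
 * requests issued from DRD's pthread intercepts. For a regular mutex the
 * expected call sequence is roughly:
 *
 *   DRD_(mutex_init)()         - before pthread_mutex_init()
 *   DRD_(mutex_pre_lock)()     - before pthread_mutex_lock()
 *   DRD_(mutex_post_lock)()    - after pthread_mutex_lock() has returned
 *   DRD_(mutex_unlock)()       - before pthread_mutex_unlock()
 *   DRD_(mutex_post_destroy)() - after pthread_mutex_destroy()
 */
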
/* Local functions. */

static void mutex_cleanup(struct mutex_info* p);
static Bool mutex_is_locked(struct mutex_info* const p);
static void mutex_delete_thread(struct mutex_info* p, const DrdThreadId tid);


/* Local variables. */

static Bool s_trace_mutex;
static ULong s_mutex_lock_count;
static ULong s_mutex_segment_creation_count;
static UInt s_mutex_lock_threshold_ms;


/* Function definitions. */

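/** Enable or disable tracing of mutex operations. */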
void DRD_(mutex_set_trace)(const Bool trace_mutex)
{
   tl_assert((!! trace_mutex) == trace_mutex);
   s_trace_mutex = trace_mutex;
}

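/**
 * Set the threshold (in milliseconds) above which the time a mutex was held
 * is reported. A value of zero disables hold-time reporting.
 */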
void DRD_(mutex_set_lock_threshold)(const UInt lock_threshold_ms)
{
   s_mutex_lock_threshold_ms = lock_threshold_ms;
}

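/** Initialize the client object for the mutex at client address 'mutex'. */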
static
void DRD_(mutex_initialize)(struct mutex_info* const p,
                            const Addr mutex, const MutexT mutex_type)
{
   tl_assert(mutex);
   tl_assert(p->a1 == mutex);

   p->cleanup = (void(*)(DrdClientobj*))mutex_cleanup;
   p->delete_thread
      = (void(*)(DrdClientobj*, DrdThreadId))mutex_delete_thread;
   p->mutex_type = mutex_type;
   p->recursion_count = 0;
   p->ignore_ordering = False;
   p->owner = DRD_INVALID_THREADID;
   p->last_locked_segment = 0;
   p->acquiry_time_ms = 0;
   p->acquired_at = 0;
}

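/**
 * Tell DRD to ignore the happens-before relationships that locking and
 * unlocking the mutex at address 'mutex' would otherwise introduce.
 */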
void DRD_(mutex_ignore_ordering)(const Addr mutex)
{
   struct mutex_info* p = DRD_(mutex_get)(mutex);

   if (s_trace_mutex)
      DRD_(trace_msg)("[%d] mutex_ignore_ordering %s 0x%lx",
                      DRD_(thread_get_running_tid)(),
                      p ? DRD_(mutex_type_name)(p->mutex_type) : "(?)",
                      mutex);

   if (p) {
      p->ignore_ordering = True;
   } else {
      DRD_(not_a_mutex)(mutex);
   }
}

/** Deallocate the memory that was allocated by mutex_initialize(). */
static void mutex_cleanup(struct mutex_info* p)
{
   tl_assert(p);

   if (s_trace_mutex)
      DRD_(trace_msg)("[%d] mutex_destroy %s 0x%lx rc %d owner %d",
                      DRD_(thread_get_running_tid)(),
                      DRD_(mutex_get_typename)(p), p->a1,
                      p ? p->recursion_count : -1,
                      p ? p->owner : DRD_INVALID_THREADID);

   if (mutex_is_locked(p))
   {
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              MutexErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Destroying locked mutex",
                              &MEI);
   }

   DRD_(sg_put)(p->last_locked_segment);
   p->last_locked_segment = 0;
}

/** Report that address 'mutex' is not the address of a mutex object. */
void DRD_(not_a_mutex)(const Addr mutex)
{
   MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                        mutex, -1, DRD_INVALID_THREADID };
   VG_(maybe_record_error)(VG_(get_running_tid)(),
                           MutexErr,
                           VG_(get_IP)(VG_(get_running_tid)()),
                           "Not a mutex",
                           &MEI);
}

/**
 * Report that address 'mutex' is not the address of a mutex object of the
 * expected type.
 */
static void wrong_mutex_type(const Addr mutex)
{
   MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                        mutex, -1, DRD_INVALID_THREADID };
   VG_(maybe_record_error)(VG_(get_running_tid)(),
                           MutexErr,
                           VG_(get_IP)(VG_(get_running_tid)()),
                           "Mutex type mismatch",
                           &MEI);
}

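/**
 * Look up the mutex at client address 'mutex'. Allocate and initialize a
 * client object if it does not yet exist. Return NULL if the address is
 * already in use by a client object of another type or if the mutex type
 * does not match.
 */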
static
struct mutex_info*
DRD_(mutex_get_or_allocate)(const Addr mutex, const MutexT mutex_type)
{
   struct mutex_info* p;

   tl_assert(offsetof(DrdClientobj, mutex) == 0);
   p = &(DRD_(clientobj_get)(mutex, ClientMutex)->mutex);
   if (p)
   {
      if (mutex_type == mutex_type_unknown || p->mutex_type == mutex_type)
         return p;
      else
      {
         wrong_mutex_type(mutex);
         return 0;
      }
   }

   if (DRD_(clientobj_present)(mutex, mutex + 1))
   {
      DRD_(not_a_mutex)(mutex);
      return 0;
   }

   p = &(DRD_(clientobj_add)(mutex, ClientMutex)->mutex);
   DRD_(mutex_initialize)(p, mutex, mutex_type);
   return p;
}

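/**
 * Return a pointer to the client object for the mutex at address 'mutex',
 * or NULL if no mutex object is known at that address.
 */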
struct mutex_info* DRD_(mutex_get)(const Addr mutex)
{
   tl_assert(offsetof(DrdClientobj, mutex) == 0);
   return &(DRD_(clientobj_get)(mutex, ClientMutex)->mutex);
}

/** Called before pthread_mutex_init(). */
struct mutex_info*
DRD_(mutex_init)(const Addr mutex, const MutexT mutex_type)
{
   struct mutex_info* p;

   if (s_trace_mutex)
      DRD_(trace_msg)("[%d] mutex_init %s 0x%lx",
                      DRD_(thread_get_running_tid)(),
                      DRD_(mutex_type_name)(mutex_type),
                      mutex);

   if (mutex_type == mutex_type_invalid_mutex)
   {
      DRD_(not_a_mutex)(mutex);
      return 0;
   }

   p = DRD_(mutex_get)(mutex);
   if (p)
   {
      const ThreadId vg_tid = VG_(get_running_tid)();
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(vg_tid,
                              MutexErr,
                              VG_(get_IP)(vg_tid),
                              "Mutex reinitialization",
                              &MEI);
      p->mutex_type = mutex_type;
      return p;
   }
   p = DRD_(mutex_get_or_allocate)(mutex, mutex_type);

   return p;
}

/** Called after pthread_mutex_destroy(). */
void DRD_(mutex_post_destroy)(const Addr mutex)
{
   struct mutex_info* p;

   p = DRD_(mutex_get)(mutex);
   if (p == 0)
   {
      DRD_(not_a_mutex)(mutex);
      return;
   }

   DRD_(clientobj_remove)(mutex, ClientMutex);
}

/**
 * Called before pthread_mutex_lock() is invoked. If a data structure for the
 * client-side object has not yet been created, do so now. Also check whether
 * an attempt is being made to lock recursively a synchronization object that
 * must not be locked recursively.
 */
void DRD_(mutex_pre_lock)(const Addr mutex, MutexT mutex_type,
                          const Bool trylock)
{
   struct mutex_info* p;

   p = DRD_(mutex_get_or_allocate)(mutex, mutex_type);
   if (p && mutex_type == mutex_type_unknown)
      mutex_type = p->mutex_type;

   if (s_trace_mutex)
      DRD_(trace_msg)("[%d] %s %s 0x%lx rc %d owner %d",
                      DRD_(thread_get_running_tid)(),
                      trylock ? "mutex_trylock " : "pre_mutex_lock ",
                      p ? DRD_(mutex_get_typename)(p) : "(?)",
                      mutex, p ? p->recursion_count : -1,
                      p ? p->owner : DRD_INVALID_THREADID);

   if (p == 0)
   {
      DRD_(not_a_mutex)(mutex);
      return;
   }

   tl_assert(p);

   if (mutex_type == mutex_type_invalid_mutex)
   {
      DRD_(not_a_mutex)(mutex);
      return;
   }

   if (! trylock
       && p->owner == DRD_(thread_get_running_tid)()
       && p->recursion_count >= 1
       && mutex_type != mutex_type_recursive_mutex)
   {
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              MutexErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Recursive locking not allowed",
                              &MEI);
   }
}

/**
 * Update mutex_info state when locking the pthread_mutex_t mutex.
 * Note: this function must be called after pthread_mutex_lock() has been
 * called, or a race condition is triggered!
 */
void DRD_(mutex_post_lock)(const Addr mutex, const Bool took_lock,
                           const Bool post_cond_wait)
{
   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
   struct mutex_info* p;

   p = DRD_(mutex_get)(mutex);

   if (s_trace_mutex)
      DRD_(trace_msg)("[%d] %s %s 0x%lx rc %d owner %d%s",
                      drd_tid,
                      post_cond_wait ? "cond_post_wait " : "post_mutex_lock",
                      p ? DRD_(mutex_get_typename)(p) : "(?)",
                      mutex, p ? p->recursion_count : 0,
                      p ? p->owner : VG_INVALID_THREADID,
                      took_lock ? "" : " (locking failed)");

   if (! p || ! took_lock)
      return;

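   /*
    * On the first acquisition by drd_tid, start a new segment. If another
    * thread held this mutex before, also combine the vector clock saved at
    * the last unlock such that a happens-before relationship with the
    * previous owner is established. Both steps are skipped if ordering is
    * ignored for this mutex.
    */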
   if (p->recursion_count == 0) {
      if (!p->ignore_ordering) {
         if (p->owner != drd_tid && p->owner != DRD_INVALID_THREADID) {
            tl_assert(p->last_locked_segment);

            DRD_(thread_new_segment_and_combine_vc)(drd_tid,
                                                    p->last_locked_segment);
         } else {
            DRD_(thread_new_segment)(drd_tid);
         }

         s_mutex_segment_creation_count++;
      }

      p->owner = drd_tid;
      p->acquiry_time_ms = VG_(read_millisecond_timer)();
      p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
      s_mutex_lock_count++;
   } else if (p->owner != drd_tid) {
      const ThreadId vg_tid = VG_(get_running_tid)();
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(vg_tid,
                              MutexErr,
                              VG_(get_IP)(vg_tid),
                              "The impossible happened: mutex is locked"
                              " simultaneously by two threads",
                              &MEI);
      p->owner = drd_tid;
   }
   p->recursion_count++;
}

/**
 * Update mutex_info state when unlocking the pthread_mutex_t mutex.
 *
 * @param[in] mutex      Address of the client mutex.
 * @param[in] mutex_type Mutex type.
 *
 * @note This function must be called before pthread_mutex_unlock() is called,
 *       or a race condition is triggered!
 */
void DRD_(mutex_unlock)(const Addr mutex, MutexT mutex_type)
{
   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
   const ThreadId vg_tid = VG_(get_running_tid)();
   struct mutex_info* p;

   p = DRD_(mutex_get)(mutex);
   if (p && mutex_type == mutex_type_unknown)
      mutex_type = p->mutex_type;

   if (s_trace_mutex) {
      DRD_(trace_msg)("[%d] mutex_unlock %s 0x%lx rc %d",
                      drd_tid, p ? DRD_(mutex_get_typename)(p) : "(?)",
                      mutex, p ? p->recursion_count : 0);
   }

   if (p == 0 || mutex_type == mutex_type_invalid_mutex)
   {
      DRD_(not_a_mutex)(mutex);
      return;
   }

   if (p->owner == DRD_INVALID_THREADID)
   {
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(vg_tid,
                              MutexErr,
                              VG_(get_IP)(vg_tid),
                              "Mutex not locked",
                              &MEI);
      return;
   }

   tl_assert(p);
   if (p->mutex_type != mutex_type) {
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(vg_tid, MutexErr, VG_(get_IP)(vg_tid),
                              "Mutex type changed", &MEI);
   }
   tl_assert(p->mutex_type == mutex_type);
   tl_assert(p->owner != DRD_INVALID_THREADID);

   if (p->owner != drd_tid || p->recursion_count <= 0)
   {
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(vg_tid,
                              MutexErr,
                              VG_(get_IP)(vg_tid),
                              "Mutex not locked by calling thread",
                              &MEI);
      return;
   }
   tl_assert(p->recursion_count > 0);
   p->recursion_count--;
   tl_assert(p->recursion_count >= 0);

   if (p->recursion_count == 0)
   {
      if (s_mutex_lock_threshold_ms > 0)
      {
         Long held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
         if (held > s_mutex_lock_threshold_ms)
         {
            HoldtimeErrInfo HEI
               = { DRD_(thread_get_running_tid)(),
                   mutex, p->acquired_at, held, s_mutex_lock_threshold_ms };
            VG_(maybe_record_error)(vg_tid,
                                    HoldtimeErr,
                                    VG_(get_IP)(vg_tid),
                                    "mutex",
                                    &HEI);
         }
      }

      /* This pthread_mutex_unlock() call really unlocks the mutex. Save the */
      /* current vector clock of the thread such that it is available when   */
      /* this mutex is locked again.                                         */

      DRD_(thread_get_latest_segment)(&p->last_locked_segment, drd_tid);
      if (!p->ignore_ordering)
         DRD_(thread_new_segment)(drd_tid);
      p->acquired_at = 0;
      s_mutex_segment_creation_count++;
   }
}

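/**
 * Handle a pthread_spin_init() or pthread_spin_unlock() call: both leave the
 * spinlock in the unlocked state. If the spinlock is already known to DRD,
 * treat the call as an unlock; otherwise create and initialize a client
 * object for it.
 */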
void DRD_(spinlock_init_or_unlock)(const Addr spinlock)
{
   struct mutex_info* mutex_p = DRD_(mutex_get)(spinlock);
   if (mutex_p)
   {
      DRD_(mutex_unlock)(spinlock, mutex_type_spinlock);
   }
   else
   {
      DRD_(mutex_init)(spinlock, mutex_type_spinlock);
   }
}

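/** Return a human-readable name for the type of mutex *p. */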
const HChar* DRD_(mutex_get_typename)(struct mutex_info* const p)
{
   tl_assert(p);

   return DRD_(mutex_type_name)(p->mutex_type);
}

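/** Return a human-readable name for mutex type mt. */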
const HChar* DRD_(mutex_type_name)(const MutexT mt)
{
   switch (mt)
   {
   case mutex_type_unknown:
      return "mutex";
   case mutex_type_invalid_mutex:
      return "invalid mutex";
   case mutex_type_recursive_mutex:
      return "recursive mutex";
   case mutex_type_errorcheck_mutex:
      return "error checking mutex";
   case mutex_type_default_mutex:
      return "mutex";
   case mutex_type_spinlock:
      return "spinlock";
   case mutex_type_cxa_guard:
      return "cxa_guard";
   }
   tl_assert(0);
   return "?";
}

/** Return true if the specified mutex is locked by any thread. */
static Bool mutex_is_locked(struct mutex_info* const p)
{
   tl_assert(p);
   return (p->recursion_count > 0);
}

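/** Return True if the mutex at address 'mutex' is locked by thread tid. */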
Bool DRD_(mutex_is_locked_by)(const Addr mutex, const DrdThreadId tid)
{
   struct mutex_info* const p = DRD_(mutex_get)(mutex);
   if (p)
   {
      return (p->recursion_count > 0 && p->owner == tid);
   }
   return False;
}

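/** Return the current recursion count of the mutex at address 'mutex'. */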
int DRD_(mutex_get_recursion_count)(const Addr mutex)
{
   struct mutex_info* const p = DRD_(mutex_get)(mutex);
   tl_assert(p);
   return p->recursion_count;
}

/**
 * Call this function when thread tid ceases to exist, such that the
 * "last owner" field can be cleared if it still refers to that thread.
 */
static void mutex_delete_thread(struct mutex_info* p, const DrdThreadId tid)
{
   tl_assert(p);

   if (p->owner == tid && p->recursion_count > 0)
   {
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              MutexErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Mutex still locked at thread exit",
                              &MEI);
      p->owner = VG_INVALID_THREADID;
   }
}

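/** Return the number of mutex acquisitions (recursive re-locks not counted). */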
ULong DRD_(get_mutex_lock_count)(void)
{
   return s_mutex_lock_count;
}

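/** Return the number of segments created by mutex lock and unlock operations. */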
ULong DRD_(get_mutex_segment_creation_count)(void)
{
   return s_mutex_segment_creation_count;
}