1 /*--------------------------------------------------------------------*/
2 /*--- Client-space code for DRD. drd_pthread_intercepts.c ---*/
3 /*--------------------------------------------------------------------*/
4
5 /*
6 This file is part of DRD, a thread error detector.
7
8 Copyright (C) 2006-2012 Bart Van Assche <bvanassche@acm.org>.
9
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2 of the
13 License, or (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
23 02111-1307, USA.
24
25 The GNU General Public License is contained in the file COPYING.
26 */
27
28 /* ---------------------------------------------------------------------
29 ALL THE CODE IN THIS FILE RUNS ON THE SIMULATED CPU.
30
31 These functions are not called directly - they're the targets of code
32 redirection or load notifications (see pub_core_redir.h for info).
33 They're named weirdly so that the intercept code can find them when the
34 shared object is initially loaded.
35
36 Note that this filename has the "drd_" prefix because it can appear
37 in stack traces, and the "drd_" makes it a little clearer that it
38 originates from Valgrind.
39 ------------------------------------------------------------------ */
40
41 /*
42 * Define _GNU_SOURCE to make sure that pthread_spinlock_t is available when
43 * compiling with older glibc versions (2.3 or before).
44 */
45 #ifndef _GNU_SOURCE
46 #define _GNU_SOURCE
47 #endif
48
49 #include <assert.h> /* assert() */
50 #include <errno.h>
51 #include <pthread.h> /* pthread_mutex_t */
52 #include <semaphore.h> /* sem_t */
53 #include <stdint.h> /* uintptr_t */
54 #include <stdio.h> /* fprintf() */
55 #include <stdlib.h> /* malloc(), free() */
56 #include <unistd.h> /* confstr() */
57 #include "config.h" /* HAVE_PTHREAD_MUTEX_ADAPTIVE_NP etc. */
58 #ifdef HAVE_USABLE_LINUX_FUTEX_H
59 #include <asm/unistd.h> /* __NR_futex */
60 #include <linux/futex.h> /* FUTEX_WAIT */
61 #ifndef FUTEX_PRIVATE_FLAG
62 #define FUTEX_PRIVATE_FLAG 0
63 #endif
64 #endif
65 #include "drd_basics.h" /* DRD_() */
66 #include "drd_clientreq.h"
67 #include "pub_tool_redir.h" /* VG_WRAP_FUNCTION_ZZ() */
68
69
70 /*
71 * Notes regarding thread creation:
72 * - sg_init() runs in the context of the created thread and copies the vector
73 * clock of the creator thread. This only works reliably if the creator
74 * thread waits until this copy has been performed.
75 * - DRD_(thread_compute_minimum_vc)() does not take into account the vector
76 *   clocks that are involved in thread creation and for which the
77 *   corresponding thread has not yet been created. Hence, not waiting until
78 *   the created thread has started would make it possible that segments get
79 *   discarded that should not yet be discarded. In other words: some data
80 *   races would not be detected.
81 */
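
/*
 * Illustrative sketch (added here, not part of the original notes) of the
 * handshake implemented by the code below:
 *
 *   creator thread                            created thread
 *   --------------                            --------------
 *   pthread_create_intercept():
 *     DRD_(sema_init)(&wrapper_started)
 *     CALL_FN_W_WWWW(..., DRD_(thread_wrapper), ...)
 *                                              DRD_(thread_wrapper)():
 *                                                VG_USERREQ__SET_PTHREADID
 *                                                DRD_(set_joinable)()
 *                                                DRD_(sema_up)(wrapper_started)
 *     DRD_(sema_down)(&wrapper_started)
 *     DRD_(sema_destroy)(&wrapper_started)
 */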
82
83 /**
84 * Macro for generating a Valgrind interception function.
85 * @param[in] ret_ty Return type of the function to be generated.
86 * @param[in] zf Z-encoded name of the interception function.
87 * @param[in] implf Name of the function that implements the intercept.
88 * @param[in] arg_decl Argument declaration list enclosed in parentheses.
89 * @param[in] argl Argument list enclosed in parentheses.
90 */
91 #ifdef VGO_darwin
92 static int never_true;
93 #define PTH_FUNC(ret_ty, zf, implf, argl_decl, argl) \
94 ret_ty VG_WRAP_FUNCTION_ZZ(VG_Z_LIBPTHREAD_SONAME,zf) argl_decl; \
95 ret_ty VG_WRAP_FUNCTION_ZZ(VG_Z_LIBPTHREAD_SONAME,zf) argl_decl \
96 { \
97 ret_ty pth_func_result = implf argl; \
98 /* Apparently inserting a function call in wrapper functions */ \
99 /* is sufficient to avoid misaligned stack errors. */ \
100 if (never_true) \
101 fflush(stdout); \
102 return pth_func_result; \
103 }
104 #else
105 #define PTH_FUNC(ret_ty, zf, implf, argl_decl, argl) \
106 ret_ty VG_WRAP_FUNCTION_ZZ(VG_Z_LIBPTHREAD_SONAME,zf) argl_decl; \
107 ret_ty VG_WRAP_FUNCTION_ZZ(VG_Z_LIBPTHREAD_SONAME,zf) argl_decl \
108 { return implf argl; }
109 #endif
110
111 /**
112 * Macro for generating three Valgrind interception functions: one with the
113 * Z-encoded name zf, one with ZAZa ("@*") appended to the name zf and one
114 * with ZDZa ("$*") appended to the name zf. The second generated interception
115 * function will intercept versioned symbols on Linux, and the third will
116 * intercept versioned symbols on Darwin.
117 */
118 #define PTH_FUNCS(ret_ty, zf, implf, argl_decl, argl) \
119 PTH_FUNC(ret_ty, zf, implf, argl_decl, argl); \
120 PTH_FUNC(ret_ty, zf ## ZAZa, implf, argl_decl, argl); \
121 PTH_FUNC(ret_ty, zf ## ZDZa, implf, argl_decl, argl);
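
/*
 * Illustration (based on the Z-encoding documented in pub_tool_redir.h, where
 * "Zu" stands for '_', "ZA" for '@', "ZD" for '$' and "Za" for '*'): an
 * invocation such as
 *
 *   PTH_FUNCS(int, pthreadZumutexZulock, pthread_mutex_lock_intercept,
 *             (pthread_mutex_t *mutex), (mutex));
 *
 * generates wrappers for the symbols pthread_mutex_lock, pthread_mutex_lock@*
 * and pthread_mutex_lock$*, each of which forwards its arguments to
 * pthread_mutex_lock_intercept().
 */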
122
123 /*
124 * Not inlining one of the intercept functions will cause the regression
125 * tests to fail because this would cause an additional stack frame to appear
126 * in the output. The __always_inline macro guarantees that inlining will
127 * happen, even when compiling with optimization disabled.
128 */
129 #undef __always_inline /* since already defined in <cdefs.h> */
130 #if __GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >= 2
131 #define __always_inline __inline__ __attribute__((always_inline))
132 #else
133 #define __always_inline __inline__
134 #endif
135
136 /* Local data structures. */
137
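/*
 * Counting semaphore built on a pthread mutex plus, where available, the
 * Linux futex syscall: 'counter' holds the number of available tokens and
 * 'waiters' the number of threads blocked inside DRD_(sema_down)().
 */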
138 typedef struct {
139 pthread_mutex_t mutex;
140 int counter;
141 int waiters;
142 } DrdSema;
143
144 typedef struct
145 {
146 void* (*start)(void*);
147 void* arg;
148 int detachstate;
149 DrdSema* wrapper_started;
150 } DrdPosixThreadArgs;
151
152
153 /* Local function declarations. */
154
155 static void DRD_(init)(void) __attribute__((constructor));
156 static void DRD_(check_threading_library)(void);
157 static void DRD_(set_main_thread_state)(void);
158 static void DRD_(sema_init)(DrdSema* sema);
159 static void DRD_(sema_destroy)(DrdSema* sema);
160 static void DRD_(sema_down)(DrdSema* sema);
161 static void DRD_(sema_up)(DrdSema* sema);
162
163
164 /* Function definitions. */
165
166 /**
167 * Shared library initialization function. The function init() is called after
168 * dlopen() has loaded the shared library with DRD client intercepts because
169 * the constructor attribute was specified in the declaration of this function.
170 * Note: do not specify the -nostdlib option to gcc when linking this code into
171 * a shared library because doing so would cancel the effect of the constructor
172 * attribute! Using the gcc option -nodefaultlibs is fine because this last
173 * option preserves the shared library initialization code that calls
174 * constructor and destructor functions.
175 */
176 static void DRD_(init)(void)
177 {
178 DRD_(check_threading_library)();
179 DRD_(set_main_thread_state)();
180 }
181
182 static void DRD_(sema_init)(DrdSema* sema)
183 {
184 DRD_IGNORE_VAR(sema->counter);
185 pthread_mutex_init(&sema->mutex, NULL);
186 sema->counter = 0;
187 sema->waiters = 0;
188 }
189
190 static void DRD_(sema_destroy)(DrdSema* sema)
191 {
192 pthread_mutex_destroy(&sema->mutex);
193 }
194
195 static void DRD_(sema_down)(DrdSema* sema)
196 {
197 int res = ENOSYS;
198
199 pthread_mutex_lock(&sema->mutex);
200 if (sema->counter == 0) {
201 sema->waiters++;
202 while (sema->counter == 0) {
203 pthread_mutex_unlock(&sema->mutex);
204 #ifdef HAVE_USABLE_LINUX_FUTEX_H
205 if (syscall(__NR_futex, (UWord)&sema->counter,
206 FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0) == 0)
207 res = 0;
208 else
209 res = errno;
210 #endif
211 /*
212 * Fall back to sched_yield() on non-Linux systems, if the futex syscall
213 * has not been invoked, or if this code has been built on a Linux system
214 * where __NR_futex is defined but runs on a kernel that does not support
215 * the futex syscall.
216 */
217 if (res != 0 && res != EWOULDBLOCK)
218 sched_yield();
219 pthread_mutex_lock(&sema->mutex);
220 }
221 sema->waiters--;
222 }
223 sema->counter--;
224 pthread_mutex_unlock(&sema->mutex);
225 }
226
227 static void DRD_(sema_up)(DrdSema* sema)
228 {
229 pthread_mutex_lock(&sema->mutex);
230 sema->counter++;
231 #ifdef HAVE_USABLE_LINUX_FUTEX_H
232 if (sema->waiters > 0)
233 syscall(__NR_futex, (UWord)&sema->counter,
234 FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
235 #endif
236 pthread_mutex_unlock(&sema->mutex);
237 }
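
/*
 * Note (sketch of the reasoning, added here): FUTEX_WAIT in DRD_(sema_down)()
 * only sleeps while the futex word 'counter' still equals zero, so a
 * FUTEX_WAKE issued here after 'counter' has been incremented cannot be
 * missed; at worst the waiter returns with EWOULDBLOCK and rechecks the
 * counter under the mutex.
 */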
238
239 /**
240 * POSIX threads and DRD each have their own mutex type identification.
241 * Convert POSIX threads' mutex type to DRD's mutex type. In the code below
242 * if-statements are used to test the value of 'kind' instead of a switch
243 * statement because some of the PTHREAD_MUTEX_ macro's may have the same
244 * value.
245 */
246 static MutexT DRD_(pthread_to_drd_mutex_type)(const int kind)
247 {
248 if (kind == PTHREAD_MUTEX_RECURSIVE)
249 return mutex_type_recursive_mutex;
250 else if (kind == PTHREAD_MUTEX_ERRORCHECK)
251 return mutex_type_errorcheck_mutex;
252 else if (kind == PTHREAD_MUTEX_NORMAL)
253 return mutex_type_default_mutex;
254 else if (kind == PTHREAD_MUTEX_DEFAULT)
255 return mutex_type_default_mutex;
256 #if defined(HAVE_PTHREAD_MUTEX_ADAPTIVE_NP)
257 else if (kind == PTHREAD_MUTEX_ADAPTIVE_NP)
258 return mutex_type_default_mutex;
259 #endif
260 else
261 {
262 return mutex_type_invalid_mutex;
263 }
264 }
265
266 #define IS_ALIGNED(p) (((uintptr_t)(p) & (sizeof(*(p)) - 1)) == 0)
267
268 /**
269 * Read the mutex type stored in the client memory used for the mutex
270 * implementation.
271 *
272 * @note This function depends on the implementation of the POSIX threads
273 * library -- the POSIX standard does not define the name of the member in
274 * which the mutex type is stored.
275 * @note The function mutex_type() has been declared inline in order
276 * to prevent it from showing up in call stacks (drd/tests/...exp* files).
277 * @note glibc stores the mutex type in the lowest two bits, and uses the
278 * higher bits for flags like PTHREAD_MUTEXATTR_FLAG_ROBUST and
279 * PTHREAD_MUTEXATTR_FLAG_PSHARED.
280 */
281 static __always_inline MutexT DRD_(mutex_type)(pthread_mutex_t* mutex)
282 {
283 #if defined(HAVE_PTHREAD_MUTEX_T__M_KIND)
284 /* glibc + LinuxThreads. */
285 if (IS_ALIGNED(&mutex->__m_kind))
286 {
287 const int kind = mutex->__m_kind & 3;
288 return DRD_(pthread_to_drd_mutex_type)(kind);
289 }
290 #elif defined(HAVE_PTHREAD_MUTEX_T__DATA__KIND)
291 /* glibc + NPTL. */
292 if (IS_ALIGNED(&mutex->__data.__kind))
293 {
294 const int kind = mutex->__data.__kind & 3;
295 return DRD_(pthread_to_drd_mutex_type)(kind);
296 }
297 #else
298 /*
299 * Another POSIX threads implementation. The mutex type won't be printed
300 * when enabling --trace-mutex=yes.
301 */
302 #endif
303 return mutex_type_unknown;
304 }
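
/*
 * Example (illustrative, glibc/NPTL layout): for a mutex initialized with
 * PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, mutex->__data.__kind & 3 equals
 * PTHREAD_MUTEX_RECURSIVE, so the function above reports
 * mutex_type_recursive_mutex.
 */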
305
306 /**
307 * Tell DRD whether 'tid' is a joinable thread or a detached thread.
308 */
309 static void DRD_(set_joinable)(const pthread_t tid, const int joinable)
310 {
311 assert(joinable == 0 || joinable == 1);
312 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__SET_JOINABLE,
313 tid, joinable, 0, 0, 0);
314 }
315
316 /** Tell DRD that the calling thread is about to enter pthread_create(). */
317 static __always_inline void DRD_(entering_pthread_create)(void)
318 {
319 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__ENTERING_PTHREAD_CREATE,
320 0, 0, 0, 0, 0);
321 }
322
323 /** Tell DRD that the calling thread has left pthread_create(). */
324 static __always_inline void DRD_(left_pthread_create)(void)
325 {
326 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LEFT_PTHREAD_CREATE,
327 0, 0, 0, 0, 0);
328 }
329
330 /**
331 * Entry point for newly created threads. This function is called from the
332 * thread created by pthread_create().
333 */
334 static void* DRD_(thread_wrapper)(void* arg)
335 {
336 DrdPosixThreadArgs* arg_ptr;
337 DrdPosixThreadArgs arg_copy;
338
339 arg_ptr = (DrdPosixThreadArgs*)arg;
340 arg_copy = *arg_ptr;
341
342 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__SET_PTHREADID,
343 pthread_self(), 0, 0, 0, 0);
344
345 DRD_(set_joinable)(pthread_self(),
346 arg_copy.detachstate == PTHREAD_CREATE_JOINABLE);
347
348 /*
349 * Only set 'wrapper_started' after VG_USERREQ__SET_PTHREADID and
350 * DRD_(set_joinable)() have been invoked to avoid a race with
351 * a pthread_detach() invocation for this thread from another thread.
352 */
353 DRD_(sema_up)(arg_copy.wrapper_started);
354
355 return (arg_copy.start)(arg_copy.arg);
356 }
357
358 /**
359 * Return 1 if the LinuxThreads implementation of POSIX Threads has been
360 * detected, and 0 otherwise.
361 *
362 * @see For more information about the confstr() function, see
363 * http://www.opengroup.org/onlinepubs/009695399/functions/confstr.html
364 */
365 static int DRD_(detected_linuxthreads)(void)
366 {
367 #if defined(linux)
368 #if defined(_CS_GNU_LIBPTHREAD_VERSION)
369 /* Linux with a recent glibc. */
370 char buffer[256];
371 unsigned len;
372 len = confstr(_CS_GNU_LIBPTHREAD_VERSION, buffer, sizeof(buffer));
373 assert(len <= sizeof(buffer));
374 return len > 0 && buffer[0] == 'l';
375 #else
376 /* Linux without _CS_GNU_LIBPTHREAD_VERSION: most likely LinuxThreads. */
377 return 1;
378 #endif
379 #else
380 /* Another OS than Linux, hence no LinuxThreads. */
381 return 0;
382 #endif
383 }
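
/*
 * Example (illustrative): on a glibc/NPTL system confstr() typically fills in
 * a string such as "NPTL 2.17", while LinuxThreads reports a string starting
 * with "linuxthreads", which is why testing buffer[0] == 'l' is sufficient.
 */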
384
385 /**
386 * Stop and print an error message in case a non-supported threading
387 * library implementation (LinuxThreads) has been detected.
388 */
389 static void DRD_(check_threading_library)(void)
390 {
391 if (DRD_(detected_linuxthreads)())
392 {
393 if (getenv("LD_ASSUME_KERNEL"))
394 {
395 fprintf(stderr,
396 "Detected the LinuxThreads threading library. Sorry, but DRD only supports\n"
397 "the newer NPTL (Native POSIX Threads Library). Please try to rerun DRD\n"
398 "after having unset the environment variable LD_ASSUME_KERNEL. Giving up.\n"
399 );
400 }
401 else
402 {
403 fprintf(stderr,
404 "Detected the LinuxThreads threading library. Sorry, but DRD only supports\n"
405 "the newer NPTL (Native POSIX Threads Library). Please try to rerun DRD\n"
406 "after having upgraded to a newer version of your Linux distribution.\n"
407 "Giving up.\n"
408 );
409 }
410 abort();
411 }
412 }
413
414 /**
415 * The main thread is the only thread not created by pthread_create().
416 * Update DRD's state information about the main thread.
417 */
418 static void DRD_(set_main_thread_state)(void)
419 {
420 // Make sure that DRD knows about the main thread's POSIX thread ID.
421 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__SET_PTHREADID,
422 pthread_self(), 0, 0, 0, 0);
423 }
424
425 /*
426 * Note: as of today there exist three different versions of pthread_create
427 * in Linux:
428 * - pthread_create@GLIBC_2.0
429 * - pthread_create@@GLIBC_2.1
430 * - pthread_create@@GLIBC_2.2.5
431 * As an example, in libpthread-2.3.4 both pthread_create@GLIBC_2.0 and
432 * pthread_create@@GLIBC_2.1 are defined, while in libpthread-2.9 all three
433 * versions have been implemented. In any glibc version where more than one
434 * pthread_create function has been implemented, older versions call the
435 * newer versions. In other words: the pthread_create* wrapper defined below
436 * can be called recursively. Any code in this wrapper should take this into
437 * account. As an example, it is not safe to invoke the DRD_STOP_RECORDING
438 * / DRD_START_RECORDING client requests from the pthread_create wrapper.
439 * See also the implementation of pthread_create@GLIBC_2.0 in
440 * glibc-2.9/nptl/pthread_create.c.
441 */
442
443 static __always_inline
444 int pthread_create_intercept(pthread_t* thread, const pthread_attr_t* attr,
445 void* (*start)(void*), void* arg)
446 {
447 int ret;
448 OrigFn fn;
449 DrdSema wrapper_started;
450 DrdPosixThreadArgs thread_args;
451
452 VALGRIND_GET_ORIG_FN(fn);
453
454 DRD_(sema_init)(&wrapper_started);
455 thread_args.start = start;
456 thread_args.arg = arg;
457 thread_args.wrapper_started = &wrapper_started;
458 /*
459 * Find out whether the thread will be started as a joinable thread
460 * or as a detached thread. If no thread attributes have been specified,
461 * this means that the new thread will be started as a joinable thread.
462 */
463 thread_args.detachstate = PTHREAD_CREATE_JOINABLE;
464 if (attr)
465 {
466 if (pthread_attr_getdetachstate(attr, &thread_args.detachstate) != 0)
467 assert(0);
468 }
469 assert(thread_args.detachstate == PTHREAD_CREATE_JOINABLE
470 || thread_args.detachstate == PTHREAD_CREATE_DETACHED);
471
472 DRD_(entering_pthread_create)();
473 CALL_FN_W_WWWW(ret, fn, thread, attr, DRD_(thread_wrapper), &thread_args);
474 DRD_(left_pthread_create)();
475
476 if (ret == 0) {
477 /* Wait until the thread wrapper started. */
478 DRD_(sema_down)(&wrapper_started);
479 }
480
481 DRD_(sema_destroy)(&wrapper_started);
482
483 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_START_NEW_SEGMENT,
484 pthread_self(), 0, 0, 0, 0);
485
486 return ret;
487 }
488
489 PTH_FUNCS(int, pthreadZucreate, pthread_create_intercept,
490 (pthread_t *thread, const pthread_attr_t *attr,
491 void *(*start) (void *), void *arg),
492 (thread, attr, start, arg));
493
494 static __always_inline
495 int pthread_join_intercept(pthread_t pt_joinee, void **thread_return)
496 {
497 int ret;
498 OrigFn fn;
499
500 VALGRIND_GET_ORIG_FN(fn);
501 /*
502 * Prevent the sys_futex(td->tid) call invoked by the NPTL pthread_join()
503 * implementation from triggering a (false positive) race report.
504 */
505 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
506 CALL_FN_W_WW(ret, fn, pt_joinee, thread_return);
507 if (ret == 0)
508 {
509 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_THREAD_JOIN,
510 pt_joinee, 0, 0, 0, 0);
511 }
512 ANNOTATE_IGNORE_READS_AND_WRITES_END();
513 return ret;
514 }
515
516 PTH_FUNCS(int, pthreadZujoin, pthread_join_intercept,
517 (pthread_t pt_joinee, void **thread_return),
518 (pt_joinee, thread_return));
519
520 static __always_inline
521 int pthread_detach_intercept(pthread_t pt_thread)
522 {
523 int ret;
524 OrigFn fn;
525
526 VALGRIND_GET_ORIG_FN(fn);
527 CALL_FN_W_W(ret, fn, pt_thread);
528 DRD_(set_joinable)(pt_thread, 0);
529
530 return ret;
531 }
532
533 PTH_FUNCS(int, pthreadZudetach, pthread_detach_intercept,
534 (pthread_t thread), (thread));
535
536 // Don't intercept pthread_cancel() because pthread_cancel_init() loads
537 // libgcc.so. That library is loaded by calling _dl_open(). The function
538 // dl_open_worker() looks up in GL(dl_ns)[] from which object the caller is
539 // calling. Since the DRD intercepts are linked into vgpreload_drd-*.so
540 // and since that object file is not loaded through glibc, glibc does not
541 // have any information about that object. That results in the following
542 // segmentation fault on at least Fedora 17 x86_64:
543 // Process terminating with default action of signal 11 (SIGSEGV)
544 // General Protection Fault
545 // at 0x4006B75: _dl_map_object_from_fd (dl-load.c:1580)
546 // by 0x4008312: _dl_map_object (dl-load.c:2355)
547 // by 0x4012FFB: dl_open_worker (dl-open.c:226)
548 // by 0x400ECB5: _dl_catch_error (dl-error.c:178)
549 // by 0x4012B2B: _dl_open (dl-open.c:652)
550 // by 0x5184511: do_dlopen (dl-libc.c:89)
551 // by 0x400ECB5: _dl_catch_error (dl-error.c:178)
552 // by 0x51845D1: __libc_dlopen_mode (dl-libc.c:48)
553 // by 0x4E4A703: pthread_cancel_init (unwind-forcedunwind.c:53)
554 // by 0x4E476F2: pthread_cancel (pthread_cancel.c:40)
555 // by 0x4C2C050: pthread_cancel (drd_pthread_intercepts.c:547)
556 // by 0x400B3A: main (bar_bad.c:83)
557 #if 0
558 // NOTE: be careful to intercept only pthread_cancel() and not
559 // pthread_cancel_init() on Linux.
560
561 static __always_inline
562 int pthread_cancel_intercept(pthread_t pt_thread)
563 {
564 int ret;
565 OrigFn fn;
566 VALGRIND_GET_ORIG_FN(fn);
567 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_THREAD_CANCEL,
568 pt_thread, 0, 0, 0, 0);
569 CALL_FN_W_W(ret, fn, pt_thread);
570 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_THREAD_CANCEL,
571 pt_thread, ret==0, 0, 0, 0);
572 return ret;
573 }
574
575 PTH_FUNCS(int, pthreadZucancel, pthread_cancel_intercept,
576 (pthread_t thread), (thread))
577 #endif
578
579 static __always_inline
580 int pthread_once_intercept(pthread_once_t *once_control,
581 void (*init_routine)(void))
582 {
583 int ret;
584 OrigFn fn;
585 VALGRIND_GET_ORIG_FN(fn);
586 /*
587 * Ignore any data races triggered by the implementation of pthread_once().
588 * Necessary for Darwin. This is not necessary for Linux but doesn't have
589 * any known adverse effects.
590 */
591 DRD_IGNORE_VAR(*once_control);
592 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
593 CALL_FN_W_WW(ret, fn, once_control, init_routine);
594 ANNOTATE_IGNORE_READS_AND_WRITES_END();
595 DRD_STOP_IGNORING_VAR(*once_control);
596 return ret;
597 }
598
599 PTH_FUNCS(int, pthreadZuonce, pthread_once_intercept,
600 (pthread_once_t *once_control, void (*init_routine)(void)),
601 (once_control, init_routine));
602
603 static __always_inline
604 int pthread_mutex_init_intercept(pthread_mutex_t *mutex,
605 const pthread_mutexattr_t* attr)
606 {
607 int ret;
608 OrigFn fn;
609 int mt;
610 VALGRIND_GET_ORIG_FN(fn);
611 mt = PTHREAD_MUTEX_DEFAULT;
612 if (attr)
613 pthread_mutexattr_gettype(attr, &mt);
614 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_INIT,
615 mutex, DRD_(pthread_to_drd_mutex_type)(mt),
616 0, 0, 0);
617 CALL_FN_W_WW(ret, fn, mutex, attr);
618 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_INIT,
619 mutex, 0, 0, 0, 0);
620 return ret;
621 }
622
623 PTH_FUNCS(int, pthreadZumutexZuinit, pthread_mutex_init_intercept,
624 (pthread_mutex_t *mutex, const pthread_mutexattr_t* attr),
625 (mutex, attr));
626
627 static __always_inline
628 int pthread_mutex_destroy_intercept(pthread_mutex_t* mutex)
629 {
630 int ret;
631 OrigFn fn;
632 VALGRIND_GET_ORIG_FN(fn);
633 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_DESTROY,
634 mutex, 0, 0, 0, 0);
635 CALL_FN_W_W(ret, fn, mutex);
636 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_DESTROY,
637 mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
638 return ret;
639 }
640
641 PTH_FUNCS(int, pthreadZumutexZudestroy, pthread_mutex_destroy_intercept,
642 (pthread_mutex_t *mutex), (mutex));
643
644 static __always_inline
645 int pthread_mutex_lock_intercept(pthread_mutex_t* mutex)
646 {
647 int ret;
648 OrigFn fn;
649 VALGRIND_GET_ORIG_FN(fn);
650 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK,
651 mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
652 CALL_FN_W_W(ret, fn, mutex);
653 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK,
654 mutex, ret == 0, 0, 0, 0);
655 return ret;
656 }
657
658 PTH_FUNCS(int, pthreadZumutexZulock, pthread_mutex_lock_intercept,
659 (pthread_mutex_t *mutex), (mutex));
660
661 static __always_inline
662 int pthread_mutex_trylock_intercept(pthread_mutex_t* mutex)
663 {
664 int ret;
665 OrigFn fn;
666 VALGRIND_GET_ORIG_FN(fn);
667 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK,
668 mutex, DRD_(mutex_type)(mutex), 1, 0, 0);
669 CALL_FN_W_W(ret, fn, mutex);
670 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK,
671 mutex, ret == 0, 0, 0, 0);
672 return ret;
673 }
674
675 PTH_FUNCS(int, pthreadZumutexZutrylock, pthread_mutex_trylock_intercept,
676 (pthread_mutex_t *mutex), (mutex));
677
678 static __always_inline
679 int pthread_mutex_timedlock_intercept(pthread_mutex_t *mutex,
680 const struct timespec *abs_timeout)
681 {
682 int ret;
683 OrigFn fn;
684 VALGRIND_GET_ORIG_FN(fn);
685 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK,
686 mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
687 CALL_FN_W_WW(ret, fn, mutex, abs_timeout);
688 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK,
689 mutex, ret == 0, 0, 0, 0);
690 return ret;
691 }
692
693 PTH_FUNCS(int, pthreadZumutexZutimedlock, pthread_mutex_timedlock_intercept,
694 (pthread_mutex_t *mutex, const struct timespec *abs_timeout),
695 (mutex, abs_timeout));
696
697 static __always_inline
698 int pthread_mutex_unlock_intercept(pthread_mutex_t *mutex)
699 {
700 int ret;
701 OrigFn fn;
702 VALGRIND_GET_ORIG_FN(fn);
703 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_UNLOCK,
704 mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
705 CALL_FN_W_W(ret, fn, mutex);
706 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_UNLOCK,
707 mutex, 0, 0, 0, 0);
708 return ret;
709 }
710
711 PTH_FUNCS(int, pthreadZumutexZuunlock, pthread_mutex_unlock_intercept,
712 (pthread_mutex_t *mutex), (mutex));
713
714 static __always_inline
715 int pthread_cond_init_intercept(pthread_cond_t* cond,
716 const pthread_condattr_t* attr)
717 {
718 int ret;
719 OrigFn fn;
720 VALGRIND_GET_ORIG_FN(fn);
721 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_INIT,
722 cond, 0, 0, 0, 0);
723 CALL_FN_W_WW(ret, fn, cond, attr);
724 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_INIT,
725 cond, 0, 0, 0, 0);
726 return ret;
727 }
728
729 PTH_FUNCS(int, pthreadZucondZuinit, pthread_cond_init_intercept,
730 (pthread_cond_t* cond, const pthread_condattr_t* attr),
731 (cond, attr));
732
733 static __always_inline
734 int pthread_cond_destroy_intercept(pthread_cond_t* cond)
735 {
736 int ret;
737 OrigFn fn;
738 VALGRIND_GET_ORIG_FN(fn);
739 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_DESTROY,
740 cond, 0, 0, 0, 0);
741 CALL_FN_W_W(ret, fn, cond);
742 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_DESTROY,
743 cond, 0, 0, 0, 0);
744 return ret;
745 }
746
747 PTH_FUNCS(int, pthreadZucondZudestroy, pthread_cond_destroy_intercept,
748 (pthread_cond_t* cond), (cond));
749
750 static __always_inline
751 int pthread_cond_wait_intercept(pthread_cond_t *cond, pthread_mutex_t *mutex)
752 {
753 int ret;
754 OrigFn fn;
755 VALGRIND_GET_ORIG_FN(fn);
756 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_WAIT,
757 cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
758 CALL_FN_W_WW(ret, fn, cond, mutex);
759 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_WAIT,
760 cond, mutex, 1, 0, 0);
761 return ret;
762 }
763
764 PTH_FUNCS(int, pthreadZucondZuwait, pthread_cond_wait_intercept,
765 (pthread_cond_t *cond, pthread_mutex_t *mutex),
766 (cond, mutex));
767
768 static __always_inline
769 int pthread_cond_timedwait_intercept(pthread_cond_t *cond,
770 pthread_mutex_t *mutex,
771 const struct timespec* abstime)
772 {
773 int ret;
774 OrigFn fn;
775 VALGRIND_GET_ORIG_FN(fn);
776 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_WAIT,
777 cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
778 CALL_FN_W_WWW(ret, fn, cond, mutex, abstime);
779 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_WAIT,
780 cond, mutex, 1, 0, 0);
781 return ret;
782 }
783
784 PTH_FUNCS(int, pthreadZucondZutimedwait, pthread_cond_timedwait_intercept,
785 (pthread_cond_t *cond, pthread_mutex_t *mutex,
786 const struct timespec* abstime),
787 (cond, mutex, abstime));
788
789 // NOTE: be careful to intercept only pthread_cond_signal() and not Darwin's
790 // pthread_cond_signal_thread_np(). The former accepts one argument; the latter
791 // two. Intercepting all pthread_cond_signal* functions will cause only one
792 // argument to be passed to pthread_cond_signal_thread_np() and hence will
793 // cause that function to crash.
794
795 static __always_inline
796 int pthread_cond_signal_intercept(pthread_cond_t* cond)
797 {
798 int ret;
799 OrigFn fn;
800 VALGRIND_GET_ORIG_FN(fn);
801 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_SIGNAL,
802 cond, 0, 0, 0, 0);
803 CALL_FN_W_W(ret, fn, cond);
804 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_SIGNAL,
805 cond, 0, 0, 0, 0);
806 return ret;
807 }
808
809 PTH_FUNCS(int, pthreadZucondZusignal, pthread_cond_signal_intercept,
810 (pthread_cond_t* cond), (cond));
811
812 static __always_inline
813 int pthread_cond_broadcast_intercept(pthread_cond_t* cond)
814 {
815 int ret;
816 OrigFn fn;
817 VALGRIND_GET_ORIG_FN(fn);
818 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_BROADCAST,
819 cond, 0, 0, 0, 0);
820 CALL_FN_W_W(ret, fn, cond);
821 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_BROADCAST,
822 cond, 0, 0, 0, 0);
823 return ret;
824 }
825
826 PTH_FUNCS(int, pthreadZucondZubroadcast, pthread_cond_broadcast_intercept,
827 (pthread_cond_t* cond), (cond));
828
829 #if defined(HAVE_PTHREAD_SPIN_LOCK)
830 static __always_inline
831 int pthread_spin_init_intercept(pthread_spinlock_t *spinlock, int pshared)
832 {
833 int ret;
834 OrigFn fn;
835 VALGRIND_GET_ORIG_FN(fn);
836 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
837 spinlock, 0, 0, 0, 0);
838 CALL_FN_W_WW(ret, fn, spinlock, pshared);
839 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
840 spinlock, 0, 0, 0, 0);
841 return ret;
842 }
843
844 PTH_FUNCS(int, pthreadZuspinZuinit, pthread_spin_init_intercept,
845 (pthread_spinlock_t *spinlock, int pshared), (spinlock, pshared));
846
847 static __always_inline
848 int pthread_spin_destroy_intercept(pthread_spinlock_t *spinlock)
849 {
850 int ret;
851 OrigFn fn;
852 VALGRIND_GET_ORIG_FN(fn);
853 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_DESTROY,
854 spinlock, 0, 0, 0, 0);
855 CALL_FN_W_W(ret, fn, spinlock);
856 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_DESTROY,
857 spinlock, mutex_type_spinlock, 0, 0, 0);
858 return ret;
859 }
860
861 PTH_FUNCS(int, pthreadZuspinZudestroy, pthread_spin_destroy_intercept,
862 (pthread_spinlock_t *spinlock), (spinlock));
863
864 static __always_inline
865 int pthread_spin_lock_intercept(pthread_spinlock_t *spinlock)
866 {
867 int ret;
868 OrigFn fn;
869 VALGRIND_GET_ORIG_FN(fn);
870 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK,
871 spinlock, mutex_type_spinlock, 0, 0, 0);
872 CALL_FN_W_W(ret, fn, spinlock);
873 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK,
874 spinlock, ret == 0, 0, 0, 0);
875 return ret;
876 }
877
878 PTH_FUNCS(int, pthreadZuspinZulock, pthread_spin_lock_intercept,
879 (pthread_spinlock_t *spinlock), (spinlock));
880
881 static __always_inline
882 int pthread_spin_trylock_intercept(pthread_spinlock_t *spinlock)
883 {
884 int ret;
885 OrigFn fn;
886 VALGRIND_GET_ORIG_FN(fn);
887 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK,
888 spinlock, mutex_type_spinlock, 0, 0, 0);
889 CALL_FN_W_W(ret, fn, spinlock);
890 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK,
891 spinlock, ret == 0, 0, 0, 0);
892 return ret;
893 }
894
895 PTH_FUNCS(int, pthreadZuspinZutrylock, pthread_spin_trylock_intercept,
896 (pthread_spinlock_t *spinlock), (spinlock));
897
898 static __always_inline
899 int pthread_spin_unlock_intercept(pthread_spinlock_t *spinlock)
900 {
901 int ret;
902 OrigFn fn;
903 VALGRIND_GET_ORIG_FN(fn);
904 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
905 spinlock, mutex_type_spinlock, 0, 0, 0);
906 CALL_FN_W_W(ret, fn, spinlock);
907 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
908 spinlock, 0, 0, 0, 0);
909 return ret;
910 }
911
912 PTH_FUNCS(int, pthreadZuspinZuunlock, pthread_spin_unlock_intercept,
913 (pthread_spinlock_t *spinlock), (spinlock));
914 #endif // HAVE_PTHREAD_SPIN_LOCK
915
916
917 #if defined(HAVE_PTHREAD_BARRIER_INIT)
918 static __always_inline
919 int pthread_barrier_init_intercept(pthread_barrier_t* barrier,
920 const pthread_barrierattr_t* attr,
921 unsigned count)
922 {
923 int ret;
924 OrigFn fn;
925 VALGRIND_GET_ORIG_FN(fn);
926 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_BARRIER_INIT,
927 barrier, pthread_barrier, count, 0, 0);
928 CALL_FN_W_WWW(ret, fn, barrier, attr, count);
929 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_BARRIER_INIT,
930 barrier, pthread_barrier, 0, 0, 0);
931 return ret;
932 }
933
934 PTH_FUNCS(int, pthreadZubarrierZuinit, pthread_barrier_init_intercept,
935 (pthread_barrier_t* barrier, const pthread_barrierattr_t* attr,
936 unsigned count), (barrier, attr, count));
937
938 static __always_inline
939 int pthread_barrier_destroy_intercept(pthread_barrier_t* barrier)
940 {
941 int ret;
942 OrigFn fn;
943 VALGRIND_GET_ORIG_FN(fn);
944 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_BARRIER_DESTROY,
945 barrier, pthread_barrier, 0, 0, 0);
946 CALL_FN_W_W(ret, fn, barrier);
947 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_BARRIER_DESTROY,
948 barrier, pthread_barrier, 0, 0, 0);
949 return ret;
950 }
951
952 PTH_FUNCS(int, pthreadZubarrierZudestroy, pthread_barrier_destroy_intercept,
953 (pthread_barrier_t* barrier), (barrier));
954
955 static __always_inline
956 int pthread_barrier_wait_intercept(pthread_barrier_t* barrier)
957 {
958 int ret;
959 OrigFn fn;
960 VALGRIND_GET_ORIG_FN(fn);
961 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_BARRIER_WAIT,
962 barrier, pthread_barrier, 0, 0, 0);
963 CALL_FN_W_W(ret, fn, barrier);
964 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_BARRIER_WAIT,
965 barrier, pthread_barrier,
966 ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD,
967 ret == PTHREAD_BARRIER_SERIAL_THREAD, 0);
968 return ret;
969 }
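
/*
 * Note (clarification added here): in the client request above the third
 * argument tells DRD whether the wait succeeded and the fourth whether this
 * thread is the one that received PTHREAD_BARRIER_SERIAL_THREAD, which
 * pthread_barrier_wait() returns to exactly one of the waiting threads.
 */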
970
971 PTH_FUNCS(int, pthreadZubarrierZuwait, pthread_barrier_wait_intercept,
972 (pthread_barrier_t* barrier), (barrier));
973 #endif // HAVE_PTHREAD_BARRIER_INIT
974
975
976 static __always_inline
977 int sem_init_intercept(sem_t *sem, int pshared, unsigned int value)
978 {
979 int ret;
980 OrigFn fn;
981 VALGRIND_GET_ORIG_FN(fn);
982 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_INIT,
983 sem, pshared, value, 0, 0);
984 CALL_FN_W_WWW(ret, fn, sem, pshared, value);
985 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_INIT,
986 sem, 0, 0, 0, 0);
987 return ret;
988 }
989
990 PTH_FUNCS(int, semZuinit, sem_init_intercept,
991 (sem_t *sem, int pshared, unsigned int value), (sem, pshared, value));
992
993 static __always_inline
994 int sem_destroy_intercept(sem_t *sem)
995 {
996 int ret;
997 OrigFn fn;
998 VALGRIND_GET_ORIG_FN(fn);
999 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_DESTROY,
1000 sem, 0, 0, 0, 0);
1001 CALL_FN_W_W(ret, fn, sem);
1002 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_DESTROY,
1003 sem, 0, 0, 0, 0);
1004 return ret;
1005 }
1006
1007 PTH_FUNCS(int, semZudestroy, sem_destroy_intercept, (sem_t *sem), (sem));
1008
1009 static __always_inline
1010 sem_t* sem_open_intercept(const char *name, int oflag, mode_t mode,
1011 unsigned int value)
1012 {
1013 sem_t *ret;
1014 OrigFn fn;
1015 VALGRIND_GET_ORIG_FN(fn);
1016 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_OPEN,
1017 name, oflag, mode, value, 0);
1018 CALL_FN_W_WWWW(ret, fn, name, oflag, mode, value);
1019 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_OPEN,
1020 ret != SEM_FAILED ? ret : 0,
1021 name, oflag, mode, value);
1022 return ret;
1023 }
1024
1025 PTH_FUNCS(sem_t *, semZuopen, sem_open_intercept,
1026 (const char *name, int oflag, mode_t mode, unsigned int value),
1027 (name, oflag, mode, value));
1028
1029 static __always_inline int sem_close_intercept(sem_t *sem)
1030 {
1031 int ret;
1032 OrigFn fn;
1033 VALGRIND_GET_ORIG_FN(fn);
1034 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_CLOSE,
1035 sem, 0, 0, 0, 0);
1036 CALL_FN_W_W(ret, fn, sem);
1037 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_CLOSE,
1038 sem, 0, 0, 0, 0);
1039 return ret;
1040 }
1041
1042 PTH_FUNCS(int, semZuclose, sem_close_intercept, (sem_t *sem), (sem));
1043
1044 static __always_inline int sem_wait_intercept(sem_t *sem)
1045 {
1046 int ret;
1047 OrigFn fn;
1048 VALGRIND_GET_ORIG_FN(fn);
1049 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_WAIT,
1050 sem, 0, 0, 0, 0);
1051 CALL_FN_W_W(ret, fn, sem);
1052 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_WAIT,
1053 sem, ret == 0, 0, 0, 0);
1054 return ret;
1055 }
1056
1057 PTH_FUNCS(int, semZuwait, sem_wait_intercept, (sem_t *sem), (sem));
1058
1059 static __always_inline int sem_trywait_intercept(sem_t *sem)
1060 {
1061 int ret;
1062 OrigFn fn;
1063 VALGRIND_GET_ORIG_FN(fn);
1064 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_WAIT,
1065 sem, 0, 0, 0, 0);
1066 CALL_FN_W_W(ret, fn, sem);
1067 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_WAIT,
1068 sem, ret == 0, 0, 0, 0);
1069 return ret;
1070 }
1071
1072 PTH_FUNCS(int, semZutrywait, sem_trywait_intercept, (sem_t *sem), (sem));
1073
1074 static __always_inline
1075 int sem_timedwait_intercept(sem_t *sem, const struct timespec *abs_timeout)
1076 {
1077 int ret;
1078 OrigFn fn;
1079 VALGRIND_GET_ORIG_FN(fn);
1080 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_WAIT,
1081 sem, 0, 0, 0, 0);
1082 CALL_FN_W_WW(ret, fn, sem, abs_timeout);
1083 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_WAIT,
1084 sem, ret == 0, 0, 0, 0);
1085 return ret;
1086 }
1087
1088 PTH_FUNCS(int, semZutimedwait, sem_timedwait_intercept,
1089 (sem_t *sem, const struct timespec *abs_timeout),
1090 (sem, abs_timeout));
1091
1092 static __always_inline int sem_post_intercept(sem_t *sem)
1093 {
1094 int ret;
1095 OrigFn fn;
1096 VALGRIND_GET_ORIG_FN(fn);
1097 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_POST,
1098 sem, 0, 0, 0, 0);
1099 CALL_FN_W_W(ret, fn, sem);
1100 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_POST,
1101 sem, ret == 0, 0, 0, 0);
1102 return ret;
1103 }
1104
1105 PTH_FUNCS(int, semZupost, sem_post_intercept, (sem_t *sem), (sem));
1106
1107 /* Android's pthread.h doesn't say anything about rwlocks, hence these
1108 functions have to be conditionally compiled. */
1109 #if defined(HAVE_PTHREAD_RWLOCK_T)
1110
1111 static __always_inline
1112 int pthread_rwlock_init_intercept(pthread_rwlock_t* rwlock,
1113 const pthread_rwlockattr_t* attr)
1114 {
1115 int ret;
1116 OrigFn fn;
1117 VALGRIND_GET_ORIG_FN(fn);
1118 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_INIT,
1119 rwlock, 0, 0, 0, 0);
1120 CALL_FN_W_WW(ret, fn, rwlock, attr);
1121 return ret;
1122 }
1123
1124 PTH_FUNCS(int,
1125 pthreadZurwlockZuinit, pthread_rwlock_init_intercept,
1126 (pthread_rwlock_t* rwlock, const pthread_rwlockattr_t* attr),
1127 (rwlock, attr));
1128
1129 static __always_inline
1130 int pthread_rwlock_destroy_intercept(pthread_rwlock_t* rwlock)
1131 {
1132 int ret;
1133 OrigFn fn;
1134 VALGRIND_GET_ORIG_FN(fn);
1135 CALL_FN_W_W(ret, fn, rwlock);
1136 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_DESTROY,
1137 rwlock, 0, 0, 0, 0);
1138 return ret;
1139 }
1140
1141 PTH_FUNCS(int,
1142 pthreadZurwlockZudestroy, pthread_rwlock_destroy_intercept,
1143 (pthread_rwlock_t* rwlock), (rwlock));
1144
1145 static __always_inline
1146 int pthread_rwlock_rdlock_intercept(pthread_rwlock_t* rwlock)
1147 {
1148 int ret;
1149 OrigFn fn;
1150 VALGRIND_GET_ORIG_FN(fn);
1151 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_RDLOCK,
1152 rwlock, 0, 0, 0, 0);
1153 CALL_FN_W_W(ret, fn, rwlock);
1154 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_RDLOCK,
1155 rwlock, ret == 0, 0, 0, 0);
1156 return ret;
1157 }
1158
1159 PTH_FUNCS(int,
1160 pthreadZurwlockZurdlock, pthread_rwlock_rdlock_intercept,
1161 (pthread_rwlock_t* rwlock), (rwlock));
1162
1163 static __always_inline
1164 int pthread_rwlock_wrlock_intercept(pthread_rwlock_t* rwlock)
1165 {
1166 int ret;
1167 OrigFn fn;
1168 VALGRIND_GET_ORIG_FN(fn);
1169 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_WRLOCK,
1170 rwlock, 0, 0, 0, 0);
1171 CALL_FN_W_W(ret, fn, rwlock);
1172 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_WRLOCK,
1173 rwlock, ret == 0, 0, 0, 0);
1174 return ret;
1175 }
1176
1177 PTH_FUNCS(int,
1178 pthreadZurwlockZuwrlock, pthread_rwlock_wrlock_intercept,
1179 (pthread_rwlock_t* rwlock), (rwlock));
1180
1181 static __always_inline
1182 int pthread_rwlock_timedrdlock_intercept(pthread_rwlock_t* rwlock)
1183 {
1184 int ret;
1185 OrigFn fn;
1186 VALGRIND_GET_ORIG_FN(fn);
1187 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_RDLOCK,
1188 rwlock, 0, 0, 0, 0);
1189 CALL_FN_W_W(ret, fn, rwlock);
1190 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_RDLOCK,
1191 rwlock, ret == 0, 0, 0, 0);
1192 return ret;
1193 }
1194
1195 PTH_FUNCS(int,
1196 pthreadZurwlockZutimedrdlock, pthread_rwlock_timedrdlock_intercept,
1197 (pthread_rwlock_t* rwlock), (rwlock));
1198
1199 static __always_inline
1200 int pthread_rwlock_timedwrlock_intercept(pthread_rwlock_t* rwlock)
1201 {
1202 int ret;
1203 OrigFn fn;
1204 VALGRIND_GET_ORIG_FN(fn);
1205 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_WRLOCK,
1206 rwlock, 0, 0, 0, 0);
1207 CALL_FN_W_W(ret, fn, rwlock);
1208 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_WRLOCK,
1209 rwlock, ret == 0, 0, 0, 0);
1210 return ret;
1211 }
1212
1213 PTH_FUNCS(int,
1214 pthreadZurwlockZutimedwrlock, pthread_rwlock_timedwrlock_intercept,
1215 (pthread_rwlock_t* rwlock), (rwlock));
1216
1217 static __always_inline
1218 int pthread_rwlock_tryrdlock_intercept(pthread_rwlock_t* rwlock)
1219 {
1220 int ret;
1221 OrigFn fn;
1222 VALGRIND_GET_ORIG_FN(fn);
1223 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_RDLOCK,
1224 rwlock, 0, 0, 0, 0);
1225 CALL_FN_W_W(ret, fn, rwlock);
1226 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_RDLOCK,
1227 rwlock, ret == 0, 0, 0, 0);
1228 return ret;
1229 }
1230
1231 PTH_FUNCS(int,
1232 pthreadZurwlockZutryrdlock, pthread_rwlock_tryrdlock_intercept,
1233 (pthread_rwlock_t* rwlock), (rwlock));
1234
1235 static __always_inline
1236 int pthread_rwlock_trywrlock_intercept(pthread_rwlock_t* rwlock)
1237 {
1238 int ret;
1239 OrigFn fn;
1240 VALGRIND_GET_ORIG_FN(fn);
1241 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_WRLOCK,
1242 rwlock, 0, 0, 0, 0);
1243 CALL_FN_W_W(ret, fn, rwlock);
1244 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_WRLOCK,
1245 rwlock, ret == 0, 0, 0, 0);
1246 return ret;
1247 }
1248
1249 PTH_FUNCS(int,
1250 pthreadZurwlockZutrywrlock, pthread_rwlock_trywrlock_intercept,
1251 (pthread_rwlock_t* rwlock), (rwlock));
1252
1253 static __always_inline
1254 int pthread_rwlock_unlock_intercept(pthread_rwlock_t* rwlock)
1255 {
1256 int ret;
1257 OrigFn fn;
1258 VALGRIND_GET_ORIG_FN(fn);
1259 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_UNLOCK,
1260 rwlock, 0, 0, 0, 0);
1261 CALL_FN_W_W(ret, fn, rwlock);
1262 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_UNLOCK,
1263 rwlock, ret == 0, 0, 0, 0);
1264 return ret;
1265 }
1266
1267 PTH_FUNCS(int,
1268 pthreadZurwlockZuunlock, pthread_rwlock_unlock_intercept,
1269 (pthread_rwlock_t* rwlock), (rwlock));
1270
1271 #endif /* defined(HAVE_PTHREAD_RWLOCK_T) */
1272