1 /*
2 This file is part of drd, a thread error detector.
3
4 Copyright (C) 2006-2017 Bart Van Assche <bvanassche@acm.org>.
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 02111-1307, USA.
20
21 The GNU General Public License is contained in the file COPYING.
22 */
23
24
25 #include "drd_barrier.h"
26 #include "drd_clientreq.h"
27 #include "drd_cond.h"
28 #include "drd_error.h"
29 #include "drd_hb.h"
30 #include "drd_load_store.h"
31 #include "drd_malloc_wrappers.h"
32 #include "drd_mutex.h"
33 #include "drd_rwlock.h"
34 #include "drd_semaphore.h"
35 #include "drd_suppression.h" // drd_start_suppression()
36 #include "drd_thread.h"
37 #include "pub_tool_basics.h" // Bool
38 #include "pub_tool_libcassert.h"
39 #include "pub_tool_libcassert.h" // tl_assert()
40 #include "pub_tool_libcprint.h" // VG_(message)()
41 #include "pub_tool_machine.h" // VG_(get_SP)()
42 #include "pub_tool_threadstate.h"
43 #include "pub_tool_tooliface.h" // VG_(needs_...)()
44
45
/* Global variables. */

/* True when the --free-is-write=yes command line option is active, i.e.
   freeing memory must be treated as a store to that memory. Checked in
   handle_client_request() because this mode is incompatible with the
   MALLOCLIKE/FREELIKE custom-allocator client requests. */
Bool DRD_(g_free_is_write);


/* Local function declarations. */

/* Callback registered with the Valgrind core via
   VG_(needs_client_requests)(); see the definition below. */
static Bool handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret);
54
55
56 /* Function definitions. */
57
58 /**
59 * Tell the Valgrind core the address of the DRD function that processes
60 * client requests. Must be called before any client code is run.
61 */
DRD_(clientreq_init)62 void DRD_(clientreq_init)(void)
63 {
64 VG_(needs_client_requests)(handle_client_request);
65 }
66
67 /**
68 * DRD's handler for Valgrind client requests. The code below handles both
69 * DRD's public and tool-internal client requests.
70 */
71 #if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
72 /* There is a cse related issue in gcc for MIPS. Optimization level
73 has to be lowered, so cse related optimizations are not
74 included. */
75 __attribute__((optimize("O1")))
76 #endif
handle_client_request(ThreadId vg_tid,UWord * arg,UWord * ret)77 static Bool handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret)
78 {
79 UWord result = 0;
80 const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
81
82 tl_assert(vg_tid == VG_(get_running_tid)());
83 tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_tid) == drd_tid
84 || (VG_USERREQ__GDB_MONITOR_COMMAND == arg[0]
85 && vg_tid == VG_INVALID_THREADID));
86 /* Check the consistency of vg_tid and drd_tid, unless
87 vgdb has forced the invocation of a gdb monitor cmd
88 when no threads was running (i.e. all threads blocked
89 in a syscall. In such a case, vg_tid is invalid,
90 its conversion to a drd thread id gives also an invalid
91 drd thread id, but drd_tid is not invalid (probably
92 equal to the last running drd thread. */
93
94 switch (arg[0])
95 {
96 case VG_USERREQ__MALLOCLIKE_BLOCK:
97 if (DRD_(g_free_is_write)) {
98 GenericErrInfo GEI = {
99 .tid = DRD_(thread_get_running_tid)(),
100 .addr = 0,
101 };
102 VG_(maybe_record_error)(vg_tid,
103 GenericErr,
104 VG_(get_IP)(vg_tid),
105 "--free-is-write=yes is incompatible with"
106 " custom memory allocator client requests",
107 &GEI);
108 }
109 if (arg[1])
110 DRD_(malloclike_block)(vg_tid, arg[1]/*addr*/, arg[2]/*size*/);
111 break;
112
113 case VG_USERREQ__RESIZEINPLACE_BLOCK:
114 if (!DRD_(freelike_block)(vg_tid, arg[1]/*addr*/, False))
115 {
116 GenericErrInfo GEI = {
117 .tid = DRD_(thread_get_running_tid)(),
118 .addr = 0,
119 };
120 VG_(maybe_record_error)(vg_tid,
121 GenericErr,
122 VG_(get_IP)(vg_tid),
123 "Invalid VG_USERREQ__RESIZEINPLACE_BLOCK request",
124 &GEI);
125 }
126 DRD_(malloclike_block)(vg_tid, arg[1]/*addr*/, arg[3]/*newSize*/);
127 break;
128
129 case VG_USERREQ__FREELIKE_BLOCK:
130 if (arg[1] && ! DRD_(freelike_block)(vg_tid, arg[1]/*addr*/, False))
131 {
132 GenericErrInfo GEI = {
133 .tid = DRD_(thread_get_running_tid)(),
134 .addr = 0,
135 };
136 VG_(maybe_record_error)(vg_tid,
137 GenericErr,
138 VG_(get_IP)(vg_tid),
139 "Invalid VG_USERREQ__FREELIKE_BLOCK request",
140 &GEI);
141 }
142 break;
143
144 case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID:
145 result = vg_tid;
146 break;
147
148 case VG_USERREQ__DRD_GET_DRD_THREAD_ID:
149 result = drd_tid;
150 break;
151
152 case VG_USERREQ__DRD_SET_THREAD_NAME:
153 DRD_(thread_set_name)(drd_tid, (const HChar*)arg[1]);
154 break;
155
156 case VG_USERREQ__DRD_START_SUPPRESSION:
157 /*_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED*/
158 case VG_USERREQ_TOOL_BASE('H','G') + 256 + 39:
159 DRD_(start_suppression)(arg[1], arg[1] + arg[2], "client");
160 break;
161
162 case VG_USERREQ__DRD_FINISH_SUPPRESSION:
163 /*_VG_USERREQ__HG_ARANGE_MAKE_TRACKED*/
164 case VG_USERREQ_TOOL_BASE('H','G') + 256 + 40:
165 DRD_(finish_suppression)(arg[1], arg[1] + arg[2]);
166 break;
167
168 case VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE:
169 DRD_(hb_happens_before)(drd_tid, arg[1]);
170 break;
171
172 case VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER:
173 DRD_(hb_happens_after)(drd_tid, arg[1]);
174 break;
175
176 case VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE:
177 if (arg[1])
178 {
179 struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
180 if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
181 break;
182 }
183 DRD_(rwlock_pre_init)(arg[1], user_rwlock);
184 break;
185
186 case VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY:
187 if (arg[1])
188 {
189 struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
190 if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
191 break;
192 }
193 DRD_(rwlock_post_destroy)(arg[1], user_rwlock);
194 break;
195
196 case VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED:
197 if (arg[1])
198 {
199 struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
200 if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
201 break;
202 }
203 tl_assert(arg[2] == !! arg[2]);
204 if (arg[2])
205 {
206 DRD_(rwlock_pre_wrlock)(arg[1], user_rwlock);
207 DRD_(rwlock_post_wrlock)(arg[1], user_rwlock, True);
208 }
209 else
210 {
211 DRD_(rwlock_pre_rdlock)(arg[1], user_rwlock);
212 DRD_(rwlock_post_rdlock)(arg[1], user_rwlock, True);
213 }
214 break;
215
216 case VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED:
217 if (arg[1])
218 {
219 struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
220 if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
221 break;
222 }
223 tl_assert(arg[2] == !! arg[2]);
224 DRD_(rwlock_pre_unlock)(arg[1], user_rwlock);
225 break;
226
227 case VG_USERREQ__DRD_ANNOTATE_SEM_INIT_PRE:
228 DRD_(semaphore_init)(arg[1], 0, arg[2]);
229 break;
230
231 case VG_USERREQ__DRD_ANNOTATE_SEM_DESTROY_POST:
232 DRD_(semaphore_destroy)(arg[1]);
233 break;
234
235 case VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_PRE:
236 DRD_(semaphore_pre_wait)(arg[1]);
237 break;
238
239 case VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_POST:
240 DRD_(semaphore_post_wait)(drd_tid, arg[1], True /* waited */);
241 break;
242
243 case VG_USERREQ__DRD_ANNOTATE_SEM_POST_PRE:
244 DRD_(semaphore_pre_post)(drd_tid, arg[1]);
245 break;
246
247 case VG_USERREQ__SET_PTHREAD_COND_INITIALIZER:
248 DRD_(pthread_cond_initializer) = (Addr)arg[1];
249 DRD_(pthread_cond_initializer_size) = arg[2];
250 break;
251
252 case VG_USERREQ__DRD_START_NEW_SEGMENT:
253 DRD_(thread_new_segment)(DRD_(PtThreadIdToDrdThreadId)(arg[1]));
254 break;
255
256 case VG_USERREQ__DRD_START_TRACE_ADDR:
257 DRD_(start_tracing_address_range)(arg[1], arg[1] + arg[2], False);
258 break;
259
260 case VG_USERREQ__DRD_STOP_TRACE_ADDR:
261 DRD_(stop_tracing_address_range)(arg[1], arg[1] + arg[2]);
262 break;
263
264 case VG_USERREQ__DRD_RECORD_LOADS:
265 DRD_(thread_set_record_loads)(drd_tid, arg[1]);
266 break;
267
268 case VG_USERREQ__DRD_RECORD_STORES:
269 DRD_(thread_set_record_stores)(drd_tid, arg[1]);
270 break;
271
272 case VG_USERREQ__SET_PTHREADID:
273 // pthread_self() returns 0 for programs not linked with libpthread.so.
274 if (arg[1] != INVALID_POSIX_THREADID)
275 DRD_(thread_set_pthreadid)(drd_tid, arg[1]);
276 break;
277
278 case VG_USERREQ__SET_JOINABLE:
279 {
280 const DrdThreadId drd_joinable = DRD_(PtThreadIdToDrdThreadId)(arg[1]);
281 if (drd_joinable != DRD_INVALID_THREADID)
282 DRD_(thread_set_joinable)(drd_joinable, (Bool)arg[2]);
283 else {
284 InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
285 VG_(maybe_record_error)(vg_tid,
286 InvalidThreadId,
287 VG_(get_IP)(vg_tid),
288 "pthread_detach(): invalid thread ID",
289 &ITI);
290 }
291 break;
292 }
293
294 case VG_USERREQ__ENTERING_PTHREAD_CREATE:
295 DRD_(thread_entering_pthread_create)(drd_tid);
296 break;
297
298 case VG_USERREQ__LEFT_PTHREAD_CREATE:
299 DRD_(thread_left_pthread_create)(drd_tid);
300 break;
301
302 case VG_USERREQ__POST_THREAD_JOIN:
303 {
304 const DrdThreadId thread_to_join = DRD_(PtThreadIdToDrdThreadId)(arg[1]);
305 if (thread_to_join == DRD_INVALID_THREADID)
306 {
307 InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
308 VG_(maybe_record_error)(vg_tid,
309 InvalidThreadId,
310 VG_(get_IP)(vg_tid),
311 "pthread_join(): invalid thread ID",
312 &ITI);
313 }
314 else
315 {
316 DRD_(thread_post_join)(drd_tid, thread_to_join);
317 }
318 break;
319 }
320
321 case VG_USERREQ__PRE_THREAD_CANCEL:
322 {
323 const DrdThreadId thread_to_cancel =DRD_(PtThreadIdToDrdThreadId)(arg[1]);
324 if (thread_to_cancel == DRD_INVALID_THREADID)
325 {
326 InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
327 VG_(maybe_record_error)(vg_tid,
328 InvalidThreadId,
329 VG_(get_IP)(vg_tid),
330 "pthread_cancel(): invalid thread ID",
331 &ITI);
332 }
333 else
334 {
335 DRD_(thread_pre_cancel)(thread_to_cancel);
336 }
337 break;
338 }
339
340 case VG_USERREQ__POST_THREAD_CANCEL:
341 break;
342
343 case VG_USERREQ__PRE_MUTEX_INIT:
344 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
345 DRD_(mutex_init)(arg[1], arg[2]);
346 break;
347
348 case VG_USERREQ__POST_MUTEX_INIT:
349 DRD_(thread_leave_synchr)(drd_tid);
350 break;
351
352 case VG_USERREQ__PRE_MUTEX_DESTROY:
353 DRD_(thread_enter_synchr)(drd_tid);
354 break;
355
356 case VG_USERREQ__POST_MUTEX_DESTROY:
357 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
358 DRD_(mutex_post_destroy)(arg[1]);
359 break;
360
361 case VG_USERREQ__PRE_MUTEX_LOCK:
362 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
363 DRD_(mutex_pre_lock)(arg[1], arg[2], arg[3]);
364 break;
365
366 case VG_USERREQ__POST_MUTEX_LOCK:
367 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
368 DRD_(mutex_post_lock)(arg[1], arg[2], False/*post_cond_wait*/);
369 break;
370
371 case VG_USERREQ__PRE_MUTEX_UNLOCK:
372 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
373 DRD_(mutex_unlock)(arg[1], arg[2]);
374 break;
375
376 case VG_USERREQ__POST_MUTEX_UNLOCK:
377 DRD_(thread_leave_synchr)(drd_tid);
378 break;
379
380 case VG_USERREQ__DRD_IGNORE_MUTEX_ORDERING:
381 DRD_(mutex_ignore_ordering)(arg[1]);
382 break;
383
384 case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK:
385 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
386 DRD_(spinlock_init_or_unlock)(arg[1]);
387 break;
388
389 case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK:
390 DRD_(thread_leave_synchr)(drd_tid);
391 break;
392
393 case VG_USERREQ__PRE_COND_INIT:
394 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
395 DRD_(cond_pre_init)(arg[1]);
396 break;
397
398 case VG_USERREQ__POST_COND_INIT:
399 DRD_(thread_leave_synchr)(drd_tid);
400 break;
401
402 case VG_USERREQ__PRE_COND_DESTROY:
403 DRD_(thread_enter_synchr)(drd_tid);
404 break;
405
406 case VG_USERREQ__POST_COND_DESTROY:
407 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
408 DRD_(cond_post_destroy)(arg[1], arg[2]);
409 break;
410
411 case VG_USERREQ__PRE_COND_WAIT:
412 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
413 {
414 const Addr cond = arg[1];
415 const Addr mutex = arg[2];
416 const MutexT mutex_type = arg[3];
417 DRD_(mutex_unlock)(mutex, mutex_type);
418 DRD_(cond_pre_wait)(cond, mutex);
419 }
420 break;
421
422 case VG_USERREQ__POST_COND_WAIT:
423 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
424 {
425 const Addr cond = arg[1];
426 const Addr mutex = arg[2];
427 const Bool took_lock = arg[3];
428 DRD_(cond_post_wait)(cond);
429 DRD_(mutex_post_lock)(mutex, took_lock, True);
430 }
431 break;
432
433 case VG_USERREQ__PRE_COND_SIGNAL:
434 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
435 DRD_(cond_pre_signal)(arg[1]);
436 break;
437
438 case VG_USERREQ__POST_COND_SIGNAL:
439 DRD_(thread_leave_synchr)(drd_tid);
440 break;
441
442 case VG_USERREQ__PRE_COND_BROADCAST:
443 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
444 DRD_(cond_pre_broadcast)(arg[1]);
445 break;
446
447 case VG_USERREQ__POST_COND_BROADCAST:
448 DRD_(thread_leave_synchr)(drd_tid);
449 break;
450
451 case VG_USERREQ__PRE_SEM_INIT:
452 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
453 DRD_(semaphore_init)(arg[1], arg[2], arg[3]);
454 break;
455
456 case VG_USERREQ__POST_SEM_INIT:
457 DRD_(thread_leave_synchr)(drd_tid);
458 break;
459
460 case VG_USERREQ__PRE_SEM_DESTROY:
461 DRD_(thread_enter_synchr)(drd_tid);
462 break;
463
464 case VG_USERREQ__POST_SEM_DESTROY:
465 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
466 DRD_(semaphore_destroy)(arg[1]);
467 break;
468
469 case VG_USERREQ__PRE_SEM_OPEN:
470 DRD_(thread_enter_synchr)(drd_tid);
471 break;
472
473 case VG_USERREQ__POST_SEM_OPEN:
474 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
475 DRD_(semaphore_open)(arg[1], (HChar*)arg[2], arg[3], arg[4], arg[5]);
476 break;
477
478 case VG_USERREQ__PRE_SEM_CLOSE:
479 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
480 DRD_(semaphore_close)(arg[1]);
481 break;
482
483 case VG_USERREQ__POST_SEM_CLOSE:
484 DRD_(thread_leave_synchr)(drd_tid);
485 break;
486
487 case VG_USERREQ__PRE_SEM_WAIT:
488 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
489 DRD_(semaphore_pre_wait)(arg[1]);
490 break;
491
492 case VG_USERREQ__POST_SEM_WAIT:
493 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
494 DRD_(semaphore_post_wait)(drd_tid, arg[1], arg[2]);
495 break;
496
497 case VG_USERREQ__PRE_SEM_POST:
498 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
499 DRD_(semaphore_pre_post)(drd_tid, arg[1]);
500 break;
501
502 case VG_USERREQ__POST_SEM_POST:
503 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
504 DRD_(semaphore_post_post)(drd_tid, arg[1], arg[2]);
505 break;
506
507 case VG_USERREQ__PRE_BARRIER_INIT:
508 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
509 DRD_(barrier_init)(arg[1], arg[2], arg[3], arg[4]);
510 break;
511
512 case VG_USERREQ__POST_BARRIER_INIT:
513 DRD_(thread_leave_synchr)(drd_tid);
514 break;
515
516 case VG_USERREQ__PRE_BARRIER_DESTROY:
517 DRD_(thread_enter_synchr)(drd_tid);
518 break;
519
520 case VG_USERREQ__POST_BARRIER_DESTROY:
521 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
522 DRD_(barrier_destroy)(arg[1], arg[2]);
523 break;
524
525 case VG_USERREQ__PRE_BARRIER_WAIT:
526 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
527 DRD_(barrier_pre_wait)(drd_tid, arg[1], arg[2]);
528 break;
529
530 case VG_USERREQ__POST_BARRIER_WAIT:
531 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
532 DRD_(barrier_post_wait)(drd_tid, arg[1], arg[2], arg[3], arg[4]);
533 break;
534
535 case VG_USERREQ__PRE_RWLOCK_INIT:
536 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
537 DRD_(rwlock_pre_init)(arg[1], pthread_rwlock);
538 break;
539
540 case VG_USERREQ__POST_RWLOCK_INIT:
541 DRD_(thread_leave_synchr)(drd_tid);
542 break;
543
544 case VG_USERREQ__PRE_RWLOCK_DESTROY:
545 DRD_(thread_enter_synchr)(drd_tid);
546 break;
547
548 case VG_USERREQ__POST_RWLOCK_DESTROY:
549 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
550 DRD_(rwlock_post_destroy)(arg[1], pthread_rwlock);
551 break;
552
553 case VG_USERREQ__PRE_RWLOCK_RDLOCK:
554 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
555 DRD_(rwlock_pre_rdlock)(arg[1], pthread_rwlock);
556 break;
557
558 case VG_USERREQ__POST_RWLOCK_RDLOCK:
559 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
560 DRD_(rwlock_post_rdlock)(arg[1], pthread_rwlock, arg[2]);
561 break;
562
563 case VG_USERREQ__PRE_RWLOCK_WRLOCK:
564 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
565 DRD_(rwlock_pre_wrlock)(arg[1], pthread_rwlock);
566 break;
567
568 case VG_USERREQ__POST_RWLOCK_WRLOCK:
569 if (DRD_(thread_leave_synchr)(drd_tid) == 0)
570 DRD_(rwlock_post_wrlock)(arg[1], pthread_rwlock, arg[2]);
571 break;
572
573 case VG_USERREQ__PRE_RWLOCK_UNLOCK:
574 if (DRD_(thread_enter_synchr)(drd_tid) == 0)
575 DRD_(rwlock_pre_unlock)(arg[1], pthread_rwlock);
576 break;
577
578 case VG_USERREQ__POST_RWLOCK_UNLOCK:
579 DRD_(thread_leave_synchr)(drd_tid);
580 break;
581
582 case VG_USERREQ__DRD_CLEAN_MEMORY:
583 if (arg[2] > 0)
584 DRD_(clean_memory)(arg[1], arg[2]);
585 break;
586
587 case VG_USERREQ__HELGRIND_ANNOTATION_UNIMP:
588 {
589 /* Note: it is assumed below that the text arg[1] points to is never
590 * freed, e.g. because it points to static data.
591 */
592 UnimpClReqInfo UICR =
593 { DRD_(thread_get_running_tid)(), (HChar*)arg[1] };
594 VG_(maybe_record_error)(vg_tid,
595 UnimpHgClReq,
596 VG_(get_IP)(vg_tid),
597 "",
598 &UICR);
599 }
600 break;
601
602 case VG_USERREQ__DRD_ANNOTATION_UNIMP:
603 {
604 /* Note: it is assumed below that the text arg[1] points to is never
605 * freed, e.g. because it points to static data.
606 */
607 UnimpClReqInfo UICR =
608 { DRD_(thread_get_running_tid)(), (HChar*)arg[1] };
609 VG_(maybe_record_error)(vg_tid,
610 UnimpDrdClReq,
611 VG_(get_IP)(vg_tid),
612 "",
613 &UICR);
614 }
615 break;
616
617 #if defined(VGO_solaris)
618 case VG_USERREQ__RTLD_BIND_GUARD:
619 DRD_(thread_entering_rtld_bind_guard)(drd_tid, arg[1]);
620 break;
621
622 case VG_USERREQ__RTLD_BIND_CLEAR:
623 DRD_(thread_leaving_rtld_bind_clear)(drd_tid, arg[1]);
624 break;
625 #endif /* VGO_solaris */
626
627 default:
628 #if 0
629 VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
630 arg[0], arg[1]);
631 tl_assert(0);
632 #endif
633 return False;
634 }
635
636 *ret = result;
637 return True;
638 }
639