
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2017 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"     // sonames for the dynamic linkers
#include "pub_tool_vki.h"       // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"
#include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
#include "pub_tool_poolalloc.h"
#include "pub_tool_addrinfo.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_addrdescr.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid.

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
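
/* For illustration, a minimal sketch of the casting pattern the note
   above refers to (see e.g. map_locks_maybe_lookup below for a real
   call site): lookupFM hands values back through a UWord*, so the
   address of a Lock* must be cast, and that cast is what gcc's
   strict-aliasing analysis objects to at -O2.

      Lock* lk = NULL;
      Bool  found = VG_(lookupFM)( map_locks, NULL,
                                   (UWord*)&lk, (UWord)ga );
*/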

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( const HChar* who ); /* fwds */

#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin doubly linked list of Locks */
/* We need a doubly linked list to handle del_LockN properly and
   efficiently. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */
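
/* Schematically, the GC trigger is evaluated wherever univ_laog
   grows; a sketch (assuming HG_(cardinalityWSU) reports the element
   count; the GC routine itself lives elsewhere in this file):

      if (HG_(cardinalityWSU)(univ_laog) >= next_gc_univ_laog)
         ... GC univ_laog and raise next_gc_univ_laog ...
*/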

/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

#if defined(VGO_solaris)
Bool HG_(clo_ignore_thread_creation) = True;
#else
Bool HG_(clo_ignore_thread_creation) = False;
#endif /* VGO_solaris */

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx      = 1;
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   thread->synchr_nesting = 0;
   thread->pthread_create_nesting_level = 0;
#if defined(VGO_solaris)
   thread->bind_guard_flag = 0;
#endif /* VGO_solaris */

   admin_threads        = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert it into the admin_locks doubly linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock             = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next       = admin_locks;
   lock->admin_prev       = NULL;
   admin_locks            = lock;
   /* end: add */
   lock->unique           = unique++;
   lock->magic            = LockN_MAGIC;
   lock->appeared_at      = NULL;
   lock->acquired_at      = NULL;
   lock->hbso             = libhb_so_alloc();
   lock->guestaddr        = guestaddr;
   lock->kind             = kind;
   lock->heldW            = False;
   lock->heldBy           = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes it from the admin_locks doubly linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
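   /* Scribble on the Lock before freeing it, presumably so that any
      stale pointer to it trips an assertion quickly (e.g. the magic
      check in HG_(is_sane_LockN)) rather than silently working. */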
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
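
/* Worked example of the bag (multiset) semantics above: if thread T
   w-locks an LK_mbRec lock twice, lk->heldBy holds T with
   multiplicity 2, so two lockN_release calls are needed before the
   bag empties, at which point heldBy/heldW/acquired_at are reset.
   (Just an illustration of the intended behaviour, not extra
   checking.) */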

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)
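
/* Typical use of these flags (a sketch; in this tool the dump is
   normally gated on SHOW_DATA_STRUCTURES, defined above):

      if (SHOW_DATA_STRUCTURES)
         pp_everything( PP_ALL, "some caller tag" );
*/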


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   HChar spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
   space(d+3); VG_(printf)("admin    %p\n",   t->admin);
   space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n",   (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n",   (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

/* Pretty-print lock lk.
   If show_lock_addrdescr, also describe the (guest) lock address
   (this description is more complete with --read-var-info=yes).
   If show_internal_data, also show Helgrind-internal information.
   d is the level at which output is indented. */
static void pp_Lock ( Int d, Lock* lk,
                      Bool show_lock_addrdescr,
                      Bool show_internal_data)
{
   space(d+0);
   if (show_internal_data)
      VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   else
      VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
   if (!show_lock_addrdescr
       || !HG_(get_and_pp_addrdescr) ((Addr) lk->guestaddr))
      VG_(printf)("\n");

   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   }
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   }
   if (lk->heldBy) {
      Thread* thr;
      UWord   count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
         if (show_internal_data)
            VG_(printf)("%lu:%p ", count, thr);
         else {
            VG_(printf)("%c%lu:thread #%d ",
                        lk->heldW ? 'W' : 'R',
                        count, thr->errmsg_index);
            if (thr->coretid == VG_INVALID_THREADID)
               VG_(printf)("tid (exited) ");
            else
               VG_(printf)("tid %u ", thr->coretid);

         }
      }
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}\n");
   }
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                                      (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );

   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}

static void HG_(thread_enter_synchr)(Thread *thr) {
   tl_assert(thr->synchr_nesting >= 0);
#if defined(VGO_solaris)
   thr->synchr_nesting += 1;
#endif /* VGO_solaris */
}

static void HG_(thread_leave_synchr)(Thread *thr) {
#if defined(VGO_solaris)
   thr->synchr_nesting -= 1;
#endif /* VGO_solaris */
   tl_assert(thr->synchr_nesting >= 0);
}
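
/* A note on the counter above: on Solaris, synchr_nesting appears to
   track how deeply the thread is inside intercepted synchronisation
   routines (it is only bumped/dropped under VGO_solaris), presumably
   so that memory accesses made internally by those routines can be
   treated specially; on other OSes the counter stays at zero and only
   the sanity asserts remain. */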

static void HG_(thread_enter_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level >= 0);
   thr->pthread_create_nesting_level += 1;
}

static void HG_(thread_leave_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level > 0);
   thr->pthread_create_nesting_level -= 1;
}

static Int HG_(get_pthread_create_nesting_level)(ThreadId tid) {
   Thread *thr = map_threads_maybe_lookup(tid);
   return thr->pthread_create_nesting_level;
}

/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr      gla;
   Lock*     lk;
   Int       i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions    ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr*     hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr*     hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr*     hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

evh__start_client_code(ThreadId tid,ULong nDisp)1429 static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1430    if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1431    tl_assert(current_Thread == NULL);
1432    current_Thread = map_threads_lookup( tid );
1433    tl_assert(current_Thread != NULL);
1434    if (current_Thread != current_Thread_prev) {
1435       libhb_Thr_resumes( current_Thread->hbthr );
1436       current_Thread_prev = current_Thread;
1437    }
1438 }
evh__stop_client_code(ThreadId tid,ULong nDisp)1439 static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1440    if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1441    tl_assert(current_Thread != NULL);
1442    current_Thread = NULL;
1443    libhb_maybe_GC();
1444 }
get_current_Thread_in_C_C(void)1445 static inline Thread* get_current_Thread_in_C_C ( void ) {
1446    return current_Thread;
1447 }
1448 static inline Thread* get_current_Thread ( void ) {
1449    ThreadId coretid;
1450    Thread*  thr;
1451    thr = get_current_Thread_in_C_C();
1452    if (LIKELY(thr))
1453       return thr;
1454    /* evidently not in client code.  Do it the slow way. */
1455    coretid = VG_(get_running_tid)();
1456    /* FIXME: get rid of the following kludge.  It exists because
1457       evh__new_mem is called during initialisation (as notification
1458       of initial memory layout) and VG_(get_running_tid)() returns
1459       VG_INVALID_THREADID at that point. */
1460    if (coretid == VG_INVALID_THREADID)
1461       coretid = 1; /* KLUDGE */
1462    thr = map_threads_lookup( coretid );
1463    return thr;
1464 }
1465 
1466 static
1467 void evh__new_mem ( Addr a, SizeT len ) {
1468    Thread *thr = get_current_Thread();
1469    if (SHOW_EVENTS >= 2)
1470       VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1471    shadow_mem_make_New( thr, a, len );
1472    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1473       all__sanity_check("evh__new_mem-post");
1474    if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1475       shadow_mem_make_Untracked( thr, a, len );
1476 }
1477 
1478 static
1479 void evh__new_mem_stack ( Addr a, SizeT len ) {
1480    Thread *thr = get_current_Thread();
1481    if (SHOW_EVENTS >= 2)
1482       VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1483    shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
1484    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1485       all__sanity_check("evh__new_mem_stack-post");
1486    if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1487       shadow_mem_make_Untracked( thr, a, len );
1488 }
1489 
1490 static
1491 void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1492    Thread *thr = get_current_Thread();
1493    if (SHOW_EVENTS >= 2)
1494       VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1495    shadow_mem_make_New( thr, a, len );
1496    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1497       all__sanity_check("evh__new_mem_w_tid-post");
1498    if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1499       shadow_mem_make_Untracked( thr, a, len );
1500 }
1501 
1502 static
1503 void evh__new_mem_w_perms ( Addr a, SizeT len,
1504                             Bool rr, Bool ww, Bool xx, ULong di_handle ) {
1505    Thread *thr = get_current_Thread();
1506    if (SHOW_EVENTS >= 1)
1507       VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1508                   (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1509    if (rr || ww || xx) {
1510       shadow_mem_make_New( thr, a, len );
1511       if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1512          shadow_mem_make_Untracked( thr, a, len );
1513    }
1514    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1515       all__sanity_check("evh__new_mem_w_perms-post");
1516 }
1517 
1518 static
1519 void evh__set_perms ( Addr a, SizeT len,
1520                       Bool rr, Bool ww, Bool xx ) {
1521    // This handles mprotect requests.  If the memory is being put
1522    // into no-R no-W state, paint it as NoAccess, for the reasons
1523    // documented at evh__die_mem_munmap().
1524    if (SHOW_EVENTS >= 1)
1525       VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
1526                   (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1527    /* Hmm.  What should we do here, that actually makes any sense?
1528       Let's say: if neither readable nor writable, then declare it
1529       NoAccess, else leave it alone. */
1530    if (!(rr || ww))
1531       shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1532    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1533       all__sanity_check("evh__set_perms-post");
1534 }
1535 
1536 static
1537 void evh__die_mem ( Addr a, SizeT len ) {
1538    // Urr, libhb ignores this.
1539    if (SHOW_EVENTS >= 2)
1540       VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1541    shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
1542    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1543       all__sanity_check("evh__die_mem-post");
1544 }
1545 
1546 static
1547 void evh__die_mem_munmap ( Addr a, SizeT len ) {
1548    // It's important that libhb doesn't ignore this.  If, as is likely,
1549    // the client is subject to address space layout randomization,
1550    // then unmapped areas may never get remapped over, even in long
1551    // runs.  If we just ignore them we wind up with large resource
1552    // (VTS) leaks in libhb.  So force them to NoAccess, so that all
1553    // VTS references in the affected area are dropped.  Marking memory
1554    // as NoAccess is expensive, but we assume that munmap is sufficiently
1555    // rare that the space gains of doing this are worth the costs.
1556    if (SHOW_EVENTS >= 2)
1557       VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1558    shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1559 }
1560 
1561 static
1562 void evh__untrack_mem ( Addr a, SizeT len ) {
1563    // Libhb doesn't ignore this.
1564    if (SHOW_EVENTS >= 2)
1565       VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1566    shadow_mem_make_Untracked( get_current_Thread(), a, len );
1567    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1568       all__sanity_check("evh__untrack_mem-post");
1569 }
1570 
1571 static
1572 void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1573    if (SHOW_EVENTS >= 2)
1574       VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1575    Thread *thr = get_current_Thread();
1576    if (LIKELY(thr->synchr_nesting == 0))
1577       shadow_mem_scopy_range( thr , src, dst, len );
1578    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1579       all__sanity_check("evh__copy_mem-post");
1580 }
1581 
1582 static
1583 void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1584 {
1585    if (SHOW_EVENTS >= 1)
1586       VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1587                   (Int)parent, (Int)child );
1588 
1589    if (parent != VG_INVALID_THREADID) {
1590       Thread* thr_p;
1591       Thread* thr_c;
1592       Thr*    hbthr_p;
1593       Thr*    hbthr_c;
1594 
1595       tl_assert(HG_(is_sane_ThreadId)(parent));
1596       tl_assert(HG_(is_sane_ThreadId)(child));
1597       tl_assert(parent != child);
1598 
1599       thr_p = map_threads_maybe_lookup( parent );
1600       thr_c = map_threads_maybe_lookup( child );
1601 
1602       tl_assert(thr_p != NULL);
1603       tl_assert(thr_c == NULL);
1604 
1605       hbthr_p = thr_p->hbthr;
1606       tl_assert(hbthr_p != NULL);
1607       tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
1608 
1609       hbthr_c = libhb_create ( hbthr_p );
1610 
1611       /* Create a new thread record for the child. */
1612       /* a Thread for the new thread ... */
1613       thr_c = mk_Thread( hbthr_c );
1614       tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1615       libhb_set_Thr_hgthread(hbthr_c, thr_c);
1616 
1617       /* and bind it in the thread-map table */
1618       map_threads[child] = thr_c;
1619       tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1620       thr_c->coretid = child;
1621 
1622       /* Record where the parent is so we can later refer to this in
1623          error messages.
1624 
1625          On x86/amd64-linux, this entails a nasty glibc specific hack.
1626          The stack snapshot is taken immediately after the parent has
1627          returned from its sys_clone call.  Unfortunately there is no
1628          unwind info for the insn following "syscall" - reading the
1629          glibc sources confirms this.  So we ask for a snapshot to be
1630          taken as if RIP was 3 bytes earlier, in a place where there
1631          is unwind info.  Sigh.
1632       */
1633       { Word first_ip_delta = 0;
1634 #       if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
1635         first_ip_delta = -3;
1636 #       elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
1637         first_ip_delta = -1;
1638 #       endif
1639         thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1640       }
1641 
1642       if (HG_(clo_ignore_thread_creation)) {
1643          HG_(thread_enter_pthread_create)(thr_c);
1644          tl_assert(thr_c->synchr_nesting == 0);
1645          HG_(thread_enter_synchr)(thr_c);
1646          /* Counterpart in _VG_USERREQ__HG_SET_MY_PTHREAD_T. */
1647       }
1648    }
1649 
1650    if (HG_(clo_sanity_flags) & SCE_THREADS)
1651       all__sanity_check("evh__pre_thread_create-post");
1652 }
1653 
1654 static
1655 void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1656 {
1657    Int     nHeld;
1658    Thread* thr_q;
1659    if (SHOW_EVENTS >= 1)
1660       VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1661                   (Int)quit_tid );
1662 
1663    /* quit_tid has disappeared without joining to any other thread.
1664       Therefore there is no synchronisation event associated with its
1665       exit and so we have to pretty much treat it as if it was still
1666       alive but mysteriously making no progress.  That is because, if
1667       we don't know when it really exited, then we can never say there
1668       is a point in time when we're sure the thread really has
1669       finished, and so we need to consider the possibility that it
1670       lingers indefinitely and continues to interact with other
1671       threads. */
1672    /* However, it might have rendezvous'd with a thread that called
1673       pthread_join with this one as arg, prior to this point (that's
1674       how NPTL works).  In which case there has already been a prior
1675       sync event.  So in any case, just let the thread exit.  On NPTL,
1676       all thread exits go through here. */
1677    tl_assert(HG_(is_sane_ThreadId)(quit_tid));
1678    thr_q = map_threads_maybe_lookup( quit_tid );
1679    tl_assert(thr_q != NULL);
1680 
1681    /* Complain if this thread holds any locks. */
1682    nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1683    tl_assert(nHeld >= 0);
1684    if (nHeld > 0) {
1685       HChar buf[80];
1686       VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1687                         nHeld, nHeld > 1 ? "s" : "");
1688       HG_(record_error_Misc)( thr_q, buf );
1689    }
1690 
1691    /* Not much to do here:
1692       - tell libhb the thread is gone
1693       - clear the map_threads entry, in order that the Valgrind core
1694         can re-use it. */
1695    /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1696       in sync. */
1697    tl_assert(thr_q->hbthr);
1698    libhb_async_exit(thr_q->hbthr);
1699    tl_assert(thr_q->coretid == quit_tid);
1700    thr_q->coretid = VG_INVALID_THREADID;
1701    map_threads_delete( quit_tid );
1702 
1703    if (HG_(clo_sanity_flags) & SCE_THREADS)
1704       all__sanity_check("evh__pre_thread_ll_exit-post");
1705 }
1706 
1707 /* This is called immediately after fork, for the child only.  'tid'
1708    is the only surviving thread (as per POSIX rules on fork() in
1709    threaded programs), so we have to clean up map_threads to remove
1710    entries for any other threads. */
1711 static
1712 void evh__atfork_child ( ThreadId tid )
1713 {
1714    UInt    i;
1715    Thread* thr;
1716    /* Slot 0 should never be used. */
1717    thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1718    tl_assert(!thr);
1719    /* Clean up all other slots except 'tid'. */
1720    for (i = 1; i < VG_N_THREADS; i++) {
1721       if (i == tid)
1722          continue;
1723       thr = map_threads_maybe_lookup(i);
1724       if (!thr)
1725          continue;
1726       /* Cleanup actions (next 5 lines) copied from end of
1727          evh__pre_thread_ll_exit; keep in sync. */
1728       tl_assert(thr->hbthr);
1729       libhb_async_exit(thr->hbthr);
1730       tl_assert(thr->coretid == i);
1731       thr->coretid = VG_INVALID_THREADID;
1732       map_threads_delete(i);
1733    }
1734 }
1735 
1736 /* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
1737 static
1738 void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
1739 {
1740    SO*      so;
1741    /* Allocate a temporary synchronisation object and use it to send
1742       an imaginary message from the quitter to the stayer, the purpose
1743       being to generate a dependence from the quitter to the
1744       stayer. */
1745    so = libhb_so_alloc();
1746    tl_assert(so);
1747    /* Arguably the last arg of _so_send should be False, since the
1748       sending thread doesn't actually exist any more and we don't
1749       want _so_send to try taking stack snapshots of it; the code
1750       nevertheless does a strong send. */
1750    libhb_so_send(hbthr_q, so, True/*strong_send*/);
1751    libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1752    libhb_so_dealloc(so);
1753 
1754    /* Tell libhb that the quitter has been reaped.  Note that we might
1755       have to be cleverer about this, to exclude 2nd and subsequent
1756       notifications for the same hbthr_q, in the case where the app is
1757       buggy (calls pthread_join twice or more on the same thread) AND
1758       where libpthread is also buggy and doesn't return ESRCH on
1759       subsequent calls.  (If libpthread isn't thusly buggy, then the
1760       wrapper for pthread_join in hg_intercepts.c will stop us getting
1761       notified here multiple times for the same joinee.)  See also
1762       comments in helgrind/tests/jointwice.c. */
1763    libhb_joinedwith_done(hbthr_q);
1764 }
1765 
1766 
1767 static
1768 void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1769 {
1770    Thread*  thr_s;
1771    Thread*  thr_q;
1772    Thr*     hbthr_s;
1773    Thr*     hbthr_q;
1774 
1775    if (SHOW_EVENTS >= 1)
1776       VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1777                   (Int)stay_tid, quit_thr );
1778 
1779    tl_assert(HG_(is_sane_ThreadId)(stay_tid));
1780 
1781    thr_s = map_threads_maybe_lookup( stay_tid );
1782    thr_q = quit_thr;
1783    tl_assert(thr_s != NULL);
1784    tl_assert(thr_q != NULL);
1785    tl_assert(thr_s != thr_q);
1786 
1787    hbthr_s = thr_s->hbthr;
1788    hbthr_q = thr_q->hbthr;
1789    tl_assert(hbthr_s != hbthr_q);
1790    tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1791    tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
1792 
1793    generate_quitter_stayer_dependence (hbthr_q, hbthr_s);
1794 
1795    /* evh__pre_thread_ll_exit issues an error message if the exiting
1796       thread holds any locks.  No need to check here. */
1797 
1798    /* This holds because, at least when using NPTL as the thread
1799       library, we should be notified of the low level thread exit before
1800       we hear of any join event on it.  The low level exit
1801       notification feeds through into evh__pre_thread_ll_exit,
1802       which should clear the map_threads entry for it.  Hence we
1803       expect there to be no map_threads entry at this point. */
1804    tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1805               == VG_INVALID_THREADID);
1806 
1807    if (HG_(clo_sanity_flags) & SCE_THREADS)
1808       all__sanity_check("evh__post_thread_join-post");
1809 }
1810 
1811 static
1812 void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
1813                          Addr a, SizeT size) {
1814    if (SHOW_EVENTS >= 2
1815        || (SHOW_EVENTS >= 1 && size != 1))
1816       VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1817                   (Int)tid, s, (void*)a, size );
1818    Thread *thr = map_threads_lookup(tid);
1819    if (LIKELY(thr->synchr_nesting == 0))
1820       shadow_mem_cread_range(thr, a, size);
1821    if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1822       all__sanity_check("evh__pre_mem_read-post");
1823 }
1824 
1825 static
1826 void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1827                                 const HChar* s, Addr a ) {
1828    Int len;
1829    if (SHOW_EVENTS >= 1)
1830       VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1831                   (Int)tid, s, (void*)a );
1832    // Don't segfault if the string starts in an obviously stupid
1833    // place.  Actually we should check the whole string, not just
1834    // the start address, but that's too much trouble.  At least
1835    // checking the first byte is better than nothing.  See #255009.
1836    if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1837       return;
1838    Thread *thr = map_threads_lookup(tid);
1839    len = VG_(strlen)( (HChar*) a );
1840    if (LIKELY(thr->synchr_nesting == 0))
1841       shadow_mem_cread_range( thr, a, len+1 );
1842    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1843       all__sanity_check("evh__pre_mem_read_asciiz-post");
1844 }
1845 
1846 static
1847 void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
1848                           Addr a, SizeT size ) {
1849    if (SHOW_EVENTS >= 1)
1850       VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1851                   (Int)tid, s, (void*)a, size );
1852    Thread *thr = map_threads_lookup(tid);
1853    if (LIKELY(thr->synchr_nesting == 0))
1854       shadow_mem_cwrite_range(thr, a, size);
1855    if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1856       all__sanity_check("evh__pre_mem_write-post");
1857 }
1858 
1859 static
1860 void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1861    if (SHOW_EVENTS >= 1)
1862       VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1863                   (void*)a, len, (Int)is_inited );
1864    // We ignore the initialisation state (is_inited); that's ok.
1865    shadow_mem_make_New(get_current_Thread(), a, len);
1866    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1867       all__sanity_check("evh__new_mem_heap-post");
1868 }
1869 
1870 static
1871 void evh__die_mem_heap ( Addr a, SizeT len ) {
1872    Thread* thr;
1873    if (SHOW_EVENTS >= 1)
1874       VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1875    thr = get_current_Thread();
1876    tl_assert(thr);
1877    if (HG_(clo_free_is_write)) {
1878       /* Treat frees as if the memory was written immediately prior to
1879          the free.  This shakes out more races, specifically, cases
1880          where memory is referenced by one thread, and freed by
1881          another, and there's no observable synchronisation event to
1882          guarantee that the reference happens before the free. */
1883       if (LIKELY(thr->synchr_nesting == 0))
1884          shadow_mem_cwrite_range(thr, a, len);
1885    }
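   /* A hypothetical illustration of the kind of race the above shakes
      out (client code, not Helgrind's):
         T1:  int v = *p;   // read, with no ordering vs. the free
         T2:  free(p);      // modelled as a write to the block
      With no synchronisation between T1 and T2, treating the free as a
      write makes the conflict visible to the race detector. */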
1886    shadow_mem_make_NoAccess_AHAE( thr, a, len );
1887    /* We used to call instead
1888           shadow_mem_make_NoAccess_NoFX( thr, a, len );
1889       A non-buggy application will not access the freed memory again,
1890       so marking it no-access is in theory useless.
1891       Not marking freed memory would avoid the overhead for applications
1892       doing mostly malloc/free, as the freed memory would then be recycled
1893       very quickly.
1894       We mark it noaccess anyway, for the following reasons:
1895         * the accessibility bits then always correctly represent the memory
1896           status (e.g. for the client request VALGRIND_HG_GET_ABITS).
1897         * the overhead is reasonable (about 5 seconds per GB in 1000 byte
1898           blocks, on a ppc64le, for an unrealistic workload of an application
1899           doing only malloc/free).
1900         * marking no-access allows the SecMap to be GC'd, which might
1901           improve performance and/or memory usage.
1902         * we might detect more application bugs when memory is marked
1903           noaccess.
1904       If needed, we could support an option --free-is-noaccess=yes|no
1905       here, for applications that need to avoid the noaccess-marking
1906       overhead. */
1907 
1908    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1909       all__sanity_check("evh__die_mem_heap-post");
1910 }
1911 
1912 /* --- Event handlers called from generated code --- */
1913 
1914 static VG_REGPARM(1)
1915 void evh__mem_help_cread_1(Addr a) {
1916    Thread*  thr = get_current_Thread_in_C_C();
1917    Thr*     hbthr = thr->hbthr;
1918    if (LIKELY(thr->synchr_nesting == 0))
1919       LIBHB_CREAD_1(hbthr, a);
1920 }
1921 
1922 static VG_REGPARM(1)
1923 void evh__mem_help_cread_2(Addr a) {
1924    Thread*  thr = get_current_Thread_in_C_C();
1925    Thr*     hbthr = thr->hbthr;
1926    if (LIKELY(thr->synchr_nesting == 0))
1927       LIBHB_CREAD_2(hbthr, a);
1928 }
1929 
1930 static VG_REGPARM(1)
1931 void evh__mem_help_cread_4(Addr a) {
1932    Thread*  thr = get_current_Thread_in_C_C();
1933    Thr*     hbthr = thr->hbthr;
1934    if (LIKELY(thr->synchr_nesting == 0))
1935       LIBHB_CREAD_4(hbthr, a);
1936 }
1937 
1938 static VG_REGPARM(1)
1939 void evh__mem_help_cread_8(Addr a) {
1940    Thread*  thr = get_current_Thread_in_C_C();
1941    Thr*     hbthr = thr->hbthr;
1942    if (LIKELY(thr->synchr_nesting == 0))
1943       LIBHB_CREAD_8(hbthr, a);
1944 }
1945 
1946 static VG_REGPARM(2)
1947 void evh__mem_help_cread_N(Addr a, SizeT size) {
1948    Thread*  thr = get_current_Thread_in_C_C();
1949    Thr*     hbthr = thr->hbthr;
1950    if (LIKELY(thr->synchr_nesting == 0))
1951       LIBHB_CREAD_N(hbthr, a, size);
1952 }
1953 
1954 static VG_REGPARM(1)
1955 void evh__mem_help_cwrite_1(Addr a) {
1956    Thread*  thr = get_current_Thread_in_C_C();
1957    Thr*     hbthr = thr->hbthr;
1958    if (LIKELY(thr->synchr_nesting == 0))
1959       LIBHB_CWRITE_1(hbthr, a);
1960 }
1961 
1962 static VG_REGPARM(1)
1963 void evh__mem_help_cwrite_2(Addr a) {
1964    Thread*  thr = get_current_Thread_in_C_C();
1965    Thr*     hbthr = thr->hbthr;
1966    if (LIKELY(thr->synchr_nesting == 0))
1967       LIBHB_CWRITE_2(hbthr, a);
1968 }
1969 
1970 static VG_REGPARM(1)
1971 void evh__mem_help_cwrite_4(Addr a) {
1972    Thread*  thr = get_current_Thread_in_C_C();
1973    Thr*     hbthr = thr->hbthr;
1974    if (LIKELY(thr->synchr_nesting == 0))
1975       LIBHB_CWRITE_4(hbthr, a);
1976 }
1977 
1978 static VG_REGPARM(1)
1979 void evh__mem_help_cwrite_8(Addr a) {
1980    Thread*  thr = get_current_Thread_in_C_C();
1981    Thr*     hbthr = thr->hbthr;
1982    if (LIKELY(thr->synchr_nesting == 0))
1983       LIBHB_CWRITE_8(hbthr, a);
1984 }
1985 
1986 static VG_REGPARM(2)
1987 void evh__mem_help_cwrite_N(Addr a, SizeT size) {
1988    Thread*  thr = get_current_Thread_in_C_C();
1989    Thr*     hbthr = thr->hbthr;
1990    if (LIKELY(thr->synchr_nesting == 0))
1991       LIBHB_CWRITE_N(hbthr, a, size);
1992 }
1993 
1994 
1995 /* ------------------------------------------------------- */
1996 /* -------------- events to do with mutexes -------------- */
1997 /* ------------------------------------------------------- */
1998 
1999 /* EXPOSITION only: by intercepting lock init events we can show the
2000    user where the lock was initialised, rather than only being able to
2001    show where it was first locked.  Intercepting lock initialisations
2002    is not necessary for the basic operation of the race checker. */
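/* A minimal client-side sketch of what this exposition buys
   (hypothetical code, not part of Helgrind): because the init event is
   intercepted, a later report about 'mx' can cite the
   pthread_mutex_init call site rather than only the first lock site.

      #include <pthread.h>
      static pthread_mutex_t mx;
      int main(void) {
         pthread_mutex_init(&mx, NULL);  // init site, citable in reports
         pthread_mutex_lock(&mx);        // otherwise the first site seen
         pthread_mutex_unlock(&mx);
         pthread_mutex_destroy(&mx);
         return 0;
      }
*/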
2003 static
2004 void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
2005                                       void* mutex, Word mbRec )
2006 {
2007    if (SHOW_EVENTS >= 1)
2008       VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
2009                   (Int)tid, mbRec, (void*)mutex );
2010    tl_assert(mbRec == 0 || mbRec == 1);
2011    map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
2012                                (Addr)mutex, tid );
2013    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2014       all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
2015 }
2016 
2017 static
2018 void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
2019                                         Bool mutex_is_init )
2020 {
2021    Thread* thr;
2022    Lock*   lk;
2023    if (SHOW_EVENTS >= 1)
2024       VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
2025                   "(ctid=%d, %p, isInit=%d)\n",
2026                   (Int)tid, (void*)mutex, (Int)mutex_is_init );
2027 
2028    thr = map_threads_maybe_lookup( tid );
2029    /* cannot fail - Thread* must already exist */
2030    tl_assert( HG_(is_sane_Thread)(thr) );
2031 
2032    lk = map_locks_maybe_lookup( (Addr)mutex );
2033 
2034    if (lk == NULL && mutex_is_init) {
2035       /* We're destroying a mutex which we don't have any record of,
2036          and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
2037          Assume it never got used, and so we don't need to do anything
2038          more. */
2039       goto out;
2040    }
2041 
2042    if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
2043       HG_(record_error_Misc)(
2044          thr, "pthread_mutex_destroy with invalid argument" );
2045    }
2046 
2047    if (lk) {
2048       tl_assert( HG_(is_sane_LockN)(lk) );
2049       tl_assert( lk->guestaddr == (Addr)mutex );
2050       if (lk->heldBy) {
2051          /* Basically act like we unlocked the lock */
2052          HG_(record_error_Misc)(
2053             thr, "pthread_mutex_destroy of a locked mutex" );
2054          /* remove lock from locksets of all owning threads */
2055          remove_Lock_from_locksets_of_all_owning_Threads( lk );
2056          VG_(deleteBag)( lk->heldBy );
2057          lk->heldBy = NULL;
2058          lk->heldW = False;
2059          lk->acquired_at = NULL;
2060       }
2061       tl_assert( !lk->heldBy );
2062       tl_assert( HG_(is_sane_LockN)(lk) );
2063 
2064       if (HG_(clo_track_lockorders))
2065          laog__handle_one_lock_deletion(lk);
2066       map_locks_delete( lk->guestaddr );
2067       del_LockN( lk );
2068    }
2069 
2070   out:
2071    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2072       all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
2073 }
2074 
2075 static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
2076                                              void* mutex, Word isTryLock )
2077 {
2078    /* Just check the mutex is sane; nothing else to do. */
2079    // 'mutex' may be invalid - not checked by wrapper
2080    Thread* thr;
2081    Lock*   lk;
2082    if (SHOW_EVENTS >= 1)
2083       VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
2084                   (Int)tid, (void*)mutex );
2085 
2086    tl_assert(isTryLock == 0 || isTryLock == 1);
2087    thr = map_threads_maybe_lookup( tid );
2088    tl_assert(thr); /* cannot fail - Thread* must already exist */
2089 
2090    lk = map_locks_maybe_lookup( (Addr)mutex );
2091 
2092    if (lk && (lk->kind == LK_rdwr)) {
2093       HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
2094                                    "pthread_rwlock_t* argument " );
2095    }
2096 
2097    if ( lk
2098         && isTryLock == 0
2099         && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
2100         && lk->heldBy
2101         && lk->heldW
2102         && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
2103       /* uh, it's a non-recursive lock and we already w-hold it, and
2104          this is a real lock operation (not a speculative "tryLock"
2105          kind of thing).  Duh.  Deadlock coming up; but at least
2106          produce an error message. */
2107       const HChar* errstr = "Attempt to re-lock a "
2108                             "non-recursive lock I already hold";
2109       const HChar* auxstr = "Lock was previously acquired";
2110       if (lk->acquired_at) {
2111          HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2112       } else {
2113          HG_(record_error_Misc)( thr, errstr );
2114       }
2115    }
2116 }
2117 
2118 static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2119 {
2120    // only called if the real library call succeeded - so mutex is sane
2121    Thread* thr;
2122    if (SHOW_EVENTS >= 1)
2123       VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2124                   (Int)tid, (void*)mutex );
2125 
2126    thr = map_threads_maybe_lookup( tid );
2127    tl_assert(thr); /* cannot fail - Thread* must already exist */
2128 
2129    evhH__post_thread_w_acquires_lock(
2130       thr,
2131       LK_mbRec, /* if not known, create new lock with this LockKind */
2132       (Addr)mutex
2133    );
2134 }
2135 
2136 static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2137 {
2138    // 'mutex' may be invalid - not checked by wrapper
2139    Thread* thr;
2140    if (SHOW_EVENTS >= 1)
2141       VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2142                   (Int)tid, (void*)mutex );
2143 
2144    thr = map_threads_maybe_lookup( tid );
2145    tl_assert(thr); /* cannot fail - Thread* must already exist */
2146 
2147    evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2148 }
2149 
2150 static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2151 {
2152    // only called if the real library call succeeded - so mutex is sane
2153    Thread* thr;
2154    if (SHOW_EVENTS >= 1)
2155       VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2156                   (Int)tid, (void*)mutex );
2157    thr = map_threads_maybe_lookup( tid );
2158    tl_assert(thr); /* cannot fail - Thread* must already exist */
2159 
2160    // anything we should do here?
2161 }
2162 
2163 
2164 /* ------------------------------------------------------- */
2165 /* -------------- events to do with spinlocks ------------ */
2166 /* ------------------------------------------------------- */
2167 
2168 /* All a bit of a kludge.  Pretend we're really dealing with ordinary
2169    pthread_mutex_t's instead, for the most part. */
2170 
2171 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2172                                                      void* slock )
2173 {
2174    Thread* thr;
2175    Lock*   lk;
2176    /* In glibc's kludgey world, we're either initialising or unlocking
2177       it.  Since this is the pre-routine, if it is locked, unlock it
2178       and take a dependence edge.  Otherwise, do nothing. */
2179 
2180    if (SHOW_EVENTS >= 1)
2181       VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2182                   "(ctid=%d, slock=%p)\n",
2183                   (Int)tid, (void*)slock );
2184 
2185    thr = map_threads_maybe_lookup( tid );
2186    /* cannot fail - Thread* must already exist */
2187    tl_assert( HG_(is_sane_Thread)(thr) );
2188 
2189    lk = map_locks_maybe_lookup( (Addr)slock );
2190    if (lk && lk->heldBy) {
2191       /* it's held.  So do the normal pre-unlock actions, as copied
2192          from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
2193          duplicates the map_locks_maybe_lookup. */
2194       evhH__pre_thread_releases_lock( thr, (Addr)slock,
2195                                            False/*!isRDWR*/ );
2196    }
2197 }
2198 
2199 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2200                                                       void* slock )
2201 {
2202    Lock* lk;
2203    /* More kludgery.  If the lock has never been seen before, do
2204       actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
2205       nothing. */
2206 
2207    if (SHOW_EVENTS >= 1)
2208       VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2209                   "(ctid=%d, slock=%p)\n",
2210                   (Int)tid, (void*)slock );
2211 
2212    lk = map_locks_maybe_lookup( (Addr)slock );
2213    if (!lk) {
2214       map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2215    }
2216 }
2217 
2218 static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2219                                            void* slock, Word isTryLock )
2220 {
2221    evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2222 }
2223 
2224 static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2225                                             void* slock )
2226 {
2227    evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2228 }
2229 
2230 static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2231                                               void* slock )
2232 {
2233    evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
2234 }
2235 
2236 
2237 /* ----------------------------------------------------- */
2238 /* --------------- events to do with CVs --------------- */
2239 /* ----------------------------------------------------- */
2240 
2241 /* A mapping from CV to (the SO associated with it, plus some
2242    auxiliary data for error checking).  When the CV is
2243    signalled/broadcasted upon, we do a 'send' into the SO, and when a
2244    wait on it completes, we do a 'recv' from the SO.  This is believed
2245    to give the correct happens-before events arising from CV
2246    signallings/broadcasts.
2247 */
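
/* A hedged client-side sketch of the happens-before edge this creates
   (hypothetical code, not part of Helgrind): the signaller's 'send'
   and the waiter's 'recv' order the write to 'data' before the read.

      #include <pthread.h>
      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
      static int ready = 0, data = 0;
      static void* producer(void* unused) {
         pthread_mutex_lock(&mx);
         data = 42;                      // ordered before the wait returns
         ready = 1;
         pthread_cond_signal(&cv);       // modelled as a send on cv's SO
         pthread_mutex_unlock(&mx);
         return NULL;
      }
      static void* consumer(void* unused) {
         pthread_mutex_lock(&mx);
         while (!ready)
            pthread_cond_wait(&cv, &mx); // completion: recv from cv's SO
         int v = data;                   // not reported as a race
         pthread_mutex_unlock(&mx);
         return (void*)(long)v;
      }
*/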
2248 
2249 /* .so is the SO for this CV.
2250    .mx_ga is the associated mutex, when .nWaiters > 0
2251 
2252    POSIX says effectively that the first pthread_cond_{timed}wait call
2253    causes a dynamic binding between the CV and the mutex, and that
2254    lasts until such time as the waiter count falls to zero.  Hence
2255    need to keep track of the number of waiters in order to do
2256    consistency tracking. */
2257 typedef
2258    struct {
2259       SO*   so;       /* libhb-allocated SO */
2260       void* mx_ga;    /* addr of associated mutex, if any */
2261       UWord nWaiters; /* # threads waiting on the CV */
2262    }
2263    CVInfo;
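
/* Hypothetical client-code illustration of the binding rule above (not
   part of Helgrind): while any thread is still waiting, a wait using a
   different mutex is flagged by the consistency check.

      pthread_cond_wait(&cv, &mx1);  // first wait binds cv to mx1
      // meanwhile, in another thread, while the first still waits:
      pthread_cond_wait(&cv, &mx2);  // complained about: cv is bound to mx1
*/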
2264 
2265 
2266 /* pthread_cond_t* -> CVInfo* */
2267 static WordFM* map_cond_to_CVInfo = NULL;
2268 
2269 static void map_cond_to_CVInfo_INIT ( void ) {
2270    if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2271       map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2272                                        "hg.mctCI.1", HG_(free), NULL );
2273    }
2274 }
2275 
2276 static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
2277    UWord key, val;
2278    map_cond_to_CVInfo_INIT();
2279    if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2280       tl_assert(key == (UWord)cond);
2281       return (CVInfo*)val;
2282    } else {
2283       SO*     so  = libhb_so_alloc();
2284       CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2285       cvi->so     = so;
2286       cvi->mx_ga  = 0;
2287       VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2288       return cvi;
2289    }
2290 }
2291 
2292 static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2293    UWord key, val;
2294    map_cond_to_CVInfo_INIT();
2295    if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2296       tl_assert(key == (UWord)cond);
2297       return (CVInfo*)val;
2298    } else {
2299       return NULL;
2300    }
2301 }
2302 
2303 static void map_cond_to_CVInfo_delete ( ThreadId tid,
2304                                         void* cond, Bool cond_is_init ) {
2305    Thread*   thr;
2306    UWord keyW, valW;
2307 
2308    thr = map_threads_maybe_lookup( tid );
2309    tl_assert(thr); /* cannot fail - Thread* must already exist */
2310 
2311    map_cond_to_CVInfo_INIT();
2312    if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2313       CVInfo* cvi = (CVInfo*)valW;
2314       tl_assert(keyW == (UWord)cond);
2315       tl_assert(cvi);
2316       tl_assert(cvi->so);
2317       if (cvi->nWaiters > 0) {
2318          HG_(record_error_Misc)(
2319             thr, "pthread_cond_destroy:"
2320                  " destruction of condition variable being waited upon");
2321          /* Destroying a cond var that is being waited upon returns
2322             EBUSY, and the variable is not destroyed. */
2323          return;
2324       }
2325       if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
2326          tl_assert(0); // cond var found above, and not here ???
2327       libhb_so_dealloc(cvi->so);
2328       cvi->mx_ga = 0;
2329       HG_(free)(cvi);
2330    } else {
2331       /* We have no record of this CV.  So complain about it
2332          .. except, don't bother to complain if it has exactly the
2333          value PTHREAD_COND_INITIALIZER, since it might be that the CV
2334          was initialised like that but never used. */
2335       if (!cond_is_init) {
2336          HG_(record_error_Misc)(
2337             thr, "pthread_cond_destroy: destruction of unknown cond var");
2338       }
2339    }
2340 }
2341 
2342 static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2343 {
2344    /* 'tid' has signalled on 'cond'.  As per the comment above, bind
2345       cond to a SO if it is not already so bound, and 'send' on the
2346       SO.  This is later used by other thread(s) which successfully
2347       exit from a pthread_cond_wait on the same cv; then they 'recv'
2348       from the SO, thereby acquiring a dependency on this signalling
2349       event. */
2350    Thread*   thr;
2351    CVInfo*   cvi;
2352    //Lock*     lk;
2353 
2354    if (SHOW_EVENTS >= 1)
2355       VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2356                   (Int)tid, (void*)cond );
2357 
2358    thr = map_threads_maybe_lookup( tid );
2359    tl_assert(thr); /* cannot fail - Thread* must already exist */
2360 
2361    cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2362    tl_assert(cvi);
2363    tl_assert(cvi->so);
2364 
2365    // error-if: mutex is bogus
2366    // error-if: mutex is not locked
2367    // Hmm.  POSIX doesn't actually say that it's an error to call
2368    // pthread_cond_signal with the associated mutex being unlocked.
2369    // Although it does say that it should be "if consistent scheduling
2370    // is desired."  For that reason, print "dubious" if the lock isn't
2371    // held by any thread.  Skip the "dubious" if it is held by some
2372    // other thread; that sounds straight-out wrong.
2373    //
2374    // Anybody who writes code that signals on a CV without holding
2375    // the associated MX needs to be shipped off to a lunatic asylum
2376    // ASAP, even though POSIX doesn't actually declare such behaviour
2377    // illegal -- it makes code extremely difficult to understand/
2378    // reason about.  In particular it puts the signalling thread in
2379    // a situation where it is racing against the released waiter
2380    // as soon as the signalling is done, and so there needs to be
2381    // some auxiliary synchronisation mechanism in the program that
2382    // makes this safe -- or the race(s) need to be harmless, or
2383    // probably nonexistent.
2384    //
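   // A hypothetical sketch of the pattern in question (client code,
   // not Helgrind's):
   //
   //    data = compute();          // signaller, mutex NOT held
   //    pthread_cond_signal(&cv);  // "dubious" per the check below
   //    data = cleanup(data);      // can race with the woken waiter
   //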
2385    if (1) {
2386       Lock* lk = NULL;
2387       if (cvi->mx_ga != 0) {
2388          lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
2389       }
2390       /* note: lk could be NULL.  Be careful. */
2391       if (lk) {
2392          if (lk->kind == LK_rdwr) {
2393             HG_(record_error_Misc)(thr,
2394                "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2395          }
2396          if (lk->heldBy == NULL) {
2397             HG_(record_error_Misc)(thr,
2398                "pthread_cond_{signal,broadcast}: dubious: "
2399                "associated lock is not held by any thread");
2400          }
2401          if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
2402             HG_(record_error_Misc)(thr,
2403                "pthread_cond_{signal,broadcast}: "
2404                "associated lock is not held by calling thread");
2405          }
2406       } else {
2407          /* Couldn't even find the damn thing. */
2408          // But actually .. that's not necessarily an error.  We don't
2409          // know the (CV,MX) binding until a pthread_cond_wait or bcast
2410          // shows us what it is, and that may not have happened yet.
2411          // So just keep quiet in this circumstance.
2412          //HG_(record_error_Misc)( thr,
2413          //   "pthread_cond_{signal,broadcast}: "
2414          //   "no or invalid mutex associated with cond");
2415       }
2416    }
2417 
2418    libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
2419 }
2420 
2421 /* returns True if it reckons 'mutex' is valid and held by this
2422    thread, else False */
2423 static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2424                                             void* cond, void* mutex )
2425 {
2426    Thread* thr;
2427    Lock*   lk;
2428    Bool    lk_valid = True;
2429    CVInfo* cvi;
2430 
2431    if (SHOW_EVENTS >= 1)
2432       VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2433                   "(ctid=%d, cond=%p, mutex=%p)\n",
2434                   (Int)tid, (void*)cond, (void*)mutex );
2435 
2436    thr = map_threads_maybe_lookup( tid );
2437    tl_assert(thr); /* cannot fail - Thread* must already exist */
2438 
2439    lk = map_locks_maybe_lookup( (Addr)mutex );
2440 
2441    /* Check for stupid mutex arguments.  There are various ways to be
2442       a bozo.  Only complain once, though, even if more than one thing
2443       is wrong. */
2444    if (lk == NULL) {
2445       lk_valid = False;
2446       HG_(record_error_Misc)(
2447          thr,
2448          "pthread_cond_{timed}wait called with invalid mutex" );
2449    } else {
2450       tl_assert( HG_(is_sane_LockN)(lk) );
2451       if (lk->kind == LK_rdwr) {
2452          lk_valid = False;
2453          HG_(record_error_Misc)(
2454             thr, "pthread_cond_{timed}wait called with mutex "
2455                  "of type pthread_rwlock_t*" );
2456       } else
2457          if (lk->heldBy == NULL) {
2458          lk_valid = False;
2459          HG_(record_error_Misc)(
2460             thr, "pthread_cond_{timed}wait called with un-held mutex");
2461       } else
2462       if (lk->heldBy != NULL
2463           && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
2464          lk_valid = False;
2465          HG_(record_error_Misc)(
2466             thr, "pthread_cond_{timed}wait called with mutex "
2467                  "held by a different thread" );
2468       }
2469    }
2470 
2471    // error-if: cond is also associated with a different mutex
2472    cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2473    tl_assert(cvi);
2474    tl_assert(cvi->so);
2475    if (cvi->nWaiters == 0) {
2476       /* form initial (CV,MX) binding */
2477       cvi->mx_ga = mutex;
2478    }
2479    else /* check existing (CV,MX) binding */
2480    if (cvi->mx_ga != mutex) {
2481       HG_(record_error_Misc)(
2482          thr, "pthread_cond_{timed}wait: cond is associated "
2483               "with a different mutex");
2484    }
2485    cvi->nWaiters++;
2486 
2487    return lk_valid;
2488 }
2489 
2490 static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2491                                              void* cond, void* mutex,
2492                                              Bool timeout)
2493 {
2494    /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
2495       the SO for this cond, and 'recv' from it so as to acquire a
2496       dependency edge back to the signaller/broadcaster. */
2497    Thread* thr;
2498    CVInfo* cvi;
2499 
2500    if (SHOW_EVENTS >= 1)
2501       VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2502                   "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
2503                   (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
2504 
2505    thr = map_threads_maybe_lookup( tid );
2506    tl_assert(thr); /* cannot fail - Thread* must already exist */
2507 
2508    // error-if: cond is also associated with a different mutex
2509 
2510    cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2511    if (!cvi) {
2512       /* This is either a bug in helgrind or an error in the guest
2513          application (e.g. the cond var was destroyed by another
2514          thread).  Let's assume helgrind is perfect ...
2515          Note that this is similar to drd's behaviour. */
2516       HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2517                              " being waited upon");
2518       return;
2519    }
2520 
2521    tl_assert(cvi);
2522    tl_assert(cvi->so);
2523    tl_assert(cvi->nWaiters > 0);
2524 
2525    if (!timeout && !libhb_so_everSent(cvi->so)) {
2526       /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
2527          it?  If this happened it would surely be a bug in the threads
2528          library.  Or one of those fabled "spurious wakeups". */
2529       HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2530                                    "succeeded"
2531                                    " without prior pthread_cond_signal");
2532    }
2533 
2534    /* anyway, acquire a dependency on it. */
2535    libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2536 
2537    cvi->nWaiters--;
2538 }
2539 
2540 static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
2541                                              void* cond, void* cond_attr )
2542 {
2543    CVInfo* cvi;
2544 
2545    if (SHOW_EVENTS >= 1)
2546       VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
2547                   "(ctid=%d, cond=%p, cond_attr=%p)\n",
2548                   (Int)tid, (void*)cond, (void*) cond_attr );
2549 
2550    cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2551    tl_assert (cvi);
2552    tl_assert (cvi->so);
2553 }
2554 
2555 
2556 static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2557                                                void* cond, Bool cond_is_init )
2558 {
2559    /* Deal with destroy events.  The only purpose is to free storage
2560       associated with the CV, so as to avoid any possible resource
2561       leaks. */
2562    if (SHOW_EVENTS >= 1)
2563       VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2564                   "(ctid=%d, cond=%p, cond_is_init=%d)\n",
2565                   (Int)tid, (void*)cond, (Int)cond_is_init );
2566 
2567    map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
2568 }
2569 
2570 
2571 /* ------------------------------------------------------- */
2572 /* -------------- events to do with rwlocks -------------- */
2573 /* ------------------------------------------------------- */
2574 
2575 /* EXPOSITION only */
2576 static
2577 void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2578 {
2579    if (SHOW_EVENTS >= 1)
2580       VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2581                   (Int)tid, (void*)rwl );
2582    map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
2583    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2584       all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2585 }
2586 
2587 static
2588 void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2589 {
2590    Thread* thr;
2591    Lock*   lk;
2592    if (SHOW_EVENTS >= 1)
2593       VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2594                   (Int)tid, (void*)rwl );
2595 
2596    thr = map_threads_maybe_lookup( tid );
2597    /* cannot fail - Thread* must already exist */
2598    tl_assert( HG_(is_sane_Thread)(thr) );
2599 
2600    lk = map_locks_maybe_lookup( (Addr)rwl );
2601 
2602    if (lk == NULL || lk->kind != LK_rdwr) {
2603       HG_(record_error_Misc)(
2604          thr, "pthread_rwlock_destroy with invalid argument" );
2605    }
2606 
2607    if (lk) {
2608       tl_assert( HG_(is_sane_LockN)(lk) );
2609       tl_assert( lk->guestaddr == (Addr)rwl );
2610       if (lk->heldBy) {
2611          /* Basically act like we unlocked the lock */
2612          HG_(record_error_Misc)(
2613             thr, "pthread_rwlock_destroy of a locked rwlock" );
2614          /* remove lock from locksets of all owning threads */
2615          remove_Lock_from_locksets_of_all_owning_Threads( lk );
2616          VG_(deleteBag)( lk->heldBy );
2617          lk->heldBy = NULL;
2618          lk->heldW = False;
2619          lk->acquired_at = NULL;
2620       }
2621       tl_assert( !lk->heldBy );
2622       tl_assert( HG_(is_sane_LockN)(lk) );
2623 
2624       if (HG_(clo_track_lockorders))
2625          laog__handle_one_lock_deletion(lk);
2626       map_locks_delete( lk->guestaddr );
2627       del_LockN( lk );
2628    }
2629 
2630    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2631       all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2632 }
2633 
2634 static
2635 void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2636                                        void* rwl,
2637                                        Word isW, Word isTryLock )
2638 {
2639    /* Just check the rwl is sane; nothing else to do. */
2640    // 'rwl' may be invalid - not checked by wrapper
2641    Thread* thr;
2642    Lock*   lk;
2643    if (SHOW_EVENTS >= 1)
2644       VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2645                   (Int)tid, (Int)isW, (void*)rwl );
2646 
2647    tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2648    tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
2649    thr = map_threads_maybe_lookup( tid );
2650    tl_assert(thr); /* cannot fail - Thread* must already exist */
2651 
2652    lk = map_locks_maybe_lookup( (Addr)rwl );
2653    if ( lk
2654         && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2655       /* Wrong kind of lock.  Duh.  */
2656       HG_(record_error_Misc)(
2657          thr, "pthread_rwlock_{rd,rw}lock with a "
2658               "pthread_mutex_t* argument " );
2659    }
2660 }
2661 
2662 static
2663 void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2664 {
2665    // only called if the real library call succeeded - so mutex is sane
2666    Thread* thr;
2667    if (SHOW_EVENTS >= 1)
2668       VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2669                   (Int)tid, (Int)isW, (void*)rwl );
2670 
2671    tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2672    thr = map_threads_maybe_lookup( tid );
2673    tl_assert(thr); /* cannot fail - Thread* must already exist */
2674 
2675    (isW ? evhH__post_thread_w_acquires_lock
2676         : evhH__post_thread_r_acquires_lock)(
2677       thr,
2678       LK_rdwr, /* if not known, create new lock with this LockKind */
2679       (Addr)rwl
2680    );
2681 }
2682 
2683 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2684 {
2685    // 'rwl' may be invalid - not checked by wrapper
2686    Thread* thr;
2687    if (SHOW_EVENTS >= 1)
2688       VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2689                   (Int)tid, (void*)rwl );
2690 
2691    thr = map_threads_maybe_lookup( tid );
2692    tl_assert(thr); /* cannot fail - Thread* must already exist */
2693 
2694    evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2695 }
2696 
2697 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2698 {
2699    // only called if the real library call succeeded - so mutex is sane
2700    Thread* thr;
2701    if (SHOW_EVENTS >= 1)
2702       VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2703                   (Int)tid, (void*)rwl );
2704    thr = map_threads_maybe_lookup( tid );
2705    tl_assert(thr); /* cannot fail - Thread* must already exist */
2706 
2707    // anything we should do here?
2708 }
2709 
2710 
2711 /* ---------------------------------------------------------- */
2712 /* -------------- events to do with semaphores -------------- */
2713 /* ---------------------------------------------------------- */
2714 
2715 /* This is similar to but not identical to the handling for condition
2716    variables. */
2717 
2718 /* For each semaphore, we maintain a stack of SOs.  When a 'post'
2719    operation is done on a semaphore (unlocking, essentially), a new SO
2720    is created for the posting thread, the posting thread does a strong
2721    send to it (which merely installs the posting thread's VC in the
2722    SO), and the SO is pushed on the semaphore's stack.
2723 
2724    Later, when a (probably different) thread completes 'wait' on the
2725    semaphore, we pop a SO off the semaphore's stack (which should be
2726    nonempty), and do a strong recv from it.  This mechanism creates
2727    dependencies between posters and waiters of the semaphore.
2728 
2729    It may not be necessary to use a stack - perhaps a bag of SOs would
2730    do.  But we do need to keep track of how many unused-up posts have
2731    happened for the semaphore.
2732 
2733    Imagine T1 and T2 both post once on a semaphore S, and T3 waits
2734    twice on S.  T3 cannot complete its waits without both T1 and T2
2735    posting.  The above mechanism will ensure that T3 acquires
2736    dependencies on both T1 and T2.
2737 
2738    When a semaphore is initialised with value N, we do as if we'd
2739    posted N times on the semaphore: basically create N SOs and do a
2740    strong send to all of them.  This allows up to N waits on the
2741    semaphore to acquire a dependency on the initialisation point,
2742    which AFAICS is the correct behaviour.
2743 
2744    We don't emit an error for DESTROY_PRE on a semaphore we don't know
2745    about.  We should.
2746 */
2747 
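/* Illustrative sketch (not part of the tool): a minimal client
   program realising the T1/T2/T3 scenario described above, assuming
   a POSIX semaphore initialised to zero.  Each sem_post pushes one
   SO; each completed sem_wait pops one and strong-recvs from it, so
   the waiter ends up ordered after both posters regardless of which
   SO it pops first. */
#if 0
#include <pthread.h>
#include <semaphore.h>

static sem_t s;
static int data1, data2;

static void* t1_fn ( void* v ) { data1 = 1; sem_post(&s); return NULL; }
static void* t2_fn ( void* v ) { data2 = 2; sem_post(&s); return NULL; }

int main ( void )
{
   pthread_t t1, t2;
   sem_init(&s, 0/*pshared*/, 0/*value*/);
   pthread_create(&t1, NULL, t1_fn, NULL);
   pthread_create(&t2, NULL, t2_fn, NULL);
   sem_wait(&s);   /* pops one poster's SO and strong-recvs from it */
   sem_wait(&s);   /* pops the other poster's SO                    */
   /* Both waits completed: this thread now depends on both T1 and
      T2, so reading data1/data2 here is race-free. */
   return data1 + data2;
}
#endif
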
2748 /* sem_t* -> XArray* SO* */
2749 static WordFM* map_sem_to_SO_stack = NULL;
2750 
2751 static void map_sem_to_SO_stack_INIT ( void ) {
2752    if (map_sem_to_SO_stack == NULL) {
2753       map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2754                                         HG_(free), NULL );
2755    }
2756 }
2757 
2758 static void push_SO_for_sem ( void* sem, SO* so ) {
2759    UWord   keyW;
2760    XArray* xa;
2761    tl_assert(so);
2762    map_sem_to_SO_stack_INIT();
2763    if (VG_(lookupFM)( map_sem_to_SO_stack,
2764                       &keyW, (UWord*)&xa, (UWord)sem )) {
2765       tl_assert(keyW == (UWord)sem);
2766       tl_assert(xa);
2767       VG_(addToXA)( xa, &so );
2768    } else {
2769       xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2770       VG_(addToXA)( xa, &so );
2771       VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
2772    }
2773 }
2774 
2775 static SO* mb_pop_SO_for_sem ( void* sem ) {
2776    UWord    keyW;
2777    XArray*  xa;
2778    SO* so;
2779    map_sem_to_SO_stack_INIT();
2780    if (VG_(lookupFM)( map_sem_to_SO_stack,
2781                       &keyW, (UWord*)&xa, (UWord)sem )) {
2782       /* xa is the stack for this semaphore. */
2783       Word sz;
2784       tl_assert(keyW == (UWord)sem);
2785       sz = VG_(sizeXA)( xa );
2786       tl_assert(sz >= 0);
2787       if (sz == 0)
2788          return NULL; /* odd, the stack is empty */
2789       so = *(SO**)VG_(indexXA)( xa, sz-1 );
2790       tl_assert(so);
2791       VG_(dropTailXA)( xa, 1 );
2792       return so;
2793    } else {
2794       /* hmm, that's odd.  No stack for this semaphore. */
2795       return NULL;
2796    }
2797 }
2798 
2799 static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
2800 {
2801    UWord keyW, valW;
2802    SO*   so;
2803 
2804    if (SHOW_EVENTS >= 1)
2805       VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
2806                   (Int)tid, (void*)sem );
2807 
2808    map_sem_to_SO_stack_INIT();
2809 
2810    /* Empty out the semaphore's SO stack.  This way of doing it is
2811       stupid, but at least it's easy. */
2812    while (1) {
2813       so = mb_pop_SO_for_sem( sem );
2814       if (!so) break;
2815       libhb_so_dealloc(so);
2816    }
2817 
2818    if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2819       XArray* xa = (XArray*)valW;
2820       tl_assert(keyW == (UWord)sem);
2821       tl_assert(xa);
2822       tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2823       VG_(deleteXA)(xa);
2824    }
2825 }
2826 
2827 static
2828 void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2829 {
2830    SO*     so;
2831    Thread* thr;
2832 
2833    if (SHOW_EVENTS >= 1)
2834       VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2835                   (Int)tid, (void*)sem, value );
2836 
2837    thr = map_threads_maybe_lookup( tid );
2838    tl_assert(thr); /* cannot fail - Thread* must already exist */
2839 
2840    /* Empty out the semaphore's SO stack.  This way of doing it is
2841       stupid, but at least it's easy. */
2842    while (1) {
2843       so = mb_pop_SO_for_sem( sem );
2844       if (!so) break;
2845       libhb_so_dealloc(so);
2846    }
2847 
2848    /* If we don't do this check, the following while loop runs us out
2849       of memory for stupid initial values of 'value'. */
2850    if (value > 10000) {
2851       HG_(record_error_Misc)(
2852          thr, "sem_init: initial value exceeds 10000; using 10000" );
2853       value = 10000;
2854    }
2855 
2856    /* Now create 'value' new SOs for the thread, do a strong send to
2857       each of them, and push them all on the stack. */
2858    for (; value > 0; value--) {
2859       Thr* hbthr = thr->hbthr;
2860       tl_assert(hbthr);
2861 
2862       so = libhb_so_alloc();
2863       libhb_so_send( hbthr, so, True/*strong send*/ );
2864       push_SO_for_sem( sem, so );
2865    }
2866 }
2867 
2868 static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
2869 {
2870    /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
2871       it (iow, write our VC into it, then tick ours), and push the SO
2872       onto a stack of SOs associated with 'sem'.  This is later used
2873       by other thread(s) which successfully exit from a sem_wait on
2874       the same sem; by doing a strong recv from SOs popped off the
2875       stack, they acquire dependencies on the posting thread
2876       segment(s). */
2877 
2878    Thread* thr;
2879    SO*     so;
2880    Thr*    hbthr;
2881 
2882    if (SHOW_EVENTS >= 1)
2883       VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
2884                   (Int)tid, (void*)sem );
2885 
2886    thr = map_threads_maybe_lookup( tid );
2887    tl_assert(thr); /* cannot fail - Thread* must already exist */
2888 
2889    // error-if: sem is bogus
2890 
2891    hbthr = thr->hbthr;
2892    tl_assert(hbthr);
2893 
2894    so = libhb_so_alloc();
2895    libhb_so_send( hbthr, so, True/*strong send*/ );
2896    push_SO_for_sem( sem, so );
2897 }
2898 
2899 static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
2900 {
2901    /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
2902       the 'sem' from this semaphore's SO-stack, and do a strong recv
2903       from it.  This creates a dependency back to one of the post-ers
2904       for the semaphore. */
2905 
2906    Thread* thr;
2907    SO*     so;
2908    Thr*    hbthr;
2909 
2910    if (SHOW_EVENTS >= 1)
2911       VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
2912                   (Int)tid, (void*)sem );
2913 
2914    thr = map_threads_maybe_lookup( tid );
2915    tl_assert(thr); /* cannot fail - Thread* must already exist */
2916 
2917    // error-if: sem is bogus
2918 
2919    so = mb_pop_SO_for_sem( sem );
2920 
2921    if (so) {
2922       hbthr = thr->hbthr;
2923       tl_assert(hbthr);
2924 
2925       libhb_so_recv( hbthr, so, True/*strong recv*/ );
2926       libhb_so_dealloc(so);
2927    } else {
2928       /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
2929          If this happened it would surely be a bug in the threads
2930          library. */
2931       HG_(record_error_Misc)(
2932          thr, "Bug in libpthread: sem_wait succeeded on"
2933               " semaphore without prior sem_post");
2934    }
2935 }
2936 
2937 
2938 /* -------------------------------------------------------- */
2939 /* -------------- events to do with barriers -------------- */
2940 /* -------------------------------------------------------- */
2941 
2942 typedef
2943    struct {
2944       Bool    initted; /* has it yet been initted by guest? */
2945       Bool    resizable; /* is resizing allowed? */
2946       UWord   size;    /* declared size */
2947       XArray* waiting; /* XA of Thread*.  # present is 0 .. .size */
2948    }
2949    Bar;
2950 
2951 static Bar* new_Bar ( void ) {
2952    Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2953    /* all fields are zero */
2954    tl_assert(bar->initted == False);
2955    return bar;
2956 }
2957 
2958 static void delete_Bar ( Bar* bar ) {
2959    tl_assert(bar);
2960    if (bar->waiting)
2961       VG_(deleteXA)(bar->waiting);
2962    HG_(free)(bar);
2963 }
2964 
2965 /* A mapping which stores auxiliary data for barriers. */
2966 
2967 /* pthread_barrier_t* -> Bar* */
2968 static WordFM* map_barrier_to_Bar = NULL;
2969 
2970 static void map_barrier_to_Bar_INIT ( void ) {
2971    if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2972       map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2973                                        "hg.mbtBI.1", HG_(free), NULL );
2974    }
2975 }
2976 
2977 static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2978    UWord key, val;
2979    map_barrier_to_Bar_INIT();
2980    if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2981       tl_assert(key == (UWord)barrier);
2982       return (Bar*)val;
2983    } else {
2984       Bar* bar = new_Bar();
2985       VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2986       return bar;
2987    }
2988 }
2989 
2990 static void map_barrier_to_Bar_delete ( void* barrier ) {
2991    UWord keyW, valW;
2992    map_barrier_to_Bar_INIT();
2993    if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2994       Bar* bar = (Bar*)valW;
2995       tl_assert(keyW == (UWord)barrier);
2996       delete_Bar(bar);
2997    }
2998 }
2999 
3000 
3001 static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
3002                                                void* barrier,
3003                                                UWord count,
3004                                                UWord resizable )
3005 {
3006    Thread* thr;
3007    Bar*    bar;
3008 
3009    if (SHOW_EVENTS >= 1)
3010       VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
3011                   "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
3012                   (Int)tid, (void*)barrier, count, resizable );
3013 
3014    thr = map_threads_maybe_lookup( tid );
3015    tl_assert(thr); /* cannot fail - Thread* must already exist */
3016 
3017    if (count == 0) {
3018       HG_(record_error_Misc)(
3019          thr, "pthread_barrier_init: 'count' argument is zero"
3020       );
3021    }
3022 
3023    if (resizable != 0 && resizable != 1) {
3024       HG_(record_error_Misc)(
3025          thr, "pthread_barrier_init: invalid 'resizable' argument"
3026       );
3027    }
3028 
3029    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3030    tl_assert(bar);
3031 
3032    if (bar->initted) {
3033       HG_(record_error_Misc)(
3034          thr, "pthread_barrier_init: barrier is already initialised"
3035       );
3036    }
3037 
3038    if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3039       tl_assert(bar->initted);
3040       HG_(record_error_Misc)(
3041          thr, "pthread_barrier_init: threads are waiting at barrier"
3042       );
3043       VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3044    }
3045    if (!bar->waiting) {
3046       bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
3047                                  sizeof(Thread*) );
3048    }
3049 
3050    tl_assert(VG_(sizeXA)(bar->waiting) == 0);
3051    bar->initted   = True;
3052    bar->resizable = resizable == 1 ? True : False;
3053    bar->size      = count;
3054 }
3055 
3056 
3057 static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
3058                                                   void* barrier )
3059 {
3060    Thread* thr;
3061    Bar*    bar;
3062 
3063    /* Deal with destroy events.  The only purpose is to free storage
3064       associated with the barrier, so as to avoid any possible
3065       resource leaks. */
3066    if (SHOW_EVENTS >= 1)
3067       VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
3068                   "(tid=%d, barrier=%p)\n",
3069                   (Int)tid, (void*)barrier );
3070 
3071    thr = map_threads_maybe_lookup( tid );
3072    tl_assert(thr); /* cannot fail - Thread* must already exist */
3073 
3074    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3075    tl_assert(bar);
3076 
3077    if (!bar->initted) {
3078       HG_(record_error_Misc)(
3079          thr, "pthread_barrier_destroy: barrier was never initialised"
3080       );
3081    }
3082 
3083    if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3084       HG_(record_error_Misc)(
3085          thr, "pthread_barrier_destroy: threads are waiting at barrier"
3086       );
3087    }
3088 
3089    /* Maybe we shouldn't do this; just let it persist, so that when it
3090       is reinitialised we don't need to do any dynamic memory
3091       allocation?  The downside is a potentially unlimited space leak,
3092       if the client creates (in turn) a large number of barriers all
3093       at different locations.  Note that if we do later move to the
3094       don't-delete-it scheme, we need to mark the barrier as
3095       uninitialised again since otherwise a later _init call will
3096       elicit a duplicate-init error.  */
3097    map_barrier_to_Bar_delete( barrier );
3098 }
3099 
3100 
3101 /* All the threads have arrived.  Now do the Interesting Bit.  Get a
3102    new synchronisation object and do a weak send to it from all the
3103    participating threads.  This makes its vector clocks be the join of
3104    all the individual threads' vector clocks.  Then do a strong
3105    receive from it back to all threads, so that their VCs are a copy
3106    of it (hence are all equal to the join of their original VCs.) */
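/* Worked example (illustrative): suppose three waiters arrive with
   vector clocks <2,0,0>, <0,3,0> and <0,0,1>.  The three weak sends
   leave the SO holding the join <2,3,1>; each strong receive then
   joins that back into the receiving thread, so all three threads
   leave the barrier with the identical clock <2,3,1>, as required. */
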
3107 static void do_barrier_cross_sync_and_empty ( Bar* bar )
3108 {
3109    /* XXX check bar->waiting has no duplicates */
3110    UWord i;
3111    SO*   so = libhb_so_alloc();
3112 
3113    tl_assert(bar->waiting);
3114    tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3115 
3116    /* compute the join ... */
3117    for (i = 0; i < bar->size; i++) {
3118       Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3119       Thr* hbthr = t->hbthr;
3120       libhb_so_send( hbthr, so, False/*weak send*/ );
3121    }
3122    /* ... and distribute to all threads */
3123    for (i = 0; i < bar->size; i++) {
3124       Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3125       Thr* hbthr = t->hbthr;
3126       libhb_so_recv( hbthr, so, True/*strong recv*/ );
3127    }
3128 
3129    /* finally, we must empty out the waiting vector */
3130    VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3131 
3132    /* and we don't need this any more.  Perhaps a stack-allocated
3133       SO would be better? */
3134    libhb_so_dealloc(so);
3135 }
3136 
3137 
3138 static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
3139                                                void* barrier )
3140 {
3141   /* This function gets called after a client thread calls
3142      pthread_barrier_wait but before it arrives at the real
3143      pthread_barrier_wait.
3144 
3145      Why is the following correct?  It's a bit subtle.
3146 
3147      If this is not the last thread arriving at the barrier, we simply
3148      note its presence and return.  Because valgrind (at least as of
3149      Nov 08) is single threaded, we are guaranteed safe from any race
3150      conditions when in this function -- no other client threads are
3151      running.
3152 
3153      If this is the last thread, then we are again the only running
3154      thread.  All the other threads will have either arrived at the
3155      real pthread_barrier_wait or are on their way to it, but in any
3156      case are guaranteed not to be able to move past it, because this
3157      thread is currently in this function and so has not yet arrived
3158      at the real pthread_barrier_wait.  That means that:
3159 
3160      1. While we are in this function, none of the other threads
3161         waiting at the barrier can move past it.
3162 
3163      2. When this function returns (and simulated execution resumes),
3164         this thread and all other waiting threads will be able to move
3165         past the real barrier.
3166 
3167      Because of this, it is now safe to update the vector clocks of
3168      all threads, to represent the fact that they all arrived at the
3169      barrier and have all moved on.  There is no danger of any
3170      complications to do with some threads leaving the barrier and
3171      racing back round to the front, whilst others are still leaving
3172      (which is the primary source of complication in correct handling/
3173      implementation of barriers).  That can't happen because we update
3174      our data structures here so as to indicate that the threads have
3175      passed the barrier, even though, as per (2) above, they are
3176      guaranteed not to pass the barrier until we return.
3177 
3178      This relies crucially on Valgrind being single threaded.  If that
3179      changes, this will need to be reconsidered.
3180    */
3181    Thread* thr;
3182    Bar*    bar;
3183    UWord   present;
3184 
3185    if (SHOW_EVENTS >= 1)
3186       VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3187                   "(tid=%d, barrier=%p)\n",
3188                   (Int)tid, (void*)barrier );
3189 
3190    thr = map_threads_maybe_lookup( tid );
3191    tl_assert(thr); /* cannot fail - Thread* must already exist */
3192 
3193    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3194    tl_assert(bar);
3195 
3196    if (!bar->initted) {
3197       HG_(record_error_Misc)(
3198          thr, "pthread_barrier_wait: barrier is uninitialised"
3199       );
3200       return; /* client is broken .. avoid assertions below */
3201    }
3202 
3203    /* guaranteed by _INIT_PRE above */
3204    tl_assert(bar->size > 0);
3205    tl_assert(bar->waiting);
3206 
3207    VG_(addToXA)( bar->waiting, &thr );
3208 
3209    /* guaranteed by this function */
3210    present = VG_(sizeXA)(bar->waiting);
3211    tl_assert(present > 0 && present <= bar->size);
3212 
3213    if (present < bar->size)
3214       return;
3215 
3216    do_barrier_cross_sync_and_empty(bar);
3217 }
3218 
3219 
3220 static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3221                                                  void* barrier,
3222                                                  UWord newcount )
3223 {
3224    Thread* thr;
3225    Bar*    bar;
3226    UWord   present;
3227 
3228    if (SHOW_EVENTS >= 1)
3229       VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3230                   "(tid=%d, barrier=%p, newcount=%lu)\n",
3231                   (Int)tid, (void*)barrier, newcount );
3232 
3233    thr = map_threads_maybe_lookup( tid );
3234    tl_assert(thr); /* cannot fail - Thread* must already exist */
3235 
3236    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3237    tl_assert(bar);
3238 
3239    if (!bar->initted) {
3240       HG_(record_error_Misc)(
3241          thr, "pthread_barrier_resize: barrier is uninitialised"
3242       );
3243       return; /* client is broken .. avoid assertions below */
3244    }
3245 
3246    if (!bar->resizable) {
3247       HG_(record_error_Misc)(
3248          thr, "pthread_barrier_resize: barrier is may not be resized"
3249       );
3250       return; /* client is broken .. avoid assertions below */
3251    }
3252 
3253    if (newcount == 0) {
3254       HG_(record_error_Misc)(
3255          thr, "pthread_barrier_resize: 'newcount' argument is zero"
3256       );
3257       return; /* client is broken .. avoid assertions below */
3258    }
3259 
3260    /* guaranteed by _INIT_PRE above */
3261    tl_assert(bar->size > 0);
3262    tl_assert(bar->waiting);
3263    /* Guaranteed by this fn */
3264    tl_assert(newcount > 0);
3265 
3266    if (newcount >= bar->size) {
3267       /* Increasing the capacity.  There's no possibility of threads
3268          moving on from the barrier in this situation, so just note
3269          the fact and do nothing more. */
3270       bar->size = newcount;
3271    } else {
3272       /* Decreasing the capacity.  If we decrease it to be equal or
3273          below the number of waiting threads, they will now move past
3274          the barrier, so need to mess with dep edges in the same way
3275          as if the barrier had filled up normally. */
3276       present = VG_(sizeXA)(bar->waiting);
3277       tl_assert(present >= 0 && present <= bar->size);
3278       if (newcount <= present) {
3279          bar->size = present; /* keep the cross_sync call happy */
3280          do_barrier_cross_sync_and_empty(bar);
3281       }
3282       bar->size = newcount;
3283    }
3284 }
3285 
3286 
3287 /* ----------------------------------------------------- */
3288 /* ----- events to do with user-specified HB edges ----- */
3289 /* ----------------------------------------------------- */
3290 
3291 /* A mapping from arbitrary UWord tag to the SO associated with it.
3292    The UWord tags are meaningless to us, interpreted only by the
3293    user. */
3294 
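/* Illustrative client-side sketch (an assumption about typical use,
   not part of the tool): the helgrind.h macros ANNOTATE_HAPPENS_BEFORE
   and ANNOTATE_HAPPENS_AFTER drive the SEND_PRE/RECV_POST handlers
   below, using the address of a flag as the usertag. */
#if 0
#include "helgrind.h"

static int payload;
static volatile int flag = 0;

static void producer ( void ) {      /* runs in thread 1 */
   payload = 42;
   ANNOTATE_HAPPENS_BEFORE(&flag);   /* weak send on the SO keyed by &flag */
   flag = 1;
}

static void consumer ( void ) {      /* runs in thread 2 */
   while (flag == 0) { /* spin */ }
   ANNOTATE_HAPPENS_AFTER(&flag);    /* strong recv: orders us after the
                                        producer's send */
   /* reading 'payload' here is race-free in Helgrind's view */
}
#endif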
3295 
3296 
3297 /* UWord -> SO* */
3298 static WordFM* map_usertag_to_SO = NULL;
3299 
3300 static void map_usertag_to_SO_INIT ( void ) {
3301    if (UNLIKELY(map_usertag_to_SO == NULL)) {
3302       map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3303                                       "hg.mutS.1", HG_(free), NULL );
3304    }
3305 }
3306 
3307 static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3308    UWord key, val;
3309    map_usertag_to_SO_INIT();
3310    if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3311       tl_assert(key == (UWord)usertag);
3312       return (SO*)val;
3313    } else {
3314       SO* so = libhb_so_alloc();
3315       VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3316       return so;
3317    }
3318 }
3319 
3320 static void map_usertag_to_SO_delete ( UWord usertag ) {
3321    UWord keyW, valW;
3322    map_usertag_to_SO_INIT();
3323    if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3324       SO* so = (SO*)valW;
3325       tl_assert(keyW == usertag);
3326       tl_assert(so);
3327       libhb_so_dealloc(so);
3328    }
3329 }
3330 
3331 
3332 static
3333 void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3334 {
3335    /* TID is just about to notionally send a message on a notional
3336       abstract synchronisation object whose identity is given by
3337       USERTAG.  Bind USERTAG to a real SO if it is not already so
3338       bound, and do a 'weak send' on the SO.  This joins the vector
3339       clocks from this thread into any vector clocks already present
3340       in the SO.  The resulting SO vector clocks are later used by
3341       other thread(s) which successfully 'receive' from the SO,
3342       thereby acquiring a dependency on all the events that have
3343       previously signalled on this SO. */
3344    Thread* thr;
3345    SO*     so;
3346 
3347    if (SHOW_EVENTS >= 1)
3348       VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3349                   (Int)tid, usertag );
3350 
3351    thr = map_threads_maybe_lookup( tid );
3352    tl_assert(thr); /* cannot fail - Thread* must already exist */
3353 
3354    so = map_usertag_to_SO_lookup_or_alloc( usertag );
3355    tl_assert(so);
3356 
3357    libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
3358 }
3359 
3360 static
3361 void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3362 {
3363    /* TID has just notionally received a message from a notional
3364       abstract synchronisation object whose identity is given by
3365       USERTAG.  Bind USERTAG to a real SO if it is not already so
3366       bound.  If the SO has at some point in the past been 'sent' on,
3367       do a 'strong receive' on it, thereby acquiring a dependency on
3368       the sender. */
3369    Thread* thr;
3370    SO*     so;
3371 
3372    if (SHOW_EVENTS >= 1)
3373       VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3374                   (Int)tid, usertag );
3375 
3376    thr = map_threads_maybe_lookup( tid );
3377    tl_assert(thr); /* cannot fail - Thread* must already exist */
3378 
3379    so = map_usertag_to_SO_lookup_or_alloc( usertag );
3380    tl_assert(so);
3381 
3382    /* Acquire a dependency on it.  If the SO has never so far been
3383       sent on, then libhb_so_recv will do nothing.  So we're safe
3384       regardless of SO's history. */
3385    libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3386 }
3387 
3388 static
3389 void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3390 {
3391    /* TID declares that any happens-before edges notionally stored in
3392       USERTAG can be deleted.  If (as would normally be the case) a
3393       SO is associated with USERTAG, then the association is removed
3394       and all resources associated with SO are freed.  Importantly,
3395       that frees up any VTSs stored in SO. */
3396    if (SHOW_EVENTS >= 1)
3397       VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3398                   (Int)tid, usertag );
3399 
3400    map_usertag_to_SO_delete( usertag );
3401 }
3402 
3403 
3404 #if defined(VGO_solaris)
3405 /* ----------------------------------------------------- */
3406 /* --- events to do with bind guard/clear intercepts --- */
3407 /* ----------------------------------------------------- */
3408 
3409 static
3410 void evh__HG_RTLD_BIND_GUARD(ThreadId tid, Int flags)
3411 {
3412    if (SHOW_EVENTS >= 1)
3413       VG_(printf)("evh__HG_RTLD_BIND_GUARD"
3414                   "(tid=%d, flags=%d)\n",
3415                   (Int)tid, flags);
3416 
3417    Thread *thr = map_threads_maybe_lookup(tid);
3418    tl_assert(thr != NULL);
3419 
3420    Int bindflag = (flags & VKI_THR_FLG_RTLD);
3421    if ((bindflag & thr->bind_guard_flag) == 0) {
3422       thr->bind_guard_flag |= bindflag;
3423       HG_(thread_enter_synchr)(thr);
3424       /* Misuse pthread_create_nesting_level for ignoring mutex activity. */
3425       HG_(thread_enter_pthread_create)(thr);
3426    }
3427 }
3428 
3429 static
3430 void evh__HG_RTLD_BIND_CLEAR(ThreadId tid, Int flags)
3431 {
3432    if (SHOW_EVENTS >= 1)
3433       VG_(printf)("evh__HG_RTLD_BIND_CLEAR"
3434                   "(tid=%d, flags=%d)\n",
3435                   (Int)tid, flags);
3436 
3437    Thread *thr = map_threads_maybe_lookup(tid);
3438    tl_assert(thr != NULL);
3439 
3440    Int bindflag = (flags & VKI_THR_FLG_RTLD);
3441    if ((thr->bind_guard_flag & bindflag) != 0) {
3442       thr->bind_guard_flag &= ~bindflag;
3443       HG_(thread_leave_synchr)(thr);
3444       HG_(thread_leave_pthread_create)(thr);
3445    }
3446 }
3447 #endif /* VGO_solaris */
3448 
3449 
3450 /*--------------------------------------------------------------*/
3451 /*--- Lock acquisition order monitoring                      ---*/
3452 /*--------------------------------------------------------------*/
3453 
3454 /* FIXME: here are some optimisations still to do in
3455           laog__pre_thread_acquires_lock.
3456 
3457    The graph is structured so that if L1 --*--> L2 then L1 must be
3458    acquired before L2.
3459 
3460    The common case is that some thread T holds (eg) L1 L2 and L3 and
3461    is repeatedly acquiring and releasing Ln, and there is no ordering
3462    error in what it is doing.  Hence it repeatedly:
3463 
3464    (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3465        produces the answer No (because there is no error).
3466 
3467    (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3468        (because they already got added the first time T acquired Ln).
3469 
3470    Hence cache these two events:
3471 
3472    (1) Cache result of the query from last time.  Invalidate the cache
3473        any time any edges are added to or deleted from laog.
3474 
3475    (2) Cache these add-edge requests and ignore them if said edges
3476        have already been added to laog.  Invalidate the cache any time
3477        any edges are deleted from laog.
3478 */
3479 
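/* A minimal sketch (hypothetical names, not implemented) of the
   caching suggested above.  It is deliberately conservative: one
   generation counter, bumped on every laog mutation, invalidates the
   cache, although as noted the add-edge cache strictly only needs
   invalidating on deletions. */
#if 0
static UWord laog_generation = 0;  /* bump in laog__add_edge/__del_edge */

typedef struct {
   UWord     stamp;   /* laog_generation when this entry was computed */
   Lock*     lk;      /* query: is there a path from lk ...           */
   WordSetID lset;    /* ... to any lock in this set?                 */
   Lock*     result;  /* cached answer of laog__do_dfs_from_to        */
} DfsQueryCache;

static DfsQueryCache dfs_cache = { 0, NULL, 0, NULL };

static Lock* cached_dfs_from_to ( Lock* lk, WordSetID dsts ) {
   if (dfs_cache.stamp == laog_generation
       && dfs_cache.lk == lk && dfs_cache.lset == dsts)
      return dfs_cache.result;               /* hit: reuse last answer */
   dfs_cache.stamp  = laog_generation;
   dfs_cache.lk     = lk;
   dfs_cache.lset   = dsts;
   dfs_cache.result = laog__do_dfs_from_to( lk, dsts );
   return dfs_cache.result;
}
#endif
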
3480 typedef
3481    struct {
3482       WordSetID inns; /* in univ_laog */
3483       WordSetID outs; /* in univ_laog */
3484    }
3485    LAOGLinks;
3486 
3487 /* lock order acquisition graph */
3488 static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3489 
3490 /* EXPOSITION ONLY: for each edge in 'laog', record the two places
3491    where that edge was created, so that we can show the user later if
3492    we need to. */
3493 typedef
3494    struct {
3495       Addr        src_ga; /* Lock guest addresses for */
3496       Addr        dst_ga; /* src/dst of the edge */
3497       ExeContext* src_ec; /* And corresponding places where that */
3498       ExeContext* dst_ec; /* ordering was established */
3499    }
3500    LAOGLinkExposition;
3501 
3502 static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
3503    /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3504    LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3505    LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3506    if (llx1->src_ga < llx2->src_ga) return -1;
3507    if (llx1->src_ga > llx2->src_ga) return  1;
3508    if (llx1->dst_ga < llx2->dst_ga) return -1;
3509    if (llx1->dst_ga > llx2->dst_ga) return  1;
3510    return 0;
3511 }
3512 
3513 static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3514 /* end EXPOSITION ONLY */
3515 
3516 
3517 __attribute__((noinline))
3518 static void laog__init ( void )
3519 {
3520    tl_assert(!laog);
3521    tl_assert(!laog_exposition);
3522    tl_assert(HG_(clo_track_lockorders));
3523 
3524    laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3525                       HG_(free), NULL/*unboxedcmp*/ );
3526 
3527    laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3528                                  cmp_LAOGLinkExposition );
3529 }
3530 
3531 static void laog__show ( const HChar* who ) {
3532    UWord i, ws_size;
3533    UWord* ws_words;
3534    Lock* me;
3535    LAOGLinks* links;
3536    VG_(printf)("laog (requested by %s) {\n", who);
3537    VG_(initIterFM)( laog );
3538    me = NULL;
3539    links = NULL;
3540    while (VG_(nextIterFM)( laog, (UWord*)&me,
3541                                  (UWord*)&links )) {
3542       tl_assert(me);
3543       tl_assert(links);
3544       VG_(printf)("   node %p:\n", me);
3545       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3546       for (i = 0; i < ws_size; i++)
3547          VG_(printf)("      inn %#lx\n", ws_words[i] );
3548       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3549       for (i = 0; i < ws_size; i++)
3550          VG_(printf)("      out %#lx\n", ws_words[i] );
3551       me = NULL;
3552       links = NULL;
3553    }
3554    VG_(doneIterFM)( laog );
3555    VG_(printf)("}\n");
3556 }
3557 
3558 static void univ_laog_do_GC ( void ) {
3559    Word i;
3560    LAOGLinks* links;
3561    Word seen = 0;
3562    Int prev_next_gc_univ_laog = next_gc_univ_laog;
3563    const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3564 
3565    Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3566                                         (Int) univ_laog_cardinality
3567                                         * sizeof(Bool) );
3568    // univ_laog_seen[*] set to 0 (False) by zalloc.
3569 
3570    VG_(initIterFM)( laog );
3571    links = NULL;
3572    while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3573       tl_assert(links);
3574       tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
3575       univ_laog_seen[links->inns] = True;
3576       tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
3577       univ_laog_seen[links->outs] = True;
3578       links = NULL;
3579    }
3580    VG_(doneIterFM)( laog );
3581 
3582    for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3583       if (univ_laog_seen[i])
3584          seen++;
3585       else
3586          HG_(dieWS) ( univ_laog, (WordSet)i );
3587    }
3588 
3589    HG_(free) (univ_laog_seen);
3590 
3591    // We need to decide the value of the next_gc.
3592    // 3 solutions were looked at:
3593    // Sol 1: garbage collect at seen * 2
3594    //   This solution was a lot slower, probably because we both do a lot of
3595    //   garbage collection and do not keep laog word sets long enough, even
3596    //   though they would have become useful again very soon.
3597    // Sol 2: garbage collect at a percentage increase of the current cardinality
3598    //         (with a min increase of 1)
3599    //   Trials with 1%, 5% and 10% increases were done on a small test program.
3600    //   1% is slightly faster than 5%, which is slightly slower than 10%.
3601    //   However, on a big application, this caused the memory to be exhausted,
3602    //   as even a 1% increase of size at each gc becomes a lot, when many gc
3603    //   are done.
3604    // Sol 3: always garbage collect at current cardinality + 1.
3605    //   This solution was the fastest of the 3 solutions, and caused no memory
3606    //   exhaustion in the big application.
3607    //
3608    // With regards to cost introduced by gc: on the t2t perf test (doing only
3609    // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3610    // version with garbage collection. With t2t 50 20 2, my machine started
3611    // to page out, and so the garbage collected version was much faster.
3612    // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3613    // performance difference is insignificant (~ 0.1 s).
3614    // Of course, it might be that real life programs are not well represented
3615    // by t2t.
3616 
3617    // If ever we want to have a more sophisticated control
3618    // (e.g. clo options to control the percentage increase or fixed increased),
3619    // we should do it here, eg.
3620    //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3621    // Currently, we just hard-code the solution 3 above.
3622    next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3623 
3624    if (VG_(clo_stats))
3625       VG_(message)
3626          (Vg_DebugMsg,
3627           "univ_laog_do_GC cardinality entered %d exit %d next gc at %d\n",
3628           (Int)univ_laog_cardinality, (Int)seen, next_gc_univ_laog);
3629 }
3630 
3631 
3632 __attribute__((noinline))
3633 static void laog__add_edge ( Lock* src, Lock* dst ) {
3634    UWord      keyW;
3635    LAOGLinks* links;
3636    Bool       presentF, presentR;
3637    if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3638 
3639    /* Take the opportunity to sanity check the graph.  Record in
3640       presentF if there is already a src->dst mapping in this node's
3641       forwards links, and presentR if there is already a src->dst
3642       mapping in this node's backwards links.  They should agree!
3643       Also, we need to know whether the edge was already present so as
3644       to decide whether or not to update the link details mapping.  We
3645       can compute presentF and presentR essentially for free, so may
3646       as well do this always. */
3647    presentF = presentR = False;
3648 
3649    /* Update the out edges for src */
3650    keyW  = 0;
3651    links = NULL;
3652    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
3653       WordSetID outs_new;
3654       tl_assert(links);
3655       tl_assert(keyW == (UWord)src);
3656       outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
3657       presentF = outs_new == links->outs;
3658       links->outs = outs_new;
3659    } else {
3660       links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
3661       links->inns = HG_(emptyWS)( univ_laog );
3662       links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
3663       VG_(addToFM)( laog, (UWord)src, (UWord)links );
3664    }
3665    /* Update the in edges for dst */
3666    keyW  = 0;
3667    links = NULL;
3668    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
3669       WordSetID inns_new;
3670       tl_assert(links);
3671       tl_assert(keyW == (UWord)dst);
3672       inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
3673       presentR = inns_new == links->inns;
3674       links->inns = inns_new;
3675    } else {
3676       links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
3677       links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
3678       links->outs = HG_(emptyWS)( univ_laog );
3679       VG_(addToFM)( laog, (UWord)dst, (UWord)links );
3680    }
3681 
3682    tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3683 
3684    if (!presentF && src->acquired_at && dst->acquired_at) {
3685       LAOGLinkExposition expo;
3686       /* If this edge is entering the graph, and we have acquired_at
3687          information for both src and dst, record those acquisition
3688          points.  Hence, if there is later a violation of this
3689          ordering, we can show the user the two places in which the
3690          required src-dst ordering was previously established. */
3691       if (0) VG_(printf)("acquire edge %#lx %#lx\n",
3692                          src->guestaddr, dst->guestaddr);
3693       expo.src_ga = src->guestaddr;
3694       expo.dst_ga = dst->guestaddr;
3695       expo.src_ec = NULL;
3696       expo.dst_ec = NULL;
3697       tl_assert(laog_exposition);
3698       if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
3699          /* we already have it; do nothing */
3700       } else {
3701          LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3702                                                sizeof(LAOGLinkExposition));
3703          expo2->src_ga = src->guestaddr;
3704          expo2->dst_ga = dst->guestaddr;
3705          expo2->src_ec = src->acquired_at;
3706          expo2->dst_ec = dst->acquired_at;
3707          VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
3708       }
3709    }
3710 
3711    if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3712       univ_laog_do_GC();
3713 }
3714 
3715 __attribute__((noinline))
3716 static void laog__del_edge ( Lock* src, Lock* dst ) {
3717    UWord      keyW;
3718    LAOGLinks* links;
3719    if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
3720    /* Update the out edges for src */
3721    keyW  = 0;
3722    links = NULL;
3723    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
3724       tl_assert(links);
3725       tl_assert(keyW == (UWord)src);
3726       links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
3727    }
3728    /* Update the in edges for dst */
3729    keyW  = 0;
3730    links = NULL;
3731    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
3732       tl_assert(links);
3733       tl_assert(keyW == (UWord)dst);
3734       links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
3735    }
3736 
3737    /* Remove the exposition of src,dst (if present) */
3738    {
3739       LAOGLinkExposition *fm_expo;
3740 
3741       LAOGLinkExposition expo;
3742       expo.src_ga = src->guestaddr;
3743       expo.dst_ga = dst->guestaddr;
3744       expo.src_ec = NULL;
3745       expo.dst_ec = NULL;
3746 
3747       if (VG_(delFromFM) (laog_exposition,
3748                           (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3749          HG_(free) (fm_expo);
3750       }
3751    }
3752 
3753    /* deleting edges can increase the nr of WSes, so check for gc. */
3754    if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3755       univ_laog_do_GC();
3756    if (0) VG_(printf)("laog__del_edge exit\n");
3757 }
3758 
3759 __attribute__((noinline))
3760 static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3761    UWord      keyW;
3762    LAOGLinks* links;
3763    keyW  = 0;
3764    links = NULL;
3765    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
3766       tl_assert(links);
3767       tl_assert(keyW == (UWord)lk);
3768       return links->outs;
3769    } else {
3770       return HG_(emptyWS)( univ_laog );
3771    }
3772 }
3773 
3774 __attribute__((noinline))
3775 static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3776    UWord      keyW;
3777    LAOGLinks* links;
3778    keyW  = 0;
3779    links = NULL;
3780    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
3781       tl_assert(links);
3782       tl_assert(keyW == (UWord)lk);
3783       return links->inns;
3784    } else {
3785       return HG_(emptyWS)( univ_laog );
3786    }
3787 }
3788 
3789 __attribute__((noinline))
3790 static void laog__sanity_check ( const HChar* who ) {
3791    UWord i, ws_size;
3792    UWord* ws_words;
3793    Lock* me;
3794    LAOGLinks* links;
3795    VG_(initIterFM)( laog );
3796    me = NULL;
3797    links = NULL;
3798    if (0) VG_(printf)("laog sanity check\n");
3799    while (VG_(nextIterFM)( laog, (UWord*)&me,
3800                                  (UWord*)&links )) {
3801       tl_assert(me);
3802       tl_assert(links);
3803       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3804       for (i = 0; i < ws_size; i++) {
3805          if ( ! HG_(elemWS)( univ_laog,
3806                              laog__succs( (Lock*)ws_words[i] ),
3807                              (UWord)me ))
3808             goto bad;
3809       }
3810       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3811       for (i = 0; i < ws_size; i++) {
3812          if ( ! HG_(elemWS)( univ_laog,
3813                              laog__preds( (Lock*)ws_words[i] ),
3814                              (UWord)me ))
3815             goto bad;
3816       }
3817       me = NULL;
3818       links = NULL;
3819    }
3820    VG_(doneIterFM)( laog );
3821    return;
3822 
3823   bad:
3824    VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3825    laog__show(who);
3826    tl_assert(0);
3827 }
3828 
3829 /* If there is a path in laog from 'src' to any of the elements in
3830    'dst', return an arbitrarily chosen element of 'dst' reachable from
3831    'src'.  If no path exist from 'src' to any element in 'dst', return
3832    NULL. */
3833 __attribute__((noinline))
3834 static
3835 Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3836 {
3837    Lock*     ret;
3838    Word      ssz;
3839    XArray*   stack;   /* of Lock* */
3840    WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
3841    Lock*     here;
3842    WordSetID succs;
3843    UWord     succs_size, i;
3844    UWord*    succs_words;
3845    //laog__sanity_check();
3846 
3847    /* If the destination set is empty, we can never get there from
3848       'src' :-), so don't bother to try */
3849    if (HG_(isEmptyWS)( univ_lsets, dsts ))
3850       return NULL;
3851 
3852    ret     = NULL;
3853    stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3854    visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
3855 
3856    (void) VG_(addToXA)( stack, &src );
3857 
3858    while (True) {
3859 
3860       ssz = VG_(sizeXA)( stack );
3861 
3862       if (ssz == 0) { ret = NULL; break; }
3863 
3864       here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3865       VG_(dropTailXA)( stack, 1 );
3866 
3867       if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }
3868 
3869       if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
3870          continue;
3871 
3872       VG_(addToFM)( visited, (UWord)here, 0 );
3873 
3874       succs = laog__succs( here );
3875       HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3876       for (i = 0; i < succs_size; i++)
3877          (void) VG_(addToXA)( stack, &succs_words[i] );
3878    }
3879 
3880    VG_(deleteFM)( visited, NULL, NULL );
3881    VG_(deleteXA)( stack );
3882    return ret;
3883 }
3884 
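/* Client-side sketch (illustrative, not part of the tool) of the
   inconsistency the check below catches: T1 establishes the laog
   edge L1 --> L2; T2 then tries to take L1 while holding L2, which
   requires the reverse order, and is reported. */
#if 0
#include <pthread.h>

static pthread_mutex_t L1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t L2 = PTHREAD_MUTEX_INITIALIZER;

static void* t1_fn ( void* v ) {   /* adds edge L1 --> L2 to laog */
   pthread_mutex_lock(&L1);
   pthread_mutex_lock(&L2);
   pthread_mutex_unlock(&L2);
   pthread_mutex_unlock(&L1);
   return NULL;
}

static void* t2_fn ( void* v ) {   /* requires the reverse order */
   pthread_mutex_lock(&L2);
   pthread_mutex_lock(&L1);        /* DFS finds the path L1 --*--> L2,
                                      so a LockOrder error is reported */
   pthread_mutex_unlock(&L1);
   pthread_mutex_unlock(&L2);
   return NULL;
}
#endif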
3885 
3886 /* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
3887    between 'lk' and the locks already held by 'thr' and issue a
3888    complaint if so.  Also, update the ordering graph appropriately.
3889 */
3890 __attribute__((noinline))
3891 static void laog__pre_thread_acquires_lock (
3892                Thread* thr, /* NB: BEFORE lock is added */
3893                Lock*   lk
3894             )
3895 {
3896    UWord*   ls_words;
3897    UWord    ls_size, i;
3898    Lock*    other;
3899 
3900    /* It may be that 'thr' already holds 'lk' and is recursively
3901       relocking it.  In this case we just ignore the call. */
3902    /* NB: univ_lsets really is correct here */
3903    if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
3904       return;
3905 
3906    /* First, the check.  Complain if there is any path in laog from lk
3907       to any of the locks already held by thr, since if any such path
3908       existed, it would mean that previously lk was acquired before
3909       (rather than after, as we are doing here) at least one of those
3910       locks.
3911    */
3912    other = laog__do_dfs_from_to(lk, thr->locksetA);
3913    if (other) {
3914       LAOGLinkExposition key, *found;
3915       /* So we managed to find a path lk --*--> other in the graph,
3916          which implies that 'lk' should have been acquired before
3917          'other' but is in fact being acquired afterwards.  We present
3918          the lk/other arguments to record_error_LockOrder in the order
3919          in which they should have been acquired. */
3920       /* Go look in the laog_exposition mapping, to find the acquisition
3921          points for this edge, so we can show the user. */
3922       key.src_ga = lk->guestaddr;
3923       key.dst_ga = other->guestaddr;
3924       key.src_ec = NULL;
3925       key.dst_ec = NULL;
3926       found = NULL;
3927       if (VG_(lookupFM)( laog_exposition,
3928                          (UWord*)&found, NULL, (UWord)&key )) {
3929          tl_assert(found != &key);
3930          tl_assert(found->src_ga == key.src_ga);
3931          tl_assert(found->dst_ga == key.dst_ga);
3932          tl_assert(found->src_ec);
3933          tl_assert(found->dst_ec);
3934          HG_(record_error_LockOrder)(
3935             thr, lk, other,
3936                  found->src_ec, found->dst_ec, other->acquired_at );
3937       } else {
3938          /* Hmm.  This can't happen (can it?) */
3939          /* Yes, it can happen: see tests/tc14_laog_dinphils.
3940             Imagine we have 3 philosophers A B C, and the forks
3941             between them:
3942 
3943                            C
3944 
3945                        fCA   fBC
3946 
3947                       A   fAB   B
3948 
3949             Let's have the following actions:
3950                    A takes    fCA,fAB
3951                    A releases fCA,fAB
3952                    B takes    fAB,fBC
3953                    B releases fAB,fBC
3954                    C takes    fBC,fCA
3955                    C releases fBC,fCA
3956 
3957             Helgrind will report a lock order error when C takes fCA.
3958             Effectively, we have a deadlock if the following
3959             sequence is done:
3960                 A takes fCA
3961                 B takes fAB
3962                 C takes fBC
3963 
3964             The error reported is:
3965               Observed (incorrect) order fBC followed by fCA
3966             but the stack traces that have established the required order
3967             are not given.
3968 
3969             This is because there is no pair (fCA, fBC) in laog_exposition:
3970             the laog_exposition records all pairs of locks between a new lock
3971             taken by a thread and all the already taken locks.
3972             So, there is no laog_exposition (fCA, fBC) as no thread ever
3973             first locked fCA followed by fBC.
3974 
3975             In other words, when the deadlock cycle involves more than
3976             two locks, then helgrind does not report the sequence of
3977             operations that created the cycle.
3978 
3979             However, we can report the current stack trace (where
3980             lk is being taken), and the stack trace where other was acquired:
3981             Effectively, the variable 'other' contains a lock currently
3982             held by this thread, with its 'acquired_at'. */
3983 
3984          HG_(record_error_LockOrder)(
3985             thr, lk, other,
3986                  NULL, NULL, other->acquired_at );
3987       }
3988    }
3989 
3990    /* Second, add to laog the pairs
3991         (old, lk)  |  old <- locks already held by thr
3992       Since both old and lk are currently held by thr, their acquired_at
3993       fields must be non-NULL.
3994    */
3995    tl_assert(lk->acquired_at);
3996    HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3997    for (i = 0; i < ls_size; i++) {
3998       Lock* old = (Lock*)ls_words[i];
3999       tl_assert(old->acquired_at);
4000       laog__add_edge( old, lk );
4001    }
4002 
4003    /* Why "except_Locks" ?  We're here because a lock is being
4004       acquired by a thread, and we're in an inconsistent state here.
4005       See the call points in evhH__post_thread_{r,w}_acquires_lock.
4006       When called in this inconsistent state, locks__sanity_check duly
4007       barfs. */
4008    if (HG_(clo_sanity_flags) & SCE_LAOG)
4009       all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
4010 }
4011 
4012 /* Allocates a duplicate of words. Caller must HG_(free) the result. */
4013 static UWord* UWordV_dup(UWord* words, Word words_size)
4014 {
4015    UInt i;
4016 
4017    if (words_size == 0)
4018       return NULL;
4019 
4020    UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
4021 
4022    for (i = 0; i < words_size; i++)
4023       dup[i] = words[i];
4024 
4025    return dup;
4026 }
4027 
4028 /* Delete from 'laog' any pair mentioning a lock in locksToDelete */
4029 
4030 __attribute__((noinline))
4031 static void laog__handle_one_lock_deletion ( Lock* lk )
4032 {
4033    WordSetID preds, succs;
4034    UWord preds_size, succs_size, i, j;
4035    UWord *preds_words, *succs_words;
4036 
4037    preds = laog__preds( lk );
4038    succs = laog__succs( lk );
4039 
4040    // We need to duplicate the payload, as these can be garbage collected
4041    // during the del/add operations below.
4042    HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
4043    preds_words = UWordV_dup(preds_words, preds_size);
4044 
4045    HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
4046    succs_words = UWordV_dup(succs_words, succs_size);
4047 
4048    for (i = 0; i < preds_size; i++)
4049       laog__del_edge( (Lock*)preds_words[i], lk );
4050 
4051    for (j = 0; j < succs_size; j++)
4052       laog__del_edge( lk, (Lock*)succs_words[j] );
4053 
4054    for (i = 0; i < preds_size; i++) {
4055       for (j = 0; j < succs_size; j++) {
4056          if (preds_words[i] != succs_words[j]) {
4057             /* This can pass unlocked locks to laog__add_edge, since
4058                we're deleting stuff.  So their acquired_at fields may
4059                be NULL. */
4060             laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
4061          }
4062       }
4063    }
4064 
4065    if (preds_words)
4066       HG_(free) (preds_words);
4067    if (succs_words)
4068       HG_(free) (succs_words);
4069 
4070    // Remove lk information from laog links FM
4071    {
4072       LAOGLinks *links;
4073       Lock* linked_lk;
4074 
4075       if (VG_(delFromFM) (laog,
4076                           (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
4077          tl_assert (linked_lk == lk);
4078          HG_(free) (links);
4079       }
4080    }
4081    /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
4082 }
4083 
4084 //__attribute__((noinline))
4085 //static void laog__handle_lock_deletions (
4086 //               WordSetID /* in univ_laog */ locksToDelete
4087 //            )
4088 //{
4089 //   Word   i, ws_size;
4090 //   UWord* ws_words;
4091 //
4092 //
4093 //   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
4094 //   UWordV_dup call needed here ...
4095 //   for (i = 0; i < ws_size; i++)
4096 //      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
4097 //
4098 //   if (HG_(clo_sanity_flags) & SCE_LAOG)
4099 //      all__sanity_check("laog__handle_lock_deletions-post");
4100 //}
4101 
4102 
4103 /*--------------------------------------------------------------*/
4104 /*--- Malloc/free replacements                               ---*/
4105 /*--------------------------------------------------------------*/
4106 
4107 typedef
4108    struct {
4109       void*       next;    /* required by m_hashtable */
4110       Addr        payload; /* ptr to actual block    */
4111       SizeT       szB;     /* size requested         */
4112       ExeContext* where;   /* where it was allocated */
4113       Thread*     thr;     /* allocating thread      */
4114    }
4115    MallocMeta;
4116 
4117 /* A hash table of MallocMetas, used to track malloc'd blocks
4118    (obviously). */
4119 static VgHashTable *hg_mallocmeta_table = NULL;
4120 
4121 /* MallocMeta are small elements. We use a pool to avoid
4122    the overhead of malloc for each MallocMeta. */
4123 static PoolAlloc *MallocMeta_poolalloc = NULL;
4124 
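/* For orientation only: the pool is created during tool start-up
   elsewhere in this file, along the lines of the following sketch
   (the element count and cost-centre string here are illustrative
   assumptions). */
#if 0
MallocMeta_poolalloc = VG_(newPA)( sizeof(MallocMeta),
                                   1000/*elems per pool*/,
                                   HG_(zalloc),
                                   "hg.mallocmeta.pool",
                                   HG_(free) );
#endif
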
4125 static MallocMeta* new_MallocMeta ( void ) {
4126    MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
4127    VG_(memset)(md, 0, sizeof(MallocMeta));
4128    return md;
4129 }
4130 static void delete_MallocMeta ( MallocMeta* md ) {
4131    VG_(freeEltPA)(MallocMeta_poolalloc, md);
4132 }
4133 
4134 
4135 /* Allocate a client block and set up the metadata for it. */
4136 
4137 static
4138 void* handle_alloc ( ThreadId tid,
4139                      SizeT szB, SizeT alignB, Bool is_zeroed )
4140 {
4141    Addr        p;
4142    MallocMeta* md;
4143 
4144    tl_assert( ((SSizeT)szB) >= 0 );
4145    p = (Addr)VG_(cli_malloc)(alignB, szB);
4146    if (!p) {
4147       return NULL;
4148    }
4149    if (is_zeroed)
4150       VG_(memset)((void*)p, 0, szB);
4151 
4152    /* Note that map_threads_lookup must succeed (cannot assert), since
4153       memory can only be allocated by currently alive threads, hence
4154       they must have an entry in map_threads. */
4155    md = new_MallocMeta();
4156    md->payload = p;
4157    md->szB     = szB;
4158    md->where   = VG_(record_ExeContext)( tid, 0 );
4159    md->thr     = map_threads_lookup( tid );
4160 
4161    VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
4162    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
4163       VG_(XTMemory_Full_alloc)(md->szB, md->where);
4164 
4165    /* Tell the lower level memory wranglers. */
4166    evh__new_mem_heap( p, szB, is_zeroed );
4167 
4168    return (void*)p;
4169 }
4170 
4171 /* Re the checks for less-than-zero (also in hg_cli__realloc below):
4172    Cast to a signed type to catch any unexpectedly negative args.
4173    We're assuming here that the size asked for is not greater than
4174    2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
4175    platforms). */
4176 static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
4177    if (((SSizeT)n) < 0) return NULL;
4178    return handle_alloc ( tid, n, VG_(clo_alignment),
4179                          /*is_zeroed*/False );
4180 }
4181 static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
4182    if (((SSizeT)n) < 0) return NULL;
4183    return handle_alloc ( tid, n, VG_(clo_alignment),
4184                          /*is_zeroed*/False );
4185 }
4186 static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
4187    if (((SSizeT)n) < 0) return NULL;
4188    return handle_alloc ( tid, n, VG_(clo_alignment),
4189                          /*is_zeroed*/False );
4190 }
4191 static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
4192    if (((SSizeT)n) < 0) return NULL;
4193    return handle_alloc ( tid, n, align,
4194                          /*is_zeroed*/False );
4195 }
4196 static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4197    if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
4198    return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4199                          /*is_zeroed*/True );
4200 }
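
/* These replacements are presumably wired up to the core in the tool's
   pre_clo_init, along the lines of the following (argument list
   abbreviated -- see pub_tool_replacemalloc.h for the full signature,
   which takes all of the handlers defined here plus a client redzone
   size):

      VG_(needs_malloc_replacement)( hg_cli__malloc,
                                     hg_cli____builtin_new,
                                     ...,
                                     hg_cli__realloc,
                                     hg_cli_malloc_usable_size,
                                     HG_CLI__MALLOC_REDZONE_SZB );

   so that client calls to malloc/new/calloc etc land here. */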
4201 
4202 
4203 /* Free a client block, including getting rid of the relevant
4204    metadata. */
4205 
4206 static void handle_free ( ThreadId tid, void* p )
4207 {
4208    MallocMeta *md, *old_md;
4209    SizeT      szB;
4210 
4211    /* First see if we can find the metadata for 'p'. */
4212    md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4213    if (!md)
4214       return; /* apparently freeing a bogus address.  Oh well. */
4215 
4216    tl_assert(md->payload == (Addr)p);
4217    szB = md->szB;
4218    if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full)) {
4219       ExeContext* ec_free = VG_(record_ExeContext)( tid, 0 );
4220       VG_(XTMemory_Full_free)(md->szB, md->where, ec_free);
4221    }
4222 
4223    /* Nuke the metadata block */
4224    old_md = (MallocMeta*)
4225             VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
4226    tl_assert(old_md); /* it must be present - we just found it */
4227    tl_assert(old_md == md);
4228    tl_assert(old_md->payload == (Addr)p);
4229 
4230    VG_(cli_free)((void*)old_md->payload);
4231    delete_MallocMeta(old_md);
4232 
4233    /* Tell the lower level memory wranglers. */
4234    evh__die_mem_heap( (Addr)p, szB );
4235 }
4236 
4237 static void hg_cli__free ( ThreadId tid, void* p ) {
4238    handle_free(tid, p);
4239 }
4240 static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
4241    handle_free(tid, p);
4242 }
4243 static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
4244    handle_free(tid, p);
4245 }
4246 
4247 
4248 static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4249 {
4250    MallocMeta *md, *md_new, *md_tmp;
4251    SizeT      i;
4252 
4253    Addr payload = (Addr)payloadV;
4254 
4255    if (((SSizeT)new_size) < 0) return NULL;
4256 
4257    md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4258    if (!md)
4259       return NULL; /* apparently realloc-ing a bogus address.  Oh well. */
4260 
4261    tl_assert(md->payload == payload);
4262 
4263    if (md->szB == new_size) {
4264       /* size unchanged */
4265       md->where = VG_(record_ExeContext)(tid, 0);
4266       return payloadV;
4267    }
4268 
4269    if (md->szB > new_size) {
4270       /* new size is smaller */
4271       md->szB   = new_size;
4272       md->where = VG_(record_ExeContext)(tid, 0);
4273       evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4274       return payloadV;
4275    }
4276 
4277    /* else */ {
4278       /* new size is bigger */
4279       Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
      if (!p_new)
         return NULL; /* allocation failed; leave the old block untouched */
4280 
4281       /* First half kept and copied, second half new */
4282       // FIXME: shouldn't we use a copier which implements the
4283       // memory state machine?
4284       evh__copy_mem( payload, p_new, md->szB );
4285       evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
4286                           /*inited*/False );
4287       /* FIXME: can anything funny happen here?  specifically, if the
4288          old range contained a lock, then die_mem_heap will complain.
4289          Is that the correct behaviour?  Not sure. */
4290       evh__die_mem_heap( payload, md->szB );
4291 
4292       /* Copy from old to new */
4293       for (i = 0; i < md->szB; i++)
4294          ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4295 
4296       /* Because the metadata hash table is indexed by payload address,
4297          we have to get rid of the old hash table entry and make a new
4298          one.  We can't just modify the existing metadata in place,
4299          because then it would (almost certainly) be in the wrong hash
4300          chain. */
4301       md_new = new_MallocMeta();
4302       *md_new = *md;
4303 
4304       md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4305       tl_assert(md_tmp);
4306       tl_assert(md_tmp == md);
4307 
4308       VG_(cli_free)((void*)md->payload);
4309       delete_MallocMeta(md);
4310 
4311       /* Update fields */
4312       md_new->where   = VG_(record_ExeContext)( tid, 0 );
4313       md_new->szB     = new_size;
4314       md_new->payload = p_new;
4315       md_new->thr     = map_threads_lookup( tid );
4316 
4317       /* and add */
4318       VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4319 
4320       return (void*)p_new;
4321    }
4322 }
4323 
4324 static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4325 {
4326    MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4327 
4328    // There may be slop, but pretend there isn't because only the asked-for
4329    // area will have been shadowed properly.
4330    return ( md ? md->szB : 0 );
4331 }
4332 
4333 
4334 /* For error creation: map 'data_addr' to a malloc'd chunk, if any.
4335    Slow linear search.  With a bit of hash table help if 'data_addr'
4336    is either the start of a block or up to 15 word-sized steps along
4337    from the start of a block. */
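
/* Worked example with illustrative numbers (64-bit, sizeof(UWord)==8):
   for a block with payload 0x5000 and szB 64, the table is keyed by
   0x5000.  A query for data_addr 0x5010 misses the fast probes at
   0x5010 (i==0) and 0x5008 (i==1), hits at 0x5000 (i==2), and
   addr_is_in_MM_Chunk confirms 0x5000 <= 0x5010 < 0x5040.  Addresses
   more than 15 words into a block fall through to the linear scan. */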
4338 
4339 static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4340 {
4341    /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4342       right at it. */
4343    if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4344       return True;
4345    /* else normal interval rules apply */
4346    if (LIKELY(a < mm->payload)) return False;
4347    if (LIKELY(a >= mm->payload + mm->szB)) return False;
4348    return True;
4349 }
4350 
4351 Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
4352                                     /*OUT*/UInt*        tnr,
4353                                     /*OUT*/Addr*        payload,
4354                                     /*OUT*/SizeT*       szB,
4355                                     Addr                data_addr )
4356 {
4357    MallocMeta* mm;
4358    Int i;
4359    const Int n_fast_check_words = 16;
4360 
4361    /* First, do a few fast searches on the basis that data_addr might
4362       be exactly the start of a block or up to 15 words inside.  This
4363       can happen commonly via the creq
4364       _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4365    for (i = 0; i < n_fast_check_words; i++) {
4366       mm = VG_(HT_lookup)( hg_mallocmeta_table,
4367                            data_addr - (UWord)(UInt)i * sizeof(UWord) );
4368       if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4369          goto found;
4370    }
4371 
4372    /* Well, this totally sucks.  But without using an interval tree or
4373       some such, it's hard to see how to do better.  We have to check
4374       every block in the entire table. */
4375    VG_(HT_ResetIter)(hg_mallocmeta_table);
4376    while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
4377       if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4378          goto found;
4379    }
4380 
4381    /* Not found.  Bah. */
4382    return False;
4383    /*NOTREACHED*/
4384 
4385   found:
4386    tl_assert(mm);
4387    tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4388    if (where)   *where   = mm->where;
4389    if (tnr)     *tnr     = mm->thr->errmsg_index;
4390    if (payload) *payload = mm->payload;
4391    if (szB)     *szB     = mm->szB;
4392    return True;
4393 }
4394 
4395 
4396 /*--------------------------------------------------------------*/
4397 /*--- Instrumentation                                        ---*/
4398 /*--------------------------------------------------------------*/
4399 
4400 #define unop(_op, _arg1)         IRExpr_Unop((_op),(_arg1))
4401 #define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4402 #define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
4403 #define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
4404 #define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
4405 #define assign(_t, _e)           IRStmt_WrTmp((_t), (_e))
4406 
4407 /* This takes and returns atoms, of course.  Not full IRExprs. */
4408 static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4409 {
4410    tl_assert(arg1 && arg2);
4411    tl_assert(isIRAtom(arg1));
4412    tl_assert(isIRAtom(arg2));
4413    /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))).  Appalling
4414       code, I know. */
4415    IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4416    IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4417    IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4418    IRTemp res   = newIRTemp(sbOut->tyenv, Ity_I1);
4419    addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4420    addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4421    addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4422                                                        mkexpr(wide2))));
4423    addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4424    return mkexpr(res);
4425 }
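
/* For reference, the flat IR emitted by mk_And1(sb, a1, a2) is:

      wide1 = 1Uto32(a1)
      wide2 = 1Uto32(a2)
      anded = And32(wide1, wide2)
      res   = 32to1(anded)

   and the atom returned to the caller is RdTmp(res). */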
4426 
4427 static void instrument_mem_access ( IRSB*   sbOut,
4428                                     IRExpr* addr,
4429                                     Int     szB,
4430                                     Bool    isStore,
4431                                     Int     hWordTy_szB,
4432                                     Int     goff_sp,
4433                                     IRExpr* guard ) /* NULL => True */
4434 {
4435    IRType   tyAddr   = Ity_INVALID;
4436    const HChar* hName    = NULL;
4437    void*    hAddr    = NULL;
4438    Int      regparms = 0;
4439    IRExpr** argv     = NULL;
4440    IRDirty* di       = NULL;
4441 
4442    // THRESH is the size of the window above SP (well,
4443    // mostly above) that we assume implies a stack reference.
4444    const Int THRESH = 4096 * 4; // somewhat arbitrary
4445    const Int rz_szB = VG_STACK_REDZONE_SZB;
4446 
4447    tl_assert(isIRAtom(addr));
4448    tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4449 
4450    tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
4451    tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4452 
4453    /* So the effective address is in 'addr' now. */
4454    regparms = 1; // unless stated otherwise
4455    if (isStore) {
4456       switch (szB) {
4457          case 1:
4458             hName = "evh__mem_help_cwrite_1";
4459             hAddr = &evh__mem_help_cwrite_1;
4460             argv = mkIRExprVec_1( addr );
4461             break;
4462          case 2:
4463             hName = "evh__mem_help_cwrite_2";
4464             hAddr = &evh__mem_help_cwrite_2;
4465             argv = mkIRExprVec_1( addr );
4466             break;
4467          case 4:
4468             hName = "evh__mem_help_cwrite_4";
4469             hAddr = &evh__mem_help_cwrite_4;
4470             argv = mkIRExprVec_1( addr );
4471             break;
4472          case 8:
4473             hName = "evh__mem_help_cwrite_8";
4474             hAddr = &evh__mem_help_cwrite_8;
4475             argv = mkIRExprVec_1( addr );
4476             break;
4477          default:
4478             tl_assert(szB > 8 && szB <= 512); /* stay sane */
4479             regparms = 2;
4480             hName = "evh__mem_help_cwrite_N";
4481             hAddr = &evh__mem_help_cwrite_N;
4482             argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4483             break;
4484       }
4485    } else {
4486       switch (szB) {
4487          case 1:
4488             hName = "evh__mem_help_cread_1";
4489             hAddr = &evh__mem_help_cread_1;
4490             argv = mkIRExprVec_1( addr );
4491             break;
4492          case 2:
4493             hName = "evh__mem_help_cread_2";
4494             hAddr = &evh__mem_help_cread_2;
4495             argv = mkIRExprVec_1( addr );
4496             break;
4497          case 4:
4498             hName = "evh__mem_help_cread_4";
4499             hAddr = &evh__mem_help_cread_4;
4500             argv = mkIRExprVec_1( addr );
4501             break;
4502          case 8:
4503             hName = "evh__mem_help_cread_8";
4504             hAddr = &evh__mem_help_cread_8;
4505             argv = mkIRExprVec_1( addr );
4506             break;
4507          default:
4508             tl_assert(szB > 8 && szB <= 512); /* stay sane */
4509             regparms = 2;
4510             hName = "evh__mem_help_cread_N";
4511             hAddr = &evh__mem_help_cread_N;
4512             argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4513             break;
4514       }
4515    }
4516 
4517    /* Create the helper. */
4518    tl_assert(hName);
4519    tl_assert(hAddr);
4520    tl_assert(argv);
4521    di = unsafeIRDirty_0_N( regparms,
4522                            hName, VG_(fnptr_to_fnentry)( hAddr ),
4523                            argv );
4524 
4525    if (! HG_(clo_check_stack_refs)) {
4526       /* We're ignoring memory references which are (obviously) to the
4527          stack.  In fact just skip stack refs that are within 4 pages
4528          of SP (SP - the redzone, really), as that's simple, easy, and
4529          filters out most stack references. */
4530       /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4531          some arbitrary N.  If that is true then addr is outside the
4532          range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
4533          pages) then we can say addr is within a few pages of SP and
4534          so can't possibly be a heap access, and so can be skipped.
4535 
4536          Note that the condition simplifies to
4537             (addr - SP + RZ) >u N
4538          which generates better code in x86/amd64 backends, but it does
4539          not unfortunately simplify to
4540             (addr - SP) >u (N - RZ)
4541          (would be beneficial because N - RZ is a constant) because
4542          wraparound arithmetic messes up the comparison.  eg.
4543          20 >u 10 == True,
4544          but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
4545       */
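      /* Worked example, taking RZ == 128 as an illustrative redzone
         size and N == THRESH == 16384: an access at SP+64 (clearly
         stack) gives diff == 64 + 128 == 192; 192 >u 16384 is False,
         so the guard is False and the helper call is skipped.  An
         access at SP + 0x100000 (plausibly heap) gives diff ==
         0x100080, the comparison is True, and the helper runs.  An
         access just below SP - RZ wraps to a huge unsigned value and
         so is (safely) treated as non-stack. */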
4546       IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4547       addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4548 
4549       /* "addr - SP" */
4550       IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4551       addStmtToIRSB(
4552          sbOut,
4553          assign(addr_minus_sp,
4554                 tyAddr == Ity_I32
4555                    ? binop(Iop_Sub32, addr, mkexpr(sp))
4556                    : binop(Iop_Sub64, addr, mkexpr(sp)))
4557       );
4558 
4559       /* "addr - SP + RZ" */
4560       IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4561       addStmtToIRSB(
4562          sbOut,
4563          assign(diff,
4564                 tyAddr == Ity_I32
4565                    ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4566                    : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4567       );
4568 
4569       /* guardA == "guard on the address" */
4570       IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
4571       addStmtToIRSB(
4572          sbOut,
4573          assign(guardA,
4574                 tyAddr == Ity_I32
4575                    ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4576                    : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4577       );
4578       di->guard = mkexpr(guardA);
4579    }
4580 
4581    /* If there's a guard on the access itself (as supplied by the
4582       caller of this routine), we need to AND that in to any guard we
4583       might already have. */
4584    if (guard) {
4585       di->guard = mk_And1(sbOut, di->guard, guard);
4586    }
4587 
4588    /* Add the helper. */
4589    addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
4590 }
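
/* Net effect, sketched on hypothetical IR: a 4-byte store

      STle(t7) = t8

   acquires a preceding conditional dirty call

      DIRTY <guard> ::: evh__mem_help_cwrite_4(t7)

   where <guard> is the stack-filter condition built above, ANDed with
   the access's own guard in the StoreG/LoadG cases. */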
4591 
4592 
4593 /* Figure out if GA is a guest code address in the dynamic linker, and
4594    if so return True.  Otherwise (and in case of any doubt) return
4595    False.  (One-sided safety: False is the safe answer.) */
4596 static Bool is_in_dynamic_linker_shared_object( Addr ga )
4597 {
4598    DebugInfo* dinfo;
4599    const HChar* soname;
4600 
4601    dinfo = VG_(find_DebugInfo)( ga );
4602    if (!dinfo) return False;
4603 
4604    soname = VG_(DebugInfo_get_soname)(dinfo);
4605    tl_assert(soname);
4606    if (0) VG_(printf)("%s\n", soname);
4607 
4608    return VG_(is_soname_ld_so)(soname);
4609 }
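
/* For instance, on x86_64 glibc Linux this returns True for guest code
   in the object with soname "ld-linux-x86-64.so.2"; the set of
   recognised dynamic-linker sonames lives behind
   VG_(is_soname_ld_so). */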
4610 
4611 static
4612 IRSB* hg_instrument ( VgCallbackClosure* closure,
4613                       IRSB* bbIn,
4614                       const VexGuestLayout* layout,
4615                       const VexGuestExtents* vge,
4616                       const VexArchInfo* archinfo_host,
4617                       IRType gWordTy, IRType hWordTy )
4618 {
4619    Int     i;
4620    IRSB*   bbOut;
4621    Addr    cia; /* address of current insn */
4622    IRStmt* st;
4623    Bool    inLDSO = False;
4624    Addr    inLDSOmask4K = 1; /* mismatches on first check */
4625 
4626    const Int goff_sp = layout->offset_SP;
4627 
4628    if (gWordTy != hWordTy) {
4629       /* We don't currently support this case. */
4630       VG_(tool_panic)("host/guest word size mismatch");
4631    }
4632 
4633    if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4634       VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4635    }
4636 
4637    /* Set up BB */
4638    bbOut           = emptyIRSB();
4639    bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
4640    bbOut->next     = deepCopyIRExpr(bbIn->next);
4641    bbOut->jumpkind = bbIn->jumpkind;
4642    bbOut->offsIP   = bbIn->offsIP;
4643 
4644    // Copy verbatim any IR preamble preceding the first IMark
4645    i = 0;
4646    while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4647       addStmtToIRSB( bbOut, bbIn->stmts[i] );
4648       i++;
4649    }
4650 
4651    // Get the first statement, and initial cia from it
4652    tl_assert(bbIn->stmts_used > 0);
4653    tl_assert(i < bbIn->stmts_used);
4654    st = bbIn->stmts[i];
4655    tl_assert(Ist_IMark == st->tag);
4656    cia = st->Ist.IMark.addr;
4657    st = NULL;
4658 
4659    for (/*use current i*/; i < bbIn->stmts_used; i++) {
4660       st = bbIn->stmts[i];
4661       tl_assert(st);
4662       tl_assert(isFlatIRStmt(st));
4663       switch (st->tag) {
4664          case Ist_NoOp:
4665          case Ist_AbiHint:
4666          case Ist_Put:
4667          case Ist_PutI:
4668          case Ist_Exit:
4669             /* None of these can contain any memory references. */
4670             break;
4671 
4672          case Ist_IMark:
4673             /* no mem refs, but note the insn address. */
4674             cia = st->Ist.IMark.addr;
4675             /* Don't instrument the dynamic linker.  It generates a
4676                lot of races which we just expensively suppress, so
4677                it's pointless.
4678 
4679                Avoid flooding is_in_dynamic_linker_shared_object with
4680                requests by only checking at transitions between 4K
4681                pages. */
4682             if ((cia & ~(Addr)0xFFF) != inLDSOmask4K) {
4683                if (0) VG_(printf)("NEW %#lx\n", cia);
4684                inLDSOmask4K = cia & ~(Addr)0xFFF;
4685                inLDSO = is_in_dynamic_linker_shared_object(cia);
4686             } else {
4687                if (0) VG_(printf)("old %#lx\n", cia);
4688             }
4689             break;
4690 
4691          case Ist_MBE:
4692             switch (st->Ist.MBE.event) {
4693                case Imbe_Fence:
4694                case Imbe_CancelReservation:
4695                   break; /* not interesting */
4696                default:
4697                   goto unhandled;
4698             }
4699             break;
4700 
4701          case Ist_CAS: {
4702             /* Atomic read-modify-write cycle.  Just pretend it's a
4703                read. */
4704             IRCAS* cas    = st->Ist.CAS.details;
4705             Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
4706             if (isDCAS) {
4707                tl_assert(cas->expdHi);
4708                tl_assert(cas->dataHi);
4709             } else {
4710                tl_assert(!cas->expdHi);
4711                tl_assert(!cas->dataHi);
4712             }
4713             /* Just be boring about it. */
4714             if (!inLDSO) {
4715                instrument_mem_access(
4716                   bbOut,
4717                   cas->addr,
4718                   (isDCAS ? 2 : 1)
4719                      * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4720                   False/*!isStore*/,
4721                   sizeofIRType(hWordTy), goff_sp,
4722                   NULL/*no-guard*/
4723                );
4724             }
4725             break;
4726          }
4727 
4728          case Ist_LLSC: {
4729             /* We pretend store-conditionals don't exist, viz, ignore
4730                them.  Whereas load-linked's are treated the same as
4731                normal loads. */
4732             IRType dataTy;
4733             if (st->Ist.LLSC.storedata == NULL) {
4734                /* LL */
4735                dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
4736                if (!inLDSO) {
4737                   instrument_mem_access(
4738                      bbOut,
4739                      st->Ist.LLSC.addr,
4740                      sizeofIRType(dataTy),
4741                      False/*!isStore*/,
4742                      sizeofIRType(hWordTy), goff_sp,
4743                      NULL/*no-guard*/
4744                   );
4745                }
4746             } else {
4747                /* SC */
4748                /*ignore */
4749             }
4750             break;
4751          }
4752 
4753          case Ist_Store:
4754             if (!inLDSO) {
4755                instrument_mem_access(
4756                   bbOut,
4757                   st->Ist.Store.addr,
4758                   sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4759                   True/*isStore*/,
4760                   sizeofIRType(hWordTy), goff_sp,
4761                   NULL/*no-guard*/
4762                );
4763             }
4764             break;
4765 
4766          case Ist_StoreG: {
4767             IRStoreG* sg   = st->Ist.StoreG.details;
4768             IRExpr*   data = sg->data;
4769             IRExpr*   addr = sg->addr;
4770             IRType    type = typeOfIRExpr(bbIn->tyenv, data);
4771             tl_assert(type != Ity_INVALID);
4772             instrument_mem_access( bbOut, addr, sizeofIRType(type),
4773                                    True/*isStore*/,
4774                                    sizeofIRType(hWordTy),
4775                                    goff_sp, sg->guard );
4776             break;
4777          }
4778 
4779          case Ist_LoadG: {
4780             IRLoadG* lg       = st->Ist.LoadG.details;
4781             IRType   type     = Ity_INVALID; /* loaded type */
4782             IRType   typeWide = Ity_INVALID; /* after implicit widening */
4783             IRExpr*  addr     = lg->addr;
4784             typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4785             tl_assert(type != Ity_INVALID);
4786             instrument_mem_access( bbOut, addr, sizeofIRType(type),
4787                                    False/*!isStore*/,
4788                                    sizeofIRType(hWordTy),
4789                                    goff_sp, lg->guard );
4790             break;
4791          }
4792 
4793          case Ist_WrTmp: {
4794             IRExpr* data = st->Ist.WrTmp.data;
4795             if (data->tag == Iex_Load) {
4796                if (!inLDSO) {
4797                   instrument_mem_access(
4798                      bbOut,
4799                      data->Iex.Load.addr,
4800                      sizeofIRType(data->Iex.Load.ty),
4801                      False/*!isStore*/,
4802                      sizeofIRType(hWordTy), goff_sp,
4803                      NULL/*no-guard*/
4804                   );
4805                }
4806             }
4807             break;
4808          }
4809 
4810          case Ist_Dirty: {
4811             Int      dataSize;
4812             IRDirty* d = st->Ist.Dirty.details;
4813             if (d->mFx != Ifx_None) {
4814                /* This dirty helper accesses memory.  Collect the
4815                   details. */
4816                tl_assert(d->mAddr != NULL);
4817                tl_assert(d->mSize != 0);
4818                dataSize = d->mSize;
4819                if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
4820                   if (!inLDSO) {
4821                      instrument_mem_access(
4822                         bbOut, d->mAddr, dataSize, False/*!isStore*/,
4823                         sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
4824                      );
4825                   }
4826                }
4827                if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
4828                   if (!inLDSO) {
4829                      instrument_mem_access(
4830                         bbOut, d->mAddr, dataSize, True/*isStore*/,
4831                         sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
4832                      );
4833                   }
4834                }
4835             } else {
4836                tl_assert(d->mAddr == NULL);
4837                tl_assert(d->mSize == 0);
4838             }
4839             break;
4840          }
4841 
4842          default:
4843          unhandled:
4844             ppIRStmt(st);
4845             tl_assert(0);
4846 
4847       } /* switch (st->tag) */
4848 
4849       addStmtToIRSB( bbOut, st );
4850    } /* iterate over bbIn->stmts */
4851 
4852    return bbOut;
4853 }
4854 
4855 #undef binop
4856 #undef mkexpr
4857 #undef mkU32
4858 #undef mkU64
4859 #undef assign
4860 
4861 
4862 /*----------------------------------------------------------------*/
4863 /*--- Client requests                                          ---*/
4864 /*----------------------------------------------------------------*/
4865 
4866 /* Sheesh.  Yet another goddam finite map. */
4867 static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4868 
4869 static void map_pthread_t_to_Thread_INIT ( void ) {
4870    if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
4871       map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4872                                             HG_(free), NULL );
4873    }
4874 }
4875 
4876 /* A list of Ada dependent tasks and their masters.  Used to implement
4877    the Ada task termination semantics as implemented by the
4878    gcc GNAT Ada runtime. */
4879 typedef
4880    struct {
4881       void* dependent; // Ada Task Control Block of the Dependent
4882       void* master;    // ATCB of the master
4883       Word  master_level; // level of dependency between master and dependent
4884       Thread* hg_dependent; // helgrind Thread* for dependent task.
4885    }
4886    GNAT_dmml; // (d)ependent (m)aster (m)aster_(l)evel.
4887 static XArray* gnat_dmmls;   /* of GNAT_dmml */
4888 static void gnat_dmmls_INIT (void)
4889 {
4890    if (UNLIKELY(gnat_dmmls == NULL)) {
4891       gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
4892                                HG_(free),
4893                                sizeof(GNAT_dmml) );
4894    }
4895 }
4896 
4897 static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
4898 {
4899    const MallocMeta* md = VG_(HT_Next)(hg_mallocmeta_table);
4900    if (md) {
4901       xta->nbytes = md->szB;
4902       xta->nblocks = 1;
4903       *ec_alloc = md->where;
4904    } else
4905       xta->nblocks = 0;
4906 }
4907 static void HG_(xtmemory_report) ( const HChar* filename, Bool fini )
4908 {
4909    // Make xtmemory_report_next_block ready to be called.
4910    VG_(HT_ResetIter)(hg_mallocmeta_table);
4911    VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
4912                         VG_(XT_filter_1top_and_maybe_below_main));
4913 }
4914 
4915 static void print_monitor_help ( void )
4916 {
4917    VG_(gdb_printf)
4918       (
4919 "\n"
4920 "helgrind monitor commands:\n"
4921 "  info locks [lock_addr]  : show status of lock at addr lock_addr\n"
4922 "           with no lock_addr, show status of all locks\n"
4923 "  accesshistory <addr> [<len>]   : show access history recorded\n"
4924 "                     for <len> (or 1) bytes at <addr>\n"
4925 "  xtmemory [<filename>]\n"
4926 "        dump xtree memory profile in <filename> (default xtmemory.kcg)\n"
4927 "\n");
4928 }
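
/* Example session, driven from gdb via vgdb (the address and length
   are illustrative):

      (gdb) monitor info locks
      (gdb) monitor accesshistory 0x114b2f0 8
      (gdb) monitor xtmemory myprofile.kcg

   Note that accesshistory additionally requires Helgrind to have been
   started with --history-level=full, as enforced below. */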
4929 
4930 /* return True if request recognised, False otherwise */
4931 static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
4932 {
4933    HChar* wcmd;
4934    HChar s[VG_(strlen)(req) + 1]; /* copy for strtok_r; +1 for the NUL */
4935    HChar *ssaveptr;
4936    Int   kwdid;
4937 
4938    VG_(strcpy) (s, req);
4939 
4940    wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
4941    /* NB: if possible, avoid introducing a new command below which
4942       starts with the same first letter(s) as an already existing
4943       command. This ensures a shorter abbreviation for the user. */
4944    switch (VG_(keyword_id)
4945            ("help info accesshistory xtmemory",
4946             wcmd, kwd_report_duplicated_matches)) {
4947    case -2: /* multiple matches */
4948       return True;
4949    case -1: /* not found */
4950       return False;
4951    case  0: /* help */
4952       print_monitor_help();
4953       return True;
4954    case  1: /* info */
4955       wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
4956       switch (kwdid = VG_(keyword_id)
4957               ("locks",
4958                wcmd, kwd_report_all)) {
4959       case -2:
4960       case -1:
4961          break;
4962       case 0: // locks
4963          {
4964             const HChar* wa;
4965             Addr lk_addr = 0;
4966             Bool lk_shown = False;
4967             Bool all_locks = True;
4968             Int i;
4969             Lock* lk;
4970 
4971             wa = VG_(strtok_r) (NULL, " ", &ssaveptr);
4972             if (wa != NULL) {
4973                if (VG_(parse_Addr) (&wa, &lk_addr) )
4974                   all_locks = False;
4975                else {
4976                   VG_(gdb_printf) ("missing or malformed address\n");
4977                }
4978             }
4979             for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
4980                if (all_locks || lk_addr == lk->guestaddr) {
4981                   pp_Lock(0, lk,
4982                           True /* show_lock_addrdescr */,
4983                           False /* show_internal_data */);
4984                   lk_shown = True;
4985                }
4986             }
4987             if (i == 0)
4988                VG_(gdb_printf) ("no locks\n");
4989             if (!all_locks && !lk_shown)
4990                VG_(gdb_printf) ("lock with address %p not found\n",
4991                                 (void*)lk_addr);
4992          }
4993          break;
4994       default:
4995          tl_assert(0);
4996       }
4997       return True;
4998 
4999    case  2: /* accesshistory */
5000       {
5001          Addr address;
5002          SizeT szB = 1;
5003          if (HG_(clo_history_level) < 2) {
5004             VG_(gdb_printf)
5005                ("helgrind must be started with --history-level=full"
5006                 " to use accesshistory\n");
5007             return True;
5008          }
5009          if (VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr)) {
5010             if (szB >= 1)
5011                libhb_event_map_access_history (address, szB, HG_(print_access));
5012             else
5013                VG_(gdb_printf) ("len must be >=1\n");
5014          }
5015          return True;
5016       }
5017 
5018    case  3: { /* xtmemory */
5019       HChar* filename;
5020       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
5021       HG_(xtmemory_report)(filename, False);
5022       return True;
5023    }
5024 
5025    default:
5026       tl_assert(0);
5027       return False;
5028    }
5029 }
5030 
5031 static
5032 Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
5033 {
5034    if (!VG_IS_TOOL_USERREQ('H','G',args[0])
5035        && VG_USERREQ__GDB_MONITOR_COMMAND   != args[0])
5036       return False;
5037 
5038    /* Anything that gets past the above check is one of ours, so we
5039       should be able to handle it. */
5040 
5041    /* default, meaningless return value, unless otherwise set */
5042    *ret = 0;
5043 
5044    switch (args[0]) {
5045 
5046       /* --- --- User-visible client requests --- --- */
5047 
5048       case VG_USERREQ__HG_CLEAN_MEMORY:
5049          if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
5050                             args[1], args[2]);
5051          /* Call die_mem to (expensively) tidy up properly, if there
5052             are any held locks etc in the area.  Calling evh__die_mem
5053             and then evh__new_mem is a bit inefficient; probably just
5054             the latter would do. */
5055          if (args[2] > 0) { /* length */
5056             evh__die_mem(args[1], args[2]);
5057             /* and then set it to New */
5058             evh__new_mem(args[1], args[2]);
5059          }
5060          break;
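
      /* Client-side sketch, using the wrapper macro from helgrind.h
         (buf/buf_len are hypothetical client variables): a custom
         allocator that recycles a block to another thread would do

            VALGRIND_HG_CLEAN_MEMORY(buf, buf_len);

         to reset the block's shadow state and avoid stale race
         reports against the previous owner's accesses. */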
5061 
5062       case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
5063          Addr  payload = 0;
5064          SizeT pszB = 0;
5065          if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
5066                             args[1]);
5067          if (HG_(mm_find_containing_block)(NULL, NULL,
5068                                            &payload, &pszB, args[1])) {
5069             if (pszB > 0) {
5070                evh__die_mem(payload, pszB);
5071                evh__new_mem(payload, pszB);
5072             }
5073             *ret = pszB;
5074          } else {
5075             *ret = (UWord)-1;
5076          }
5077          break;
5078       }
5079 
5080       case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
5081          if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
5082                             args[1], args[2]);
5083          if (args[2] > 0) { /* length */
5084             evh__untrack_mem(args[1], args[2]);
5085          }
5086          break;
5087 
5088       case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
5089          if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
5090                             args[1], args[2]);
5091          if (args[2] > 0) { /* length */
5092             evh__new_mem(args[1], args[2]);
5093          }
5094          break;
5095 
5096       case _VG_USERREQ__HG_GET_ABITS:
5097          if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
5098                             args[1], args[2], args[3]);
5099          UChar *zzabit = (UChar *) args[2];
5100          if (zzabit == NULL
5101              || VG_(am_is_valid_for_client)((Addr)zzabit, (SizeT)args[3],
5102                                             VKI_PROT_READ|VKI_PROT_WRITE))
5103             *ret = (UWord) libhb_srange_get_abits ((Addr)   args[1],
5104                                                    (UChar*) args[2],
5105                                                    (SizeT)  args[3]);
5106          else
5107             *ret = -1;
5108          break;
5109 
5110       /* This thread (tid) (a master) is informing us that it has
5111          seen the termination of a dependent task, and that this should
5112          be considered as a join between master and dependent. */
5113       case _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN: {
5114          Word n;
5115          const Thread *stayer = map_threads_maybe_lookup( tid );
5116          const void *dependent = (void*)args[1];
5117          const void *master = (void*)args[2];
5118 
5119          if (0)
5120          VG_(printf)("HG_GNAT_DEPENDENT_MASTER_JOIN (tid %d): "
5121                      "self_id = %p Thread* = %p dependent %p\n",
5122                      (Int)tid, master, stayer, dependent);
5123 
5124          gnat_dmmls_INIT();
5125          /* Similar loop to the one in the "master completed" hook below,
5126             but this one stops at the first matching occurrence, comparing
5127             only master and dependent. */
5128          for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5129             GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5130             if (dmml->master == master
5131                 && dmml->dependent == dependent) {
5132                if (0)
5133                VG_(printf)("quitter %p dependency to stayer %p (join)\n",
5134                            dmml->hg_dependent->hbthr,  stayer->hbthr);
5135                tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5136                generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5137                                                    stayer->hbthr);
5138                VG_(removeIndexXA) (gnat_dmmls, n);
5139                break;
5140             }
5141          }
5142          break;
5143       }
5144 
5145       /* --- --- Client requests for Helgrind's use only --- --- */
5146 
5147       /* Some thread is telling us its pthread_t value.  Record the
5148          binding between that and the associated Thread*, so we can
5149          later find the Thread* again when notified of a join by the
5150          thread. */
5151       case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
5152          Thread* my_thr = NULL;
5153          if (0)
5154          VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
5155                      (void*)args[1]);
5156          map_pthread_t_to_Thread_INIT();
5157          my_thr = map_threads_maybe_lookup( tid );
5158          /* This assertion should hold because the map_threads (tid to
5159             Thread*) binding should have been made at the point of
5160             low-level creation of this thread, which should have
5161             happened prior to us getting this client request for it.
5162             That's because this client request is sent from
5163             client-world from the 'thread_wrapper' function, which
5164             only runs once the thread has been low-level created. */
5165          tl_assert(my_thr != NULL);
5166          /* So now we know that (pthread_t)args[1] is associated with
5167             (Thread*)my_thr.  Note that down. */
5168          if (0)
5169          VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
5170                      (void*)args[1], (void*)my_thr );
5171          VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
5172 
5173          if (my_thr->coretid != 1) {
5174             /* FIXME: hardwires assumption about identity of the root thread. */
5175             if (HG_(clo_ignore_thread_creation)) {
5176                HG_(thread_leave_pthread_create)(my_thr);
5177                HG_(thread_leave_synchr)(my_thr);
5178                tl_assert(my_thr->synchr_nesting == 0);
5179             }
5180          }
5181          break;
5182       }
5183 
5184       case _VG_USERREQ__HG_PTH_API_ERROR: {
5185          Thread* my_thr = NULL;
5186          map_pthread_t_to_Thread_INIT();
5187          my_thr = map_threads_maybe_lookup( tid );
5188          tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
5189          HG_(record_error_PthAPIerror)(
5190             my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
5191          break;
5192       }
5193 
5194       /* This thread (tid) has completed a join with the quitting
5195          thread whose pthread_t is in args[1]. */
5196       case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
5197          Thread* thr_q = NULL; /* quitter Thread* */
5198          Bool    found = False;
5199          if (0)
5200          VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
5201                      (void*)args[1]);
5202          map_pthread_t_to_Thread_INIT();
5203          found = VG_(lookupFM)( map_pthread_t_to_Thread,
5204                                 NULL, (UWord*)&thr_q, (UWord)args[1] );
5205          /* Can this fail?  It would mean that our pthread_join
5206             wrapper observed a successful join on args[1] yet that
5207             thread never existed (or at least, it never lodged an
5208             entry in the mapping (via SET_MY_PTHREAD_T)).  Which
5209             sounds like a bug in the threads library. */
5210          // FIXME: get rid of this assertion; handle properly
5211          tl_assert(found);
5212          if (found) {
5213             if (0)
5214             VG_(printf)(".................... quitter Thread* = %p\n",
5215                         thr_q);
5216             evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
5217          }
5218          break;
5219       }
5220 
5221       /* This thread (tid) is informing us of its master. */
5222       case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
5223          GNAT_dmml dmml;
5224          dmml.dependent = (void*)args[1];
5225          dmml.master = (void*)args[2];
5226          dmml.master_level = (Word)args[3];
5227          dmml.hg_dependent = map_threads_maybe_lookup( tid );
5228          tl_assert(dmml.hg_dependent);
5229 
5230          if (0)
5231          VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
5232                      "dependent = %p master = %p master_level = %ld"
5233                      " dependent Thread* = %p\n",
5234                      (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
5235                      dmml.hg_dependent);
5236          gnat_dmmls_INIT();
5237          VG_(addToXA) (gnat_dmmls, &dmml);
5238          break;
5239       }
5240 
5241       /* This thread (tid) is informing us that it has completed a
5242          master. */
5243       case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
5244          Word n;
5245          const Thread *stayer = map_threads_maybe_lookup( tid );
5246          const void *master = (void*)args[1];
5247          const Word master_level = (Word) args[2];
5248          tl_assert(stayer);
5249 
5250          if (0)
5251          VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
5252                      "self_id = %p master_level = %ld Thread* = %p\n",
5253                      (Int)tid, master, master_level, stayer);
5254 
5255          gnat_dmmls_INIT();
5256          /* Reverse loop on the array, simulating a pthread_join for
5257             the Dependent tasks of the completed master, and removing
5258             them from the array. */
5259          for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5260             GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5261             if (dmml->master == master
5262                 && dmml->master_level == master_level) {
5263                if (0)
5264                VG_(printf)("quitter %p dependency to stayer %p\n",
5265                            dmml->hg_dependent->hbthr,  stayer->hbthr);
5266                tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5267                generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5268                                                    stayer->hbthr);
5269                VG_(removeIndexXA) (gnat_dmmls, n);
5270             }
5271          }
5272          break;
5273       }
5274 
5275       /* EXPOSITION only: by intercepting lock init events we can show
5276          the user where the lock was initialised, rather than only
5277          being able to show where it was first locked.  Intercepting
5278          lock initialisations is not necessary for the basic operation
5279          of the race checker. */
5280       case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
5281          evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
5282          break;
5283 
5284       /* mutex=arg[1], mutex_is_init=arg[2] */
5285       case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
5286          evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
5287          break;
5288 
5289       case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
5290          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5291          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5292             evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
5293          break;
5294 
5295       case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
5296          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5297             evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
5298          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5299          break;
5300 
5301       case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*
5302          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5303          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5304             evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
5305          break;
5306 
5307       case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*, long
5308          if ((args[2] == True) // lock actually taken
5309              && (HG_(get_pthread_create_nesting_level)(tid) == 0))
5310             evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
5311          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5312          break;
5313 
5314       /* This thread is about to do pthread_cond_signal on the
5315          pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
5316       case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
5317       case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
5318          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5319          evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
5320          break;
5321 
5322       case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST:
5323       case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST:
5324          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5325          break;
5326 
5327       /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
5328          Returns a flag indicating whether or not the mutex is believed to be
5329          valid for this operation. */
5330       case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
5331          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5332          Bool mutex_is_valid
5333             = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
5334                                                   (void*)args[2] );
5335          *ret = mutex_is_valid ? 1 : 0;
5336          break;
5337       }
5338 
5339       /* Thread successfully completed pthread_cond_init:
5340          cond=arg[1], cond_attr=arg[2] */
5341       case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
5342          evh__HG_PTHREAD_COND_INIT_POST( tid,
5343                                          (void*)args[1], (void*)args[2] );
5344          break;
5345 
5346       /* cond=arg[1], cond_is_init=arg[2] */
5347       case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
5348          evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
5349          break;
5350 
5351       /* Thread completed pthread_cond_wait, cond=arg[1],
5352          mutex=arg[2], timeout=arg[3], successful=arg[4] */
5353       case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
5354          if (args[4] == True)
5355             evh__HG_PTHREAD_COND_WAIT_POST( tid,
5356                                             (void*)args[1], (void*)args[2],
5357                                             (Bool)args[3] );
5358          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5359          break;
5360 
5361       case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
5362          evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
5363          break;
5364 
5365       case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
5366          evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
5367          break;
5368 
5369       /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
5370       case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
5371          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5372          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5373             evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
5374                                              args[2], args[3] );
5375          break;
5376 
5377       /* rwlock=arg[1], isW=arg[2], tookLock=arg[3] */
5378       case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
5379          if ((args[3] == True)
5380              && (HG_(get_pthread_create_nesting_level)(tid) == 0))
5381             evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5382          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5383          break;
5384 
5385       case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
5386          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5387          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5388             evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5389          break;
5390 
5391       case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
5392          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5393             evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
5394          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5395          break;
5396 
5397       case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
5398          evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
5399          break;
5400 
5401       case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
5402          evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
5403          break;
5404 
5405       case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
5406          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5407          evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5408          break;
5409 
5410       case _VG_USERREQ__HG_POSIX_SEM_POST_POST: /* sem_t* */
5411          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5412          break;
5413 
5414       case _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE: /* sem_t* */
5415          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5416          break;
5417 
5418       case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t*, long tookLock */
5419          if (args[2] == True)
5420             evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
5421          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5422          break;
5423 
5424       case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
5425          /* pth_bar_t*, ulong count, ulong resizable */
5426          evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
5427                                                 args[2], args[3] );
5428          break;
5429 
5430       case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
5431          /* pth_bar_t*, ulong newcount */
5432          evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
5433                                               args[2] );
5434          break;
5435 
5436       case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
5437          /* pth_bar_t* */
5438          evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
5439          break;
5440 
5441       case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
5442          /* pth_bar_t* */
5443          evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
5444          break;
5445 
5446       case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
5447          /* pth_spinlock_t* */
5448          evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
5449          break;
5450 
5451       case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
5452          /* pth_spinlock_t* */
5453          evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
5454          break;
5455 
5456       case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
5457          /* pth_spinlock_t*, Word */
5458          evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
5459          break;
5460 
5461       case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
5462          /* pth_spinlock_t* */
5463          evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
5464          break;
5465 
5466       case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
5467          /* pth_spinlock_t* */
5468          evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
5469          break;
5470 
5471       case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
5472          /* HChar* who */
5473          HChar*  who = (HChar*)args[1];
5474          HChar   buf[50 + 50];
5475          Thread* thr = map_threads_maybe_lookup( tid );
5476          tl_assert( thr ); /* I must be mapped */
5477          tl_assert( who );
5478          tl_assert( VG_(strlen)(who) <= 50 );
5479          VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5480          /* record_error_Misc strdup's buf, so this is safe: */
5481          HG_(record_error_Misc)( thr, buf );
5482          break;
5483       }
5484 
5485       case _VG_USERREQ__HG_USERSO_SEND_PRE:
5486          /* UWord arbitrary-SO-tag */
5487          evh__HG_USERSO_SEND_PRE( tid, args[1] );
5488          break;
5489 
5490       case _VG_USERREQ__HG_USERSO_RECV_POST:
5491          /* UWord arbitrary-SO-tag */
5492          evh__HG_USERSO_RECV_POST( tid, args[1] );
5493          break;
5494 
5495       case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5496          /* UWord arbitrary-SO-tag */
5497          evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5498          break;
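
      /* These three requests underlie the happens-before annotations
         in helgrind.h.  Usage sketch for a hand-rolled queue (q is
         hypothetical client code):

            ANNOTATE_HAPPENS_BEFORE(&q->slot);  // producer, after filling
            ANNOTATE_HAPPENS_AFTER(&q->slot);   // consumer, after taking
            ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&q->slot);  // on reuse

         These map onto USERSO_SEND_PRE, USERSO_RECV_POST and
         USERSO_FORGET_ALL respectively. */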

      case VG_USERREQ__GDB_MONITOR_COMMAND: {
         Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
         if (handled)
            *ret = 1;
         else
            *ret = 0;
         /* Unlike the other cases, return the handled status directly,
            so an unrecognised monitor command is reported as such. */
         return handled;
      }

      case _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_enter_pthread_create)(thr);
            HG_(thread_enter_synchr)(thr);
         }
         break;
      }

      case _VG_USERREQ__HG_PTHREAD_CREATE_END: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_leave_pthread_create)(thr);
            HG_(thread_leave_synchr)(thr);
         }
         break;
      }

      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE: // pth_mx_t*, long tryLock
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED:       // void*, long isW
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED:       // void*
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_RELEASED: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_ACQUIRED: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;
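
      /* These two mirror the USERSO pair above: a sem_post is treated
         as a send on an SO associated with the semaphore, and a
         completed sem_wait as the matching receive; see the
         evh__HG_POSIX_SEM_* handlers for details. */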

#if defined(VGO_solaris)
      case _VG_USERREQ__HG_RTLD_BIND_GUARD:
         evh__HG_RTLD_BIND_GUARD(tid, args[1]);
         break;

      case _VG_USERREQ__HG_RTLD_BIND_CLEAR:
         evh__HG_RTLD_BIND_CLEAR(tid, args[1]);
         break;
#endif /* VGO_solaris */

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( const HChar* arg )
{
   const HChar* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 150*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }
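
   /* Worked example: the leftmost character of the argument maps to
      the highest bit, so "--hg-sanity-flags=010000" sets bit 4 (check
      after each change to the lock-order-acquisition graph) and
      "--hg-sanity-flags=000010" sets bit 1 (check at each lock/unlock
      event), matching the table printed by hg_print_debug_usage
      below. */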

   else if VG_BOOL_CLO(arg, "--free-is-write",
                            HG_(clo_free_is_write)) {}

   else if VG_XACT_CLO(arg, "--vts-pruning=never",
                            HG_(clo_vts_pruning), 0);
   else if VG_XACT_CLO(arg, "--vts-pruning=auto",
                            HG_(clo_vts_pruning), 1);
   else if VG_XACT_CLO(arg, "--vts-pruning=always",
                            HG_(clo_vts_pruning), 2);

   else if VG_BOOL_CLO(arg, "--check-stack-refs",
                            HG_(clo_check_stack_refs)) {}
   else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
                            HG_(clo_ignore_thread_creation)) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
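
/* For illustration: a run exercising several of the options handled
   above might look like this, with ./a.out standing in for the client
   program:

      valgrind --tool=helgrind --history-level=approx \
               --conflict-cache-size=5000000 --free-is-write=yes ./a.out
*/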

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes    treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [2000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
"    --ignore-thread-creation=yes|no ignore activities during thread\n"
"                              creation [%s]\n",
HG_(clo_ignore_thread_creation) ? "yes" : "no"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:   pruning is never done (may cause big space leaks in Helgrind)\n"
"       auto:    done just often enough to keep space usage under control\n"
"       always:  done after every VTS GC (mostly just a big time waster)\n"
    );
}

static void hg_print_stats (void)
{

   if (1) {
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }
   }

   //zz       VG_(printf)("\n");
   //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
   //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
   //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
   //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz                   stats__hbefore_stk_hwm);
   //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
   //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

   VG_(printf)("\n");
   VG_(printf)("        locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("client malloc-ed blocks: %'8u\n",
               VG_(HT_count_nodes)(hg_mallocmeta_table));

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)("           locks: %'8lu acquires, "
               "%'lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases
              );
   VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

   VG_(printf)("\n");
   libhb_shutdown(True); // This in fact only prints stats.
}

static void hg_fini ( Int exitcode )
{
   HG_(xtmemory_report) (VG_(clo_xtree_memory_file), True);
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats))
      hg_print_stats();
}

/* FIXME: move these somewhere sane */

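/* The two functions below are passed to libhb_init (see
   hg_post_clo_init).  They let libhb capture stack traces and
   ExeContexts for a Thr* without knowing about core ThreadIds: each
   maps the Thr* back to its Helgrind Thread* and then to the
   corresponding ThreadId. */
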
static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*     thr;
   ThreadId    tid;
   UWord       nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   /* Zero-fill any remaining slots, so the caller always sees
      nRequest entries. */
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
   if (VG_(clo_xtree_memory) == Vg_XTMemory_Full)
      // Activate full xtree memory profiling.
      VG_(XTMemory_Full_init)(VG_(XT_filter_1top_and_maybe_below_main));
}

static void hg_info_location (Addr a)
{
   (void) HG_(get_and_pp_addrdescr) (a);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2017, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_print_stats) (hg_print_stats);
   VG_(needs_info_location) (hg_info_location);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes Helgrind to start
      more slowly and use significantly more memory, without very
      often providing useful results.  The user can request to load
      this information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );

   /* evh__die_mem ends by calling libhb_srange_noaccess_NoFX, which
      has no effect.  We do not use VG_(track_die_mem_stack), as that
      would be an expensive way to do nothing. */
   // VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that the requirements for "dodgy C-as-C++ style
      inheritance", as described in comments at the top of
      pub_tool_hashtable.h, are met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free));

   // Add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/