/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind.                               ---*/
/*---                                                  hg_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks Ltd
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_options.h"     // VG_(clo_xml)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "libhb.h"
#include "hg_errors.h"            /* self */


/*----------------------------------------------------------------*/
/*--- Error management -- storage                              ---*/
/*----------------------------------------------------------------*/

/* maps (by value) strings to a copy of them in ARENA_TOOL */

static WordFM* string_table = NULL;

ULong HG_(stats__string_table_queries) = 0;

ULong HG_(stats__string_table_get_map_size) ( void ) {
   return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}

static Word string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}

static HChar* string_table_strdup ( HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   if (!str)
      str = "(null)";
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
      tl_assert(string_table);
   }
   if (VG_(lookupFM)( string_table,
                      NULL, (Word*)&copy, (Word)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
      return copy;
   } else {
      copy = HG_(strdup)("hg.sts.2", str);
      tl_assert(copy);
      VG_(addToFM)( string_table, (Word)copy, (Word)copy );
      return copy;
   }
}
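
/* Usage sketch (illustrative only, not called from this file): the
   interning above means equal strings map to a single tool-arena
   copy, so results may be compared with '==' rather than
   VG_(strcmp):

      HChar* a = string_table_strdup("EDEADLK");
      HChar* b = string_table_strdup("EDEADLK");
      tl_assert(a == b);   // same interned copy both times
*/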

/* maps from Lock .unique fields to LockP*s */

static WordFM* map_LockN_to_P = NULL;

ULong HG_(stats__LockN_to_P_queries) = 0;

ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
   return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}

static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return 1;
   return 0;
}

static Lock* mk_LockP_from_LockN ( Lock* lkn )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;
   tl_assert( HG_(is_sane_LockN)(lkn) );
   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
      tl_assert(map_LockN_to_P);
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (Word*)&lkp, (Word)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (Word)lkp, (Word)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
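
/* Usage sketch (illustrative only): a LockP is a persistent snapshot
   of a LockN, keyed on the .unique field, so repeated conversions of
   the same client lock yield the same LockP, with holder state
   deliberately stripped:

      Lock* p1 = mk_LockP_from_LockN(lkn);
      Lock* p2 = mk_LockP_from_LockN(lkn);   // cache hit in map_LockN_to_P
      tl_assert(p1 == p2);
      tl_assert(p1->heldBy == NULL && p1->acquired_at == NULL);
*/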

/* Errors:

      race: program counter
            read or write
            data size
            previous state
            current state

   FIXME: how does state printing interact with lockset gc?
   Are the locksets in prev/curr state always valid?
   Ditto question for the threadsets
      ThreadSets - probably are always valid if Threads
      are never thrown away.
      LockSets - could at least print the lockset elements that
      correspond to actual locks at the time of printing.  Hmm.
*/

/* Error kinds */
typedef
   enum {
      XE_Race=1101,      // race
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc            // misc other error (w/ string to describe it)
   }
   XErrorTag;

/* Extra contexts for kinds */
typedef
   struct {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;
            Int         szB;
            Bool        isWrite;
            Thread*     thr;
            /* descr1/2 provide a description of stack/global locs */
            XArray*     descr1; /* XArray* of HChar */
            XArray*     descr2; /* XArray* of HChar */
            /* halloc/haddr/hszB describe the addr if it is a heap block. */
            ExeContext* hctxt;
            Addr        haddr;
            SizeT       hszB;
            /* h1_* and h2_* provide some description of a previously
               observed access with which we are conflicting. */
            Thread*     h1_ct; /* non-NULL means h1 info present */
            ExeContext* h1_ct_mbsegstartEC;
            ExeContext* h1_ct_mbsegendEC;
            Thread*     h2_ct; /* non-NULL means h2 info present */
            ExeContext* h2_ct_accEC;
            Int         h2_ct_accSzB;
            Bool        h2_ct_accIsW;
         } Race;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;   /* doing the unlocking */
            Thread* owner; /* thread that actually holds the lock */
            Lock*   lock;  /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread*     thr;
            Addr        before_ga; /* always locked first in prog. history */
            Addr        after_ga;
            ExeContext* before_ec;
            ExeContext* after_ec;
         } LockOrder;
         struct {
            Thread*     thr;
            HChar*      errstr; /* persistent, in tool-arena */
            HChar*      auxstr; /* optional, persistent, in tool-arena */
            ExeContext* auxctx; /* optional */
         } Misc;
      } XE;
   }
   XError;

static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}


/* Extensions of suppressions */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc
   }
   XSuppTag;


/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);

      tl_assert(!xe->XE.Race.hctxt);
      tl_assert(!xe->XE.Race.descr1);
      tl_assert(!xe->XE.Race.descr2);

      /* First, see if it's in any heap block.  Unfortunately this
         means a linear search through all allocated heap blocks.  The
         assertion says that if it's detected as a heap block, then we
         must have an allocation context for it, since all heap blocks
         should have an allocation context. */
      Bool is_heapblock
         = HG_(mm_find_containing_block)(
              &xe->XE.Race.hctxt, &xe->XE.Race.haddr, &xe->XE.Race.hszB,
              xe->XE.Race.data_addr
           );
      tl_assert(is_heapblock == (xe->XE.Race.hctxt != NULL));

      if (!xe->XE.Race.hctxt) {
         /* It's not in any heap block.  See if we can map it to a
            stack or global symbol. */

         xe->XE.Race.descr1
            = VG_(newXA)( HG_(zalloc), "hg.update_extra.Race.descr1",
                          HG_(free), sizeof(HChar) );
         xe->XE.Race.descr2
            = VG_(newXA)( HG_(zalloc), "hg.update_extra.Race.descr2",
                          HG_(free), sizeof(HChar) );

         (void) VG_(get_data_description)( xe->XE.Race.descr1,
                                           xe->XE.Race.descr2,
                                           xe->XE.Race.data_addr );

         /* If there's nothing in descr1/2, free it.  Why is it safe
            to VG_(indexXA) at zero here?  Because
            VG_(get_data_description) guarantees to zero terminate
            descr1/2 regardless of the outcome of the call.  So there's
            always at least one element in each XA after the call.
         */
         if (0 == VG_(strlen)( VG_(indexXA)( xe->XE.Race.descr1, 0 ))) {
            VG_(deleteXA)( xe->XE.Race.descr1 );
            xe->XE.Race.descr1 = NULL;
         }
         if (0 == VG_(strlen)( VG_(indexXA)( xe->XE.Race.descr2, 0 ))) {
            VG_(deleteXA)( xe->XE.Race.descr2 );
            xe->XE.Race.descr2 = NULL;
         }
      }

      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) {
         Thr*        thrp     = NULL;
         ExeContext* wherep   = NULL;
         Addr        acc_addr = xe->XE.Race.data_addr;
         Int         acc_szB  = xe->XE.Race.szB;
         Thr*        acc_thr  = xe->XE.Race.thr->hbthr;
         Bool        acc_isW  = xe->XE.Race.isWrite;
         SizeT       conf_szB = 0;
         Bool        conf_isW = False;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_opaque( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
         }
      }

      // both NULL or both non-NULL
      tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
   }

   return sizeof(XError);
}

void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   {
     VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, 0, data_addr );
     if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                        data_addr, VG_(pp_SectKind)(sect));
     /* SectGOTPLT is required on ???-linux */
     if (sect == Vg_SectGOTPLT) return;
     /* SectPLT is required on ppc32/64-linux */
     if (sect == Vg_SectPLT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr = data_addr;
   xe.XE.Race.szB       = szB;
   xe.XE.Race.isWrite   = isWrite;
   xe.XE.Race.thr       = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip on the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   tl_assert(xe.XE.Race.descr1 == NULL);
   tl_assert(xe.XE.Race.descr2 == NULL);
   // FIXME: tid vs thr
   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );

   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}

void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr  = thr;
   xe.XE.UnlockUnlocked.lock = mk_LockP_from_LockN(lk);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}

void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockForeign;
   xe.XE.UnlockForeign.thr   = thr;
   xe.XE.UnlockForeign.owner = owner;
   xe.XE.UnlockForeign.lock  = mk_LockP_from_LockN(lk);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &xe );
}

void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}

void HG_(record_error_LockOrder)(
        Thread* thr, Addr before_ga, Addr after_ga,
        ExeContext* before_ec, ExeContext* after_ec
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   if (!HG_(clo_track_lockorders))
      return;
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr       = thr;
   xe.XE.LockOrder.before_ga = before_ga;
   xe.XE.LockOrder.before_ec = before_ec;
   xe.XE.LockOrder.after_ga  = after_ga;
   xe.XE.LockOrder.after_ec  = after_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}

void HG_(record_error_PthAPIerror) ( Thread* thr, HChar* fnname,
                                     Word err, HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}

void HG_(record_error_Misc_w_aux) ( Thread* thr, HChar* errstr,
                                    HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}

void HG_(record_error_Misc) ( Thread* thr, HChar* errstr )
{
   HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}
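
/* Call-site sketch (the message text here is hypothetical): clients
   report one-off conditions as a plain string, which is interned via
   string_table_strdup, so passing a stack-local or literal buffer is
   safe:

      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy of a locked mutex" );
*/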

Bool HG_(eq_Error) ( VgRes not_used, Error* e1, Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }

   /*NOTREACHED*/
   tl_assert(0);
}


/*----------------------------------------------------------------*/
/*--- Error management -- printing                             ---*/
/*----------------------------------------------------------------*/

/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
static void emit_no_f_c ( HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
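
/* Note on emit vs emit_no_f_c: the two are identical except that
   emit_no_f_c carries no PRINTF_CHECK annotation ("no format check").
   That lets callers below pass Valgrind's non-standard '%t'
   conversion (apparently a string escaped for XML output, judging by
   its use for function names below -- an assumption, not verified
   here) without tripping the compiler's printf-format checking.
   Sketch:

      emit_no_f_c( "call to %t failed\n", fnname );  // '%t' is not ISO C
*/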


/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made.
*/
static Bool announce_one_thread ( Thread* thr )
{
   tl_assert(HG_(is_sane_Thread)(thr));
   tl_assert(thr->errmsg_index >= 1);
   if (thr->announced)
      return False;

   if (VG_(clo_xml)) {

      VG_(printf_xml)("<announcethread>\n");
      VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(printf_xml)("  <isrootthread></isrootthread>\n");
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(printf_xml)("</announcethread>\n\n");

   } else {

      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(message)(Vg_UserMsg,
                      "Thread #%d is the program's root thread\n",
                      thr->errmsg_index);
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                      thr->errmsg_index);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(message)(Vg_UserMsg, "\n");

   }

   thr->announced = True;
   return True;
}


/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  We use it to
   announce any previously un-announced threads in the upcoming error
   message.
*/
void HG_(before_pp_Error) ( Error* err )
{
   XError* xe;
   tl_assert(err);
   xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {
      case XE_Misc:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_LockOrder:
         announce_one_thread( xe->XE.LockOrder.thr );
         break;
      case XE_PthAPIerror:
         announce_one_thread( xe->XE.PthAPIerror.thr );
         break;
      case XE_UnlockBogus:
         announce_one_thread( xe->XE.UnlockBogus.thr );
         break;
      case XE_UnlockForeign:
         announce_one_thread( xe->XE.UnlockForeign.thr );
         announce_one_thread( xe->XE.UnlockForeign.owner );
         break;
      case XE_UnlockUnlocked:
         announce_one_thread( xe->XE.UnlockUnlocked.thr );
         break;
      case XE_Race:
         announce_one_thread( xe->XE.Race.thr );
         if (xe->XE.Race.h2_ct)
            announce_one_thread( xe->XE.Race.h2_ct );
         if (xe->XE.Race.h1_ct)
            announce_one_thread( xe->XE.Race.h1_ct );
         break;
      default:
         tl_assert(0);
   }
}

void HG_(pp_Error) ( Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {

   case XE_Misc: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

      if (xml) {

         emit( "  <kind>Misc</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: %s</text>\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Misc.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      } else {

         emit( "Thread #%d: %s\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit(" %s\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      }
      break;
   }

   case XE_LockOrder: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

      if (xml) {

         emit( "  <kind>LockOrder</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: lock order \"%p before %p\" "
                    "violated</text>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.before_ga,
               (void*)xe->XE.LockOrder.after_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.before_ec && xe->XE.LockOrder.after_ec) {
            emit( "  <auxwhat>Required order was established by "
                  "acquisition of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.before_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.before_ec );
            emit( "  <auxwhat>followed by a later acquisition "
                  "of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.after_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.after_ec );
         }

      } else {

         emit( "Thread #%d: lock order \"%p before %p\" violated\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.before_ga,
               (void*)xe->XE.LockOrder.after_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.before_ec && xe->XE.LockOrder.after_ec) {
            emit( "  Required order was established by "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.before_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.before_ec );
            emit( "  followed by a later acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.after_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.after_ec );
         }

      }

      break;
   }

   case XE_PthAPIerror: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

      if (xml) {

         emit( "  <kind>PthAPIerror</kind>\n");
         emit( "  <xwhat>\n" );
         emit_no_f_c(
            "    <text>Thread #%d's call to %t failed</text>\n",
            (Int)xe->XE.PthAPIerror.thr->errmsg_index,
            xe->XE.PthAPIerror.fnname );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         emit( "  <what>with error code %ld (%s)</what>\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit_no_f_c( "Thread #%d's call to %t failed\n",
                      (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                      xe->XE.PthAPIerror.fnname );
         emit( " with error code %ld (%s)\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockBogus: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

      if (xml) {

         emit( "  <kind>UnlockBogus</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked an invalid "
                    "lock at %p</text>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d unlocked an invalid lock at %p\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockForeign: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

      if (xml) {

         emit( "  <kind>UnlockForeign</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked lock at %p "
                    "currently held by thread #%d</text>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.UnlockForeign.lock->appeared_at) {
            emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
                  (void*)xe->XE.UnlockForeign.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
         }

      } else {

         emit( "Thread #%d unlocked lock at %p "
               "currently held by thread #%d\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockForeign.lock->appeared_at) {
            emit( "  Lock at %p was first observed\n",
                  (void*)xe->XE.UnlockForeign.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
         }

      }

      break;
   }

   case XE_UnlockUnlocked: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

      if (xml) {

         emit( "  <kind>UnlockUnlocked</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked a "
                    "not-locked lock at %p</text>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockUnlocked.lock->appeared_at) {
            emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
         }

      } else {

         emit( "Thread #%d unlocked a not-locked lock at %p\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockUnlocked.lock->appeared_at) {
            emit( "  Lock at %p was first observed\n",
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
         }

      }

      break;
   }

   case XE_Race: {
      Addr   err_ga;
      HChar* what;
      Int    szB;
      what   = xe->XE.Race.isWrite ? "write" : "read";
      szB    = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
      if (xe->XE.Race.h2_ct)
         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

      if (xml) {

         /* ------ XML ------ */
         emit( "  <kind>Race</kind>\n" );
         emit( "  <xwhat>\n" );
         emit( "    <text>Possible data race during %s of size %d "
                    "at %#lx by thread #%d</text>\n",
               what, szB, err_ga, (Int)xe->XE.Race.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Race.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous %s of size %d "
                       "by thread #%d</text>\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h2_ct->errmsg_index);
            emit("  </xauxwhat>\n");
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous access "
                  "by thread #%d, after</text>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit("  </xauxwhat>\n");
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
            }
            emit( "  <auxwhat>but before</auxwhat>\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
            }
         }

      } else {

         /* ------ Text ------ */
         emit( "Possible data race during %s of size %d "
               "at %#lx by thread #%d\n",
               what, szB, err_ga, (Int)xe->XE.Race.thr->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( " This conflicts with a previous %s of size %d "
                  "by thread #%d\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( " This conflicts with a previous access by thread #%d, "
                  "after\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( " (the start of the thread)\n" );
            }
            emit( " but before\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( " (the end of the thread)\n" );
            }
         }

      }

      /* If we have a description of the address in terms of a heap
         block, show it. */
      if (xe->XE.Race.hctxt) {
         SizeT delta = err_ga - xe->XE.Race.haddr;
         if (xml) {
            emit("  <auxwhat>Address %#lx is %ld bytes inside a block "
                 "of size %ld alloc'd</auxwhat>\n", err_ga, delta,
                 xe->XE.Race.hszB);
            VG_(pp_ExeContext)( xe->XE.Race.hctxt );
         } else {
            emit(" Address %#lx is %ld bytes inside a block "
                 "of size %ld alloc'd\n", err_ga, delta,
                 xe->XE.Race.hszB);
            VG_(pp_ExeContext)( xe->XE.Race.hctxt );
         }
      }

      /* If we have a better description of the address, show it.
         Note that in XML mode, it will already be nicely wrapped up
         in tags, either <auxwhat> or <xauxwhat>, so we can just emit
         it verbatim. */
      if (xe->XE.Race.descr1)
         emit( "%s%s\n", xml ? "  " : " ",
               (HChar*)VG_(indexXA)( xe->XE.Race.descr1, 0 ) );
      if (xe->XE.Race.descr2)
         emit( "%s%s\n", xml ? "  " : " ",
               (HChar*)VG_(indexXA)( xe->XE.Race.descr2, 0 ) );

      break; /* case XE_Race */
   } /* case XE_Race */

   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}

Char* HG_(get_error_name) ( Error* err )
{
   switch (VG_(get_error_kind)(err)) {
      case XE_Race:           return "Race";
      case XE_UnlockUnlocked: return "UnlockUnlocked";
      case XE_UnlockForeign:  return "UnlockForeign";
      case XE_UnlockBogus:    return "UnlockBogus";
      case XE_PthAPIerror:    return "PthAPIerror";
      case XE_LockOrder:      return "LockOrder";
      case XE_Misc:           return "Misc";
      default: tl_assert(0); /* fill in missing case */
   }
}

Bool HG_(recognised_suppression) ( Char* name, Supp *su )
{
#  define TRY(_name,_xskind)                   \
      if (0 == VG_(strcmp)(name, (_name))) {   \
         VG_(set_supp_kind)(su, (_xskind));    \
         return True;                          \
      }
   TRY("Race",           XS_Race);
   TRY("FreeMemLock",    XS_FreeMemLock);
   TRY("UnlockUnlocked", XS_UnlockUnlocked);
   TRY("UnlockForeign",  XS_UnlockForeign);
   TRY("UnlockBogus",    XS_UnlockBogus);
   TRY("PthAPIerror",    XS_PthAPIerror);
   TRY("LockOrder",      XS_LockOrder);
   TRY("Misc",           XS_Misc);
   return False;
#  undef TRY
}
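
/* Suppression-file sketch (hypothetical entry): the names recognised
   above appear after the "Helgrind:" tool prefix on the second line
   of a suppression entry, e.g.

      {
         ignore-race-in-foo-worker
         Helgrind:Race
         fun:foo_worker
      }
*/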

Bool HG_(read_extra_suppression_info) ( Int fd, Char** bufpp, SizeT* nBufp,
                                        Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}

Bool HG_(error_matches_suppression) ( Error* err, Supp* su )
{
   switch (VG_(get_supp_kind)(su)) {
      case XS_Race:           return VG_(get_error_kind)(err) == XE_Race;
      case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
      case XS_UnlockForeign:  return VG_(get_error_kind)(err) == XE_UnlockForeign;
      case XS_UnlockBogus:    return VG_(get_error_kind)(err) == XE_UnlockBogus;
      case XS_PthAPIerror:    return VG_(get_error_kind)(err) == XE_PthAPIerror;
      case XS_LockOrder:      return VG_(get_error_kind)(err) == XE_LockOrder;
      case XS_Misc:           return VG_(get_error_kind)(err) == XE_Misc;
      //case XS_: return VG_(get_error_kind)(err) == XE_;
      default: tl_assert(0); /* fill in missing cases */
   }
}

Bool HG_(get_extra_suppression_info) ( Error* err,
                                       /*OUT*/Char* buf, Int nBuf )
{
   /* Do nothing */
   return False;
}


/*--------------------------------------------------------------------*/
/*--- end                                              hg_errors.c ---*/
/*--------------------------------------------------------------------*/