
/*--------------------------------------------------------------------*/
/*--- Management, printing, etc, of errors and suppressions.       ---*/
/*---                                                  mc_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2010 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_debuginfo.h"     // VG_(get_dataname_and_offset)
#include "pub_tool_xarray.h"
#include "pub_tool_vki.h"
#include "pub_tool_libcfile.h"

#include "mc_include.h"


/*------------------------------------------------------------*/
/*--- Error types                                          ---*/
/*------------------------------------------------------------*/

/* See comment in mc_include.h */
Bool MC_(any_value_errors) = False;


// Different kinds of blocks.
typedef enum {
   Block_Mallocd = 111,
   Block_Freed,
   Block_Mempool,
   Block_MempoolChunk,
   Block_UserG
} BlockKind;

/* ------------------ Addresses -------------------- */

/* The classification of a faulting address. */
typedef
   enum {
      Addr_Undescribed, // as-yet unclassified
      Addr_Unknown,     // classification yielded nothing useful
      Addr_Block,       // in malloc'd/free'd block
      Addr_Stack,       // on a thread's stack
      Addr_DataSym,     // in a global data sym
      Addr_Variable,    // variable described by the debug info
      Addr_SectKind     // last-ditch classification attempt
   }
   AddrTag;

typedef
   struct _AddrInfo
   AddrInfo;

struct _AddrInfo {
   AddrTag tag;
   union {
      // As-yet unclassified.
      struct { } Undescribed;

      // On a stack.
      struct {
         ThreadId tid;        // Which thread's stack?
      } Stack;

      // This covers heap blocks (normal and from mempools) and user-defined
      // blocks.
      struct {
         BlockKind   block_kind;
         Char*       block_desc;    // "block", "mempool" or user-defined
         SizeT       block_szB;
         PtrdiffT    rwoffset;
         ExeContext* lastchange;
      } Block;

      // In a global .data symbol.  This holds the first 127 chars of
      // the variable's name (zero terminated), plus a (memory) offset.
      struct {
         Char     name[128];
         PtrdiffT offset;
      } DataSym;

      // Is described by Dwarf debug info.  XArray*s of HChar.
      struct {
         XArray* /* of HChar */ descr1;
         XArray* /* of HChar */ descr2;
      } Variable;

      // Could only narrow it down to be the PLT/GOT/etc of a given
      // object.  Better than nothing, perhaps.
      struct {
         Char       objname[128];
         VgSectKind kind;
      } SectKind;

      // Classification yielded nothing useful.
      struct { } Unknown;

   } Addr;
};

/* ------------------ Errors ----------------------- */

/* What kind of error it is. */
typedef
   enum {
      Err_Value,
      Err_Cond,
      Err_CoreMem,
      Err_Addr,
      Err_Jump,
      Err_RegParam,
      Err_MemParam,
      Err_User,
      Err_Free,
      Err_FreeMismatch,
      Err_Overlap,
      Err_Leak,
      Err_IllegalMempool,
   }
   MC_ErrorTag;


typedef struct _MC_Error MC_Error;

struct _MC_Error {
   // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
   //MC_ErrorTag tag;

   union {
      // Use of an undefined value:
      // - as a pointer in a load or store
      // - as a jump target
      struct {
         SizeT szB;   // size of value in bytes
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Value;

      // Use of an undefined value in a conditional branch or move.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Cond;

      // Addressability error in core (signal-handling) operation.
      // It would be good to get rid of this error kind, merge it with
      // another one somehow.
      struct {
      } CoreMem;

      // Use of an unaddressable memory location in a load or store.
      struct {
         Bool     isWrite;    // read or write?
         SizeT    szB;        // not used for exec (jump) errors
         Bool     maybe_gcc;  // True if just below %esp -- could be a gcc bug
         AddrInfo ai;
      } Addr;

      // Jump to an unaddressable memory location.
      struct {
         AddrInfo ai;
      } Jump;

      // System call register input contains undefined bytes.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } RegParam;

      // System call memory input contains undefined/unaddressable bytes
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } MemParam;

      // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } User;

      // Program tried to free() something that's not a heap block (this
      // covers double-frees).
      struct {
         AddrInfo ai;
      } Free;

      // Program allocates heap block with one function
      // (malloc/new/new[]/custom) and deallocates with a non-matching one.
      struct {
         AddrInfo ai;
      } FreeMismatch;

      // Call to strcpy, memcpy, etc, with overlapping blocks.
      struct {
         Addr src;   // Source block
         Addr dst;   // Destination block
         Int  szB;   // Size in bytes;  0 if unused.
      } Overlap;

      // A memory leak.
      struct {
         UInt        n_this_record;
         UInt        n_total_records;
         LossRecord* lr;
      } Leak;

      // A memory pool error.
      struct {
         AddrInfo ai;
      } IllegalMempool;

   } Err;
};


/*------------------------------------------------------------*/
/*--- Printing errors                                      ---*/
/*------------------------------------------------------------*/

/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  Which, in
   Memcheck, we don't use.  Hence a no-op.
*/
void MC_(before_pp_Error) ( Error* err ) {
}

/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
static void emiN ( HChar* format, ... ) /* NO FORMAT CHECK */
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
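
/* emiN is the same as emit, minus the compile-time format check.  The
   check has to be skipped because some callers use the non-standard
   "%t" conversion (which appears to denote a string that the output
   routine XML-escapes), and gcc's printf format checker would reject
   such format strings. */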


static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
{
   HChar* xpre  = VG_(clo_xml) ? "  <auxwhat>" : " ";
   HChar* xpost = VG_(clo_xml) ? "</auxwhat>"  : "";

   switch (ai->tag) {
      case Addr_Unknown:
         if (maybe_gcc) {
            emit( "%sAddress 0x%llx is just below the stack ptr.  "
                  "To suppress, use: --workaround-gcc296-bugs=yes%s\n",
                  xpre, (ULong)a, xpost );
         } else {
            emit( "%sAddress 0x%llx "
                  "is not stack'd, malloc'd or (recently) free'd%s\n",
                  xpre, (ULong)a, xpost );
         }
         break;

      case Addr_Stack:
         emit( "%sAddress 0x%llx is on thread %d's stack%s\n",
               xpre, (ULong)a, ai->Addr.Stack.tid, xpost );
         break;

      case Addr_Block: {
         SizeT    block_szB = ai->Addr.Block.block_szB;
         PtrdiffT rwoffset  = ai->Addr.Block.rwoffset;
         SizeT    delta;
         const    Char* relative;

         if (rwoffset < 0) {
            delta    = (SizeT)(-rwoffset);
            relative = "before";
         } else if (rwoffset >= block_szB) {
            delta    = rwoffset - block_szB;
            relative = "after";
         } else {
            delta    = rwoffset;
            relative = "inside";
         }
         emit(
            "%sAddress 0x%lx is %'lu bytes %s a %s of size %'lu %s%s\n",
            xpre,
            a, delta, relative, ai->Addr.Block.block_desc,
            block_szB,
            ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
            : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
                                                     : "client-defined",
            xpost
         );
         VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
         break;
      }

      case Addr_DataSym:
         emiN( "%sAddress 0x%llx is %llu bytes "
               "inside data symbol \"%t\"%s\n",
               xpre,
               (ULong)a,
               (ULong)ai->Addr.DataSym.offset,
               ai->Addr.DataSym.name,
               xpost );
         break;

      case Addr_Variable:
         /* Note, no need for XML tags here, because descr1/2 will
            already have <auxwhat> or <xauxwhat>s on them, in XML
            mode. */
         if (ai->Addr.Variable.descr1)
            emit( "%s%s\n",
                  VG_(clo_xml) ? "  " : " ",
                  (HChar*)VG_(indexXA)(ai->Addr.Variable.descr1, 0) );
         if (ai->Addr.Variable.descr2)
            emit( "%s%s\n",
                  VG_(clo_xml) ? "  " : " ",
                  (HChar*)VG_(indexXA)(ai->Addr.Variable.descr2, 0) );
         break;

      case Addr_SectKind:
         emiN( "%sAddress 0x%llx is in the %t segment of %t%s\n",
               xpre,
               (ULong)a,
               VG_(pp_SectKind)(ai->Addr.SectKind.kind),
               ai->Addr.SectKind.objname,
               xpost );
         break;

      default:
         VG_(tool_panic)("mc_pp_AddrInfo");
   }
}

static const HChar* str_leak_lossmode ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "definitely lost"; break;
      case IndirectLeak: loss = "indirectly lost"; break;
      case Possible:     loss = "possibly lost"; break;
      case Reachable:    loss = "still reachable"; break;
   }
   return loss;
}

static const HChar* xml_leak_kind ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "Leak_DefinitelyLost"; break;
      case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
      case Possible:     loss = "Leak_PossiblyLost"; break;
      case Reachable:    loss = "Leak_StillReachable"; break;
   }
   return loss;
}

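/* Origin tags: an otag packs the ExeContext unique (ECU) of the origin
   in its upper bits and an MC_OKIND_* value in its bottom two bits.
   Callers below therefore pass 'otag & 3' as the okind here, and
   update_origin() later recovers the ECU with 'otag & ~3'. */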
static void mc_pp_origin ( ExeContext* ec, UInt okind )
{
   HChar* src = NULL;
   tl_assert(ec);

   switch (okind) {
      case MC_OKIND_STACK:   src = " by a stack allocation"; break;
      case MC_OKIND_HEAP:    src = " by a heap allocation"; break;
      case MC_OKIND_USER:    src = " by a client request"; break;
      case MC_OKIND_UNKNOWN: src = ""; break;
   }
   tl_assert(src); /* guards against invalid 'okind' */

   if (VG_(clo_xml)) {
      emit( "  <auxwhat>Uninitialised value was created%s</auxwhat>\n",
            src);
      VG_(pp_ExeContext)( ec );
   } else {
      emit( " Uninitialised value was created%s\n", src);
      VG_(pp_ExeContext)( ec );
   }
}

void MC_(pp_Error) ( Error* err )
{
   const Bool xml  = VG_(clo_xml); /* a shorthand */
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case Err_CoreMem:
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
            signal handler frame.  --njn */
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( "  <kind>CoreMemError</kind>\n" );
            emiN( "  <what>%t contains unaddressable byte(s)</what>\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            emit( "%s contains unaddressable byte(s)\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_Value:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>UninitValue</kind>\n" );
            emit( "  <what>Use of uninitialised value of size %ld</what>\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                            extra->Err.Value.otag & 3 );
         } else {
            /* Could also show extra->Err.Cond.otag if debugging origin
               tracking */
            emit( "Use of uninitialised value of size %ld\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                            extra->Err.Value.otag & 3 );
         }
         break;

      case Err_Cond:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>UninitCondition</kind>\n" );
            emit( "  <what>Conditional jump or move depends"
                  " on uninitialised value(s)</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         } else {
            /* Could also show extra->Err.Cond.otag if debugging origin
               tracking */
            emit( "Conditional jump or move depends"
                  " on uninitialised value(s)\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         }
         break;

      case Err_RegParam:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>SyscallParam</kind>\n" );
            emiN( "  <what>Syscall param %t contains "
                  "uninitialised byte(s)</what>\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         } else {
            emit( "Syscall param %s contains uninitialised byte(s)\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         }
         break;

      case Err_MemParam:
         if (!extra->Err.MemParam.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>SyscallParam</kind>\n" );
            emiN( "  <what>Syscall param %t points to %s byte(s)</what>\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err),
                           &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         } else {
            emit( "Syscall param %s points to %s byte(s)\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err),
                           &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         }
         break;

      case Err_User:
         if (!extra->Err.User.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>ClientCheck</kind>\n" );
            emit( "  <what>%s byte(s) found "
                  "during client check request</what>\n",
                   extra->Err.User.isAddrErr
                      ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
                           False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         } else {
            emit( "%s byte(s) found during client check request\n",
                   extra->Err.User.isAddrErr
                      ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
                           False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         }
         break;

      case Err_Free:
         if (xml) {
            emit( "  <kind>InvalidFree</kind>\n" );
            emit( "  <what>Invalid free() / delete / delete[]</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.Free.ai, False );
         } else {
            emit( "Invalid free() / delete / delete[]\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.Free.ai, False );
         }
         break;

      case Err_FreeMismatch:
         if (xml) {
            emit( "  <kind>MismatchedFree</kind>\n" );
            emit( "  <what>Mismatched free() / delete / delete []</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err),
                           &extra->Err.FreeMismatch.ai, False);
         } else {
            emit( "Mismatched free() / delete / delete []\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err),
                           &extra->Err.FreeMismatch.ai, False);
         }
         break;

      case Err_Addr:
         if (xml) {
            emit( "  <kind>Invalid%s</kind>\n",
                  extra->Err.Addr.isWrite ? "Write" : "Read"  );
            emit( "  <what>Invalid %s of size %ld</what>\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.Addr.ai,
                            extra->Err.Addr.maybe_gcc );
         } else {
            emit( "Invalid %s of size %ld\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.Addr.ai,
                            extra->Err.Addr.maybe_gcc );
         }
         break;

      case Err_Jump:
         if (xml) {
            emit( "  <kind>InvalidJump</kind>\n" );
            emit( "  <what>Jump to the invalid address stated "
                  "on the next line</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                            False );
         } else {
            emit( "Jump to the invalid address stated on the next line\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                            False );
         }
         break;

      case Err_Overlap:
         if (xml) {
            emit( "  <kind>Overlap</kind>\n" );
            if (extra->Err.Overlap.szB == 0) {
               emiN( "  <what>Source and destination overlap "
                     "in %t(%#lx, %#lx)\n</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( "  <what>Source and destination overlap "
                     "in %s(%#lx, %#lx, %d)</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            if (extra->Err.Overlap.szB == 0) {
               emiN( "Source and destination overlap in %t(%#lx, %#lx)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( "Source and destination overlap in %s(%#lx, %#lx, %d)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_IllegalMempool:
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( "  <kind>InvalidMemPool</kind>\n" );
            emit( "  <what>Illegal memory pool address</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.IllegalMempool.ai, False );
         } else {
            emit( "Illegal memory pool address\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.IllegalMempool.ai, False );
         }
         break;

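      /* Note on the leak messages below: the "'" flag in "%'lu" asks
         the core's printf for thousands separators in decimal output;
         the XML-only <leakedbytes>/<leakedblocks> fields use plain
         %lu/%u so the numbers stay machine-parseable. */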
      case Err_Leak: {
         UInt        n_this_record   = extra->Err.Leak.n_this_record;
         UInt        n_total_records = extra->Err.Leak.n_total_records;
         LossRecord* lr              = extra->Err.Leak.lr;
         if (xml) {
            emit("  <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
            if (lr->indirect_szB > 0) {
               emit( "  <xwhat>\n" );
               emit( "    <text>%'lu (%'lu direct, %'lu indirect) bytes "
                     "in %'u blocks"
                     " are %s in loss record %'u of %'u</text>\n",
                     lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
                     lr->num_blocks,
                     str_leak_lossmode(lr->key.state),
                     n_this_record, n_total_records );
               // Nb: don't put commas in these XML numbers
               emit( "    <leakedbytes>%lu</leakedbytes>\n",
                     lr->szB + lr->indirect_szB );
               emit( "    <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
               emit( "  </xwhat>\n" );
            } else {
               emit( "  <xwhat>\n" );
               emit( "    <text>%'lu bytes in %'u blocks"
                     " are %s in loss record %'u of %'u</text>\n",
                     lr->szB, lr->num_blocks,
                     str_leak_lossmode(lr->key.state),
                     n_this_record, n_total_records );
               emit( "    <leakedbytes>%ld</leakedbytes>\n", lr->szB);
               emit( "    <leakedblocks>%d</leakedblocks>\n", lr->num_blocks);
               emit( "  </xwhat>\n" );
            }
            VG_(pp_ExeContext)(lr->key.allocated_at);
         } else { /* ! if (xml) */
            if (lr->indirect_szB > 0) {
               emit(
                  "%'lu (%'lu direct, %'lu indirect) bytes in %'u blocks"
                  " are %s in loss record %'u of %'u\n",
                  lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
                  lr->num_blocks, str_leak_lossmode(lr->key.state),
                  n_this_record, n_total_records
               );
            } else {
               emit(
                  "%'lu bytes in %'u blocks are %s in loss record %'u of %'u\n",
                  lr->szB, lr->num_blocks, str_leak_lossmode(lr->key.state),
                  n_this_record, n_total_records
               );
            }
            VG_(pp_ExeContext)(lr->key.allocated_at);
         } /* if (xml) */
         break;
      }

      default:
         VG_(printf)("Error:\n  unknown Memcheck error code %d\n",
                     VG_(get_error_kind)(err));
         VG_(tool_panic)("unknown error code in mc_pp_Error");
   }

   if (MC_(clo_summary_file)) {
      /* Each time we report a warning, we replace the contents of the summary
       * file with one line indicating the number of reported warnings.
       * This way, at the end of memcheck execution we will have a file with
       * one line saying
       *   Memcheck: XX warnings reported
       * If there were no warnings, the file will not be changed.
       * If memcheck crashes, the file will still contain the last summary.
       * */
      static int n_warnings = 0;
      char buf[100];
      SysRes sres = VG_(open)(MC_(clo_summary_file),
                              VKI_O_WRONLY|VKI_O_CREAT|VKI_O_TRUNC,
                              VKI_S_IRUSR|VKI_S_IWUSR);
      if (sr_isError(sres)) {
         VG_(tool_panic)("can not open the summary file");
      }
      n_warnings++;
      VG_(snprintf)(buf, sizeof(buf), "Memcheck: %d warning(s) reported\n",
                    n_warnings);
      VG_(write)(sr_Res(sres), buf, VG_(strlen)(buf));
      VG_(close)(sr_Res(sres));
   }
}

/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* This many bytes below %ESP are considered addressable if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024

/* Is this address within some small distance below %ESP?  Used only
   for the --workaround-gcc296-bugs kludge. */
static Bool is_just_below_ESP( Addr esp, Addr aa )
{
   esp -= VG_STACK_REDZONE_SZB;
   if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
      return True;
   else
      return False;
}

/* --- Called from generated and non-generated code --- */

void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite )
{
   MC_Error extra;
   Bool     just_below_esp;

   if (MC_(in_ignored_range)(a))
      return;

#  if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
   /* AIX zero-page handling.  On AIX, reads from page zero are,
      bizarrely enough, legitimate.  Writes to page zero aren't,
      though.  Since memcheck can't distinguish reads from writes, the
      best we can do is to 'act normal' and mark the A bits in the
      normal way as noaccess, but then hide any reads from that page
      that get reported here. */
   if ((!isWrite) && a >= 0 && a < 4096 && a+szB <= 4096)
      return;

   /* Appalling AIX hack.  It suppresses reads done by glink
      fragments.  Getting rid of this would require figuring out
      somehow where the referenced data areas are (and their
      sizes). */
   if ((!isWrite) && szB == sizeof(Word)) {
      UInt i1, i2;
      UInt* pc = (UInt*)VG_(get_IP)(tid);
      if (sizeof(Word) == 4) {
         i1 = 0x800c0000; /* lwz r0,0(r12) */
         i2 = 0x804c0004; /* lwz r2,4(r12) */
      } else {
         i1 = 0xe80c0000; /* ld  r0,0(r12) */
         i2 = 0xe84c0008; /* ld  r2,8(r12) */
      }
      if (pc[0] == i1 && pc[1] == i2) return;
      if (pc[0] == i2 && pc[-1] == i1) return;
   }
#  endif

   just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   extra.Err.Addr.isWrite   = isWrite;
   extra.Err.Addr.szB       = szB;
   extra.Err.Addr.maybe_gcc = just_below_esp;
   extra.Err.Addr.ai.tag    = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
}

void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Value.szB       = szB;
   extra.Err.Value.otag      = otag;
   extra.Err.Value.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
}

void MC_(record_cond_error) ( ThreadId tid, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Cond.otag      = otag;
   extra.Err.Cond.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
}

/* --- Called from non-generated code --- */

/* This is for memory errors in signal-related memory. */
void MC_(record_core_mem_error) ( ThreadId tid, Char* msg )
{
   VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
}

void MC_(record_regparam_error) ( ThreadId tid, Char* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.RegParam.otag      = otag;
   extra.Err.RegParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
}

void MC_(record_memparam_error) ( ThreadId tid, Addr a,
                                  Bool isAddrErr, Char* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (!isAddrErr)
      tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag != 0) {
      tl_assert( MC_(clo_mc_level) == 3 );
      tl_assert( !isAddrErr );
   }
   extra.Err.MemParam.isAddrErr = isAddrErr;
   extra.Err.MemParam.ai.tag    = Addr_Undescribed;
   extra.Err.MemParam.otag      = otag;
   extra.Err.MemParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
}

void MC_(record_jump_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Jump.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
}

void MC_(record_free_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Free.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
}

void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
{
   MC_Error extra;
   AddrInfo* ai = &extra.Err.FreeMismatch.ai;
   tl_assert(VG_INVALID_THREADID != tid);
   ai->tag = Addr_Block;
   ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
   ai->Addr.Block.block_desc = "block";
   ai->Addr.Block.block_szB  = mc->szB;
   ai->Addr.Block.rwoffset   = 0;
   ai->Addr.Block.lastchange = mc->where;
   VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
                            &extra );
}

void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
}

void MC_(record_overlap_error) ( ThreadId tid, Char* function,
                                 Addr src, Addr dst, SizeT szB )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Overlap.src = src;
   extra.Err.Overlap.dst = dst;
   extra.Err.Overlap.szB = szB;
   VG_(maybe_record_error)(
      tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
}

Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
                              UInt n_total_records, LossRecord* lr,
                              Bool print_record, Bool count_error )
{
   MC_Error extra;
   extra.Err.Leak.n_this_record   = n_this_record;
   extra.Err.Leak.n_total_records = n_total_records;
   extra.Err.Leak.lr              = lr;
   return
   VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
                       lr->key.allocated_at, print_record,
                       /*allow_GDB_attach*/False, count_error );
}

void MC_(record_user_error) ( ThreadId tid, Addr a,
                              Bool isAddrErr, UInt otag )
{
   MC_Error extra;
   if (otag != 0) {
      tl_assert(!isAddrErr);
      tl_assert( MC_(clo_mc_level) == 3 );
   }
   if (!isAddrErr) {
      tl_assert( MC_(clo_mc_level) >= 2 );
   }
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.User.isAddrErr = isAddrErr;
   extra.Err.User.ai.tag    = Addr_Undescribed;
   extra.Err.User.otag      = otag;
   extra.Err.User.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
}

/*------------------------------------------------------------*/
/*--- Other error operations                               ---*/
/*------------------------------------------------------------*/

/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different.  */
Bool MC_(eq_Error) ( VgRes res, Error* e1, Error* e2 )
{
   MC_Error* extra1 = VG_(get_error_extra)(e1);
   MC_Error* extra2 = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case Err_CoreMem: {
         Char *e1s, *e2s;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s)                   return True;
         if (VG_STREQ(e1s, e2s))           return True;
         return False;
      }

      case Err_RegParam:
         return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes.  Currently, the 2nd of those to be
      // detected won't be reported.  That is (nearly?) always the memory
      // error, which is good.
      case Err_MemParam:
         if (!VG_STREQ(VG_(get_error_string)(e1),
                       VG_(get_error_string)(e2))) return False;
         // fall through
      case Err_User:
         return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
                ? True : False );

      case Err_Free:
      case Err_FreeMismatch:
      case Err_Jump:
      case Err_IllegalMempool:
      case Err_Overlap:
      case Err_Cond:
         return True;

      case Err_Addr:
         return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
                ? True : False );

      case Err_Value:
         return ( extra1->Err.Value.szB == extra2->Err.Value.szB
                ? True : False );

      case Err_Leak:
         VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      default:
         VG_(printf)("Error:\n  unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}

/* Functions used when searching MC_Chunk lists */
static
Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
{
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 MC_MALLOC_REDZONE_SZB );
}
static
Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
{
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 rzB );
}

// Forward declarations
static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
static Bool mempool_block_maybe_describe( Addr a, AddrInfo* ai );


/* Describe an address as best you can, for error messages,
   putting the result in ai. */
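/* The classification below is attempted in this order: user-named
   client blocks, mempool chunks, recently freed heap blocks, live
   heap blocks, debug-info variable descriptions, data symbols,
   thread stacks, and finally the containing object section
   (PLT/GOT/etc).  If everything fails, the address is left as
   Addr_Unknown. */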
static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
{
   MC_Chunk*  mc;
   ThreadId   tid;
   Addr       stack_min, stack_max;
   VgSectKind sect;

   tl_assert(Addr_Undescribed == ai->tag);

   /* -- Perhaps it's a user-named block? -- */
   if (client_block_maybe_describe( a, ai )) {
      return;
   }
   /* -- Perhaps it's in a mempool block? -- */
   if (mempool_block_maybe_describe( a, ai )) {
      return;
   }
   /* -- Search for a recently freed block which might bracket it. -- */
   mc = MC_(get_freed_list_head)();
   while (mc) {
      if (addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Freed;
         ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB  = mc->szB;
         ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
         ai->Addr.Block.lastchange = mc->where;
         return;
      }
      mc = mc->next;
   }
   /* -- Search for a currently malloc'd block which might bracket it. -- */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Mallocd;
         ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB  = mc->szB;
         ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
         ai->Addr.Block.lastchange = mc->where;
         return;
      }
   }
   /* -- Perhaps the variable type/location data describes it? -- */
   ai->Addr.Variable.descr1
      = VG_(newXA)( VG_(malloc), "mc.da.descr1",
                    VG_(free), sizeof(HChar) );
   ai->Addr.Variable.descr2
      = VG_(newXA)( VG_(malloc), "mc.da.descr2",
                    VG_(free), sizeof(HChar) );

   (void) VG_(get_data_description)( ai->Addr.Variable.descr1,
                                     ai->Addr.Variable.descr2, a );
   /* If there's nothing in descr1/2, free them.  Why is it safe to do
      VG_(indexXA) at zero here?  Because VG_(get_data_description)
      guarantees to zero terminate descr1/2 regardless of the outcome
      of the call.  So there's always at least one element in each XA
      after the call.
   */
   if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr1, 0 ))) {
      VG_(deleteXA)( ai->Addr.Variable.descr1 );
      ai->Addr.Variable.descr1 = NULL;
   }
   if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr2, 0 ))) {
      VG_(deleteXA)( ai->Addr.Variable.descr2 );
      ai->Addr.Variable.descr2 = NULL;
   }
   /* Assume (assert) that VG_(get_data_description) fills in descr1
      before it fills in descr2 */
   if (ai->Addr.Variable.descr1 == NULL)
      tl_assert(ai->Addr.Variable.descr2 == NULL);
   /* So did we get lucky? */
   if (ai->Addr.Variable.descr1 != NULL) {
      ai->tag = Addr_Variable;
      return;
   }
   /* -- Have a look at the low level data symbols - perhaps it's in
      there. -- */
   VG_(memset)( &ai->Addr.DataSym.name,
                0, sizeof(ai->Addr.DataSym.name));
   if (VG_(get_datasym_and_offset)(
             a, &ai->Addr.DataSym.name[0],
             sizeof(ai->Addr.DataSym.name)-1,
             &ai->Addr.DataSym.offset )) {
      ai->tag = Addr_DataSym;
      tl_assert( ai->Addr.DataSym.name
                    [ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
      return;
   }
   /* -- Perhaps it's on a thread's stack? -- */
   VG_(thread_stack_reset_iter)(&tid);
   while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
      if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
         ai->tag            = Addr_Stack;
         ai->Addr.Stack.tid = tid;
         return;
      }
   }
   /* -- last ditch attempt at classification -- */
   tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
   VG_(memset)( &ai->Addr.SectKind.objname,
                0, sizeof(ai->Addr.SectKind.objname));
   VG_(strcpy)( ai->Addr.SectKind.objname, "???" );
   sect = VG_(DebugInfo_sect_kind)( &ai->Addr.SectKind.objname[0],
                                    sizeof(ai->Addr.SectKind.objname)-1, a);
   if (sect != Vg_SectUnknown) {
      ai->tag = Addr_SectKind;
      ai->Addr.SectKind.kind = sect;
      tl_assert( ai->Addr.SectKind.objname
                    [ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
      return;
   }
   /* -- Clueless ... -- */
   ai->tag = Addr_Unknown;
   return;
}

/* Fill in *origin_ec as specified by otag, or NULL it out if otag
   does not refer to a known origin. */
static void update_origin ( /*OUT*/ExeContext** origin_ec,
                            UInt otag )
{
   UInt ecu = otag & ~3;
   *origin_ec = NULL;
   if (VG_(is_plausible_ECU)(ecu)) {
      *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
   }
}

/* Updates the copy with address info if necessary (but not for all errors). */
UInt MC_(update_Error_extra)( Error* err )
{
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
   // These ones don't have addresses associated with them, and so don't
   // need any updating.
   case Err_CoreMem:
   //case Err_Value:
   //case Err_Cond:
   case Err_Overlap:
   // For Err_Leaks the returned size does not matter -- they are always
   // shown with VG_(unique_error)() so their 'extra' is not copied.  But
   // we make it consistent with the others.
   case Err_Leak:
      return sizeof(MC_Error);

   // For value errors, get the ExeContext corresponding to the
   // origin tag.  Note that it is a kludge to assume that
   // a length-1 trace indicates a stack origin.  FIXME.
   case Err_Value:
      update_origin( &extra->Err.Value.origin_ec,
                     extra->Err.Value.otag );
      return sizeof(MC_Error);
   case Err_Cond:
      update_origin( &extra->Err.Cond.origin_ec,
                     extra->Err.Cond.otag );
      return sizeof(MC_Error);
   case Err_RegParam:
      update_origin( &extra->Err.RegParam.origin_ec,
                     extra->Err.RegParam.otag );
      return sizeof(MC_Error);

   // These ones always involve a memory address.
   case Err_Addr:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Addr.ai );
      return sizeof(MC_Error);
   case Err_MemParam:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.MemParam.ai );
      update_origin( &extra->Err.MemParam.origin_ec,
                     extra->Err.MemParam.otag );
      return sizeof(MC_Error);
   case Err_Jump:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Jump.ai );
      return sizeof(MC_Error);
   case Err_User:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.User.ai );
      update_origin( &extra->Err.User.origin_ec,
                     extra->Err.User.otag );
      return sizeof(MC_Error);
   case Err_Free:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Free.ai );
      return sizeof(MC_Error);
   case Err_IllegalMempool:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.IllegalMempool.ai );
      return sizeof(MC_Error);

   // Err_FreeMismatches have already had their address described;  this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected.  However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case Err_FreeMismatch: {
      tl_assert(extra && Block_Mallocd ==
                extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
      (void)client_block_maybe_describe( VG_(get_error_address)(err),
                                        &extra->Err.FreeMismatch.ai );
      return sizeof(MC_Error);
   }

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}


static Bool client_block_maybe_describe( Addr a,
                                         /*OUT*/AddrInfo* ai )
{
   UWord      i;
   CGenBlock* cgbs = NULL;
   UWord      cgb_used = 0;

   MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
   if (cgbs == NULL)
      tl_assert(cgb_used == 0);

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_UserG;
         ai->Addr.Block.block_desc = cgbs[i].desc;
         ai->Addr.Block.block_szB  = cgbs[i].size;
         ai->Addr.Block.rwoffset   = (Word)(a) - (Word)(cgbs[i].start);
         ai->Addr.Block.lastchange = cgbs[i].where;
         return True;
      }
   }
   return False;
}


static Bool mempool_block_maybe_describe( Addr a,
                                          /*OUT*/AddrInfo* ai )
{
   MC_Mempool* mp;
   tl_assert( MC_(mempool_list) );

   VG_(HT_ResetIter)( MC_(mempool_list) );
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      if (mp->chunks != NULL) {
         MC_Chunk* mc;
         VG_(HT_ResetIter)(mp->chunks);
         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
            if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc, a, mp->rzB)) {
               ai->tag = Addr_Block;
               ai->Addr.Block.block_kind = Block_MempoolChunk;
               ai->Addr.Block.block_desc = "block";
               ai->Addr.Block.block_szB  = mc->szB;
               ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
               ai->Addr.Block.lastchange = mc->where;
               return True;
            }
         }
      }
   }
   return False;
}


/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   }
   MC_SuppKind;

Bool MC_(is_recognised_suppression) ( Char* name, Supp* su )
{
   SuppKind skind;

   if      (VG_STREQ(name, "Param"))   skind = ParamSupp;
   else if (VG_STREQ(name, "User"))    skind = UserSupp;
   else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
   else if (VG_STREQ(name, "Addr1"))   skind = Addr1Supp;
   else if (VG_STREQ(name, "Addr2"))   skind = Addr2Supp;
   else if (VG_STREQ(name, "Addr4"))   skind = Addr4Supp;
   else if (VG_STREQ(name, "Addr8"))   skind = Addr8Supp;
   else if (VG_STREQ(name, "Addr16"))  skind = Addr16Supp;
   else if (VG_STREQ(name, "Jump"))    skind = JumpSupp;
   else if (VG_STREQ(name, "Free"))    skind = FreeSupp;
   else if (VG_STREQ(name, "Leak"))    skind = LeakSupp;
   else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
   else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
   else if (VG_STREQ(name, "Cond"))    skind = CondSupp;
   else if (VG_STREQ(name, "Value0"))  skind = CondSupp; /* backwards compat */
   else if (VG_STREQ(name, "Value1"))  skind = Value1Supp;
   else if (VG_STREQ(name, "Value2"))  skind = Value2Supp;
   else if (VG_STREQ(name, "Value4"))  skind = Value4Supp;
   else if (VG_STREQ(name, "Value8"))  skind = Value8Supp;
   else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
   else
      return False;

   VG_(set_supp_kind)(su, skind);
   return True;
}

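/* Param suppressions carry one extra line in the suppressions file:
   the syscall parameter name, which must match the error string of
   the Param error being suppressed (see error_matches_suppression
   below).  A typical entry looks roughly like this (the suppression
   name and stack frames here are illustrative only):

      {
         my-write-suppression
         Memcheck:Param
         write(buf)
         fun:write
         fun:main
      }
*/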
Bool MC_(read_extra_suppression_info) ( Int fd, Char** bufpp,
                                        SizeT* nBufp, Supp *su )
{
   Bool eof;

   if (VG_(get_supp_kind)(su) == ParamSupp) {
      eof = VG_(get_line) ( fd, bufpp, nBufp, NULL );
      if (eof) return False;
      VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
   }
   return True;
}

Bool MC_(error_matches_suppression) ( Error* err, Supp* su )
{
   Int       su_szB;
   MC_Error* extra = VG_(get_error_extra)(err);
   ErrorKind ekind = VG_(get_error_kind )(err);

   switch (VG_(get_supp_kind)(su)) {
      case ParamSupp:
         return ((ekind == Err_RegParam || ekind == Err_MemParam)
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

      case UserSupp:
         return (ekind == Err_User);

      case CoreMemSupp:
         return (ekind == Err_CoreMem
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

      case Value1Supp: su_szB = 1; goto value_case;
      case Value2Supp: su_szB = 2; goto value_case;
      case Value4Supp: su_szB = 4; goto value_case;
      case Value8Supp: su_szB = 8; goto value_case;
      case Value16Supp:su_szB =16; goto value_case;
      value_case:
         return (ekind == Err_Value && extra->Err.Value.szB == su_szB);

      case CondSupp:
         return (ekind == Err_Cond);

      case Addr1Supp: su_szB = 1; goto addr_case;
      case Addr2Supp: su_szB = 2; goto addr_case;
      case Addr4Supp: su_szB = 4; goto addr_case;
      case Addr8Supp: su_szB = 8; goto addr_case;
      case Addr16Supp:su_szB =16; goto addr_case;
      addr_case:
         return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);

      case JumpSupp:
         return (ekind == Err_Jump);

      case FreeSupp:
         return (ekind == Err_Free || ekind == Err_FreeMismatch);

      case OverlapSupp:
         return (ekind == Err_Overlap);

      case LeakSupp:
         return (ekind == Err_Leak);

      case MempoolSupp:
         return (ekind == Err_IllegalMempool);

      default:
         VG_(printf)("Error:\n"
                     "  unknown suppression type %d\n",
                     VG_(get_supp_kind)(su));
         VG_(tool_panic)("unknown suppression type in "
                         "MC_(error_matches_suppression)");
   }
}

Char* MC_(get_error_name) ( Error* err )
{
   switch (VG_(get_error_kind)(err)) {
   case Err_RegParam:       return "Param";
   case Err_MemParam:       return "Param";
   case Err_User:           return "User";
   case Err_FreeMismatch:   return "Free";
   case Err_IllegalMempool: return "Mempool";
   case Err_Free:           return "Free";
   case Err_Jump:           return "Jump";
   case Err_CoreMem:        return "CoreMem";
   case Err_Overlap:        return "Overlap";
   case Err_Leak:           return "Leak";
   case Err_Cond:           return "Cond";
   case Err_Addr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Addr.szB ) {
      case 1:               return "Addr1";
      case 2:               return "Addr2";
      case 4:               return "Addr4";
      case 8:               return "Addr8";
      case 16:              return "Addr16";
      default:              VG_(tool_panic)("unexpected size for Addr");
      }
   }
   case Err_Value: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Value.szB ) {
      case 1:               return "Value1";
      case 2:               return "Value2";
      case 4:               return "Value4";
      case 8:               return "Value8";
      case 16:              return "Value16";
      default:              VG_(tool_panic)("unexpected size for Value");
      }
   }
   default:                 VG_(tool_panic)("get_error_name: unexpected type");
   }
}

Bool MC_(get_extra_suppression_info) ( Error* err,
                                       /*OUT*/Char* buf, Int nBuf )
{
   ErrorKind ekind = VG_(get_error_kind )(err);
   tl_assert(buf);
   tl_assert(nBuf >= 16); // stay sane
   if (Err_RegParam == ekind || Err_MemParam == ekind) {
      Char* errstr = VG_(get_error_string)(err);
      tl_assert(errstr);
      VG_(snprintf)(buf, nBuf-1, "%s", errstr);
      return True;
   } else {
      return False;
   }
}


/*--------------------------------------------------------------------*/
/*--- end                                              mc_errors.c ---*/
/*--------------------------------------------------------------------*/