1
2 /*--------------------------------------------------------------------*/
3 /*--- Management, printing, etc, of errors and suppressions. ---*/
4 /*--- mc_errors.c ---*/
5 /*--------------------------------------------------------------------*/
6
7 /*
8 This file is part of MemCheck, a heavyweight Valgrind tool for
9 detecting memory errors.
10
11 Copyright (C) 2000-2011 Julian Seward
12 jseward@acm.org
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30 */
31
32 #include "pub_tool_basics.h"
33 #include "pub_tool_gdbserver.h"
34 #include "pub_tool_hashtable.h" // For mc_include.h
35 #include "pub_tool_libcbase.h"
36 #include "pub_tool_libcassert.h"
37 #include "pub_tool_libcprint.h"
38 #include "pub_tool_machine.h"
39 #include "pub_tool_mallocfree.h"
40 #include "pub_tool_options.h"
41 #include "pub_tool_replacemalloc.h"
42 #include "pub_tool_tooliface.h"
43 #include "pub_tool_threadstate.h"
44 #include "pub_tool_debuginfo.h" // VG_(get_dataname_and_offset)
45 #include "pub_tool_xarray.h"
46 #include "pub_tool_vki.h"
47 #include "pub_tool_libcfile.h"
48
49 #include "mc_include.h"
50
51
52 /*------------------------------------------------------------*/
53 /*--- Error types ---*/
54 /*------------------------------------------------------------*/
55
56 /* See comment in mc_include.h */
57 Bool MC_(any_value_errors) = False;
58
59
60 // Different kinds of blocks.
// Different kinds of blocks a faulting address can lie in (see the
// Block variant of AddrInfo below).
// NOTE(review): the first enumerator starts at 111, presumably so that
// uninitialised/stray tag values are easy to spot -- unconfirmed.
typedef enum {
   Block_Mallocd = 111,   // a live heap block
   Block_Freed,           // a heap block that has been freed
   Block_Mempool,         // a client-described memory pool
   Block_MempoolChunk,    // a chunk within a client mempool
   Block_UserG            // a user-defined (client request) block
} BlockKind;
68
69 /* ------------------ Addresses -------------------- */
70
71 /* The classification of a faulting address. */
/* The classification of a faulting address.  Tags select the active
   member of AddrInfo's union below. */
typedef
   enum {
      Addr_Undescribed, // as-yet unclassified
      Addr_Unknown,     // classification yielded nothing useful
      Addr_Block,       // in malloc'd/free'd block
      Addr_Stack,       // on a thread's stack
      Addr_DataSym,     // in a global data sym
      Addr_Variable,    // variable described by the debug info
      Addr_SectKind     // last-ditch classification attempt
   }
   AddrTag;
83
typedef
   struct _AddrInfo
   AddrInfo;

/* Description of a faulting address.  'tag' selects which member of
   the 'Addr' union is valid. */
struct _AddrInfo {
   AddrTag tag;
   union {
      // As-yet unclassified.
      struct { } Undescribed;

      // On a stack.
      struct {
         ThreadId tid;        // Which thread's stack?
      } Stack;

      // This covers heap blocks (normal and from mempools) and user-defined
      // blocks.
      struct {
         BlockKind   block_kind;
         Char*       block_desc;    // "block", "mempool" or user-defined
         SizeT       block_szB;     // size of the block, in bytes
         PtrdiffT    rwoffset;      // offset of the access from block start;
                                    // may be negative (just before the block)
         ExeContext* lastchange;    // where the block was allocated/freed
      } Block;

      // In a global .data symbol.  This holds the first 127 chars of
      // the variable's name (zero terminated), plus a (memory) offset.
      struct {
         Char     name[128];
         PtrdiffT offset;
      } DataSym;

      // Is described by Dwarf debug info.  XArray*s of HChar.
      struct {
         XArray* /* of HChar */ descr1;
         XArray* /* of HChar */ descr2;
      } Variable;

      // Could only narrow it down to be the PLT/GOT/etc of a given
      // object.  Better than nothing, perhaps.
      struct {
         Char       objname[128];
         VgSectKind kind;
      } SectKind;

      // Classification yielded nothing useful.
      struct { } Unknown;

   } Addr;
};
134
135 /* ------------------ Errors ----------------------- */
136
137 /* What kind of error it is. */
/* What kind of error it is.  This tag is stored in the core-side
   Error structure (via VG_(maybe_record_error)'s 'ekind' argument),
   not in MC_Error itself. */
typedef
   enum {
      Err_Value,            // use of an uninitialised value
      Err_Cond,             // conditional jump/move on uninitialised value
      Err_CoreMem,          // unaddressable bytes in a core (signal) op
      Err_Addr,             // unaddressable load/store
      Err_Jump,             // jump to unaddressable location
      Err_RegParam,         // syscall register param undefined
      Err_MemParam,         // syscall memory param undefined/unaddressable
      Err_User,             // client-request check failed
      Err_Free,             // invalid free
      Err_FreeMismatch,     // alloc/dealloc function mismatch
      Err_Overlap,          // overlapping src/dst in str/mem functions
      Err_Leak,             // memory leak
      Err_IllegalMempool,   // bad mempool address
   }
   MC_ErrorTag;
155
156
typedef struct _MC_Error MC_Error;

/* Memcheck-specific per-error payload, attached to the core's Error
   record as the 'extra' field.  The active union member is selected
   by the MC_ErrorTag stored core-side. */
struct _MC_Error {
   // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
   //MC_ErrorTag tag;

   union {
      // Use of an undefined value:
      // - as a pointer in a load or store
      // - as a jump target
      struct {
         SizeT szB;   // size of value in bytes
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Value;

      // Use of an undefined value in a conditional branch or move.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Cond;

      // Addressability error in core (signal-handling) operation.
      // It would be good to get rid of this error kind, merge it with
      // another one somehow.
      struct {
      } CoreMem;

      // Use of an unaddressable memory location in a load or store.
      struct {
         Bool     isWrite;    // read or write?
         SizeT    szB;        // not used for exec (jump) errors
         Bool     maybe_gcc;  // True if just below %esp -- could be a gcc bug
         AddrInfo ai;
      } Addr;

      // Jump to an unaddressable memory location.
      struct {
         AddrInfo ai;
      } Jump;

      // System call register input contains undefined bytes.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } RegParam;

      // System call memory input contains undefined/unaddressable bytes
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } MemParam;

      // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } User;

      // Program tried to free() something that's not a heap block (this
      // covers double-frees).
      struct {
         AddrInfo ai;
      } Free;

      // Program allocates heap block with one function
      // (malloc/new/new[]/custom) and deallocates with not the matching one.
      struct {
         AddrInfo ai;
      } FreeMismatch;

      // Call to strcpy, memcpy, etc, with overlapping blocks.
      struct {
         Addr  src;   // Source block
         Addr  dst;   // Destination block
         Int   szB;   // Size in bytes;  0 if unused.
      } Overlap;

      // A memory leak.
      struct {
         UInt        n_this_record;
         UInt        n_total_records;
         LossRecord* lr;
      } Leak;

      // A memory pool error.
      struct {
         AddrInfo ai;
      } IllegalMempool;

   } Err;
};
258
259
260 /*------------------------------------------------------------*/
261 /*--- Printing errors ---*/
262 /*------------------------------------------------------------*/
263
264 /* This is the "this error is due to be printed shortly; so have a
265 look at it any print any preamble you want" function. Which, in
266 Memcheck, we don't use. Hence a no-op.
267 */
void MC_(before_pp_Error) ( Error* err ) {
   /* Intentionally empty: Memcheck emits no preamble before an error
      is printed (see comment above). */
}
270
271 /* Do a printf-style operation on either the XML or normal output
272 channel, depending on the setting of VG_(clo_xml).
273 */
/* Core of 'emit'/'emiN': dispatch a formatted message to the XML
   channel when --xml=yes is in force, otherwise to the normal user
   message channel. */
static void emit_WRK ( HChar* format, va_list vargs )
{
   if (!VG_(clo_xml)) {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   } else {
      VG_(vprintf_xml)(format, vargs);
   }
}
/* printf-style wrapper around emit_WRK, with compile-time checking of
   the format string against its arguments (PRINTF_CHECK). */
static void emit ( HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
/* Same as 'emit' but with no compile-time format checking.  Used for
   formats the checker would reject -- callers in this file pass the
   non-standard %pS specifier (see e.g. the Addr_DataSym case below). */
static void emiN ( HChar* format, ... ) /* NO FORMAT CHECK */
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
297
298
/* Print a description of address 'a' using the classification in 'ai'.
   'maybe_gcc' only affects the Addr_Unknown case: if True, the address
   was just below the stack pointer, possibly a gcc 2.96 artefact.
   Output goes to the XML or normal channel; in XML mode each line is
   wrapped in <auxwhat> tags via xpre/xpost. */
static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
{
   HChar* xpre  = VG_(clo_xml) ? " <auxwhat>" : " ";
   HChar* xpost = VG_(clo_xml) ? "</auxwhat>" : "";

   switch (ai->tag) {
      case Addr_Unknown:
         if (maybe_gcc) {
            emit( "%sAddress 0x%llx is just below the stack ptr. "
                  "To suppress, use: --workaround-gcc296-bugs=yes%s\n",
                  xpre, (ULong)a, xpost );
         } else {
            emit( "%sAddress 0x%llx "
                  "is not stack'd, malloc'd or (recently) free'd%s\n",
                  xpre, (ULong)a, xpost );
         }
         break;

      case Addr_Stack:
         emit( "%sAddress 0x%llx is on thread %d's stack%s\n",
               xpre, (ULong)a, ai->Addr.Stack.tid, xpost );
         break;

      case Addr_Block: {
         SizeT    block_szB = ai->Addr.Block.block_szB;
         PtrdiffT rwoffset  = ai->Addr.Block.rwoffset;
         SizeT    delta;
         const    Char* relative;

         // Work out where the access lies relative to the block.
         // NOTE(review): 'rwoffset >= block_szB' compares signed with
         // unsigned; safe here because the negative case is handled
         // first, but an explicit cast would be clearer.
         if (rwoffset < 0) {
            delta    = (SizeT)(-rwoffset);
            relative = "before";
         } else if (rwoffset >= block_szB) {
            delta    = rwoffset - block_szB;
            relative = "after";
         } else {
            delta    = rwoffset;
            relative = "inside";
         }
         emit(
            "%sAddress 0x%lx is %'lu bytes %s a %s of size %'lu %s%s\n",
            xpre,
            a, delta, relative, ai->Addr.Block.block_desc,
            block_szB,
            ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
               : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
                                                        : "client-defined",
            xpost
         );
         VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
         break;
      }

      case Addr_DataSym:
         // %pS needs emiN (no format check); it prints an
         // XML-escaped string.
         emiN( "%sAddress 0x%llx is %llu bytes "
               "inside data symbol \"%pS\"%s\n",
               xpre,
               (ULong)a,
               (ULong)ai->Addr.DataSym.offset,
               ai->Addr.DataSym.name,
               xpost );
         break;

      case Addr_Variable:
         /* Note, no need for XML tags here, because descr1/2 will
            already have <auxwhat> or <xauxwhat>s on them, in XML
            mode. */
         if (ai->Addr.Variable.descr1)
            emit( "%s%s\n",
                  VG_(clo_xml) ? " " : " ",
                  (HChar*)VG_(indexXA)(ai->Addr.Variable.descr1, 0) );
         if (ai->Addr.Variable.descr2)
            emit( "%s%s\n",
                  VG_(clo_xml) ? " " : " ",
                  (HChar*)VG_(indexXA)(ai->Addr.Variable.descr2, 0) );
         break;

      case Addr_SectKind:
         emiN( "%sAddress 0x%llx is in the %pS segment of %pS%s\n",
               xpre,
               (ULong)a,
               VG_(pp_SectKind)(ai->Addr.SectKind.kind),
               ai->Addr.SectKind.objname,
               xpost );
         break;

      default:
         // Addr_Undescribed should have been resolved before printing.
         VG_(tool_panic)("mc_pp_AddrInfo");
   }
}
389
str_leak_lossmode(Reachedness lossmode)390 static const HChar* str_leak_lossmode ( Reachedness lossmode )
391 {
392 const HChar *loss = "?";
393 switch (lossmode) {
394 case Unreached: loss = "definitely lost"; break;
395 case IndirectLeak: loss = "indirectly lost"; break;
396 case Possible: loss = "possibly lost"; break;
397 case Reachable: loss = "still reachable"; break;
398 }
399 return loss;
400 }
401
xml_leak_kind(Reachedness lossmode)402 static const HChar* xml_leak_kind ( Reachedness lossmode )
403 {
404 const HChar *loss = "?";
405 switch (lossmode) {
406 case Unreached: loss = "Leak_DefinitelyLost"; break;
407 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
408 case Possible: loss = "Leak_PossiblyLost"; break;
409 case Reachable: loss = "Leak_StillReachable"; break;
410 }
411 return loss;
412 }
413
/* Print the origin of an uninitialised value: a one-line description
   of the allocation kind ('okind', one of the MC_OKIND_* values)
   followed by the stack trace 'ec' of where it was created. */
static void mc_pp_origin ( ExeContext* ec, UInt okind )
{
   HChar* src = NULL;
   tl_assert(ec);

   switch (okind) {
      case MC_OKIND_STACK:   src = " by a stack allocation"; break;
      case MC_OKIND_HEAP:    src = " by a heap allocation"; break;
      case MC_OKIND_USER:    src = " by a client request"; break;
      case MC_OKIND_UNKNOWN: src = ""; break;
   }
   tl_assert(src); /* guards against invalid 'okind' */

   if (VG_(clo_xml)) {
      emit( " <auxwhat>Uninitialised value was created%s</auxwhat>\n",
            src);
      VG_(pp_ExeContext)( ec );
   } else {
      emit( " Uninitialised value was created%s\n", src);
      VG_(pp_ExeContext)( ec );
   }
}
436
MC_(snprintf_delta)437 char * MC_(snprintf_delta) (char * buf, Int size,
438 SizeT current_val, SizeT old_val,
439 LeakCheckDeltaMode delta_mode)
440 {
441 if (delta_mode == LCD_Any)
442 buf[0] = '\0';
443 else if (current_val >= old_val)
444 VG_(snprintf) (buf, size, " (+%'lu)", current_val - old_val);
445 else
446 VG_(snprintf) (buf, size, " (-%'lu)", old_val - current_val);
447
448 return buf;
449 }
450
MC_(pp_Error)451 void MC_(pp_Error) ( Error* err )
452 {
453 const Bool xml = VG_(clo_xml); /* a shorthand */
454 MC_Error* extra = VG_(get_error_extra)(err);
455
456 switch (VG_(get_error_kind)(err)) {
457 case Err_CoreMem:
458 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
459 /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
460 signal handler frame. --njn */
461 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
462 // the following code is untested. Bad.
463 if (xml) {
464 emit( " <kind>CoreMemError</kind>\n" );
465 emiN( " <what>%pS contains unaddressable byte(s)</what>\n",
466 VG_(get_error_string)(err));
467 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
468 } else {
469 emit( "%s contains unaddressable byte(s)\n",
470 VG_(get_error_string)(err));
471 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
472 }
473 break;
474
475 case Err_Value:
476 MC_(any_value_errors) = True;
477 if (xml) {
478 emit( " <kind>UninitValue</kind>\n" );
479 emit( " <what>Use of uninitialised value of size %ld</what>\n",
480 extra->Err.Value.szB );
481 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
482 if (extra->Err.Value.origin_ec)
483 mc_pp_origin( extra->Err.Value.origin_ec,
484 extra->Err.Value.otag & 3 );
485 } else {
486 /* Could also show extra->Err.Cond.otag if debugging origin
487 tracking */
488 emit( "Use of uninitialised value of size %ld\n",
489 extra->Err.Value.szB );
490 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
491 if (extra->Err.Value.origin_ec)
492 mc_pp_origin( extra->Err.Value.origin_ec,
493 extra->Err.Value.otag & 3 );
494 }
495 break;
496
497 case Err_Cond:
498 MC_(any_value_errors) = True;
499 if (xml) {
500 emit( " <kind>UninitCondition</kind>\n" );
501 emit( " <what>Conditional jump or move depends"
502 " on uninitialised value(s)</what>\n" );
503 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
504 if (extra->Err.Cond.origin_ec)
505 mc_pp_origin( extra->Err.Cond.origin_ec,
506 extra->Err.Cond.otag & 3 );
507 } else {
508 /* Could also show extra->Err.Cond.otag if debugging origin
509 tracking */
510 emit( "Conditional jump or move depends"
511 " on uninitialised value(s)\n" );
512 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
513 if (extra->Err.Cond.origin_ec)
514 mc_pp_origin( extra->Err.Cond.origin_ec,
515 extra->Err.Cond.otag & 3 );
516 }
517 break;
518
519 case Err_RegParam:
520 MC_(any_value_errors) = True;
521 if (xml) {
522 emit( " <kind>SyscallParam</kind>\n" );
523 emiN( " <what>Syscall param %pS contains "
524 "uninitialised byte(s)</what>\n",
525 VG_(get_error_string)(err) );
526 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
527 if (extra->Err.RegParam.origin_ec)
528 mc_pp_origin( extra->Err.RegParam.origin_ec,
529 extra->Err.RegParam.otag & 3 );
530 } else {
531 emit( "Syscall param %s contains uninitialised byte(s)\n",
532 VG_(get_error_string)(err) );
533 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
534 if (extra->Err.RegParam.origin_ec)
535 mc_pp_origin( extra->Err.RegParam.origin_ec,
536 extra->Err.RegParam.otag & 3 );
537 }
538 break;
539
540 case Err_MemParam:
541 if (!extra->Err.MemParam.isAddrErr)
542 MC_(any_value_errors) = True;
543 if (xml) {
544 emit( " <kind>SyscallParam</kind>\n" );
545 emiN( " <what>Syscall param %pS points to %s byte(s)</what>\n",
546 VG_(get_error_string)(err),
547 extra->Err.MemParam.isAddrErr
548 ? "unaddressable" : "uninitialised" );
549 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
550 mc_pp_AddrInfo(VG_(get_error_address)(err),
551 &extra->Err.MemParam.ai, False);
552 if (extra->Err.MemParam.origin_ec
553 && !extra->Err.MemParam.isAddrErr)
554 mc_pp_origin( extra->Err.MemParam.origin_ec,
555 extra->Err.MemParam.otag & 3 );
556 } else {
557 emit( "Syscall param %s points to %s byte(s)\n",
558 VG_(get_error_string)(err),
559 extra->Err.MemParam.isAddrErr
560 ? "unaddressable" : "uninitialised" );
561 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
562 mc_pp_AddrInfo(VG_(get_error_address)(err),
563 &extra->Err.MemParam.ai, False);
564 if (extra->Err.MemParam.origin_ec
565 && !extra->Err.MemParam.isAddrErr)
566 mc_pp_origin( extra->Err.MemParam.origin_ec,
567 extra->Err.MemParam.otag & 3 );
568 }
569 break;
570
571 case Err_User:
572 if (!extra->Err.User.isAddrErr)
573 MC_(any_value_errors) = True;
574 if (xml) {
575 emit( " <kind>ClientCheck</kind>\n" );
576 emit( " <what>%s byte(s) found "
577 "during client check request</what>\n",
578 extra->Err.User.isAddrErr
579 ? "Unaddressable" : "Uninitialised" );
580 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
581 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
582 False);
583 if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
584 mc_pp_origin( extra->Err.User.origin_ec,
585 extra->Err.User.otag & 3 );
586 } else {
587 emit( "%s byte(s) found during client check request\n",
588 extra->Err.User.isAddrErr
589 ? "Unaddressable" : "Uninitialised" );
590 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
591 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
592 False);
593 if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
594 mc_pp_origin( extra->Err.User.origin_ec,
595 extra->Err.User.otag & 3 );
596 }
597 break;
598
599 case Err_Free:
600 if (xml) {
601 emit( " <kind>InvalidFree</kind>\n" );
602 emit( " <what>Invalid free() / delete / delete[]"
603 " / realloc()</what>\n" );
604 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
605 mc_pp_AddrInfo( VG_(get_error_address)(err),
606 &extra->Err.Free.ai, False );
607 } else {
608 emit( "Invalid free() / delete / delete[] / realloc()\n" );
609 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
610 mc_pp_AddrInfo( VG_(get_error_address)(err),
611 &extra->Err.Free.ai, False );
612 }
613 break;
614
615 case Err_FreeMismatch:
616 if (xml) {
617 emit( " <kind>MismatchedFree</kind>\n" );
618 emit( " <what>Mismatched free() / delete / delete []</what>\n" );
619 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
620 mc_pp_AddrInfo(VG_(get_error_address)(err),
621 &extra->Err.FreeMismatch.ai, False);
622 } else {
623 emit( "Mismatched free() / delete / delete []\n" );
624 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
625 mc_pp_AddrInfo(VG_(get_error_address)(err),
626 &extra->Err.FreeMismatch.ai, False);
627 }
628 break;
629
630 case Err_Addr:
631 if (xml) {
632 emit( " <kind>Invalid%s</kind>\n",
633 extra->Err.Addr.isWrite ? "Write" : "Read" );
634 emit( " <what>Invalid %s of size %ld</what>\n",
635 extra->Err.Addr.isWrite ? "write" : "read",
636 extra->Err.Addr.szB );
637 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
638 mc_pp_AddrInfo( VG_(get_error_address)(err),
639 &extra->Err.Addr.ai,
640 extra->Err.Addr.maybe_gcc );
641 } else {
642 emit( "Invalid %s of size %ld\n",
643 extra->Err.Addr.isWrite ? "write" : "read",
644 extra->Err.Addr.szB );
645 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
646
647 mc_pp_AddrInfo( VG_(get_error_address)(err),
648 &extra->Err.Addr.ai,
649 extra->Err.Addr.maybe_gcc );
650 }
651 break;
652
653 case Err_Jump:
654 if (xml) {
655 emit( " <kind>InvalidJump</kind>\n" );
656 emit( " <what>Jump to the invalid address stated "
657 "on the next line</what>\n" );
658 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
659 mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
660 False );
661 } else {
662 emit( "Jump to the invalid address stated on the next line\n" );
663 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
664 mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
665 False );
666 }
667 break;
668
669 case Err_Overlap:
670 if (xml) {
671 emit( " <kind>Overlap</kind>\n" );
672 if (extra->Err.Overlap.szB == 0) {
673 emiN( " <what>Source and destination overlap "
674 "in %pS(%#lx, %#lx)\n</what>\n",
675 VG_(get_error_string)(err),
676 extra->Err.Overlap.dst, extra->Err.Overlap.src );
677 } else {
678 emit( " <what>Source and destination overlap "
679 "in %s(%#lx, %#lx, %d)</what>\n",
680 VG_(get_error_string)(err),
681 extra->Err.Overlap.dst, extra->Err.Overlap.src,
682 extra->Err.Overlap.szB );
683 }
684 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
685 } else {
686 if (extra->Err.Overlap.szB == 0) {
687 emiN( "Source and destination overlap in %pS(%#lx, %#lx)\n",
688 VG_(get_error_string)(err),
689 extra->Err.Overlap.dst, extra->Err.Overlap.src );
690 } else {
691 emit( "Source and destination overlap in %s(%#lx, %#lx, %d)\n",
692 VG_(get_error_string)(err),
693 extra->Err.Overlap.dst, extra->Err.Overlap.src,
694 extra->Err.Overlap.szB );
695 }
696 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
697 }
698 break;
699
700 case Err_IllegalMempool:
701 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
702 // the following code is untested. Bad.
703 if (xml) {
704 emit( " <kind>InvalidMemPool</kind>\n" );
705 emit( " <what>Illegal memory pool address</what>\n" );
706 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
707 mc_pp_AddrInfo( VG_(get_error_address)(err),
708 &extra->Err.IllegalMempool.ai, False );
709 } else {
710 emit( "Illegal memory pool address\n" );
711 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
712 mc_pp_AddrInfo( VG_(get_error_address)(err),
713 &extra->Err.IllegalMempool.ai, False );
714 }
715 break;
716
717 case Err_Leak: {
718 UInt n_this_record = extra->Err.Leak.n_this_record;
719 UInt n_total_records = extra->Err.Leak.n_total_records;
720 LossRecord* lr = extra->Err.Leak.lr;
721 // char arrays to produce the indication of increase/decrease in case
722 // of delta_mode != LCD_Any
723 char d_bytes[20];
724 char d_direct_bytes[20];
725 char d_indirect_bytes[20];
726 char d_num_blocks[20];
727
728 MC_(snprintf_delta) (d_bytes, 20,
729 lr->szB + lr->indirect_szB,
730 lr->old_szB + lr->old_indirect_szB,
731 MC_(detect_memory_leaks_last_delta_mode));
732 MC_(snprintf_delta) (d_direct_bytes, 20,
733 lr->szB,
734 lr->old_szB,
735 MC_(detect_memory_leaks_last_delta_mode));
736 MC_(snprintf_delta) (d_indirect_bytes, 20,
737 lr->indirect_szB,
738 lr->old_indirect_szB,
739 MC_(detect_memory_leaks_last_delta_mode));
740 MC_(snprintf_delta) (d_num_blocks, 20,
741 (SizeT) lr->num_blocks,
742 (SizeT) lr->old_num_blocks,
743 MC_(detect_memory_leaks_last_delta_mode));
744
745 if (xml) {
746 emit(" <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
747 if (lr->indirect_szB > 0) {
748 emit( " <xwhat>\n" );
749 emit( " <text>%'lu%s (%'lu%s direct, %'lu%s indirect) bytes "
750 "in %'u%s blocks"
751 " are %s in loss record %'u of %'u</text>\n",
752 lr->szB + lr->indirect_szB, d_bytes,
753 lr->szB, d_direct_bytes,
754 lr->indirect_szB, d_indirect_bytes,
755 lr->num_blocks, d_num_blocks,
756 str_leak_lossmode(lr->key.state),
757 n_this_record, n_total_records );
758 // Nb: don't put commas in these XML numbers
759 emit( " <leakedbytes>%lu</leakedbytes>\n",
760 lr->szB + lr->indirect_szB );
761 emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
762 emit( " </xwhat>\n" );
763 } else {
764 emit( " <xwhat>\n" );
765 emit( " <text>%'lu%s bytes in %'u%s blocks"
766 " are %s in loss record %'u of %'u</text>\n",
767 lr->szB, d_direct_bytes,
768 lr->num_blocks, d_num_blocks,
769 str_leak_lossmode(lr->key.state),
770 n_this_record, n_total_records );
771 emit( " <leakedbytes>%ld</leakedbytes>\n", lr->szB);
772 emit( " <leakedblocks>%d</leakedblocks>\n", lr->num_blocks);
773 emit( " </xwhat>\n" );
774 }
775 VG_(pp_ExeContext)(lr->key.allocated_at);
776 } else { /* ! if (xml) */
777 if (lr->indirect_szB > 0) {
778 emit(
779 "%'lu%s (%'lu%s direct, %'lu%s indirect) bytes in %'u%s blocks"
780 " are %s in loss record %'u of %'u\n",
781 lr->szB + lr->indirect_szB, d_bytes,
782 lr->szB, d_direct_bytes,
783 lr->indirect_szB, d_indirect_bytes,
784 lr->num_blocks, d_num_blocks,
785 str_leak_lossmode(lr->key.state),
786 n_this_record, n_total_records
787 );
788 } else {
789 emit(
790 "%'lu%s bytes in %'u%s blocks are %s in loss record %'u of %'u\n",
791 lr->szB, d_direct_bytes,
792 lr->num_blocks, d_num_blocks,
793 str_leak_lossmode(lr->key.state),
794 n_this_record, n_total_records
795 );
796 }
797 VG_(pp_ExeContext)(lr->key.allocated_at);
798 } /* if (xml) */
799 break;
800 }
801
802 default:
803 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
804 VG_(get_error_kind)(err));
805 VG_(tool_panic)("unknown error code in mc_pp_Error)");
806 }
807
808 if (MC_(clo_summary_file)) {
809 /* Each time we report a warning, we replace the contents of the summary
810 * file with one line indicating the number of reported warnings.
811 * This way, at the end of memcheck execution we will have a file with
812 * one line saying
813 * Memcheck: XX warnings reported
814 * If there were no warnings, the file will not be changed.
815 * If memcheck crashes, the file will still contain the last summary.
816 * */
817 static int n_warnings = 0;
818 char buf[100];
819 SysRes sres = VG_(open)(MC_(clo_summary_file),
820 VKI_O_WRONLY|VKI_O_CREAT|VKI_O_TRUNC,
821 VKI_S_IRUSR|VKI_S_IWUSR);
822 if (sr_isError(sres)) {
823 VG_(tool_panic)("can not open the summary file");
824 }
825 n_warnings++;
826 VG_(snprintf)(buf, sizeof(buf), "Memcheck: %d warning(s) reported\n",
827 n_warnings);
828 VG_(write)(sr_Res(sres), buf, VG_(strlen)(buf));
829 VG_(close)(sr_Res(sres));
830 }
831 }
832
833 /*------------------------------------------------------------*/
834 /*--- Recording errors ---*/
835 /*------------------------------------------------------------*/
836
837 /* These many bytes below %ESP are considered addressible if we're
838 doing the --workaround-gcc296-bugs hack. */
839 #define VG_GCC296_BUG_STACK_SLOP 1024
840
841 /* Is this address within some small distance below %ESP? Used only
842 for the --workaround-gcc296-bugs kludge. */
is_just_below_ESP(Addr esp,Addr aa)843 static Bool is_just_below_ESP( Addr esp, Addr aa )
844 {
845 esp -= VG_STACK_REDZONE_SZB;
846 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
847 return True;
848 else
849 return False;
850 }
851
852 /* --- Called from generated and non-generated code --- */
853
/* Record an addressability error: a read ('isWrite' False) or write of
   'szB' bytes at unaddressable address 'a' by thread 'tid'.  Silently
   ignored if 'a' is in a user-ignored range, is being watched by a GDB
   watchpoint, or (with --workaround-gcc296-bugs) is just below SP. */
void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite )
{
   MC_Error extra;
   Bool     just_below_esp;

   if (MC_(in_ignored_range)(a))
      return;

   // Defer to gdbserver watchpoint handling if this access is watched.
   if (VG_(is_watched)( (isWrite ? write_watchpoint : read_watchpoint), a, szB))
      return;

   just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   extra.Err.Addr.isWrite   = isWrite;
   extra.Err.Addr.szB       = szB;
   extra.Err.Addr.maybe_gcc = just_below_esp;
   extra.Err.Addr.ai.tag    = Addr_Undescribed;  // classified later
   VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
}
879
/* Record a use of an uninitialised value of 'szB' bytes.  'otag' is
   the origin tag; non-zero tags require origin tracking (level 3). */
void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Value.szB       = szB;
   extra.Err.Value.otag      = otag;
   extra.Err.Value.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
}
891
/* Record a conditional jump/move that depends on uninitialised
   value(s).  'otag' as for MC_(record_value_error). */
void MC_(record_cond_error) ( ThreadId tid, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Cond.otag      = otag;
   extra.Err.Cond.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
}
902
903 /* --- Called from non-generated code --- */
904
905 /* This is for memory errors in signal-related memory. */
/* This is for memory errors in signal-related memory.  'msg' becomes
   the error string printed by the Err_CoreMem case of MC_(pp_Error). */
void MC_(record_core_mem_error) ( ThreadId tid, Char* msg )
{
   VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
}
910
/* Record a syscall register parameter containing uninitialised bytes.
   'msg' names the parameter; 'otag' as for MC_(record_value_error). */
void MC_(record_regparam_error) ( ThreadId tid, Char* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.RegParam.otag      = otag;
   extra.Err.RegParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
}
921
/* Record a syscall memory parameter pointing to unaddressable
   ('isAddrErr' True) or uninitialised bytes at 'a'.  Origin tags are
   only meaningful for definedness errors at mc-level 3. */
void MC_(record_memparam_error) ( ThreadId tid, Addr a,
                                  Bool isAddrErr, Char* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (!isAddrErr)
      tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag != 0) {
      tl_assert( MC_(clo_mc_level) == 3 );
      tl_assert( !isAddrErr );  // origins never apply to address errors
   }
   extra.Err.MemParam.isAddrErr = isAddrErr;
   extra.Err.MemParam.ai.tag    = Addr_Undescribed;  // classified later
   extra.Err.MemParam.otag      = otag;
   extra.Err.MemParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
}
939
MC_(record_jump_error)940 void MC_(record_jump_error) ( ThreadId tid, Addr a )
941 {
942 MC_Error extra;
943 tl_assert(VG_INVALID_THREADID != tid);
944 extra.Err.Jump.ai.tag = Addr_Undescribed;
945 VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
946 }
947
MC_(record_free_error)948 void MC_(record_free_error) ( ThreadId tid, Addr a )
949 {
950 MC_Error extra;
951 tl_assert(VG_INVALID_THREADID != tid);
952 extra.Err.Free.ai.tag = Addr_Undescribed;
953 VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
954 }
955
/* Record a mismatched alloc/dealloc pair (e.g. malloc + delete) on the
   still-live chunk 'mc'.  The AddrInfo is filled in directly from the
   chunk, so no later classification is needed. */
void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
{
   MC_Error extra;
   AddrInfo* ai = &extra.Err.FreeMismatch.ai;
   tl_assert(VG_INVALID_THREADID != tid);
   ai->tag = Addr_Block;
   ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
   ai->Addr.Block.block_desc = "block";
   ai->Addr.Block.block_szB  = mc->szB;
   ai->Addr.Block.rwoffset   = 0;
   ai->Addr.Block.lastchange = mc->where;
   VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
                            &extra );
}
970
MC_(record_illegal_mempool_error)971 void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
972 {
973 MC_Error extra;
974 tl_assert(VG_INVALID_THREADID != tid);
975 extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
976 VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
977 }
978
/* Record overlapping src/dst blocks in a call to 'function'
   (strcpy, memcpy, etc).  szB == 0 means the call had no size arg.
   NOTE(review): 'szB' (SizeT) is narrowed into the Int-typed
   Err.Overlap.szB field -- verify callers never pass > INT_MAX. */
void MC_(record_overlap_error) ( ThreadId tid, Char* function,
                                 Addr src, Addr dst, SizeT szB )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Overlap.src = src;
   extra.Err.Overlap.dst = dst;
   extra.Err.Overlap.szB = szB;
   VG_(maybe_record_error)(
      tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
}
990
/* Record (and possibly print/count) leak record 'lr', which is record
   'n_this_record' of 'n_total_records'.  Uses VG_(unique_error) keyed
   on the allocation context; returns its result (whether the error
   was issued).  GDB attach is never offered for leaks. */
Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
                              UInt n_total_records, LossRecord* lr,
                              Bool print_record, Bool count_error )
{
   MC_Error extra;
   extra.Err.Leak.n_this_record   = n_this_record;
   extra.Err.Leak.n_total_records = n_total_records;
   extra.Err.Leak.lr              = lr;
   return
   VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
                       lr->key.allocated_at, print_record,
                       /*allow_GDB_attach*/False, count_error );
}
1004
/* Record a failure of a client check request (e.g.
   CHECK_MEM_IS_ADDRESSABLE) at address 'a'.  'isAddrErr' selects
   addressability vs definedness; origin tags only apply to the
   latter and require mc-level 3. */
void MC_(record_user_error) ( ThreadId tid, Addr a,
                              Bool isAddrErr, UInt otag )
{
   MC_Error extra;
   if (otag != 0) {
      tl_assert(!isAddrErr);  // origins never apply to address errors
      tl_assert( MC_(clo_mc_level) == 3 );
   }
   if (!isAddrErr) {
      tl_assert( MC_(clo_mc_level) >= 2 );
   }
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.User.isAddrErr = isAddrErr;
   extra.Err.User.ai.tag    = Addr_Undescribed;  // classified later
   extra.Err.User.otag      = otag;
   extra.Err.User.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
}
1023
1024 /*------------------------------------------------------------*/
1025 /*--- Other error operations ---*/
1026 /*------------------------------------------------------------*/
1027
1028 /* Compare error contexts, to detect duplicates. Note that if they
1029 are otherwise the same, the faulting addrs and associated rwoffsets
1030 are allowed to be different. */
Bool MC_(eq_Error) ( VgRes res, Error* e1, Error* e2 )
{
   /* Note: 'res' (backtrace-matching precision) is not consulted here;
      only the tool-specific extras are compared. */
   MC_Error* extra1 = VG_(get_error_extra)(e1);
   MC_Error* extra2 = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case Err_CoreMem: {
         Char *e1s, *e2s;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         /* Cheap pointer-identity check first; fall back to strcmp. */
         if (e1s == e2s) return True;
         if (VG_STREQ(e1s, e2s)) return True;
         return False;
      }

      /* Syscall-param errors are duplicates iff the param name matches. */
      case Err_RegParam:
         return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes.  Currently, the 2nd of those to be
      // detected won't be reported.  That is (nearly?) always the memory
      // error, which is good.
      case Err_MemParam:
         if (!VG_STREQ(VG_(get_error_string)(e1),
                       VG_(get_error_string)(e2))) return False;
         // fall through
      case Err_User:
         /* NOTE(review): for Err_MemParam this reads Err.User.isAddrErr,
            which presumably overlays Err.MemParam.isAddrErr inside the
            MC_Error union -- confirm the two structs share that layout. */
         return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
                ? True : False );

      /* No tool-specific state to compare for these kinds; the core's
         kind/stack-trace comparison has already matched. */
      case Err_Free:
      case Err_FreeMismatch:
      case Err_Jump:
      case Err_IllegalMempool:
      case Err_Overlap:
      case Err_Cond:
         return True;

      /* Same access size => duplicate; faulting addresses are allowed
         to differ (see the comment above this function). */
      case Err_Addr:
         return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
                ? True : False );

      case Err_Value:
         return ( extra1->Err.Value.szB == extra2->Err.Value.szB
                ? True : False );

      case Err_Leak:
         VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      default:
         VG_(printf)("Error:\n unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}
1092
1093 /* Functions used when searching MC_Chunk lists */
1094 static
addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk * mc,Addr a)1095 Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
1096 {
1097 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
1098 MC_MALLOC_REDZONE_SZB );
1099 }
1100 static
addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk * mc,Addr a,SizeT rzB)1101 Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
1102 {
1103 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
1104 rzB );
1105 }
1106
1107 // Forward declarations
1108 static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
1109 static Bool mempool_block_maybe_describe( Addr a, AddrInfo* ai );
1110
1111
1112 /* Describe an address as best you can, for error messages,
1113 putting the result in ai. */
static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
{
   MC_Chunk* mc;
   ThreadId tid;
   Addr stack_min, stack_max;
   VgSectKind sect;

   /* Caller must hand us a fresh, undescribed AddrInfo. */
   tl_assert(Addr_Undescribed == ai->tag);

   /* The checks below are tried in decreasing order of usefulness to
      the user; the first one that matches fills in *ai and returns. */

   /* -- Perhaps it's a user-named block? -- */
   if (client_block_maybe_describe( a, ai )) {
      return;
   }
   /* -- Perhaps it's in mempool block? -- */
   if (mempool_block_maybe_describe( a, ai )) {
      return;
   }
   /* -- Search for a recently freed block which might bracket it. -- */
   mc = MC_(get_freed_block_bracketting)( a );
   if (mc) {
      ai->tag = Addr_Block;
      ai->Addr.Block.block_kind = Block_Freed;
      ai->Addr.Block.block_desc = "block";
      ai->Addr.Block.block_szB = mc->szB;
      /* Signed offset: may be negative if 'a' precedes the payload
         (i.e. lies in the leading redzone). */
      ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
      ai->Addr.Block.lastchange = mc->where;
      return;
   }
   /* -- Search for a currently malloc'd block which might bracket it. -- */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Mallocd;
         ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB = mc->szB;
         ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
         ai->Addr.Block.lastchange = mc->where;
         return;
      }
   }
   /* -- Perhaps the variable type/location data describes it? -- */
   /* Allocate two XArrays of chars to receive up to two lines of
      description.  Ownership: either both are handed to *ai (tag
      Addr_Variable) or both are freed here before falling through. */
   ai->Addr.Variable.descr1
      = VG_(newXA)( VG_(malloc), "mc.da.descr1",
                    VG_(free), sizeof(HChar) );
   ai->Addr.Variable.descr2
      = VG_(newXA)( VG_(malloc), "mc.da.descr2",
                    VG_(free), sizeof(HChar) );

   (void) VG_(get_data_description)( ai->Addr.Variable.descr1,
                                     ai->Addr.Variable.descr2, a );
   /* If there's nothing in descr1/2, free them.  Why is it safe to to
      VG_(indexXA) at zero here?  Because VG_(get_data_description)
      guarantees to zero terminate descr1/2 regardless of the outcome
      of the call.  So there's always at least one element in each XA
      after the call.
   */
   if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr1, 0 ))) {
      VG_(deleteXA)( ai->Addr.Variable.descr1 );
      ai->Addr.Variable.descr1 = NULL;
   }
   if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr2, 0 ))) {
      VG_(deleteXA)( ai->Addr.Variable.descr2 );
      ai->Addr.Variable.descr2 = NULL;
   }
   /* Assume (assert) that VG_(get_data_description) fills in descr1
      before it fills in descr2 */
   if (ai->Addr.Variable.descr1 == NULL)
      tl_assert(ai->Addr.Variable.descr2 == NULL);
   /* So did we get lucky? */
   if (ai->Addr.Variable.descr1 != NULL) {
      ai->tag = Addr_Variable;
      return;
   }
   /* -- Have a look at the low level data symbols - perhaps it's in
      there. -- */
   /* Pre-zero the name buffer so the result is always NUL-terminated. */
   VG_(memset)( &ai->Addr.DataSym.name,
                0, sizeof(ai->Addr.DataSym.name));
   if (VG_(get_datasym_and_offset)(
             a, &ai->Addr.DataSym.name[0],
             sizeof(ai->Addr.DataSym.name)-1,
             &ai->Addr.DataSym.offset )) {
      ai->tag = Addr_DataSym;
      tl_assert( ai->Addr.DataSym.name
                    [ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
      return;
   }
   /* -- Perhaps it's on a thread's stack? -- */
   VG_(thread_stack_reset_iter)(&tid);
   while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
      /* Include the redzone below stack_min as "on the stack". */
      if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
         ai->tag = Addr_Stack;
         ai->Addr.Stack.tid = tid;
         return;
      }
   }
   /* -- last ditch attempt at classification -- */
   /* Ask which object section (text/data/bss/...) the address is in;
      objname defaults to "???" if nothing better is found. */
   tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
   VG_(memset)( &ai->Addr.SectKind.objname,
                0, sizeof(ai->Addr.SectKind.objname));
   VG_(strcpy)( ai->Addr.SectKind.objname, "???" );
   sect = VG_(DebugInfo_sect_kind)( &ai->Addr.SectKind.objname[0],
                                    sizeof(ai->Addr.SectKind.objname)-1, a);
   if (sect != Vg_SectUnknown) {
      ai->tag = Addr_SectKind;
      ai->Addr.SectKind.kind = sect;
      tl_assert( ai->Addr.SectKind.objname
                    [ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
      return;
   }
   /* -- Clueless ... -- */
   ai->tag = Addr_Unknown;
   return;
}
1228
MC_(pp_describe_addr)1229 void MC_(pp_describe_addr) ( Addr a )
1230 {
1231 AddrInfo ai;
1232
1233 ai.tag = Addr_Undescribed;
1234 describe_addr (a, &ai);
1235 mc_pp_AddrInfo (a, &ai, /* maybe_gcc */ False);
1236 }
1237
1238 /* Fill in *origin_ec as specified by otag, or NULL it out if otag
1239 does not refer to a known origin. */
/* Fill in *origin_ec as specified by otag, or NULL it out if otag
   does not refer to a known origin.  The low two bits of otag are
   masked off to recover the ECU. */
static void update_origin ( /*OUT*/ExeContext** origin_ec,
                            UInt otag )
{
   UInt ecu = otag & ~3;
   if (VG_(is_plausible_ECU)(ecu))
      *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
   else
      *origin_ec = NULL;
}
1249
1250 /* Updates the copy with address info if necessary (but not for all errors). */
UInt MC_(update_Error_extra)( Error* err )
{
   /* Called by the core once it has decided to keep 'err'.  Completes
      the MC_Error: describes the faulting address and/or resolves the
      origin tag into an ExeContext, depending on the error kind.
      Returns the number of bytes of 'extra' -- always sizeof(MC_Error)
      here; presumably the core copies that many bytes (TODO confirm
      against the core's error-manager contract). */
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
   // These ones don't have addresses associated with them, and so don't
   // need any updating.
   case Err_CoreMem:
   //case Err_Value:
   //case Err_Cond:
   case Err_Overlap:
   // For Err_Leaks the returned size does not matter -- they are always
   // shown with VG_(unique_error)() so they 'extra' not copied.  But
   // we make it consistent with the others.
   case Err_Leak:
      return sizeof(MC_Error);

   // For value errors, get the ExeContext corresponding to the
   // origin tag.  Note that it is a kludge to assume that
   // a length-1 trace indicates a stack origin.  FIXME.
   case Err_Value:
      update_origin( &extra->Err.Value.origin_ec,
                     extra->Err.Value.otag );
      return sizeof(MC_Error);
   case Err_Cond:
      update_origin( &extra->Err.Cond.origin_ec,
                     extra->Err.Cond.otag );
      return sizeof(MC_Error);
   case Err_RegParam:
      update_origin( &extra->Err.RegParam.origin_ec,
                     extra->Err.RegParam.otag );
      return sizeof(MC_Error);

   // These ones always involve a memory address.
   case Err_Addr:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Addr.ai );
      return sizeof(MC_Error);
   case Err_MemParam:
      // Needs both: an address description and an origin resolution.
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.MemParam.ai );
      update_origin( &extra->Err.MemParam.origin_ec,
                     extra->Err.MemParam.otag );
      return sizeof(MC_Error);
   case Err_Jump:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Jump.ai );
      return sizeof(MC_Error);
   case Err_User:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.User.ai );
      update_origin( &extra->Err.User.origin_ec,
                     extra->Err.User.otag );
      return sizeof(MC_Error);
   case Err_Free:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Free.ai );
      return sizeof(MC_Error);
   case Err_IllegalMempool:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.IllegalMempool.ai );
      return sizeof(MC_Error);

   // Err_FreeMismatches have already had their address described;  this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected.  However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case Err_FreeMismatch: {
      tl_assert(extra && Block_Mallocd ==
                extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
      (void)client_block_maybe_describe( VG_(get_error_address)(err),
                                        &extra->Err.FreeMismatch.ai );
      return sizeof(MC_Error);
   }

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}
1329
1330
client_block_maybe_describe(Addr a,AddrInfo * ai)1331 static Bool client_block_maybe_describe( Addr a,
1332 /*OUT*/AddrInfo* ai )
1333 {
1334 UWord i;
1335 CGenBlock* cgbs = NULL;
1336 UWord cgb_used = 0;
1337
1338 MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
1339 if (cgbs == NULL)
1340 tl_assert(cgb_used == 0);
1341
1342 /* Perhaps it's a general block ? */
1343 for (i = 0; i < cgb_used; i++) {
1344 if (cgbs[i].start == 0 && cgbs[i].size == 0)
1345 continue;
1346 // Use zero as the redzone for client blocks.
1347 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
1348 ai->tag = Addr_Block;
1349 ai->Addr.Block.block_kind = Block_UserG;
1350 ai->Addr.Block.block_desc = cgbs[i].desc;
1351 ai->Addr.Block.block_szB = cgbs[i].size;
1352 ai->Addr.Block.rwoffset = (Word)(a) - (Word)(cgbs[i].start);
1353 ai->Addr.Block.lastchange = cgbs[i].where;
1354 return True;
1355 }
1356 }
1357 return False;
1358 }
1359
1360
mempool_block_maybe_describe(Addr a,AddrInfo * ai)1361 static Bool mempool_block_maybe_describe( Addr a,
1362 /*OUT*/AddrInfo* ai )
1363 {
1364 MC_Mempool* mp;
1365 tl_assert( MC_(mempool_list) );
1366
1367 VG_(HT_ResetIter)( MC_(mempool_list) );
1368 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
1369 if (mp->chunks != NULL) {
1370 MC_Chunk* mc;
1371 VG_(HT_ResetIter)(mp->chunks);
1372 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
1373 if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc, a, mp->rzB)) {
1374 ai->tag = Addr_Block;
1375 ai->Addr.Block.block_kind = Block_MempoolChunk;
1376 ai->Addr.Block.block_desc = "block";
1377 ai->Addr.Block.block_szB = mc->szB;
1378 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1379 ai->Addr.Block.lastchange = mc->where;
1380 return True;
1381 }
1382 }
1383 }
1384 }
1385 return False;
1386 }
1387
1388
1389 /*------------------------------------------------------------*/
1390 /*--- Suppressions ---*/
1391 /*------------------------------------------------------------*/
1392
/* The suppression kinds Memcheck's suppression parser understands.
   Each maps onto one or more error kinds; the exact mapping is in
   MC_(error_matches_suppression) below, and the name-to-kind parsing
   is in MC_(is_recognised_suppression). */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      // Size-independent aliases; see
      // https://bugs.kde.org/show_bug.cgi?id=256525
      UnaddrSupp,    // Matches Addr*.
      UninitSupp,    // Matches Value*, Param and Cond.

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   }
   MC_SuppKind;
1419
MC_(is_recognised_suppression)1420 Bool MC_(is_recognised_suppression) ( Char* name, Supp* su )
1421 {
1422 SuppKind skind;
1423
1424 if (VG_STREQ(name, "Param")) skind = ParamSupp;
1425 else if (VG_STREQ(name, "User")) skind = UserSupp;
1426 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
1427 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
1428 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
1429 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
1430 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
1431 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
1432 else if (VG_STREQ(name, "Jump")) skind = JumpSupp;
1433 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
1434 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
1435 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
1436 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
1437 else if (VG_STREQ(name, "Cond")) skind = CondSupp;
1438 else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */
1439 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1440 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1441 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1442 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1443 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1444 // https://bugs.kde.org/show_bug.cgi?id=256525
1445 else if (VG_STREQ(name, "Unaddressable")) skind = UnaddrSupp;
1446 else if (VG_STREQ(name, "Unaddr")) skind = UnaddrSupp;
1447 else if (VG_STREQ(name, "Uninitialised")) skind = UninitSupp;
1448 else if (VG_STREQ(name, "Uninitialized")) skind = UninitSupp;
1449 else if (VG_STREQ(name, "Uninit")) skind = UninitSupp;
1450 else
1451 return False;
1452
1453 VG_(set_supp_kind)(su, skind);
1454 return True;
1455 }
1456
MC_(read_extra_suppression_info)1457 Bool MC_(read_extra_suppression_info) ( Int fd, Char** bufpp,
1458 SizeT* nBufp, Supp *su )
1459 {
1460 Bool eof;
1461
1462 if (VG_(get_supp_kind)(su) == ParamSupp) {
1463 eof = VG_(get_line) ( fd, bufpp, nBufp, NULL );
1464 if (eof) return False;
1465 VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
1466 }
1467 return True;
1468 }
1469
/* Decide whether error 'err' is covered by suppression 'su', based
   purely on the kinds (and, for sized/named kinds, the size or name).
   The core has already matched the stack-trace part of 'su'. */
Bool MC_(error_matches_suppression) ( Error* err, Supp* su )
{
   Int su_szB;
   MC_Error* extra = VG_(get_error_extra)(err);
   ErrorKind ekind = VG_(get_error_kind )(err);

   switch (VG_(get_supp_kind)(su)) {
      /* Param suppressions also require the syscall-param name
         (stored as the supp string) to match the error's string. */
      case ParamSupp:
         return ((ekind == Err_RegParam || ekind == Err_MemParam)
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

      case UserSupp:
         return (ekind == Err_User);

      case CoreMemSupp:
         return (ekind == Err_CoreMem
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

      /* The five sized Value kinds funnel into one check via su_szB. */
      case Value1Supp: su_szB = 1; goto value_case;
      case Value2Supp: su_szB = 2; goto value_case;
      case Value4Supp: su_szB = 4; goto value_case;
      case Value8Supp: su_szB = 8; goto value_case;
      case Value16Supp:su_szB =16; goto value_case;
      value_case:
         return (ekind == Err_Value && extra->Err.Value.szB == su_szB);

      case CondSupp:
         return (ekind == Err_Cond);

      /* Likewise for the sized Addr kinds. */
      case Addr1Supp: su_szB = 1; goto addr_case;
      case Addr2Supp: su_szB = 2; goto addr_case;
      case Addr4Supp: su_szB = 4; goto addr_case;
      case Addr8Supp: su_szB = 8; goto addr_case;
      case Addr16Supp:su_szB =16; goto addr_case;
      addr_case:
         return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);

      /* Size-independent aliases; see
         https://bugs.kde.org/show_bug.cgi?id=256525 */
      case UnaddrSupp:
         /* MemParam errors can be either addressability or definedness
            complaints; isAddrErr picks the addressability ones. */
         return (ekind == Err_Addr ||
                 (ekind == Err_MemParam && extra->Err.MemParam.isAddrErr));

      case UninitSupp:
         return (ekind == Err_Cond || ekind == Err_Value ||
                 ekind == Err_RegParam ||
                 (ekind == Err_MemParam && !extra->Err.MemParam.isAddrErr));

      case JumpSupp:
         return (ekind == Err_Jump);

      case FreeSupp:
         /* 'Free' covers both invalid and mismatched frees. */
         return (ekind == Err_Free || ekind == Err_FreeMismatch);

      case OverlapSupp:
         return (ekind == Err_Overlap);

      case LeakSupp:
         return (ekind == Err_Leak);

      case MempoolSupp:
         return (ekind == Err_IllegalMempool);

      default:
         VG_(printf)("Error:\n"
                     "  unknown suppression type %d\n",
                     VG_(get_supp_kind)(su));
         VG_(tool_panic)("unknown suppression type in "
                         "MC_(error_matches_suppression)");
   }
}
1542
MC_(get_error_name)1543 Char* MC_(get_error_name) ( Error* err )
1544 {
1545 switch (VG_(get_error_kind)(err)) {
1546 case Err_RegParam: return "Param";
1547 case Err_MemParam: return "Param";
1548 case Err_User: return "User";
1549 case Err_FreeMismatch: return "Free";
1550 case Err_IllegalMempool: return "Mempool";
1551 case Err_Free: return "Free";
1552 case Err_Jump: return "Jump";
1553 case Err_CoreMem: return "CoreMem";
1554 case Err_Overlap: return "Overlap";
1555 case Err_Leak: return "Leak";
1556 case Err_Cond: return "Cond";
1557 case Err_Addr: {
1558 MC_Error* extra = VG_(get_error_extra)(err);
1559 switch ( extra->Err.Addr.szB ) {
1560 case 1: return "Addr1";
1561 case 2: return "Addr2";
1562 case 4: return "Addr4";
1563 case 8: return "Addr8";
1564 case 16: return "Addr16";
1565 default: VG_(tool_panic)("unexpected size for Addr");
1566 }
1567 }
1568 case Err_Value: {
1569 MC_Error* extra = VG_(get_error_extra)(err);
1570 switch ( extra->Err.Value.szB ) {
1571 case 1: return "Value1";
1572 case 2: return "Value2";
1573 case 4: return "Value4";
1574 case 8: return "Value8";
1575 case 16: return "Value16";
1576 default: VG_(tool_panic)("unexpected size for Value");
1577 }
1578 }
1579 default: VG_(tool_panic)("get_error_name: unexpected type");
1580 }
1581 }
1582
MC_(get_extra_suppression_info)1583 Bool MC_(get_extra_suppression_info) ( Error* err,
1584 /*OUT*/Char* buf, Int nBuf )
1585 {
1586 ErrorKind ekind = VG_(get_error_kind )(err);
1587 tl_assert(buf);
1588 tl_assert(nBuf >= 16); // stay sane
1589 if (Err_RegParam == ekind || Err_MemParam == ekind) {
1590 Char* errstr = VG_(get_error_string)(err);
1591 tl_assert(errstr);
1592 VG_(snprintf)(buf, nBuf-1, "%s", errstr);
1593 return True;
1594 } else {
1595 return False;
1596 }
1597 }
1598
1599
1600 /*--------------------------------------------------------------------*/
1601 /*--- end mc_errors.c ---*/
1602 /*--------------------------------------------------------------------*/
1603